{
"source": "JoeD1999/udi-example3-poly",
"score": 3
}
#### File: udi-example3-poly/nodes/count_child.py
```python
import udi_interface
import sys
LOGGER = udi_interface.LOGGER
Custom = udi_interface.Custom
'''
This is our Counter device node. All it does is update the count at the
poll interval.
'''
class CounterNode(udi_interface.Node):
id = 'child'
drivers = [
{'driver': 'ST', 'value': 1, 'uom': 2},
{'driver': 'GV0', 'value': 0, 'uom': 56},
{'driver': 'GV1', 'value': 0, 'uom': 56},
{'driver': 'GV2', 'value': 0, 'uom': 2},
{'driver': 'GV3', 'value': 1, 'uom': 7},
{'driver': 'GV4', 'value': 1, 'uom': 7},
]
def __init__(self, polyglot, parent, address, name):
super(CounterNode, self).__init__(polyglot, parent, address, name)
self.poly = polyglot
self.count = 0
self.Parameters = Custom(polyglot, 'customparams')
# subscribe to the events we want
polyglot.subscribe(polyglot.CUSTOMPARAMS, self.parameterHandler)
polyglot.subscribe(polyglot.POLL, self.poll)
'''
Read the user entered custom parameters. In this case, it is just
the 'multiplier' value that we want.
'''
def parameterHandler(self, params):
self.Parameters.load(params)
'''
This is where the real work happens. When we get a shortPoll, increment the
count, report the current count in GV0 and the current count multiplied by
the user defined value in GV1. Then display a notice on the dashboard.
'''
def poll(self, polltype):
if 'shortPoll' in polltype:
if self.Parameters['multiplier'] is not None:
mult = int(self.Parameters['multiplier'])
else:
mult = 1
self.count += 1
self.setDriver('GV0', self.count, True, True)
self.setDriver('GV1', (self.count * mult), True, True)
self.setDriver('GV2', 1, True, True)
self.setDriver('GV3', 1000, True, True)
self.setDriver('GV4', '2000', True, True)
# be fancy and display a notice on the polyglot dashboard
self.poly.Notices[self.name] = '{}: Current count is {}'.format(self.name, self.count)
```
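The poll handler above acts only on `shortPoll` events. Below is a minimal sketch of the subscribe/dispatch flow it relies on; `FakePoly` and `fire_poll` are illustrative stand-ins, not part of the `udi_interface` API.
```python
# Illustrative stand-in for the POLL event dispatch used by CounterNode;
# FakePoly and fire_poll are hypothetical names, not udi_interface APIs.
class FakePoly:
    def __init__(self):
        self.handlers = []

    def subscribe(self, handler):
        self.handlers.append(handler)

    def fire_poll(self, polltype):
        # every POLL subscriber receives the poll type string
        for handler in self.handlers:
            handler(polltype)


def poll(polltype):
    if 'shortPoll' in polltype:
        print('shortPoll: increment count, report GV0/GV1')
    else:
        print('longPoll: this node does nothing')


poly = FakePoly()
poly.subscribe(poll)
poly.fire_poll('shortPoll')  # handler acts
poly.fire_poll('longPoll')   # handler ignores it
```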
#### File: udi-example3-poly/nodes/pp_ctl.py
```python
import udi_interface
import sys
import time
from nodes import iaq
LOGGER = udi_interface.LOGGER
Custom = udi_interface.Custom
'''
Controller is interfacing with both Polyglot and the device. In this
case the device is just a count that has two values, the count and the count
multiplied by a user defined multiplier. These get updated at every
shortPoll interval.
'''
class Controller(udi_interface.Node):
id = 'ctl'
drivers = [
{'driver': 'ST', 'value': 1, 'uom': 2},
{'driver': 'GV0', 'value': 0, 'uom': 56},
{'driver': 'GV1', 'value': 0, 'uom': 2},
{'driver': 'GV2', 'value': 1, 'uom': 7},
{'driver': 'GV3', 'value': 1, 'uom': 7},
]
def __init__(self, polyglot, parent, address, name):
super(Controller, self).__init__(polyglot, parent, address, name)
self.poly = polyglot
self.count = 0
self.n_queue = []
self.Parameters = Custom(polyglot, 'customparams')
# subscribe to the events we want
polyglot.subscribe(polyglot.CUSTOMPARAMS, self.parameterHandler)
polyglot.subscribe(polyglot.STOP, self.stop)
polyglot.subscribe(polyglot.START, self.start, address)
polyglot.subscribe(polyglot.ADDNODEDONE, self.node_queue)
        # start processing events and add our controller node
polyglot.ready()
self.poly.addNode(self)
#Create the IAQ node.
        LOGGER.info('Creating IAQ node')
        #node = iaq.IAQ_Node(self.poly, 'iaq', 'iaq', 'Indoor Air Quality')
        #self.poly.addNode(node)
        self.poly.addNode(iaq.IAQ_Node(self.poly, self.address, 'iaq', 'Indoor Air Quality'))
self.wait_for_node_done()
'''
    node_queue() and wait_for_node_done() create a simple way to wait
    for a node to be created. The addNode() API call is asynchronous and
will return before the node is fully created. Using this, we can wait
until it is fully created before we try to use it.
'''
def node_queue(self, data):
self.n_queue.append(data['address'])
def wait_for_node_done(self):
while len(self.n_queue) == 0:
time.sleep(0.1)
self.n_queue.pop()
'''
Read the user entered custom parameters. Here is where the user will
configure the number of child nodes that they want created.
'''
def parameterHandler(self, params):
self.Parameters.load(params)
validChildren = False
'''
if self.Parameters['nodes'] is not None:
if int(self.Parameters['nodes']) > 0:
validChildren = True
else:
LOGGER.error('Invalid number of nodes {}'.format(self.Parameters['nodes']))
else:
LOGGER.error('Missing number of node parameter')
if validChildren:
self.createChildren(int(self.Parameters['nodes']))
self.poly.Notices.clear()
else:
self.poly.Notices['nodes'] = 'Please configure the number of child nodes to create.'
'''
'''
This is called when the node is added to the interface module. It is
run in a separate thread. This is only run once so you should do any
setup that needs to be run initially. For example, if you need to
start a thread to monitor device status, do it here.
Here we load the custom parameter configuration document and push
the profiles to the ISY.
'''
def start(self):
self.poly.setCustomParamsDoc()
self.poly.updateProfile()
'''
Create the children nodes. Since this will be called anytime the
user changes the number of nodes and the new number may be less
than the previous number, we need to make sure we create the right
number of nodes. Because this is just a simple example, we'll first
delete any existing nodes then create the number requested.
'''
'''
    def createChildren(self, how_many):
        # delete any existing nodes
        nodes = self.poly.getNodes()
        for node in nodes:
            if node != 'controller': # but not the controller node
                self.poly.delNode(node)

        LOGGER.info('Creating {} children counters'.format(how_many))
        for i in range(how_many):
            address = 'iaq_{}'.format(i)
            title = 'Indoor Air Quality {}'.format(i)
            try:
                node = iaq.IAQ_Node(self.poly, self.address, address, title)
                self.poly.addNode(node)
                self.wait_for_node_done()
            except Exception as e:
                LOGGER.error('Failed to create {}: {}'.format(title, e))
'''
'''
Change all the child node active status drivers to false
'''
def stop(self):
nodes = self.poly.getNodes()
for node in nodes:
if node != 'controller': # but not the controller node
nodes[node].setDriver('ST', 0, True, True)
self.poly.stop()
'''
Just to show how commands are implemented. The commands here need to
match what is in the nodedef profile file.
'''
def noop(self):
LOGGER.info('Discover not implemented')
commands = {'DISCOVER': noop}
'''
Set the Damper, Exhaust and Makeup Air CFM
'''
def poll(self, polltype):
if 'shortPoll' in polltype:
'''
if self.Parameters['multiplier'] is not None:
mult = int(self.Parameters['multiplier'])
else:
mult = 1
self.count += 1
self.setDriver('GV1', 1, True, True)
self.setDriver('GV2', 1000, True, True)
self.setDriver('GV3', '2000', True, True)
# be fancy and display a notice on the polyglot dashboard
self.poly.Notices[self.name] = '{}: Current controller count is {}'.format(self.name, self.count)
'''
```
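`wait_for_node_done()` above busy-waits on a list, sleeping 100 ms per check. The sketch below expresses the same handshake with `threading.Event`, assuming a single outstanding `addNode()` at a time; the class and method names are illustrative, not `udi_interface` APIs.
```python
import threading

class NodeWaiter:
    """Hypothetical variant of the node_queue/wait_for_node_done pair."""
    def __init__(self):
        self._added = threading.Event()

    def node_queue(self, data):
        # would be subscribed to polyglot.ADDNODEDONE
        self._added.set()

    def wait_for_node_done(self, timeout=10.0):
        if not self._added.wait(timeout):
            raise TimeoutError('node add was never confirmed')
        self._added.clear()

waiter = NodeWaiter()
waiter.node_queue({'address': 'iaq'})  # simulate the ADDNODEDONE callback
waiter.wait_for_node_done()            # returns immediately, no polling loop
```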
{
"source": "joedanz/flask-bpm",
"score": 3
}
#### File: flask-bpm/app/__init__.py
```python
import json, urllib, urllib2, sqlite3, datetime
from flask import Flask, render_template, g
app = Flask(__name__)
# configuration details
num_songs = 7
lastfm_apikey = '<KEY>'
echonest_apikey = '<KEY>'
@app.before_request
def db_connect():
    # SQLite3 database initialization
DATABASE = 'bpm.db'
g.db = sqlite3.connect(DATABASE)
@app.teardown_request
def db_disconnect(exception=None):
g.db.close()
@app.route('/', defaults={'username': 'joedanz'})
@app.route('/<username>')
def get_recent(username):
# get recently played list from last.fm api
recent_list = urllib2.urlopen('http://ws.audioscrobbler.com/2.0/?method=user.getrecenttracks&api_key=' \
+ lastfm_apikey + '&format=json&limit=' + str(num_songs) +'&user=' + username)
recent_data = recent_list.read()
recent_list.close()
parsed_recent = json.loads(recent_data)
print "Retrieved list from last.fm..."
# create tables
g.db.execute("DROP TABLE IF EXISTS songs")
g.db.execute("CREATE TABLE songs (ID INT PRIMARY KEY, ARTIST CHAR(250), TITLE CHAR(250), \
BPM INTEGER, LISTENED CHAR(50), IMAGE CHAR(250))")
# loop over songs from last.fm, don't include first in case currently playing
for i in range(num_songs):
print str(i+1) + ')',
print parsed_recent['recenttracks']['track'][i]['artist']['#text'] + ' :',
print parsed_recent['recenttracks']['track'][i]['name'],
try:
listenedAt = datetime.datetime.strptime(parsed_recent['recenttracks']['track'][i]['date']['#text'], '%d %b %Y, %H:%M')
except:
listenedAt = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
# search for echo nest id
echo_url1 = 'http://developer.echonest.com/api/v4/song/search?api_key='+echonest_apikey+'&artist=' \
+ urllib.quote(parsed_recent['recenttracks']['track'][i]['artist']['#text']) \
+ '&title=' + urllib.quote(parsed_recent['recenttracks']['track'][i]['name'].encode('utf8'))
echo_track = urllib2.urlopen(echo_url1)
echo_data = echo_track.read()
echo_track.close()
parsed_echo = json.loads(echo_data)
# get tempo data from echo nest song detail
echo_url2 = 'http://developer.echonest.com/api/v4/song/profile?api_key='+echonest_apikey+'&id=' \
+ parsed_echo['response']['songs'][0]['id'] + '&bucket=audio_summary'
echo_bpm = urllib2.urlopen(echo_url2)
bpm_data = echo_bpm.read()
echo_bpm.close()
parsed_bpm = json.loads(bpm_data)
print '(' + str(parsed_bpm['response']['songs'][0]['audio_summary']['tempo']) + ') -',
# use placeholder image if no album image and for now playing
image_url = parsed_recent['recenttracks']['track'][i]['image'][2]['#text']
if image_url == '':
image_url = 'http://ticc.net/img/turntable.png'
# insert into database
g.db.execute("INSERT INTO songs (ARTIST, TITLE, BPM, LISTENED, IMAGE) VALUES ('" \
+ parsed_recent['recenttracks']['track'][i]['artist']['#text'] + "', '" \
+ parsed_recent['recenttracks']['track'][i]['name'] + "', '" \
+ str(int(parsed_bpm['response']['songs'][0]['audio_summary']['tempo'])) + "', '" \
+ str(listenedAt) + "', '" + image_url + "')")
print listenedAt
# get necessary data from db
songs1 = g.db.execute("SELECT ID, ARTIST, TITLE, BPM, LISTENED, IMAGE FROM songs LIMIT 1000").fetchall()
songs2 = g.db.execute("SELECT BPM, COUNT(*) AS NUMSONGS FROM songs GROUP BY BPM ORDER BY BPM LIMIT 1000").fetchall()
return render_template('index.html', name=username, num_songs=num_songs, songs1=songs1, songs2=songs2)
if __name__ == '__main__':
app.run(debug=True)
```
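The `INSERT` above splices values directly into the SQL string, so an artist or title containing a quote breaks the statement and opens the app to SQL injection. A sketch of the same insert using sqlite3 parameter binding, with throwaway data:
```python
import sqlite3

db = sqlite3.connect(':memory:')
db.execute("CREATE TABLE songs (ID INTEGER PRIMARY KEY, ARTIST TEXT, TITLE TEXT, "
           "BPM INTEGER, LISTENED TEXT, IMAGE TEXT)")
# placeholders let the driver handle quoting, so a title like "Don't Stop"
# inserts cleanly
row = ("O'Connor", "Don't Stop", 120, '2014-01-01 12:00', 'http://ticc.net/img/turntable.png')
db.execute("INSERT INTO songs (ARTIST, TITLE, BPM, LISTENED, IMAGE) VALUES (?, ?, ?, ?, ?)", row)
print(db.execute("SELECT ARTIST, TITLE, BPM FROM songs").fetchall())
```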
{
"source": "joedanz/flask-weather",
"score": 3
}
#### File: flask-weather/app/weather_tests.py
```python
import os
import weather
import datetime
import unittest
import tempfile
class WeatherTestCase(unittest.TestCase):
def setUp(self):
self.db_fd, weather.app.config['DATABASE'] = tempfile.mkstemp()
weather.app.config['TESTING'] = True
self.app = weather.app.test_client()
weather.init_db()
def tearDown(self):
os.close(self.db_fd)
os.unlink(weather.app.config['DATABASE'])
def test_empty_db(self):
"""Test empty database with no entries."""
rv = self.app.get('/')
        assert b'Nothing logged yet.' in rv.data
def test_report(self):
"""Test reporting weather"""
rv = self.app.get('/report/11210/63/23', follow_redirects=True)
assert b'11210' in rv.data
def test_full_db(self):
"""Test reporting weather"""
rv = self.app.get('/', follow_redirects=True)
assert b'11210' in rv.data
if __name__ == '__main__':
unittest.main()
```
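The same setup/teardown can be written as a pytest fixture. A sketch assuming the `weather` module used by the tests above:
```python
import os
import tempfile

import pytest
import weather

@pytest.fixture
def client():
    # a temporary database per test, mirroring setUp/tearDown above
    db_fd, weather.app.config['DATABASE'] = tempfile.mkstemp()
    weather.app.config['TESTING'] = True
    with weather.app.test_client() as client:
        weather.init_db()
        yield client
    os.close(db_fd)
    os.unlink(weather.app.config['DATABASE'])

def test_empty_db(client):
    rv = client.get('/')
    assert b'Nothing logged yet.' in rv.data
```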
{
"source": "Joe-Davidson1802/floobits-neovim",
"score": 2
}
#### File: python3/floobits/editor.py
```python
import sys
from collections import defaultdict
import time
vim = None
timeouts = defaultdict(list)
top_timeout_id = 0
cancelled_timeouts = set()
calling_timeouts = False
line_endings = "\n"
welcome_text = 'Welcome %s!\n\nYou are all set to collaborate. You should check out our docs at https://%s/help/plugins/#sublime-usage. \
You must run \':FlooCompleteSignup\' before you can login to floobits.com.'
NEW_ACCOUNT_TXT = 'Welcome {username}!\n\nYou\'re all set to collaborate. You should check out our docs at https://{host}/help/plugins/vim#usage. \
You must run \'Floobits - Complete Sign Up\' so you can log in to the website.'
LINKED_ACCOUNT_TXT = """Welcome {username}!\n\nYou are all set to collaborate.
You may want to check out our docs at https://{host}/help/plugins/vim#usage"""
def name():
if sys.version_info < (3, 0):
py_version = 2
else:
py_version = 3
return 'Neovim-py%s' % py_version
def codename():
return 'vim'
def windows(*args, **kwargs):
return []
def set_timeout(func, timeout, *args, **kwargs):
global top_timeout_id
timeout_id = top_timeout_id
    top_timeout_id += 1
if top_timeout_id > 100000:
top_timeout_id = 0
def timeout_func():
if timeout_id in cancelled_timeouts:
cancelled_timeouts.remove(timeout_id)
return
func(*args, **kwargs)
then = time.time() + (timeout / 1000.0)
timeouts[then].append(timeout_func)
return timeout_id
def cancel_timeout(timeout_id):
if timeout_id in timeouts:
cancelled_timeouts.add(timeout_id)
def call_timeouts():
global calling_timeouts
if calling_timeouts:
return
calling_timeouts = True
now = time.time()
to_remove = []
for t, tos in list(timeouts.items()):
if now >= t:
for timeout in tos:
timeout()
to_remove.append(t)
for k in to_remove:
del timeouts[k]
calling_timeouts = False
def error_message(message, *args, **kwargs):
message = message.replace("\n", " ")
vim.command('echom "%s"' % message)
def status_message(message):
message = message.replace("\n\n", " ")
vim.command('echom "%s"' % message)
def message_dialog(message):
message = message.replace("\n\n", " ")
vim.command('echom "%s"' % message)
def vim_choice(prompt, default, choices):
default = choices.index(default) + 1
prompt = prompt.replace("\n\n", " ")
choices_str = '\n'.join(['&%s' % choice for choice in choices])
try:
choice = int(vim.eval('confirm("%s", "%s", %s)' % (prompt, choices_str, default)))
except KeyboardInterrupt:
return None
if choice == 0:
return None
return choices[choice - 1]
def ok_cancel_dialog(prompt):
prompt = prompt.replace("\n\n", " ")
choice = vim_choice(prompt, 'ok', ['ok', 'cancel'])
return choice == 'ok'
def open_file(filename):
current_buffer = vim.eval('expand("%:p")')
if current_buffer != filename:
vim.command(':silent! edit! %s | :silent! :filetype detect' % filename)
def platform():
return sys.platform
def get_line_endings(path=None):
return line_endings
```
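A usage sketch for the timeout machinery above: `set_timeout()` only queues a callback, and the embedding plugin is expected to pump `call_timeouts()` periodically. The snippet assumes it runs in the same namespace as the module above; the 100 ms pump interval is an arbitrary choice.
```python
import time

def greet(who):
    print('hello, %s' % who)

tid = set_timeout(greet, 250, 'floobits')  # queue greet() for ~250 ms from now
# calling cancel_timeout(tid) here would suppress the callback
for _ in range(5):
    time.sleep(0.1)
    call_timeouts()  # greet() fires on the first pump past its deadline
```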
#### File: floobits/floocommon/flooui.py
```python
import os.path
import webbrowser
import re
import json
from . import api, msg, utils, reactor, shared as G, event_emitter
from .handlers import account, credentials
from .. import editor
from ..common.exc_fmt import str_e
class FlooUI(event_emitter.EventEmitter):
def __init__(self):
super(FlooUI, self).__init__()
self.agent = None
def _make_agent(self, context, owner, workspace, auth, join_action):
"""@returns new Agent()"""
        raise NotImplementedError()
def user_y_or_n(self, context, prompt, affirmation_txt, cb):
"""@returns True/False"""
        raise NotImplementedError()
def user_select(self, context, prompt, choices_big, choices_small, cb):
"""@returns (choice, index)"""
        raise NotImplementedError()
def user_charfield(self, context, prompt, initial, cb):
"""@returns String"""
        raise NotImplementedError()
def user_dir(self, context, prompt, initial, cb):
"""@returns a String directory (probably not expanded)"""
        raise NotImplementedError()
def get_a_window(self, abs_path, cb):
"""opens a project in a window or something"""
        raise NotImplementedError()
@utils.inlined_callbacks
def link_account(self, context, host, cb):
prompt = 'No credentials found in ~/.floorc.json for %s. Would you like to sign in? (opens a browser)' % host
yes = yield self.user_y_or_n, context, prompt, 'Sign in'
if not yes:
return
agent = credentials.RequestCredentialsHandler()
if not agent:
            self.error_message('''A configuration error occurred earlier. Please go to %s and sign up to use this plugin.
We're really sorry. This should never happen.''' % host)
return
agent.once('end', cb)
try:
reactor.reactor.connect(agent, host, G.DEFAULT_PORT, True)
except Exception as e:
print((str_e(e)))
@utils.inlined_callbacks
def create_or_link_account(self, context, host, force, cb):
if host != "floobits.com":
self.link_account(context, host, cb)
return
disable_account_creation = utils.get_persistent_data().get('disable_account_creation')
if disable_account_creation and not force:
print('We could not automatically create or link your floobits account. Please go to floobits.com and sign up to use this plugin.')
return
if not G.EXPERT_MODE:
editor.message_dialog('Thank you for installing the Floobits plugin!\n\nLet\'s set up your editor to work with Floobits.')
choices = [
'Sign in to Floobits',
'Automatically create a Floobits account',
'Cancel (see https://floobits.com/help/floorc)'
]
(choice, index) = yield self.user_select, context, 'You need an account to use Floobits! Do you want to:', choices, None
if index == -1 or index == 2:
d = utils.get_persistent_data()
if not d.get('disable_account_creation'):
d['disable_account_creation'] = True
utils.update_persistent_data(d)
# TODO: this instruction is only useful for Sublime Text
editor.message_dialog('''You can set up a Floobits account at any time under:\n\nTools -> Floobits -> Set up''')
cb(None)
return
agent = None
if index == 0:
agent = credentials.RequestCredentialsHandler()
else:
agent = account.CreateAccountHandler()
agent.once('end', cb)
try:
reactor.reactor.connect(agent, host, G.DEFAULT_PORT, True)
except Exception as e:
print((str_e(e)))
def open_workspace(self):
if not self.agent:
return
try:
webbrowser.open(self.agent.workspace_url, new=2, autoraise=True)
except Exception as e:
msg.error("Couldn't open a browser: %s" % (str_e(e)))
def open_workspace_settings(self):
if not self.agent:
return
try:
webbrowser.open(self.agent.workspace_url + '/settings', new=2, autoraise=True)
except Exception as e:
msg.error("Couldn't open a browser: %s" % (str_e(e)))
def pinocchio(self, host=None):
floorc = utils.load_floorc_json()
auth = floorc.get('AUTH', {}).get(host or G.DEFAULT_HOST, {})
username = auth.get('username')
secret = auth.get('secret')
if not (username and secret):
return self.error_message('You don\'t seem to have a Floobits account of any sort')
webbrowser.open('https://%s/%s/pinocchio/%s' % (G.DEFAULT_HOST, username, secret))
def prejoin_workspace(self, workspace_url, dir_to_share, api_args):
try:
result = utils.parse_url(workspace_url)
except Exception as e:
msg.error(str_e(e))
return False
host = result.get('host')
if not api.get_basic_auth(host):
raise ValueError('No auth credentials for %s. Please add a username and secret for %s in your ~/.floorc.json' % (host, host))
try:
w = api.get_workspace_by_url(workspace_url)
except Exception as e:
editor.error_message('Error opening url %s: %s' % (workspace_url, str_e(e)))
return False
if w.code >= 400:
try:
d = utils.get_persistent_data()
try:
del d['workspaces'][result['owner']][result['name']]
except Exception:
pass
try:
del d['recent_workspaces'][workspace_url]
except Exception:
pass
utils.update_persistent_data(d)
except Exception as e:
msg.debug(str_e(e))
return False
msg.debug('workspace: ', json.dumps(w.body))
anon_perms = w.body.get('perms', {}).get('AnonymousUser', [])
msg.debug('api args: ', api_args)
new_anon_perms = api_args.get('perms', {}).get('AnonymousUser', [])
# TODO: prompt/alert user if going from private to public
if set(anon_perms) != set(new_anon_perms):
msg.debug(str(anon_perms), str(new_anon_perms))
w.body['perms']['AnonymousUser'] = new_anon_perms
response = api.update_workspace(workspace_url, w.body)
msg.debug(str(response.body))
utils.add_workspace_to_persistent_json(w.body['owner'], w.body['name'], workspace_url, dir_to_share)
return result
@utils.inlined_callbacks
def remote_connect(self, context, host, owner, workspace, d, join_action=utils.JOIN_ACTION.PROMPT):
G.PROJECT_PATH = os.path.realpath(d)
try:
utils.mkdir(os.path.dirname(G.PROJECT_PATH))
except Exception as e:
msg.error("Couldn't create directory", G.PROJECT_PATH, str_e(e))
return
auth = G.AUTH.get(host)
if not auth:
success = yield self.link_account, context, host
if not success:
return
auth = G.AUTH.get(host)
if not auth:
msg.error("Something went really wrong.")
return
try:
res = api.get_workspace(host, owner, workspace)
if res.code == 404:
msg.error("The workspace https://%s/%s/%s does not exist" % (host, owner, workspace))
return
except Exception as e:
message = 'Error getting workspace https://%s/%s/%s: %s' % (host, owner, workspace, str_e(e))
msg.error(message)
editor.error_message(message)
return
if self.agent:
try:
self.agent.stop()
except Exception:
pass
G.WORKSPACE_WINDOW = yield self.get_a_window, d
self.agent = self._make_agent(context, owner, workspace, auth, join_action)
self.emit("agent", self.agent)
reactor.reactor.connect(self.agent, host, G.DEFAULT_PORT, True)
url = self.agent.workspace_url
utils.add_workspace_to_persistent_json(owner, workspace, url, d)
utils.update_recent_workspaces(url)
@utils.inlined_callbacks
def create_workspace(self, context, host, owner, name, api_args, dir_to_share):
prompt = 'Workspace name: '
api_args['name'] = name
api_args['owner'] = owner
while True:
new_name = yield self.user_charfield, context, prompt, name
name = new_name or name
try:
api_args['name'] = name
r = api.create_workspace(host, api_args)
except Exception as e:
msg.error('Unable to create workspace ', str_e(e))
editor.error_message('Unable to create workspace: %s' % str_e(e))
return
if r.code < 400:
workspace_url = 'https://%s/%s/%s' % (host, owner, name)
msg.log('Created workspace ', workspace_url)
self.remote_connect(context, host, owner, name, dir_to_share, utils.JOIN_ACTION.UPLOAD)
return
msg.error('Unable to create workspace: ', r.body)
if r.code not in (400, 402, 409):
try:
r.body = r.body['detail']
except Exception:
pass
editor.error_message('Unable to create workspace: %s' % r.body)
return
if r.code == 402:
try:
r.body = r.body['detail']
except Exception:
pass
yes = yield self.user_y_or_n, context, '%s Open billing settings?' % r.body, "Yes"
if yes:
webbrowser.open('https://%s/%s/settings#billing' % (host, owner))
return
if r.code == 400:
# TODO: strip leading dots/dashes/etc
                name = re.sub(r'[^A-Za-z0-9_\-\.]', '_', name)
                prompt = r'Workspace names may only contain [A-Za-z0-9_\-\.]. Choose another name: '
continue
yes = yield self.user_y_or_n, context, 'Workspace %s/%s already exists. Overwrite?' % (owner, name), 'Yes'
if yes:
# TODO: this doesn't set permissions on the workspace correctly
self.remote_connect(context, host, owner, name, dir_to_share, utils.JOIN_ACTION.PROMPT)
return
prompt = 'Workspace %s/%s already exists. Choose new name: ' % (owner, name)
def join_workspace_by_url(self, context, workspace_url, possible_dirs=None):
try:
d = utils.parse_url(workspace_url)
except Exception as e:
return editor.error_message(str_e(e))
return self.join_workspace(context, d['host'], d['workspace'], d['owner'], possible_dirs)
@utils.inlined_callbacks
def follow_user(self, context, cb=None):
users = self.agent.workspace_info.get('users')
userNames = set()
me = self.agent.get_username_by_id(self.agent.workspace_info['user_id'])
for user in list(users.values()):
username = user['username']
if username == me:
continue
if user['client'] == 'flootty':
continue
if 'highlight' not in user['perms']:
continue
userNames.add(username)
if not userNames:
editor.error_message("There are no other users that can be followed at this time. " +
"NOTE: you can only follow users who have highlight permission.")
cb and cb()
return
userNames = list(userNames)
userNames.sort()
small = [(x in G.FOLLOW_USERS) and "unfollow" or "follow" for x in userNames]
selected_user, index = yield self.user_select, context, "select a user to follow", list(userNames), small
if not selected_user:
cb and cb()
return
if selected_user in G.FOLLOW_USERS:
G.FOLLOW_USERS.remove(selected_user)
cb and cb()
return
G.FOLLOW_USERS.add(selected_user)
G.AGENT.highlight(user=selected_user)
cb and cb()
return
@utils.inlined_callbacks
def join_workspace(self, context, host, name, owner, possible_dirs=None):
utils.reload_settings()
# legacy urls in emacs...
if owner and owner[:2] == "r/":
owner = owner[2:]
if not utils.can_auth():
success = yield self.create_or_link_account, context, host, False
if not success:
return
utils.reload_settings()
possible_dirs = possible_dirs or []
for d in possible_dirs:
info = utils.read_floo_file(d)
if not info:
continue
try:
parsed_url = utils.parse_url(info['url'])
except Exception:
parsed_url = None
if parsed_url and parsed_url['host'] == host and parsed_url['workspace'] == name and parsed_url['owner'] == owner:
self.remote_connect(context, host, owner, name, d)
return
try:
d = utils.get_persistent_data()['workspaces'][owner][name]['path']
except Exception:
d = ''
if d and os.path.isdir(d):
self.remote_connect(context, host, owner, name, d)
return
# TODO: make per-host settings fully general
host_share_dir = G.AUTH.get(host, {}).get('share_dir')
d = d or os.path.join(host_share_dir or G.SHARE_DIR or G.BASE_DIR, owner, name)
join_action = utils.JOIN_ACTION.PROMPT
while True:
d = yield self.user_dir, context, 'Save workspace files to: ', d
if not d:
return
d = os.path.realpath(os.path.expanduser(d))
if not os.path.isdir(d):
y_or_n = yield self.user_y_or_n, context, '%s is not a directory. Create it? ' % d, "Create Directory"
if not y_or_n:
return
utils.mkdir(d)
if not os.path.isdir(d):
msg.error("Couldn't create directory", d)
continue
join_action = utils.JOIN_ACTION.DOWNLOAD
if os.path.isdir(d):
self.remote_connect(context, host, owner, name, d, join_action)
return
@utils.inlined_callbacks
def prompt_share_dir(self, context, ask_about_dir, api_args):
dir_to_share = yield self.user_dir, context, 'Directory to share: ', ask_about_dir
if not dir_to_share:
return
self.share_dir(context, dir_to_share, api_args)
@utils.inlined_callbacks
def share_dir(self, context, dir_to_share, api_args):
utils.reload_settings()
if not utils.can_auth():
success = yield self.create_or_link_account, context, G.DEFAULT_HOST, False
if not success:
return
utils.reload_settings()
dir_to_share = os.path.expanduser(dir_to_share)
dir_to_share = os.path.realpath(dir_to_share)
dir_to_share = utils.unfuck_path(dir_to_share)
if os.path.isfile(dir_to_share):
dir_to_share = os.path.dirname(dir_to_share)
workspace_name = os.path.basename(dir_to_share)
msg.debug('', workspace_name, dir_to_share)
if os.path.isfile(dir_to_share):
dir_to_share = os.path.dirname(dir_to_share)
try:
utils.mkdir(dir_to_share)
except Exception:
msg.error("The directory", dir_to_share, "doesn't exist and I can't create it.")
return
info = utils.read_floo_file(dir_to_share)
def prejoin(workspace_url):
try:
return self.prejoin_workspace(workspace_url, dir_to_share, api_args)
except ValueError:
pass
workspace_url = info.get('url')
if workspace_url:
parsed_url = prejoin(workspace_url)
if parsed_url:
self.remote_connect(context, parsed_url['host'], parsed_url['owner'], parsed_url['workspace'], dir_to_share)
return
parsed_url = utils.get_workspace_by_path(dir_to_share, prejoin)
if parsed_url:
self.remote_connect(context, parsed_url['host'], parsed_url['owner'], parsed_url['workspace'], dir_to_share)
return
host = yield self._get_host, context
if not host:
return
try:
r = api.get_orgs_can_admin(host)
except IOError as e:
editor.error_message('Error getting org list: %s' % str_e(e))
return
choices = [G.AUTH[host]['username']]
if r.code >= 400:
editor.error_message('Error getting org list: %s' % r.body)
elif r.body:
choices += [org['name'] for org in r.body]
if len(choices) == 1:
owner = choices[0]
else:
little = ['Create workspace owned by %s' % s for s in choices]
(owner, index) = yield self.user_select, context, 'Create workspace owned by', choices, little
if not owner:
return
self.create_workspace(context, host, owner, workspace_name, api_args, dir_to_share)
@utils.inlined_callbacks
def _get_host(self, context, cb):
if not G.AUTH:
msg.warn('no auth')
return
hosts = list(G.AUTH.keys())
if len(hosts) == 1:
host = hosts[0]
else:
little = ["%s on %s" % (a['username'], h) for h, a in list(G.AUTH.items())]
(host, index) = yield self.user_select, context, 'Which Floobits account should be used?', hosts, little
if not host:
cb(None)
return
cb(host)
@utils.inlined_callbacks
def delete_workspace(self, context, cb):
host = yield self._get_host, context
if not host:
cb()
return
api_url = 'https://%s/api/workspaces/can/admin' % (host)
try:
r = api.api_request(host, api_url)
except IOError as e:
editor.error_message('Error getting workspaces can admin %s' % str_e(e))
cb()
return
if r.code >= 400:
editor.error_message('Error getting workspace list: %s' % r.body)
cb()
return
choices = ['%s/%s' % (workspace['owner'], workspace['name']) for workspace in r.body]
        (workspace, index) = yield self.user_select, context, 'Select workspace to delete', choices, []
if not workspace:
cb()
return
if G.EXPERT_MODE:
yes = True
else:
yes = yield self.user_y_or_n, context, 'Really delete %s?' % workspace, 'Yes'
if not yes:
cb()
return
workspace = r.body[index]
try:
api.delete_workspace(host, workspace['owner'], workspace['name'])
except IOError as e:
            editor.error_message('Error deleting workspace: %s' % str_e(e))
cb()
```
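Methods above repeatedly `yield self.user_y_or_n, context, ...` and receive the answer as the value of the yield. The toy driver below illustrates that continuation-passing convention; it is an assumption about the contract of `utils.inlined_callbacks`, not its actual implementation.
```python
# Toy driver for the (callable, *args) yield convention; run_inlined and
# ask are hypothetical stand-ins, not floobits code.
def run_inlined(gen_func, *args):
    gen = gen_func(*args)

    def step(result=None):
        try:
            yielded = gen.send(result)
        except StopIteration:
            return
        func, func_args = yielded[0], yielded[1:]
        func(*func_args, step)  # func reports back by calling step(result)

    step()

def ask(prompt, cb):
    cb(prompt.endswith('?'))  # synchronous stand-in for a UI prompt

def flow():
    yes = yield ask, 'Sign in?'
    print('user said yes' if yes else 'user said no')

run_inlined(flow)  # prints: user said yes
```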
#### File: floocommon/handlers/credentials.py
```python
import os
import sys
import uuid
import binascii
import webbrowser
try:
from . import base
from .. import api, shared as G, utils
from ... import editor
from ..exc_fmt import str_e
from ..protocols import no_reconnect
assert api and G and utils
except (ImportError, ValueError):
from . import base
from floo import editor
from floo.common.protocols import no_reconnect
from floo.common.exc_fmt import str_e
from .. import api, shared as G, utils
class RequestCredentialsHandler(base.BaseHandler):
PROTOCOL = no_reconnect.NoReconnectProto
def __init__(self):
super(RequestCredentialsHandler, self).__init__()
self.token = binascii.b2a_hex(uuid.uuid4().bytes).decode('utf-8')
self.success = False
def build_protocol(self, *args):
proto = super(RequestCredentialsHandler, self).build_protocol(*args)
def on_stop():
self.emit('end', self.success)
self.stop()
proto.once('stop', on_stop)
return proto
def is_ready(self):
return False
def on_connect(self):
webbrowser.open('https://%s/dash/link_editor/%s/%s' % (self.proto.host, self.codename, self.token))
self.send({
'name': 'request_credentials',
'client': self.client,
'platform': sys.platform,
'token': self.token,
'version': G.__VERSION__
})
def _on_credentials(self, data):
s = utils.load_floorc_json()
auth = s.get('AUTH', {})
auth[self.proto.host] = data['credentials']
s['AUTH'] = auth
utils.save_floorc_json(s)
utils.reload_settings()
self.success = utils.can_auth(self.proto.host)
if not self.success:
editor.error_message('Something went wrong. See https://%s/help/floorc to complete the installation.' % self.proto.host)
api.send_error('No username or secret')
else:
p = os.path.join(G.BASE_DIR, 'welcome.md')
with open(p, 'w') as fd:
username = G.AUTH.get(self.proto.host, {}).get('username')
text = editor.LINKED_ACCOUNT_TXT.format(username=username, host=self.proto.host)
fd.write(text)
editor.open_file(p)
try:
self.stop()
except Exception as e:
print((str_e(e)))
```
{
"source": "joedaws/card-player",
"score": 4
}
#### File: models/builders/multilayer_perceptron.py
```python
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
class MultilayerPerceptronBuilder:
"""A simple constant width multilayer perceptron.
If input_dim = x then the inputs passed to the model that is built by
    this class should have shape (batch_size, x).
Args:
config (aicard.brains.models.config.multilayer_perceptron.Config): A dataclass for configuring
a multilayer perceptron model.
"""
ALLOWED_INITIALIZATIONS = ['random']
def __init__(self, config):
self.initialization = config.initialization
self.num_layers = config.num_layers
self.input_dim = config.input_dim
self.width = config.width
self.output_dim = config.output_dim
try:
self.activation = getattr(tf.nn, config.activation)
except AttributeError:
print(f'The activation function {config.activation} cannot be imported from tf.nn!'
f' Will use relu instead.')
self.activation = getattr(tf.nn, 'relu')
def build(self):
"""Instantiates a multilayer perceptron model."""
model = None
initialization = self.initialization
if initialization == 'random':
model = self.build_base_model()
elif initialization not in self.ALLOWED_INITIALIZATIONS:
raise ValueError(f'Cannot initialize multilayer perceptron with {initialization}')
return model
def build_base_model(self):
"""Build the base model with randomly initialized weights.
Based on the source code I believe that glorot normal initialization
is the default.
"""
# create the model
model = Sequential()
# create input layer
model.add(Dense(self.width,
activation=self.activation,
input_shape=(self.input_dim,),
kernel_initializer='glorot_uniform',
bias_initializer='zeros'
)
)
# create the middle layers
for _ in range(1, self.num_layers-1):
model.add(Dense(self.width, activation=self.activation))
# create output layer
model.add(Dense(self.output_dim, activation=None))
return model
```
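A usage sketch for the builder. The real config class lives in `aicard.brains.models.config.multilayer_perceptron`; the dataclass below is a stand-in carrying only the fields the builder reads.
```python
from dataclasses import dataclass

@dataclass
class Config:
    initialization: str = 'random'
    num_layers: int = 3
    input_dim: int = 8
    width: int = 32
    output_dim: int = 4
    activation: str = 'relu'

builder = MultilayerPerceptronBuilder(Config())
model = builder.build()
# num_layers=3 yields Dense(32) -> Dense(32) -> Dense(4): the middle loop
# adds num_layers - 2 hidden layers between the input and output layers.
model.summary()
```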
#### File: engine/animators/animator.py
```python
import itertools
import threading
import time
import sys
import os
from os import name, system
DEFAULT_FPS = 3
class Animator:
"""Base animator classes"""
def __init__(self):
self.fps = DEFAULT_FPS # the default frames per second
self.done = False
try:
self.columns, self.lines = os.get_terminal_size()
except Exception as e:
self.columns = 80
self.lines = 30
@property
def wait_between_frames(self):
return 1/self.fps
def clear(self):
# for windows
if name == 'nt':
_ = system('cls')
# for mac and linux(here, os.name is 'posix')
else:
_ = system('clear')
def carriage_return(self):
sys.stdout.write('\r')
def write(self, *args):
sys.stdout.write(*args)
def flush(self):
sys.stdout.flush()
def sleep(self, secs):
time.sleep(secs)
def clear_line(self):
self.carriage_return()
self.write(' '*(self.columns-3))
self.flush()
self.carriage_return()
def clear_line_decorator(self, func):
"""Decorator for animation functions
DOES NOT WORK YET!
"""
def wrapper():
self.clear_line()
func()
self.clear_line()
return wrapper
def animate(self, strings_to_draw, animate_fn=None):
if animate_fn is None:
animate_fn = self.animate_fn
animate_fn(strings_to_draw=strings_to_draw)
def animate_fn(self, strings_to_draw: list):
"""Animates the string objects in the strings to draw list.
Args:
            strings_to_draw: List of strings that should be drawn in order
Returns:
nothing, but prints
"""
for string in strings_to_draw:
self.carriage_return()
self.write(string)
self.flush()
self.sleep(self.wait_between_frames)
self.write('\n')
def loop_animate(self, duration, strings_to_draw, animate_fn=None):
if animate_fn is None:
animate_fn = self.loop_animate_fn
t = threading.Thread(target=animate_fn,
kwargs={'strings_to_draw': strings_to_draw})
t.start()
# sleep while the animation is drawing
self.sleep(duration)
self.done = True
def loop_animate_fn(self, strings_to_draw: list):
"""Looping animation the string objects in the strings to draw list.
Args:
            strings_to_draw: List of strings that should be drawn in order
Returns:
nothing, but prints
"""
# reset done just in case this function has already been called.
self.done = False
for c in itertools.cycle(strings_to_draw):
if self.done:
break
self.carriage_return()
self.write(c)
self.flush()
self.sleep(self.wait_between_frames)
self.carriage_return()
self.write('\n')
```
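A usage sketch for the animator: a spinner looped on a background thread for two seconds, then a one-shot countdown. The frame strings are arbitrary.
```python
spinner = ['|', '/', '-', '\\']

anim = Animator()
anim.fps = 8
anim.loop_animate(duration=2, strings_to_draw=spinner)  # cycles frames ~2 s
anim.animate(['3...', '2...', '1...', 'go!'])           # one pass, then newline
```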
#### File: games/core/deck_builder.py
```python
from cartomancy.games.core.deck import Deck
from importlib import import_module
from cartomancy.games.core.card_builder import CardBuilder
class DeckBuilder:
def __init__(self, game_module, card_module):
self.card_builder = CardBuilder(game_module, card_module)
self.game_info = import_module(game_module)
self.card_generator = getattr(import_module('.'.join([game_module, card_module])), 'card_generator')
def build_cards(self):
"""Build instances of cards based using the deck generator."""
cards = [self.card_builder.build_card(fields)
for fields in self.card_generator()]
return cards
def build_deck(self):
deck = Deck()
deck.cards = self.build_cards()
return deck
```
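A usage sketch. The module paths below are assumptions about the package layout (patterned on the `cartomancy.games.go_fish` imports seen elsewhere in this repo) and are not verified:
```python
# hypothetical module names; DeckBuilder resolves them via import_module
builder = DeckBuilder(game_module='cartomancy.games.go_fish', card_module='cards')
deck = builder.build_deck()
print(len(deck.cards))  # e.g. 52 for a standard deck
```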
#### File: games/go_fish/game.py
```python
from cartomancy.games.go_fish.state import GoFishState
from cartomancy.games.go_fish import INITIAL_HAND_SIZE_MAP
from cartomancy.brains.spaces.go_fish.actions import Actions
from cartomancy.players.go_fish import GoFishPlayer
from cartomancy.games.core.events import DrawEvent, \
BookEvent, \
AskEvent, \
FailEvent, \
SuccessEvent, \
ExchangeEvent, \
RemovePlayerEvent
class GoFishGame:
"""A class for executing a game of Go Fish.
Note:
        The state includes players and player observations as well as
        a map from a player index to a list of indices of that player's
        opponents.
The kind of text animation used depends on whether or not a human
policy is being used.
Args:
policies (list): A list of policy classes.
"""
# total number of books, this number is used to determine termination conditions.
TOTAL_BOOKS = 13
NAME = 'Go Fish!'
def __init__(self, policies):
self.num_players = len(policies)
self.state = GoFishState(self.num_players)
self.turn_number = 1
# instantiate a policy for each player
self.policies_map = {player: policy()
for policy, player in zip(policies, self.state.players)}
self.over = False
# set up print based on which policies are being used to choose actions
self.print_method = None
self._setup_print_method(policies)
def play(self):
"""Play a game of Go Fish."""
self.deal()
while not self.over:
self.turn()
def deal(self):
"""Deal cards to players."""
#print(f"PHASE: begin dealing phase.\n\n")
# Shuffle deck to ensure random start
self.state.deck.shuffle()
        # all players draw cards until the initial hand size is attained.
for _ in range(INITIAL_HAND_SIZE_MAP[self.num_players]):
for player in self.state.players:
draw, book = self.event_draw(player)
self.state.update(draw)
self.state.update(book)
self.print_method(f"The cards have been dealt! BEGIN!\n")
def turn(self):
"""Execute a full turn."""
self.print_method(f'Beginning Turn {self.turn_number}.')
for player in self.state.players:
# if a player is out they don't play their turn.
if not player.is_out:
self.print_method(f"{player.name} is beginning their turn.\n")
self.player_turn(player)
self.check_game_over()
# no need to have the other players take their turn.
if self.over:
break
self.print_method(f'After Turn {self.turn_number} the status is. . .')
status_str = ""
for player in self.state.players:
status_str = f"{player.name} has {len(player.books)}"\
f" books and {len(player.hand)} cards."
self.print_method(status_str)
deck_status_str = f"The deck has {len(self.state.deck.cards)} cards remaining."
self.print_method(deck_status_str)
self.turn_number += 1
def player_turn(self, player: GoFishPlayer):
"""Player asks opponent for a card and goes fish if no exchange."""
# If players holds no cards, they may draw a card if there are cards remaining in the deck.
card_check = self.event_has_cards(player)
keep_asking = isinstance(card_check, SuccessEvent)
if isinstance(card_check, FailEvent):
draw, book = self.event_draw(player)
self.state.update(draw)
self.state.update(book)
keep_asking = isinstance(draw, DrawEvent)
# print statements about books
if isinstance(book, BookEvent):
self.print_method(f"{player.name} made a book with rank {book.rank}.")
if isinstance(card_check, FailEvent) and len(self.state.deck.cards) == 0:
self.print_method(f"{player.name} is out!")
remove_player = self.event_remove_player(player)
self.state.update(remove_player)
        # a player may keep asking for cards as long as they receive a card from an opponent.
asks = 0
while keep_asking:
# ensure player has cards before asking. Remove them if they do not.
card_check = self.event_has_cards(player)
if isinstance(card_check, FailEvent) and len(self.state.deck.cards) == 0:
self.print_method(f"{player.name} is out!")
remove_player = self.event_remove_player(player)
self.state.update(remove_player)
break
# get available actions
observations = self.state.observations[player]
actions = Actions(observations=observations, hand=player.hand)
# give available actions to policy and choose an action
policy = self.policies_map[player]
policy.actions = actions
policy.observations = observations
opponent, ask_rank = policy.sample()
# generate ask event
ask = self.event_ask(ask_rank, player, opponent)
self.state.update(ask)
exchange, book = player.ask(opponent, ask_rank)
self.state.update(exchange)
self.state.update(book)
asks += 1
# print statements about exchange
if isinstance(exchange, ExchangeEvent):
if exchange.number == 1:
self.print_method(f"{player.name} obtained a {exchange.rank} from {opponent.name}.")
else:
self.print_method(f"{player.name} obtained {exchange.number} {exchange.rank}s from {opponent.name}.")
elif isinstance(exchange, FailEvent):
self.print_method(f"{player.name} did not obtain a {ask_rank} from {opponent.name}.")
# A player who does not make a catch cannot keep asking.
keep_asking = False
# print statements about books
if isinstance(book, BookEvent):
self.print_method(f"{player.name} made a book with rank {book.rank}.")
if not player.is_out:
# after the asking phase of the turn ends, the player draws a card.
draw, book = self.event_draw(player)
self.state.update(draw)
self.state.update(book)
if isinstance(book, BookEvent):
self.print_method(f"{player.name} made a book with rank {book.rank}.")
# print statement about end of player's turn
self.print_method(f"{player.name} has finished their turn.\n")
def event_draw(self, player: GoFishPlayer):
"""Draw a card from the deck and generate a DrawEvent."""
deck = self.state.deck
if len(deck) > 0:
draw, book = player.draw(deck)
self.print_method(f"{player.name} drew a card from the deck and now has {len(player.hand)} card(s).")
else:
self.print_method(f"{player.name} tried to draw a card, but the deck was empty.")
draw = FailEvent(player=player)
book = FailEvent(player=player)
return draw, book
@staticmethod
def event_has_cards(player: GoFishPlayer):
"""Generate a SuccessEvent if player has cards in hand."""
if len(player.hand) > 0:
event = SuccessEvent(player)
else:
event = FailEvent(player)
return event
def event_ask(self, rank: str, player: GoFishPlayer, opponent: GoFishPlayer):
"""Generate an AskEvent based on the player."""
self.print_method(f"{player.name} is asking {opponent.name} for a {rank}.")
event = AskEvent(player=player, opponent=opponent, rank=rank)
return event
@staticmethod
def event_exchange(ask_event: AskEvent):
"""Generate and perform an ExchangeEvent based on ask event."""
ask_rank = ask_event.rank
player = ask_event.player
opponent = ask_event.opponent
cards = player.ask(opponent, ask_rank)
if cards:
event = ExchangeEvent(player_receiving=player,
player_giving=opponent,
rank=ask_rank,
number=len(cards))
else:
event = FailEvent(player=player)
return event
@staticmethod
def event_remove_player(player: GoFishPlayer):
"""Generate remove player event."""
player.is_out = True
event = RemovePlayerEvent(player)
return event
def reset(self):
"""reset to beginning of game."""
self.state.reset()
self.turn_number = 1
# try to reset policies
for _, policy in self.policies_map.items():
try:
policy.reset()
except AttributeError:
self.print_method(f"{type(policy)} cannot or does not need to be reset.")
# reset game over flag
self.over = False
def check_game_over(self):
"""See if the game is over."""
total_books = 0
for player in self.state.players:
total_books += len(player.books)
self.over = total_books == self.TOTAL_BOOKS
if self.over:
book_totals = {len(player.books): player
for player in self.state.players}
winner = book_totals[max(book_totals)].name
self.print_method(f"All books are acquired. {winner} has won!")
@staticmethod
def get_player_state_str(player):
"""returns string representation of a players state"""
state_str = f"{player.name} state:"
for rank in player.state:
if player.state[rank]:
state_str += rank + ": "
for suit in player.state[rank]:
state_str += suit + " "
if player.state[rank]:
state_str += " "
return state_str
def get_state_str(self):
"""Returns string representation of the state."""
state_str = f"The state of the go fish game:"
for player in self.state.players:
state_str += self.get_player_state_str(player)
return state_str
def __str__(self):
"""Printable version of state of Game."""
state_str = self.get_state_str()
return state_str
def _setup_print_method(self, policies):
"""Chooses the appropriate TextAnimator based on the policies playing.
When a human policy is used, use the slow text animator.
        This method chooses the "strategy" for printing text.
Args:
policies (list): List of policies being used in the
game to choose actions.
"""
# check policies for human policy
policy_names = [policy.NAME for policy in policies]
if 'human' in policy_names:
# set print strategy to slow text
from cartomancy.engine.animators.slow_text import SlowText
st = SlowText()
self.print_method = st.animate_text
else:
# set print strategy to fast text
self.print_method = print
```
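A usage sketch for running a game. `RandomPolicy` and its import path are stand-ins for whatever policy classes the package provides; per the code above, a policy needs a `NAME` attribute and a `sample()` method returning `(opponent, rank)`.
```python
from cartomancy.brains.policies.go_fish import RandomPolicy  # assumed import path

game = GoFishGame(policies=[RandomPolicy, RandomPolicy, RandomPolicy])
game.play()              # deals, then loops turns until all 13 books are made
print(game.turn_number)  # the turn on which the game ended
game.reset()             # clears state, policies, and the game-over flag
```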
#### File: cartomancy/players/base.py
```python
class Player:
"""A generic players class"""
GAME = 'GENERIC'
MAXIMUM_HAND_SIZE = 52
def __init__(self, name):
self.name = name
self.card_type = None
self.index = None
self._hand = None
self._deck = None
@property
def hand(self):
"""The players's hand, i.e., a list of card objects"""
if self._hand is None:
print(f'{self.GAME} {self.name} is initializing hand.')
self._hand = []
if len(self._hand) > self.MAXIMUM_HAND_SIZE:
            raise ValueError(f'Hand size exceeds maximum '
                             f'hand size {self.MAXIMUM_HAND_SIZE}')
return self._hand
@hand.setter
def hand(self, new_hand):
"""setter method for players's hand of cards"""
self._hand = new_hand
def draw(self, deck, n=1):
"""Player draws card(s) from provided deck.
Args:
            deck (Deck): An instance of a card deck.
n (int): Number of cards to draw from the deck.
"""
new_cards = [deck.draw(1)[0] for _ in range(n)]
self.receive(new_cards)
    def receive(self, new_card):
        """Add card(s) to the player's hand."""
        if self.card_type is None:
            # infer the card type from the first card received, whether a
            # single card or a list of cards was passed in
            sample = new_card[0] if isinstance(new_card, list) else new_card
            self.card_type = type(sample)
        if isinstance(new_card, list):
            self.hand += new_card
        elif isinstance(new_card, self.card_type):
            self.hand.append(new_card)
        else:
            raise ValueError(f"Cannot add {type(new_card)} to"
                             f" {self.name}'s hand.")
def remove(self, cards_to_remove):
"""Remove a card or cards from hand."""
if not isinstance(cards_to_remove, list):
cards_to_remove = [cards_to_remove]
self.hand = [card for card in self.hand if str(card) not in
[str(c) for c in cards_to_remove]]
def hand_str(self):
"""Returns single line string representation of hand"""
hand_str = ""
for c in self.hand:
hand_str += str(c) + " "
return hand_str
def __str__(self):
"""Printable version of player"""
return self.hand_str()
```
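A usage sketch exercising the generic `Player` standalone. `Card` and `MiniDeck` are throwaway stand-ins for the real card and deck classes in `cartomancy.games.core`:
```python
class Card:
    def __init__(self, label):
        self.label = label
    def __str__(self):
        return self.label

class MiniDeck:
    """Throwaway deck exposing the draw(n) interface Player.draw expects."""
    def __init__(self, labels):
        self.cards = [Card(label) for label in labels]
    def draw(self, n):
        return [self.cards.pop() for _ in range(n)]

p = Player('Ada')
p.draw(MiniDeck(['2H', '3S', 'KD']), n=2)
print(p)        # "KD 3S " -- draw pops cards off the end of the deck
p.remove('KD')  # removal matches on str(card)
print(p)        # "3S "
```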
#### File: tests/test_engine/test_slow_text.py
```python
from cartomancy.engine.animators.slow_text import SlowText
def test_slow_text():
string1 = 'Hello. This is a test of the slow text animator.'
string2 = 'Now for the glorious second line of text.'
st = SlowText()
st.animate_text(string1)
st.animate_text(string2)
```
#### File: test_games/test_core/test_deck.py
```python
import pytest
from cartomancy.games.core.deck import Deck
@pytest.fixture
def deck():
return Deck()
def test_deck(deck):
assert hasattr(deck, 'draw')
assert hasattr(deck, 'shuffle')
assert hasattr(deck, '__len__')
assert hasattr(deck, '__str__')
```
{
"source": "joedaws/lde2021",
"score": 4
}
#### File: classification/data_utils/loader.py
```python
import os
import pandas as pd
from classification.data_utils.processor import DataLocator
from classification.data_utils.processor import PROCESSOR_MAP
def load_df(name) -> pd.DataFrame:
"""Loads a prepared dataframe.
If the file has already been processed then we just load
    directly from the saved csv; otherwise, the data processor instance is used.
"""
if os.path.isfile(DataLocator.DATA_PATHS[name]):
df = pd.read_csv(DataLocator.DATA_PATHS[name])
else:
# obtain data processor for this kind of data
data_processor = PROCESSOR_MAP.get(name)
# use processor to obtain dataframe (also saves csv to file)
df = data_processor.process()
return df
```
#### File: classification/data_utils/processor.py
```python
from abc import ABC, abstractmethod
from typing import Tuple
import pandas as pd
from pandas import DataFrame
class DataLocator:
"""Just holds the dictionaries for finding the path to certain data"""
# dictionary of inputs paths
RAW_DATA_PATHS = {
'ecoli': 'classification/resources/raw_data/ecoli/ecoli.data',
'glass': 'classification/resources/raw_data/glass/glass.data',
'abalone': 'classification/resources/raw_data/abalone/abalone.data'
}
# dictionary of output paths
DATA_PATHS = {
'ecoli': 'classification/resources/data/ecoli.csv',
'glass': 'classification/resources/data/glass.csv',
'abalone': 'classification/resources/data/abalone.csv',
'winequality': 'classification/resources/data/winequality-red.csv',
'algerian': 'classification/resources/data/Algerian_forest_fires_dataset_UPDATE.csv'
}
class AbstractDataProcessor(DataLocator, ABC):
"""
The Abstract Class defining the template method for processing data.
"""
NAME = 'ABSTRACT'
@classmethod
def process(cls) -> DataFrame:
"""
This method defines the skeleton of the process method from raw data to
        a correctly formatted csv file
"""
name, df = cls.convert_raw_data_to_df()
cls.save_as_csv(name, df)
return df
# The save functionality is the same for all data
@classmethod
def save_as_csv(cls, name: str, df: DataFrame) -> None:
df.to_csv(cls.DATA_PATHS[name])
# This operation has to be implemented in a subclass.
@classmethod
@abstractmethod
def convert_raw_data_to_df(cls) -> Tuple[str, DataFrame]:
"""
The child classes must implement this. The dataframe must have column
names feature_i for all of the feature columns and a column of
labels with the name label.
This method may also transform the raw data features into a
form more useful for classification such as one hot encoding.
"""
pass
@classmethod
def get_raw_path(cls):
return cls.RAW_DATA_PATHS[cls.NAME]
@classmethod
def get_path(cls):
return cls.DATA_PATHS[cls.NAME]
class EcoliDataProcessor(AbstractDataProcessor):
NAME = 'ecoli'
@classmethod
def convert_raw_data_to_df(cls) -> Tuple[str, DataFrame]:
"""Converts raw ecoli data to a dataframe"""
# path to raw data file
path = cls.get_raw_path()
# names of columns in the dataframe
names = ['sequence'] + [f'feature_{i}' for i in range(1, 8)] + ['label']
# create dataframe
df = pd.read_csv(path, delim_whitespace=True, names=names)
return cls.NAME, df
class GlassDataProcessor(AbstractDataProcessor):
NAME = 'glass'
@classmethod
def convert_raw_data_to_df(cls) -> Tuple[str, DataFrame]:
"""Converts raw glass data to a dataframe."""
path = cls.get_raw_path()
# name of columns in the dataframe
names = ['index']+[f'feature_{i}' for i in range(9)]+['label']
# TODO should we drop the index column
# create dataframe
df = pd.read_csv(path, names=names)
return cls.NAME, df
class LetterDataProcessor(AbstractDataProcessor):
NAME = 'letter'
NUM_FEATURES = 16
@classmethod
def convert_raw_data_to_df(cls) -> Tuple[str, DataFrame]:
"""Converts raw letter data into a dataframe"""
path = cls.get_raw_path()
# names of columns
names = ['label']+[f'feature_{i}' for i in range(cls.NUM_FEATURES)]
df = pd.read_csv(path, names=names)
return cls.NAME, df
class OptdigitsDataProcessor(AbstractDataProcessor):
NAME = 'optdigits'
NUM_FEATURES = 64
@classmethod
def convert_raw_data_to_df(cls) -> Tuple[str, DataFrame]:
"""converts raw optdigits data into dataframe"""
path = cls.get_raw_path()
# names of columns
names = [f'feature_{i}' for i in range(cls.NUM_FEATURES)]+['label']
df = pd.read_csv(path, names=names)
return cls.NAME, df
class AbaloneDataProcessor(AbstractDataProcessor):
NAME = 'abalone'
NUM_FEATURES = 8
@classmethod
def convert_raw_data_to_df(cls) -> Tuple[str, DataFrame]:
"""converts raw optdigits data into dataframe"""
path = cls.get_raw_path()
# names of columns
names = [f'feature_{i}' for i in range(cls.NUM_FEATURES)]+['label']
df = pd.read_csv(path, names=names)
return cls.NAME, df
# collect data processors in a map for use by loaders
PROCESSOR_MAP = {
'ecoli': EcoliDataProcessor,
'glass': GlassDataProcessor,
'letter': LetterDataProcessor,
'optdigits': OptdigitsDataProcessor,
'abalone': AbaloneDataProcessor
}
```
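Adding a dataset is a matter of subclassing the template. Below is a sketch for the `winequality` key that already sits in `DATA_PATHS`, written as if appended to the module above; the raw-file path and the semicolon delimiter are assumptions about the usual UCI distribution of that dataset.
```python
class WineQualityDataProcessor(AbstractDataProcessor):
    NAME = 'winequality'
    NUM_FEATURES = 11

    @classmethod
    def convert_raw_data_to_df(cls) -> Tuple[str, DataFrame]:
        # hypothetical raw path, following the layout of RAW_DATA_PATHS
        path = 'classification/resources/raw_data/winequality/winequality-red.csv'
        df = pd.read_csv(path, sep=';', header=0)
        df.columns = [f'feature_{i}' for i in range(cls.NUM_FEATURES)] + ['label']
        return cls.NAME, df

PROCESSOR_MAP['winequality'] = WineQualityDataProcessor
```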
#### File: lde2021/rl_environments/RLBanditEnv.py
```python
import gym
import numpy as np
import torch
import stable_baselines3 as sb3
from stable_baselines3.common.evaluation import evaluate_policy
from stable_baselines3.common.env_util import make_vec_env
import pybullet_envs
import pandas as pd
import pickle
import os
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_theme(style='whitegrid', palette=[sns.color_palette('colorblind')[i] for i in [0,3,4,2]])
np.set_printoptions(suppress=True, linewidth=100, precision=4)
pd.set_option('precision', 4)
gym.logger.set_level(40)
plt.rcParams['font.family'] = 'monospace'
plt.rcParams['font.weight'] = 'bold'
class RLBanditEnv:
'''
numerical experiment where the policies are trained on rl environments and
then compared in the bandit setting via various policy evaluation methods
'''
def __init__(self, params):
self.__dict__.update(params)
self.make_env()
def make_env(self):
'''create the environment'''
try:
self.env = gym.make(self.env_name)
        except Exception:
self.env = make_vec_env(self.env_name, n_envs=1)
self.low = self.env.action_space.low
self.high = self.env.action_space.high
def train_target_policies(self, seed=None):
'''train policies to be ranked'''
if seed is not None:
np.random.seed(seed)
torch.manual_seed(seed)
self.env.seed(seed)
self.env.action_space.seed(seed)
models = {
'A2C': sb3.A2C('MlpPolicy', self.env, seed=seed).learn(self.train_steps),
'DDPG': sb3.DDPG('MlpPolicy', self.env, seed=seed).learn(self.train_steps),
'PPO': sb3.PPO('MlpPolicy', self.env, seed=seed).learn(self.train_steps),
'SAC': sb3.SAC('MlpPolicy', self.env, seed=seed).learn(self.train_steps),
'TD3': sb3.TD3('MlpPolicy', self.env, seed=seed).learn(self.train_steps)}
self.target_policies = {name: model.policy for name, model in models.items()}
self.num_policy_pairs = len(models) * (len(models) - 1) / 2
def evaluate_policy_rl(self, policy, num_sims=10):
'''evaluate policy in rl environment'''
reward_avg, reward_std = evaluate_policy(policy, self.env, n_eval_episodes=num_sims,
deterministic=False, warn=False)
return reward_avg, reward_std
def estimate_policy_value(self, policy, num_sims, seed=None):
'''estimate policy value in bandit environment'''
policy_value = 0
for _ in range(num_sims):
if seed is not None:
self.env.seed(seed)
obs = self.env.reset()
for t in range(self.env_steps):
action, _ = policy.predict(obs, deterministic=False)
obs, reward, done, _ = self.env.step(action)
policy_value += reward
if done:
break
policy_value /= num_sims
return policy_value
def evaluate_target_policies(self, num_sims=100):
'''evaluate target policies in bandit environment'''
self.value_true = {}
for name, policy in self.target_policies.items():
self.value_true[name] = self.estimate_policy_value(policy, num_sims)
def probability_proxy(self, action1, action2):
'''compute probability of taking action1 instead of action2'''
action_delta = (action1 - action2) / (self.high - self.low)
prob = np.exp((1 - 1 / (1 - action_delta**2 + 1e-08)).mean())
return prob
def generate_historical_data(self):
'''sample historical data by deploying target policies'''
self.historical_data, self.value_emp = [], {}
for name, policy in self.target_policies.items():
self.value_emp[name] = 0
seed = np.random.randint(1e+06)
self.env.seed(seed)
obs = self.env.reset()
actions, value, prob = [], 0, 1
for t in range(self.env_steps):
action, _ = policy.predict(obs, deterministic=False)
actions.append(action)
action_det, _ = policy.predict(obs, deterministic=True)
prob *= self.probability_proxy(action, action_det)
obs, reward, done, _ = self.env.step(action)
value += reward
if done:
break
self.historical_data.append([seed, actions, value, prob])
self.value_emp[name] += value
self.rho = np.mean(list(self.value_emp.values()))
def estimate_trajectory_probability(self, policy, trajectory):
        '''estimate the probability that the policy follows the trajectory'''
prob = 1.
seed, actions, _, _ = trajectory
self.env.seed(seed)
obs = self.env.reset()
for t in range(min(self.env_steps, len(actions))):
action, _ = policy.predict(obs, deterministic=True)
prob *= self.probability_proxy(action, actions[t])
obs, _, done, _ = self.env.step(action)
return prob
def compute_value_dim(self, policy):
'''evaluate the policy via the direct method'''
value_dim = []
for trajectory in self.historical_data:
s, a, r, _ = trajectory
prob = self.estimate_trajectory_probability(policy, trajectory)
value_dim.append(r * prob)
return np.mean(value_dim)
def compute_value_lde(self, policy):
'''evaluate the policy via the limited data estimator'''
value_lde = []
for trajectory in self.historical_data:
s, a, r, _ = trajectory
prob = self.estimate_trajectory_probability(policy, trajectory)
value_lde.append((r - self.rho) * prob + self.rho)
return np.mean(value_lde)
def compute_value_dre(self, policy):
'''evaluate the policy via the doubly robust estimator'''
value_dre = []
for trajectory in self.historical_data:
s, a, r, p = trajectory
prob = self.estimate_trajectory_probability(policy, trajectory)
value_dre.append((r - self.rho) * prob / (p + 1e-06) + self.rho)
return np.mean(value_dre)
def compute_value_ips(self, policy):
'''evaluate the policy via inverse propensity scoring'''
value_ips = []
for trajectory in self.historical_data:
s, a, r, p = trajectory
prob = self.estimate_trajectory_probability(policy, trajectory)
value_ips.append(r * prob / (p + 1e-06))
return np.mean(value_ips)
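# Summary comment (added; these restate the four estimators implemented
# above). With trajectory reward r, behavior-probability proxy p, baseline
# rho, and q = estimate_trajectory_probability(policy, trajectory):
#   DiM: mean(r * q)
#   LDE: mean((r - rho) * q + rho)
#   DRE: mean((r - rho) * q / p + rho)
#   IPS: mean(r * q / p)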
def swap_count(self, array1, array2):
'''count the number of swaps required to transform array1 into array2'''
L = list(array2)
swaps = 0
for element in list(array1):
ind = L.index(element)
L.pop(ind)
swaps += ind
return swaps
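# Worked example (added): swap_count(['A', 'B', 'C'], ['B', 'A', 'C']) == 1,
# since 'A' sits at index 1 of the second list (one swap), after which 'B'
# and 'C' are each found at index 0 of the shrinking list.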
def rank_target_policies(self):
'''evaluate and rank target policies via various methods'''
self.value_dim, self.value_lde, self.value_dre, self.value_ips = {}, {}, {}, {}
for name, policy in self.target_policies.items():
self.value_lde[name] = self.compute_value_lde(policy)
self.value_dre[name] = self.compute_value_dre(policy)
self.value_ips[name] = self.compute_value_ips(policy)
self.value_dim[name] = self.compute_value_dim(policy)
self.method_values = {'True': self.value_true, 'LDE': self.value_lde,
'DRE': self.value_dre, 'IPS': self.value_ips,
'DiM': self.value_dim, 'Emp': self.value_emp}
self.values = pd.DataFrame.from_dict(self.method_values)
self.ranks = {method: np.argsort(list(value.values()))
for method, value in self.method_values.items()}
def score_ranking(self):
'''compute scores of individual rankings'''
scores = [1 - self.swap_count(self.ranks[method], self.ranks['True'])\
/ self.num_policy_pairs for method in self.ranks]
return scores
def report_scores(self):
'''print the resulting scores'''
scores = np.array(self.scores, ndmin=2)[:,:-1]
scores_med = np.median(scores, axis=0)
scores_avg = np.mean(scores, axis=0)
scores_std = np.std(scores, axis=0)
print(f'average scores of policy evaluation methods on {self.env_name}:')
for k in range(1,len(self.ranks)-1):
print(f' {list(self.ranks)[k]} = {scores_med[k]:.4f}',
f'/ {scores_avg[k]:.4f} ({scores_std[k]:.3f})')
print()
self.method_values.pop('Emp', None)
data = pd.DataFrame(scores, columns=self.method_values.keys()).drop(columns='True')
fig, ax = plt.subplots(figsize=(8,4))
sns.violinplot(data=data, cut=0, gridsize=1000, bw=.5, linewidth=3)
ax.set_title(self.env_name, fontname='monospace', fontweight='bold')
ax.set_ylim(0,1)
plt.tight_layout()
os.makedirs('./images/', exist_ok=True)
plt.savefig(f'./images/scores_{self.env_name}.pdf', format='pdf')
plt.show()
def run_simulation_explicit(self, seed=None):
'''run a single ranking with verbose output'''
print(f'\ntraining target policies...')
self.train_target_policies(seed)
print(f'rl-values of target policies:')
for name, policy in self.target_policies.items():
value_avg, value_std = self.evaluate_policy_rl(policy)
print(f' {name:>4s}-value = {value_avg:.4f} (std = {value_std:.4f})')
self.evaluate_target_policies()
print(f'\ngenerating historical data...')
self.generate_historical_data()
print(f'estimating values of target policies via policy evaluation methods...')
self.rank_target_policies()
print(f'estimated values:\n{self.values}')
self.scores = self.score_ranking()
def run_simulations(self, num_sims, seed=None):
'''run multiple simulations'''
self.train_target_policies(seed)
self.evaluate_target_policies()
self.scores = []
for n in range(num_sims):
self.generate_historical_data()
self.rank_target_policies()
self.scores.append(self.score_ranking())
def run_tests(self, num_sims, num_tests, seed=None):
'''run multiple tests'''
if seed is not None:
np.random.seed(seed)
seeds = list(map(int, np.random.randint(1e+06, size=num_tests)))
test_scores = []
for n in range(num_tests):
print(f'running test {n+1}/{num_tests} on {self.env_name}...')
self.run_simulations(num_sims, seeds[n])
test_scores += self.scores
self.scores = test_scores
def save_variables(self, path='./save/'):
'''save class variables to a file'''
os.makedirs(path, exist_ok=True)
save_name = f'{self.env_name}.pkl'
with open(path + save_name, 'wb') as save_file:
pickle.dump(self.__dict__, save_file)
def load_variables(self, save_name, path='./save/'):
'''load class variables from a file'''
try:
with open(path + save_name, 'rb') as save_file:
self.__dict__.update(pickle.load(save_file))
except Exception:
raise NameError(f'\ncannot load file {save_name}...')
```
#### File: joedaws/lde2021/run_synthetic_data.py
```python
from synthetic_data.SyntheticBanditEnv import SyntheticBanditEnv
import argparse
def setup():
'''setup the experiment'''
parser = argparse.ArgumentParser(description='argument parser for example 1')
parser.add_argument('-d', '--example',
default='1',
help='number of the reward function: 1 or 2')
parser.add_argument('-s', '--seed',
default=2021,
help='value of random seed')
parser.add_argument('-save', '--save', action='store_true')
parser.add_argument('-load', '--load', action='store_true')
# parse the arguments
args = parser.parse_args()
print(f'Will {"load" if args.load else "perform"} '
+ f'the experiment for Example 1.{args.example}')
return int(args.example), int(args.seed), args.save, args.load
if __name__ == '__main__':
example, random_seed, save, load = setup()
params = {'num_s': 100, 'num_a': 100, 'dom_s': [0,1], 'dom_a': [-1,1], 'example': example}
env = SyntheticBanditEnv(params)
if load:
env.reproduce_pictures(f'Synthetic_{example}.pkl')
else:
test_params = {
'a': {'a_min': .01, 'a_max': .25, 'num_a_tests': 49, 'num_m': 1, 'num_sims': 1000},
'm': {'alpha': .01, 'm_min': 1, 'm_max': 500, 'num_m_tests': 50, 'num_sims': 1000},
'3d': {'a_min': .01, 'a_max': .10, 'num_a_tests': 10, 'm_min': 1, 'm_max': 10,
'num_m_tests': 10, 'num_sims': 1000},
'grid': {'a_min': .05, 'a_max': .15, 'num_a_tests': 3, 'm_min': 1, 'm_max': 1000,
'num_m_tests': 5, 'num_sims': 1000}}
env.produce_pictures(test_params, seed=random_seed)
if save:
env.save_variables()
``` |
{
"source": "joedaws/motherbrain",
"score": 4
} |
#### File: spaces/go_fish/actions.py
```python
from motherbrain.brains.spaces.go_fish.observations import Observations
class Actions:
"""Go Fish actions.
In go fish a player must choose an opponent to ask and a rank to ask for.
This action space determines who are the valid opponents to ask and
what are the valid ranks to ask for.
Note that the observed hand lengths are known exactly to the player at
ask time.
"""
def __init__(self, observations: Observations, hand: list):
self.opponents = observations.opponents
self.observed_hand_len = observations.observed_hand_len
self.hand = hand
@property
def valid_opponents(self):
"""Returns list of opponents whom the players can ask a card from.
You can only as an opponent for cards if they have cards to ask for.
Returns:
list of opponent indices of opponent players that have cards.
"""
return [opponent for opponent in self.opponents
if self.observed_hand_len[opponent].hand_len > 0]
@property
def valid_ranks(self):
"""Returns list of ranks which might possibly still be in play.
A valid rank is any rank for which the player current has a card.
Returns:
a list ranks in the players hand.
"""
return [card.rank for card in self.hand]
def get_possible_actions(self):
return self.valid_opponents, self.valid_ranks
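# Hypothetical usage sketch (added; variable names are illustrative, not from
# this repo):
#   observations = Observations(player, opponents)
#   actions = Actions(observations, player.hand)
#   valid_opponents, valid_ranks = actions.get_possible_actions()
# An agent would then pick one (opponent, rank) pair from these two lists.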
```
#### File: spaces/go_fish/observations.py
```python
from motherbrain.games.go_fish.card import CARD_FIELD_VALUES
from motherbrain.games.core.events import ExchangeEvent, BookEvent, AskEvent, DrawEvent, FailEvent, SuccessEvent
RANKS = CARD_FIELD_VALUES['rank']
class ObservedOpponentRanks:
"""Class for storing observations about ranks possessed by an opponent.
Args:
opponent (GoFishPlayer): A player instance that is an opponent.
"""
def __init__(self, opponent):
self.opponent = opponent
self.ranks = {rank: 0 for rank in RANKS}
def update(self, event):
"""After an event update the observed ranks.
Args:
event: A dataclass from motherbrain.games.core.events
"""
if isinstance(event, ExchangeEvent):
self.update_exchange_event(event)
elif isinstance(event, BookEvent):
self.update_book_event(event)
elif isinstance(event, AskEvent):
self.update_ask_event(event)
else:
# raise ValueError(f'Cannot update observed ranks for event type {type(event)}')
pass
def update_exchange_event(self, exchange_event):
"""Update the observed ranks after a witnessed event.
The exchange event either increases the observed count of a rank or decreases it.
Args:
exchange_event (ExchangeEvent): A dataclass including source,
destination, rank, and number.
"""
if self.opponent == exchange_event.destination:
self.ranks[exchange_event.rank] += exchange_event.number
elif self.opponent == exchange_event.source:
self.ranks[exchange_event.rank] = 0
def update_book_event(self, book_event):
"""Update the observed ranks after a book event."""
if self.opponent == book_event.player:
self.ranks[book_event.rank] = 0
def update_ask_event(self, ask_event):
"""Update the observed ranks after an ask event."""
if self.opponent == ask_event.player:
self.ranks[ask_event.rank] += 1
def __str__(self):
"""String representation of this observation."""
info = f"Ranks observed for opponent {self.opponent.name} are:\n"
rank_info = "".join([count+": "+rank+"\n"
for rank, count in self.ranks.items() if count > 0])
return info+rank_info
class ExactOpponentHandLen:
"""Class for storing the exact hand lengths of opponents."""
def __init__(self, opponent):
self.opponent = opponent
@property
def hand_len(self):
"""Returns true hand length of opponent."""
return len(self.opponent.hand)
@property
def is_valid(self):
"""Boolean for is hand_len is non-zero."""
return self.hand_len > 0
def update(self, event):
"""We can ignore all events in this case."""
pass
class ObservedOpponentHandLen:
"""Class for storing observations about the number of cards possessed by an opponent."""
def __init__(self, opponent):
self.opponent = opponent
self.hand_len = 0
@property
def is_valid(self):
"""Boolean for is hand_len is non-zero."""
return self.hand_len > 0
def update(self, event):
"""After an event update the number of cards in a hand.
Args:
event: Either ExchangeEvent or BookEvent.
"""
if isinstance(event, ExchangeEvent):
self.update_exchange_event(event)
elif isinstance(event, DrawEvent):
self.update_draw_event(event)
elif isinstance(event, BookEvent):
self.update_book_event(event)
else:
# raise ValueError(f'Cannot update observed ranks for event type {type(event)}')
pass
def update_exchange_event(self, exchange_event):
"""Update the number of cards in hands after a witnessed event.
Args:
exchange_event (ExchangeEvent): A dataclass including source,
destination, rank, and number.
"""
if self.opponent == exchange_event.destination:
self.hand_len += exchange_event.number
elif self.opponent == exchange_event.source:
self.hand_len -= exchange_event.number
def update_book_event(self, book_event):
"""Update the number of cards in hands after a book event."""
if self.opponent == book_event.player:
self.hand_len -= 4
def update_draw_event(self, draw_event):
"""Update the number of cards in hands after a draw event."""
if self.opponent == draw_event.player:
self.hand_len += 1
class Observations:
"""Go Fish observation spaces.
Holds data known to a particular player about the state of the game and the
partially known information about the opponents.
"""
def __init__(self, player, opponents):
self.player = player
self.opponents = opponents
self.num_opponents = len(opponents)
self.observed_ranks = {opponent: ObservedOpponentRanks(opponent) for opponent in self.opponents}
self.observed_hand_len = {opponent: ExactOpponentHandLen(opponent) for opponent in self.opponents}
def get_observation(self, opponent):
"""Return a tuple representing the observation by a players."""
ranks = self.observed_ranks[opponent].ranks # a dictionary whose keys are ranks
number = self.observed_hand_len[opponent].hand_len # an integer describing number of cards
return ranks, number
def update(self, event):
"""Update the observation spaces based on observed event."""
for opponent in self.opponents:
self.observed_ranks[opponent].update(event)
self.observed_hand_len[opponent].update(event)
def __str__(self):
"""Returns Strings of all of the observed ranks."""
string = "".join([str(obs) for opp, obs in self.observed_ranks.items()])
return string
```
#### File: engine/animators/loading.py
```python
from motherbrain.engine.animators.animator import Animator
import threading
class Loading(Animator):
NAME = 'LOADING SCREEN'
def __init__(self):
super().__init__()
self.count = 0
self.load_str = ['l |',
'lo /',
'loa -',
'loa \\',
'load |',
'loadi /',
'loadin -',
'loading \\']
self.done_str = 'Done! '
def draw_loading(self):
self.loop_animate(duration=10, strings_to_draw=self.load_str)
self.write(self.done_str)
self.flush()
self.count += 1
```
#### File: games/go_fish/state.py
```python
from motherbrain.players.go_fish import GoFishPlayer
from motherbrain.games.core.deck_builder import DeckBuilder
from motherbrain.brains.spaces.go_fish.observations import Observations
class GoFishState:
"""A class representing the state of the go fish game."""
get_deck = DeckBuilder('motherbrain.games.go_fish', 'card').build_deck
def __init__(self, num_players=4):
player_names = ['player ' + str(i) for i in range(num_players)]
self.players = {GoFishPlayer(name): i for i, name in enumerate(player_names)}
self._set_player_indices()
self.opponents_map = self._setup_opponents_map()
self.deck = self.get_deck()
self.observations = {player: Observations(player, self.opponents_map[player])
for player in self.players}
def reset(self):
"""Reset to an initial state."""
self.deck = self.get_deck()
self.observations = {player: Observations(player, self.opponents_map[player])
for player in self.players}
def hands(self):
"""Returns hands of all players.
Returns:
dictionary whose keys are players and whose values are the corresponding player's hand.
"""
return {player: player.hand for player in self.players}
def update(self, event):
"""update the state according to the event."""
# update observations
for player in self.players:
self.observations[player].update(event)
def _set_player_indices(self):
"""Set the index attribute of the players."""
for player, index in self.players.items():
player.index = index
def _setup_opponents_map(self):
"""Create a dictionary describing the opponents of each players."""
indices = list(self.players.values())
return {player: [opponent for opponent in self.players if opponent != player] for player in self.players}
```
#### File: motherbrain/players/go_fish.py
```python
from motherbrain.players.base import Player
from motherbrain.games.go_fish.card import CARD_FIELD_VALUES
from motherbrain.games.core.events import ExchangeEvent, BookEvent, FailEvent, DrawEvent
ALLOWED_RANKS = CARD_FIELD_VALUES['rank']
class GoFishPlayer(Player):
"""A go Fish players class."""
GAME = 'GoFish'
def __init__(self, name):
super().__init__(name)
self.books = []
self.is_out = False
@property
def state(self):
"""A dictionary for describing contents of hand.
The keys are the allowed ranks that the player currently
has in their hand and the values are lists
of suits of the cards of that rank.
"""
if self.hand is None:
raise ValueError(f'Cannot compute state for uninitialized hand.')
state = {rank: [] for rank in ALLOWED_RANKS}
for card in self.hand:
state[card.rank].append(card.suit)
return state
def look_for_rank_match(self, rank):
"""Find all cards in hand matching a given rank"""
if rank not in ALLOWED_RANKS:
raise ValueError(f'{rank} is not a valid rank.')
rank_matched_cards = [card for card in self.hand if card.rank == rank]
return rank_matched_cards
def _check_for_books(self):
"""Check hand for books.
If a book is found then those cards are removed from
the player's hand and put into the books attribute.
Since we check for books after each time this player obtains new
cards of one rank, there can only be at most one book.
"""
event = None
for rank, suits in self.state.items():
if len(suits) == 4:
self.books.append(rank)
# create book event
event = BookEvent(player=self, rank=rank)
# remove cards of rank from hand
self.hand = [c for c in self.hand if c.rank != rank]
break # book was found and there can only be one book during this check.
if event is None:
# create fail event since no book was made
event = FailEvent(player=self)
return event
def ask(self, another_player, rank):
"""ask another players if they have a card of a particular rank.
Returns two events based on what occured during the execution.
If the other player gives cards, then an Exchanged event is created.
If the other player gives no cards, then a fail event is created.
If this player makes a book after receiving cards, then a BookEvent is created.
If this player makes does not make a book after receiving cards, then a fail
event is created.
Returns:
Event based on the outcome of the ask
Event based on the outcome of the checking for books
"""
cards = another_player.tell(rank)
self.receive(cards)
if cards:
# the asked player is the source of the cards; this player receives them
exchange = ExchangeEvent(source=another_player, destination=self, rank=rank, number=len(cards))
else:
exchange = FailEvent(player=self)
book = self._check_for_books()
return exchange, book
def draw(self, deck, n=1):
"""Player draws card(s) from provided deck.
Args:
deck (Deck): A instance of a card deck.
n (int): Number of cards to draw from the deck.
"""
new_cards = [deck.draw(1)[0] for _ in range(n)]
self.receive(new_cards)
draw = DrawEvent(player=self, number=n)
book = self._check_for_books()
return draw, book
def tell(self, rank):
"""give card to another players if they have a card of requested rank"""
# get indices of instances of card
idx = [i for i, c in enumerate(self.hand) if rank in str(c)]
cards_to_give = [self.hand[i] for i in idx]
self.remove(cards_to_give)
return cards_to_give
```
#### File: test_games/test_go_fish/test_game.py
```python
import pytest
from motherbrain.games.go_fish import INITIAL_HAND_SIZE_MAP
from motherbrain.games.run import create_game
from motherbrain import MOTHERBRAIN_PATH
import yaml
from yaml import Loader
import os
@pytest.fixture
def game():
"""Parse configs, create game, and play."""
config_path = os.path.join(MOTHERBRAIN_PATH, 'games/go_fish/config/')
go_fish_config_path = os.path.join(config_path, 'random.yaml')
# load config
with open(go_fish_config_path, 'rb') as stream:
config = yaml.load(stream, Loader=Loader)
# load game
game = create_game(config)
return game
def test_attributes(game):
assert game.state
assert game.num_players
assert game.turn
def test_deal(game):
game.reset()
game.deal()
# check player hands
for player in game.state.players:
assert len(player.hand) == INITIAL_HAND_SIZE_MAP[game.num_players]
# check observations
for player, obs in game.state.observations.items():
for opponent in obs.opponents:
assert obs.observed_hand_len[opponent].hand_len == INITIAL_HAND_SIZE_MAP[game.num_players]
assert obs.observed_ranks[opponent].ranks['2'] == 0
# check that deck has correct amount of cards
assert len(game.state.deck.cards) == 52 - 4*5
def test_turn(game):
game.reset()
game.deal()
game.turn()
``` |
{
"source": "joedaws/poly-init-nets",
"score": 4
} |
#### File: joedaws/poly-init-nets/deep.py
```python
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
class DeepNet(nn.Module):
def __init__(self,L,n,a=-1,b=1):
"""
L -- number of hidden layers
n -- dimension of the input
"""
super(DeepNet,self).__init__()
self.L = L
self.n = n
middlen = 8*n-4
self.a = a
self.b = b
self.C = (b-a)**2/4
self.hidden = nn.ModuleList()
# first hidden layer
self.hidden.append(nn.Linear(n,middlen))
# iterate over middle hidden layers
for i in range(1,self.L):
self.hidden.append(nn.Linear(middlen,middlen))
# output
self.hidden.append(nn.Linear(middlen,1))
def forward(self,x):
"""
x -- input of size (N,d) where N is the number of
sample points.
"""
# first hidden layer
h = self.hidden[0](x).clamp(min=0)
# middle layers
for i in range(1,self.L):
h = self.hidden[i](h).clamp(min=0)
# output layer
return self.hidden[-1](h)
def poly_init(self):
"""
Initializes all network parameters so that it behaves like a
polynomial on the domain [a,b]^d
"""
with torch.no_grad():
a = self.a
b = self.b
C = self.C
n = self.n
L = self.L
# one dimensional case
if self.n == 1:
# input --> first hidden layer
self.hidden[0].weight.data[0:4,0] = torch.tensor(
[1,1,1,a+b],dtype=torch.float)
self.hidden[0].bias.data = torch.tensor(
[-a,-(a+b)*0.5,-b,-a*b],dtype=torch.float)
# h1 --> h2
self.hidden[1].weight.data = torch.tensor(
[[2/(b-a),4/(a-b),2/(b-a),0.0],
[2/(b-a),4/(a-b),2/(b-a),0.0],
[2/(b-a),4/(a-b),2/(b-a),0.0],
[C*(2/(a-b)),C*(4/(b-a)),C*(2/(a-b)),1.0]],dtype=torch.float)
self.hidden[1].bias.data = torch.tensor(
[0.,-0.5,-1.0,0.0],dtype=torch.float)
# hk --> h(k+1)
for i in range(2,self.L):
self.hidden[i].bias.data = torch.tensor(
[0.,-0.5,-1.0,0.0],dtype=torch.float)
self.hidden[i].weight.data = torch.tensor(
[[2,-4,2,0.0],
[2,-4,2,0.0],
[2,-4,2,0.0],
[-2*C/(2**(2*(i-1))),
4*C/(2**(2*(i-1))),
-2*C/(2**(2*(i-1))),
1.0]],dtype=torch.float)
# output layer
i = self.L-1
self.hidden[-1].bias.data.fill_(0.)
self.hidden[-1].weight.data[0] = torch.tensor(
[-2*C/(2**(2*i)),
4*C/(2**(2*i)),
-2*C/(2**(2*i)),
1.0],dtype = torch.float)
# 2 or more case
else:
# input --> first hidden layer
first = self.hidden[0]
first.weight.data.fill_(0)
for i in range(0,n-1):
first.weight.data[i*8:(i+1)*8,i] = torch.tensor(
[1,1,1,a+b,0.5,0.5,0.5,0.5*(a+b)],dtype=torch.float)
first.weight.data[i*8+4:(i+1)*8+4,i+1] = torch.tensor(
[0.5,0.5,0.5,0.5*(a+b),1,1,1,a+b],dtype=torch.float)
for ii in range(0,2*n-1):
first.bias.data[4*ii:4*(ii+1)] = torch.tensor(
[-a,-(a+b)*0.5,-b,-a*b],dtype=torch.float)
# h1 --> h2
second = self.hidden[1]
second.weight.data.fill_(0)
for i in range(0,2*n-1):
second.weight.data[i*4:(i+1)*4,i*4:(i+1)*4] = torch.tensor(
[[2/(b-a),4/(a-b),2/(b-a),0.0],
[2/(b-a),4/(a-b),2/(b-a),0.0],
[2/(b-a),4/(a-b),2/(b-a),0.0],
[C*(2/(a-b)),C*(4/(b-a)),C*(2/(a-b)),1.0]],dtype=torch.float)
second.bias.data[4*i:4*(i+1)] = torch.tensor(
[0.,-0.5,-1.0,0.0],dtype=torch.float)
# hk --> hk+1
for k in range(2,L):
hk = self.hidden[k]
hk.weight.data.fill_(0)
for i in range(0,2*n-1):
hk.weight.data[i*4:(i+1)*4,i*4:(i+1)*4] = torch.tensor(
[[2,-4,2,0.0],
[2,-4,2,0.0],
[2,-4,2,0.0],
[-2*C/(2**(2*(k-1))),
4*C/(2**(2*(k-1))),
-2*C/(2**(2*(k-1))),
1.0]],dtype=torch.float)
hk.bias.data[4*i:4*(i+1)] = torch.tensor(
[0,-0.5,-1,0],dtype=torch.float)
# output layer
self.hidden[-1].bias.data.fill_(0.)
k = self.L-1
for j in range(0,2*n-1):
# even case
if j % 2 == 0:
self.hidden[-1].weight.data[0,4*j:4*(j+1)] = torch.tensor(
[-2*C/(2**(2*k)),
4*C/(2**(2*k)),
-2*C/(2**(2*k)),
1.0],dtype = torch.float)
# odd case
else:
self.hidden[-1].weight.data[0,4*j:4*(j+1)] = torch.tensor(
[-2*C/(2**(2*k)),
4*C/(2**(2*k)),
-2*C/(2**(2*k)),
1.0],dtype = torch.float)
return
def xavier_init(self):
"""
initializes the linear layers.
The weights are initialized using xavier random initialization.
The biases use uniform initialization on the interval of approximation.
"""
with torch.no_grad():
# iterate over the hidden layers
for h in self.hidden:
torch.nn.init.xavier_uniform_(h.weight)
h.bias.uniform_(self.a,self.b)
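# Minimal usage sketch (added; shapes follow the class definition above):
#   net = DeepNet(L=3, n=2, a=-1, b=1)
#   net.poly_init() # start out behaving like a polynomial on [-1,1]^2
#   x = torch.rand(16, 2) * 2 - 1 # 16 sample points in [-1,1]^2
#   y = net(x) # output of shape (16, 1)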
```
#### File: poly-init-nets/utils/roots.py
```python
import numpy as np
import csv
import numpy.polynomial.legendre as leg
import numpy.polynomial.chebyshev as cheb
# set this dictionary of useful terms
polydata = {'MAXDEGREE': 20,
'LEGFILE':"../data/polyroots/legroots.txt",
'CHEBFILE':"../data/polyroots/chebroots.txt"}
# set some global variables
MAXD = polydata['MAXDEGREE']
LEGF = polydata['LEGFILE']
CHEBF = polydata['CHEBFILE']
# function to generate Legendre roots and save them to file
def get_leg_roots():
roots = {}
# loop over all degrees up to MAXD
for i in range(0,MAXD):
c = np.zeros(MAXD)
c[i] = 1
r = leg.legroots(c)
roots.update({i:r})
# save roots to file
w = csv.writer(open(LEGF,"w"))
for key, val in roots.items():
w.writerow([key,val])
return
# function to generate Legendre roots, returned as a dict
def leg_roots_vec():
roots = {}
# loop over all degrees up to MAXD
for i in range(0,MAXD):
c = np.zeros(MAXD)
c[i] = 1
r = leg.legroots(c)
roots.update({i:r})
return roots
# function to get scaling factor in legendre polynomial
def get_leg_scalfac(deg):
testpt = 0.5
c = np.zeros(MAXD)
c[deg] = 1
val = leg.legval(testpt,c,tensor=False)
r = leg.legroots(c)
prod = 1
for root in r:
prod = prod * (testpt-root)
scalfac = val/prod
return scalfac
# function to generate Chebyshev roots and save them to file
def get_cheb_roots():
roots = {}
# loop over all degrees up to MAXD
for i in range(0,MAXD):
c = np.zeros(MAXD)
c[i] = 1
r = cheb.chebroots(c)
roots.update({i:r})
# save roots to file
w = csv.writer(open(CHEBF,"w"))
for key, val in roots.items():
w.writerow([key,val])
return
if (__name__ == '__main__'):
get_leg_roots()
get_cheb_roots()
``` |
{
"source": "joedborg/charm-logrotate-reactive",
"score": 2
} |
#### File: charm-logrotate-reactive/reactive/logrotate.py
```python
import os
from charms.logrotate import Configuration, render_template
from charms.reactive import when, when_not, set_flag
from charmhelpers.core.hookenv import status_set, log
conf = Configuration()
ready = ('active', 'Logrotate is ready.')
@when('apt.installed.logrotate')
@when('apt.installed.gzip')
@when('apt.installed.bzip2')
@when('apt.installed.xz-utils')
@when_not('logrotate.installed')
def install_logrotate() -> None:
"""
Triggered after all of the
required packages are installed
by the apt layer.
Logrotate is added to /etc/cron.daily
so no scheduling is needed by the charm.
:return: None
"""
status_set(*ready)
set_flag('logrotate.installed')
@when('logrotate.installed')
@when('config.changed')
def configure_logrotate() -> None:
"""
Apply logrotate configuration.
:return: None
"""
status_set('maintenance', 'Applying configuration.')
for logname in conf.logfiles():
log('Adding logrotate entry for {}'.format(logname))
tmpl_data = {}
tmpl_data['path'] = conf.path(logname)
tmpl_data['when'] = conf.when(logname)
tmpl_data['compress'] = conf.compress(logname)
tmpl_data['compresscmd'] = conf.compresscmd(logname)
tmpl_data['compressext'] = conf.compressext(logname)
tmpl_data['dateext'] = conf.dateext(logname)
tmpl_data['period'] = conf.period(logname)
tmpl_data['perms'] = conf.perms(logname)
tmpl_data['owner'] = conf.owner(logname)
tmpl_data['group'] = conf.group(logname)
tmpl_data['prerotate'] = conf.prerotate(logname)
tmpl_data['postrotate'] = conf.postrotate(logname)
logrotate_path = '/etc/logrotate.d/{}'.format(logname)
render_template('logrotate.tmpl', logrotate_path, tmpl_data)
os.chmod(logrotate_path, 0o444)
status_set(*ready)
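# Illustrative note (added): 'logrotate.tmpl' is not shown in this snippet,
# but a rendered /etc/logrotate.d/<logname> entry would conventionally look
# something like (values hypothetical):
#   /var/log/myapp/*.log {
#       daily
#       rotate 7
#       compress
#       create 0640 root adm
#   }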
``` |
{
"source": "joedborg/k8s-operator-coder",
"score": 2
} |
#### File: nginx_ingress_integrator/v0/ingress.py
```python
import logging
from ops.charm import CharmEvents
from ops.framework import EventBase, EventSource, Object
from ops.model import BlockedStatus
# The unique Charmhub library identifier, never change it
LIBID = "db0af4367506491c91663468fb5caa4c"
# Increment this major API version when introducing breaking changes
LIBAPI = 0
# Increment this PATCH version before using `charmcraft publish-lib` or reset
# to 0 if you are raising the major API version
LIBPATCH = 5
logger = logging.getLogger(__name__)
REQUIRED_INGRESS_RELATION_FIELDS = {
"service-hostname",
"service-name",
"service-port",
}
OPTIONAL_INGRESS_RELATION_FIELDS = {
"limit-rps",
"limit-whitelist",
"max-body-size",
"retry-errors",
"service-namespace",
"session-cookie-max-age",
"tls-secret-name",
}
class IngressAvailableEvent(EventBase):
pass
class IngressCharmEvents(CharmEvents):
"""Custom charm events."""
ingress_available = EventSource(IngressAvailableEvent)
class IngressRequires(Object):
"""This class defines the functionality for the 'requires' side of the 'ingress' relation.
Hook events observed:
- relation-changed
"""
def __init__(self, charm, config_dict):
super().__init__(charm, "ingress")
self.framework.observe(
charm.on["ingress"].relation_changed, self._on_relation_changed
)
self.config_dict = config_dict
def _config_dict_errors(self, update_only=False):
"""Check our config dict for errors."""
blocked_message = "Error in ingress relation, check `juju debug-log`"
unknown = [
x
for x in self.config_dict
if x
not in REQUIRED_INGRESS_RELATION_FIELDS | OPTIONAL_INGRESS_RELATION_FIELDS
]
if unknown:
logger.error(
"Ingress relation error, unknown key(s) in config dictionary found: %s",
", ".join(unknown),
)
self.model.unit.status = BlockedStatus(blocked_message)
return True
if not update_only:
missing = [
x for x in REQUIRED_INGRESS_RELATION_FIELDS if x not in self.config_dict
]
if missing:
logger.error(
"Ingress relation error, missing required key(s) in config dictionary: %s",
", ".join(missing),
)
self.model.unit.status = BlockedStatus(blocked_message)
return True
return False
def _on_relation_changed(self, event):
"""Handle the relation-changed event."""
# `self.unit` isn't available here, so use `self.model.unit`.
if self.model.unit.is_leader():
if self._config_dict_errors():
return
for key in self.config_dict:
event.relation.data[self.model.app][key] = str(self.config_dict[key])
def update_config(self, config_dict):
"""Allow for updates to relation."""
if self.model.unit.is_leader():
self.config_dict = config_dict
if self._config_dict_errors(update_only=True):
return
relation = self.model.get_relation("ingress")
if relation:
for key in self.config_dict:
relation.data[self.model.app][key] = str(self.config_dict[key])
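# Hypothetical usage sketch from a consuming charm (added; config values are
# illustrative):
#   self.ingress = IngressRequires(self, {
#       "service-hostname": self.config["external-hostname"],
#       "service-name": self.app.name,
#       "service-port": 8080,
#   })
# Later, e.g. in a config-changed handler: self.ingress.update_config({...}).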
class IngressProvides(Object):
"""This class defines the functionality for the 'provides' side of the 'ingress' relation.
Hook events observed:
- relation-changed
"""
def __init__(self, charm):
super().__init__(charm, "ingress")
# Observe the relation-changed hook event and bind
# self.on_relation_changed() to handle the event.
self.framework.observe(
charm.on["ingress"].relation_changed, self._on_relation_changed
)
self.charm = charm
def _on_relation_changed(self, event):
"""Handle a change to the ingress relation.
Confirm we have the fields we expect to receive."""
# `self.unit` isn't available here, so use `self.model.unit`.
if not self.model.unit.is_leader():
return
ingress_data = {
field: event.relation.data[event.app].get(field)
for field in REQUIRED_INGRESS_RELATION_FIELDS
| OPTIONAL_INGRESS_RELATION_FIELDS
}
missing_fields = sorted(
[
field
for field in REQUIRED_INGRESS_RELATION_FIELDS
if ingress_data.get(field) is None
]
)
if missing_fields:
logger.error(
"Missing required data fields for ingress relation: {}".format(
", ".join(missing_fields)
)
)
self.model.unit.status = BlockedStatus(
"Missing fields for ingress: {}".format(", ".join(missing_fields))
)
return
# Create an event that our charm can use to decide it's okay to
# configure the ingress.
self.charm.on.ingress_available.emit()
``` |
{
"source": "joedborg/microk8s",
"score": 2
} |
#### File: installer/cli/microk8s.py
```python
import argparse
import logging
import traceback
from typing import List
from sys import exit
import click
from cli.echo import Echo
from common.errors import BaseError
from vm_providers.factory import get_provider_for
from vm_providers.errors import ProviderNotFound
from common import definitions
logger = logging.getLogger(__name__)
@click.command(name="microk8s", context_settings=dict(
ignore_unknown_options=True,
allow_extra_args=True,
))
@click.option('-h', '--help', is_flag=True)
@click.pass_context
def cli(ctx, help):
try:
if help and len(ctx.args) == 0:
show_help()
exit(0)
elif help:
ctx.args.append("--help")
if len(ctx.args) == 0:
show_error()
exit(1)
if ctx.args[0] == 'install':
install(ctx.args[1:])
exit(0)
elif ctx.args[0] == 'uninstall':
uninstall()
exit(0)
elif ctx.args[0] == 'stop':
run(ctx.args)
stop()
exit(0)
else:
run(ctx.args)
exit(0)
except BaseError as e:
Echo.error(str(e))
exit(e.get_exit_code())
except Exception as e:
Echo.error("An unexpected error occurred.")
Echo.info(str(e))
Echo.info(traceback.print_exc())
exit(254)
def show_error():
msg = """Usage: microk8s [OPTIONS] COMMAND [ARGS]...
Options:
--help Shows the available COMMANDS."""
click.echo(msg)
def show_help():
msg = """Usage: microk8s [OPTIONS] COMMAND [ARGS]...
Options:
--help Show this message and exit.
Commands:
install Installs MicroK8s. Use --cpu, --mem, --disk to appoint resources.
uninstall Removes MicroK8s"""
click.echo(msg)
commands = _get_microk8s_commands()
for command in commands:
if command in definitions.command_descriptions:
click.echo(" {:<15} {}".format(command, definitions.command_descriptions[command]))
else:
click.echo(" {:<15}".format(command))
if len(commands) == 2:
click.echo("")
click.echo("Install and start MicroK8s to see the full list of commands.")
def _show_install_help():
msg = """Usage: microk8s install OPTIONS
Options:
--help Show this message and exit.
--cpu Cores used by MicroK8s (default={})
--mem RAM in GB used by MicroK8s (default={})
--disk Maximum volume in GB of the dynamically expandable hard disk to be used (default={})
-y, --assume-yes Automatic yes to prompts"""
Echo.info(msg.format(definitions.DEFAULT_CORES, definitions.DEFAULT_MEMORY, definitions.DEFAULT_DISK))
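# Example invocations (added; resource values are illustrative):
#   microk8s install --cpu 4 --mem 8 --disk 50 -y
#   microk8s uninstall
#   microk8s kubectl get nodes # any other command is forwarded via run()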
def install(args) -> None:
if "--help" in args:
_show_install_help()
return
parser = argparse.ArgumentParser("microk8s install")
parser.add_argument('--cpu', default=definitions.DEFAULT_CORES, type=int)
parser.add_argument('--mem', default=definitions.DEFAULT_MEMORY, type=int)
parser.add_argument('--disk', default=definitions.DEFAULT_DISK, type=int)
parser.add_argument('-y', '--assume-yes', action='store_true', default=definitions.DEFAULT_ASSUME)
args = parser.parse_args(args)
vm_provider_name: str = 'multipass'
vm_provider_class = get_provider_for(vm_provider_name)
echo = Echo()
try:
vm_provider_class.ensure_provider()
except ProviderNotFound as provider_error:
if provider_error.prompt_installable:
if echo.is_tty_connected() and args.assume_yes:
vm_provider_class.setup_provider(echoer=echo)
elif echo.is_tty_connected() and echo.confirm(
"Support for {!r} needs to be set up. "
"Would you like to do that it now?".format(provider_error.provider)
) and not args.assume_yes:
vm_provider_class.setup_provider(echoer=echo)
else:
raise provider_error
else:
raise provider_error
instance = vm_provider_class(echoer=echo)
instance.launch_instance(vars(args))
echo.info("MicroK8s is up and running. See the available commands with 'microk8s --help'.")
def uninstall() -> None:
vm_provider_name = "multipass"
vm_provider_class = get_provider_for(vm_provider_name)
echo = Echo()
try:
vm_provider_class.ensure_provider()
except ProviderNotFound as provider_error:
if provider_error.prompt_installable:
if echo.is_tty_connected():
echo.warning((
"MicroK8s is not running. VM provider {!r} has been removed."
.format(provider_error.provider)))
return 1
else:
raise provider_error
instance = vm_provider_class(echoer=echo)
instance.destroy()
echo.info("Thank you for using MicroK8s!")
def stop() -> None:
vm_provider_name = "multipass"
vm_provider_class = get_provider_for(vm_provider_name)
vm_provider_class.ensure_provider()
instance = vm_provider_class(echoer=Echo())
instance_info = instance.get_instance_info()
if instance_info.is_running():
instance.stop()
def run(cmd) -> None:
vm_provider_name = "multipass"
vm_provider_class = get_provider_for(vm_provider_name)
echo = Echo()
try:
vm_provider_class.ensure_provider()
except ProviderNotFound as provider_error:
if provider_error.prompt_installable:
if echo.is_tty_connected():
echo.warning("MicroK8s is not installed. Please run 'microk8s install'.")
return 1
else:
raise provider_error
instance = vm_provider_class(echoer=echo)
command = cmd[0]
cmd[0] = "microk8s.{}".format(command)
instance.run(cmd)
def _get_microk8s_commands() -> List:
vm_provider_name = "multipass"
vm_provider_class = get_provider_for(vm_provider_name)
echo = Echo()
try:
vm_provider_class.ensure_provider()
instance = vm_provider_class(echoer=echo)
instance_info = instance.get_instance_info()
if instance_info.is_running():
commands = instance.run('ls -1 /snap/bin/'.split(), hide_output=True)
mk8s = [c.decode().replace('microk8s.', '') for c in commands.split() if c.decode().startswith('microk8s')]
return mk8s
else:
return ["start", "stop"]
except ProviderNotFound:
return ["start", "stop"]
if __name__ == '__main__':
cli()
``` |
{
"source": "joeddav/datasets",
"score": 2
} |
#### File: metrics/squad/squad.py
```python
import datasets
from .evaluate import evaluate
_CITATION = """\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={<NAME> and <NAME> and <NAME> and <NAME>},
booktitle={EMNLP},
year={2016}
}
"""
_DESCRIPTION = """
This metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
"""
_KWARGS_DESCRIPTION = """
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict {'text': list of possible texts for the answer, as a list of strings}
Returns:
'exact_match': Exact match (the normalized answer exactly matches the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
"""
class Squad(datasets.Metric):
def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(
{
"predictions": {"id": datasets.Value("string"), "prediction_text": datasets.Value("string")},
"references": {
"id": datasets.Value("string"),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string"),
"answer_start": datasets.Value("int32"),
}
),
},
}
),
codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
)
def _compute(self, predictions, references):
pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
dataset = [
{
"paragraphs": [
{
"qas": [
{
"answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
"id": ref["id"],
}
for ref in references
]
}
]
}
]
score = evaluate(dataset=dataset, predictions=pred_dict)
return score
``` |
{
"source": "joedeandev/LiveJanus",
"score": 3
} |
#### File: livejanus/util/__init__.py
```python
from datetime import datetime, timezone
from os import environ
from random import choices
from string import ascii_letters, digits
alphanumeric = digits + ascii_letters
def time_as_utc() -> float:
# utcnow() returns a naive datetime whose .timestamp() is interpreted in
# local time; use an aware UTC datetime to get a correct epoch value.
return datetime.now(timezone.utc).timestamp()
class SocketInvalidDataException(Exception):
pass
def random_string(source: str = alphanumeric, length: int = 6) -> str:
return "".join(choices(source, k=length))
def is_debug() -> bool:
return str(environ.get("DEBUG", False)).lower() == "true"
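# Illustrative examples (added):
#   random_string() # e.g. 'a3B9xZ' -- six alphanumeric characters
#   random_string(digits, 4) # e.g. '0472' -- digits only
#   is_debug() # True when the DEBUG env var is a truthy string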
``` |
{
"source": "joedefen/subshop",
"score": 3
} |
#### File: subshop/LibGen/DataStore.py
```python
import os
import sys
import time
from io import IOBase
import traceback
from collections import OrderedDict
from ruamel.yaml import YAML
from LibGen.YamlDump import yaml_to_file
from LibGen.CustLogger import CustLogger as lg
yaml = YAML(typ='safe')
yaml.default_flow_style = False
class DataStore():
"""
See module description. The DataStore object is usually the
base class for a specific DataStore.
"""
# pylint: disable=too-many-arguments
def __init__(self, filename, storedir, autoflush=True, warn_if_corrupt=False,
flow_nodes=None, backup=False):
"""Create a DataStore.
Provide the filename uniquely identifying the store, and
optionally the directory for the store.
'autoflush' persists all modifications immediately; if disabled,
then call flush() as needed (mods that are not flushed are lost).
'warn_if_corrupt' will recreate the datastore if it is corrupted
and starting anew is acceptable.
"""
self.datastore = None
self.filename = os.path.expanduser(storedir + '/' + filename)
self._stat = (0, 0) # size and time of underlying file (at last read/write)
self.autoflush = autoflush # write datastore on every modify?
self.warn_if_corrupt = warn_if_corrupt # recreate corrupted DB?
self.dirty = False # has unflushed changes?
self.do_backup = backup
self.flow_nodes = flow_nodes
def get_filename(self, backup=False):
"""TBD"""
return self.get_backupname() if backup else self.filename
def get_backupname(self):
"""TBD"""
return self.filename + '.bak'
def get_tmpname(self):
"""TBD"""
return self.filename + '.tmp'
def _get_datastore(self):
"""
Gets the current DataStore (as a python object). If
it does not exist, it will load it from the persistent
YAML file or create an empty one.
"""
if not isinstance(self.datastore, dict):
self._read_datastore()
return self.datastore
def _update_stat(self, filehandle):
if isinstance(filehandle, IOBase):
stat = os.fstat(filehandle.fileno())
else:
stat = os.stat(filehandle)
self._stat = (stat.st_size, stat.st_mtime)
def is_changed(self):
"""Check if the datastore has change underneath us."""
try:
status = os.stat(self.filename)
except OSError:
# sometimes the file is gone while being written
# ... catch it next time
return False
stat = (status.st_size, status.st_mtime)
return stat != self._stat
def refresh(self):
"""Read the database if the underlying file has a new
timestamp and/or size.
Return True if updated and re-read, else False.
NOTE: the higher level app has to re-get objects, too,
to complete any update.
"""
if self.is_changed():
if self.dirty:
lg.warn('re-read {} overwriting "dirty" status'.format(
self.filename))
self._read_datastore()
return True
return False
def _read_datastore(self, try_backup=False):
"""
Read the DataStore into memory (or the backup if opted).
"""
op_str = ''
try:
with open(self.get_filename(try_backup), "r", encoding='utf-8') as fh:
self._update_stat(fh)
self.datastore = yaml.load(fh)
self.dirty = False
fh.close() # to be sure
if not isinstance(self.datastore, dict):
raise Exception('corrupt DataStore (not dict)')
lg.tr5("len=", len(self.datastore) if self.datastore else 0)
if try_backup:
self.flush(force=True, backup=False) # fix non-backup
elif self.do_backup:
self.flush(force=True, backup=True) # make backup after successful read
return True
except Exception as exc:
op_str = 'read' if isinstance(exc, IOError) else 'parse'
dbname = os.path.basename(self.filename)
if not try_backup and os.path.isfile(self.get_backupname()):
lg.warn(f"cannot {op_str} {dbname} [{exc}], trying backup\n")
if self._read_datastore(try_backup=True):
time.sleep(5) # a little time to see the error messages
return True
if op_str == 'read' or self.warn_if_corrupt:
lg.warn(f"cannot {op_str} {dbname} [{exc}], starting empty\n")
time.sleep(5) # a little time to see the error messages
self.datastore = OrderedDict()
self.flush(force=True)
return True
lg.warn(f"cannot {op_str} {dbname} [{exc}], aborting\n")
raise
@staticmethod
def _kys(key):
"""
Keys are identified as a list of parts or a string with '^'
separating the parts. This is a convenience method to
convert to a list if necessary.
"""
return key.split('^') if isinstance(key, str) else key
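# Worked example (added): both spellings address the same node:
#   DataStore._kys('screens^formats^FLAC') -> ['screens', 'formats', 'FLAC']
#   DataStore._kys(['screens', 'formats', 'FLAC']) -> returned unchanged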
def flush(self, force=False, backup=False):
"""
Flush current changes to disk conditionally on having
unwritten modifications unless forced.
"""
if self.dirty or force:
with open(self.get_tmpname(), "w", encoding='utf-8') as fh:
yaml_to_file(self.datastore, fh, flow_nodes=self.flow_nodes)
self._update_stat(fh)
fh.close() # to be sure
lg.tr5('dirty' if self.dirty else 'forced')
self.dirty = False
os.replace(self.get_tmpname(), self.get_filename(backup))
else:
lg.tr5('skipped')
def get(self, key, default=None):
"""
Gets the node (leaf or not) given the key.
Returns None if it does not exist.
"""
kys = self._kys(key)
node = self._get_datastore()
# lg.db("-0-get() kys:", kys)
for ky in kys[:-1]:
# lg.db("get() ky:", ky, 'node:', str(node))
# lg.db("-1-get() ky:", ky, 'node:', type(node))
node = node.get(ky, None)
# lg.db("-2-get() ky:", ky, 'node:', type(node))
if not node or not isinstance(node, dict):
node = None
break
rv = node.get(kys[-1], default) if node else default
lg.tr5("get key:", key, 'rv:', rv, type(rv))
return rv
def subkeys(self, key):
"""
Given a key to a non-leaf node, gets the subkeys at that level.
This is handy when the subkeys are variable.
"""
node = self.get(key)
if node and isinstance(node, dict):
return list(node.keys())
return None
def nthSubkey(self, idx, key):
"""
Given a key to a non-leaf node, gets the
nth (i.e., 0, 1, ...) subkey at that level.
This is handy when the subkeys are variable.
"""
subkeys = self.subkeys(key)
if subkeys and idx in range(len(subkeys)):
return subkeys[idx]
return None
def put(self, key, value):
"""
Stores a value with a given key.
- intermediate nodes are created as needed.
- NOTE: you cannot store a value atop a non-leaf node
Returns true if it worked.
- if it worked and the value is changed/added and
autoflush is on, the DataStore is saved to disk.
"""
rv = True # until proven otherwise
kys = self._kys(key)
try:
node = self._get_datastore()
except Exception:
lg.pr(traceback.format_exc())
node = None
if node is None:
lg.err('OMG: Failed to read grits (will redo everything)')
node = {}
rv = False
subkey = ''
for ky in kys[:-1]:
subkey += ('^' if subkey else '') + ky
nxt_node = node.get(ky)
if nxt_node is None:
nxt_node = node[ky] = OrderedDict()
node = nxt_node
elif isinstance(nxt_node, dict):
node = nxt_node
else:
lg.err('subkey ({}) is a leaf node with value ({})',
subkey, str(nxt_node))
nxt_node = node[ky] = OrderedDict()
rv = False
lg.tr5("put key:", key, 'val:', value)
if node.get(kys[-1]) != value:
node[kys[-1]] = value
self.dirty = True
if self.autoflush:
self.flush()
return rv
def purge(self, key):
"""Purge either internal or leaf node.
Returns True if anything removed, else False.
"""
kys = self._kys(key)
node, parNode = self._get_datastore(), None
for ky in kys:
if isinstance(node, dict):
node, parNode = node.get(ky), node
else:
lg.tr5("not found: key:", key)
return False
if parNode is None or kys[-1] not in parNode:
lg.tr5("not found: key:", key)
return False
del parNode[kys[-1]]
self.dirty = True
lg.tr5("del key:", key, 'oVal:', str(node))
if self.autoflush:
self.flush()
return True
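# Hypothetical round trip (added; TestCfg is the demo subclass defined in
# runner() below):
#   cfg = TestCfg()
#   cfg.put('screens^year_min', 2018) # intermediate nodes created as needed
#   cfg.get('screens^year_min') # -> 2018
#   cfg.purge('screens') # removes the whole subtree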
def runner(argv):
"""Tests DataStore using a test (i.e., TestCfg) store.
NOTE: this creates and leaves: data.d/test_cfg.yaml
"""
class TestCfg(DataStore):
"""
Specialized DataStore for test purposes.
"""
def __init__(self, storedir=None):
"""TBD"""
DataStore.__init__(self, filename='test_cfg.yaml',
storedir=storedir, autoflush=True)
def test_cfg():
"""TBD"""
cfg = TestCfg()
screens = cfg.get('screens')
if screens is None:
lg.db('No screens ...')
screens = OrderedDict()
screens['year_min'] = 2018
screens['fuzzy_min'] = 35
screens['bitrate_min'] = 192
screens['format_min'] = 3
screens['media_min'] = 2
screens['reltype_min'] = 3
screens['tag_min'] = 3
formats = screens['formats'] = OrderedDict()
formats['FLAC'] = 3
formats['MP3'] = 2
formats['AAC'] = 0
formats['AC3'] = 0
formats['DTS'] = 0
media = screens['media'] = OrderedDict()
media['CD'] = 3
media['DVD'] = 3
media['Blu-Ray'] = 3
media['WEB'] = 2
media['Vinyl'] = 1
media['Soundboard'] = 0
media['SACD'] = 0
media['DAT'] = 0
media['Cassette'] = 0
types = screens['reltypes'] = OrderedDict()
types['Album'] = 3
types['Anthology'] = 3
types['Compilation'] = 3
types['Soundtrack'] = 2
types['EP'] = 2
types['Single'] = 1
types['Live Album'] = 1
types['Remix'] = 0
types['Bootleg'] = 0
types['Interview'] = 0
types['Mixtape'] = 0
types['Demo'] = 0
types['Concert Recording'] = 0
types['DJ Mix'] = 0
types['Unknown'] = 0
tags = screens['tags'] = OrderedDict()
tags['3a'] = 'jazz,smooth.jazz,ambient,orchestral,piano'
tags['2a'] = 'blues,classical,country,folk,pop,swing'
tags['1a'] = 'electro,electronic,soul,world.music'
tags['0a'] = 'alternative.rock,dance,dubstep,experimental,folk.rock,fusion,'
tags['0b'] = 'gothic.rock,hardcore.dance,hardcore.punk,hip.hop,kpop,latin,metal'
tags['0c'] = 'post.punk,progressive.rock,pyschedelic.rock,rock,score,techno,trance'
lg.db('Writing screens ...')
cfg.put('screens', screens)
else:
lg.db('Current screens ...')
yaml.dump(screens, sys.stdout)
# pylint: disable=import-outside-toplevel
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-V', '--log-level', choices=lg.choices,
default='TR5', help='set logging/verbosity level [dflt=TR5]')
opts = parser.parse_args(argv)
lg.setup(level=opts.log_level)
test_cfg()
```
#### File: subshop/LibSub/SubFixer.py
```python
import argparse
import os
import re
import shutil
import sys
import statistics
import math
import copy
from types import SimpleNamespace
# from io import FileIO as file
from LibGen.CustLogger import CustLogger as lg
from LibSub import ConfigSubshop
debug_str = os.environ.get('SubFixerDB', None)
DEBUG = bool(debug_str and debug_str not in ('0', 'f', 'false'))
DBfillBucket = False
DBdumpBuckets = False
DBdumpLrAns = False
def getch():
"""TBD"""
import termios
import tty
file_descriptor = sys.stdin.fileno()
settings = termios.tcgetattr(file_descriptor)
try:
tty.setraw(file_descriptor)
return sys.stdin.read(1)
finally:
termios.tcsetattr(file_descriptor, termios.TCSADRAIN, settings)
class Caption:
"""Defines one caption of an SRT file."""
begin_re_str = r'^(\d+):(\d+):(\d+),(\d+)\s+--\>\s+(\d+):(\d+):(\d+),(\d+)$'
begin_re_matcher = None
def __init__(self):
"""The constructor creates an empty caption."""
self.leader, self.beg_ms, self.end_ms, self.lines = '', None, None, None
if not Caption.begin_re_matcher:
Caption.begin_re_matcher = re.compile(Caption.begin_re_str)
def set(self, beg_s, end_s, text, caplist=None, leader=0):
"""Use to create captions roll-your-own caption list. E.g.,
caplist = []
while ....: Caption.append(beg_s, end_s, text, caplist)
Arguments:
- caplist - a list to append the new caption
- if caplist is given, then 'leader' is set to "1", "2", ...
- leader is expected to be an integer if given (ignored if caplist is given)
- text will be split on line separators; empty lines removed
Returns:
- the caption object on success
- None if not given valid text with some non-whitespace chars
"""
lines, self.lines = text.splitlines(), []
for line in lines:
line = line.strip()
if line:
self.lines.append(line)
if not self.lines:
self.leader, self.beg_ms, self.end_ms, self.lines = '', None, None, None
return None # indicates invalid
self.beg_ms = int(round(beg_s * 1000))
self.end_ms = int(round(end_s * 1000))
if isinstance(caplist, list):
caplist.append(self)
self.leader = str(len(caplist))
else:
self.leader = None if leader is None else str(leader)
return self
@staticmethod
def compose(caplist):
"""Compose string from a list of Captions that represents the guts
of an srt file."""
outs = ''
for idx, caption in enumerate(caplist):
outs += '\n' if idx else ''
outs += caption.to_str(idx+1)
return outs
@staticmethod
def write_to_file(out_file, caplist):
"""Write a list of Captions to a file."""
outs = Caption.compose(caplist)
with open(out_file, 'w', encoding = 'utf-8', errors='ignore') as out:
wrcnt = out.write(outs)
lg.tr5('------> Wrote', wrcnt, 'bytes to', out_file)
@staticmethod
def _ms_str(millis, prefix=''):
"""This is for readability"""
if millis < 0:
millis = -millis
prefix = '-'
hrs = millis // (60*60*1000)
mins = (millis // (60*1000)) % 60
secs = (millis // 1000) % 60
millis = millis % 1000
rv = ('{:02d}:'.format(hrs) if hrs else ''
) + ('{:02d}:'.format(mins) if hrs or mins else ''
) + ('{:02d}.{:03d}'.format(secs, millis))
return prefix + (rv[1:] if rv.startswith('0') else rv)
def __repr__(self):
"""Debugging representation for readability. Intended to be concise
on liner with endtime being relative."""
return '{}{} {}'.format(self._ms_str(self.beg_ms),
self._ms_str(self.end_ms-self.beg_ms,'+'), ' '.join(self.lines))
def delta_str(self, caplist):
"""Representation showing time from beginning or end which
ever is closer."""
to_end_ms = self.beg_ms - caplist.captions[-1].end_ms # negative, presumably
rel_ms = self.beg_ms if abs(self.beg_ms) < abs(to_end_ms) else to_end_ms
return f'{self._ms_str(rel_ms)} {" ".join(self.lines)}'
def to_str(self, idx=None):
"""Formal representation of caption per the specs except
for the empty line between captions."""
def ms_str(millis):
hrs = millis // (60*60*1000)
mins = (millis // (60*1000)) % 60
secs = (millis // 1000) % 60
millis = millis % 1000
return '{:02d}:{:02d}:{:02d},{:03d}'.format(hrs, mins, secs, millis)
rv = str(idx) if idx is not None else self.leader
rv += '\n{} --> {}\n'.format(ms_str(self.beg_ms), ms_str(self.end_ms))
rv += '\n'.join(self.lines) + '\n'
return rv
def mini_str(self, max_wds=7):
"""Abbreviated one-liner of caption debugging .. a very
terse form w/o endtime and limited # of words.
"""
def sec_str(millis):
secs = int(round(millis / 1000))
hrs = secs // (60*60)
mins = (secs // 60) % 60
secs = secs % 60
return '{}{:02d}:{:02d}'.format(f'{hrs}:' if hrs else '', mins, secs)
rv = sec_str(self.beg_ms) + ' ' + ' '.join((' '.join(self.lines)).split()[0:max_wds])
return rv
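# Minimal usage sketch (added):
#   caplist = []
#   Caption().set(1.0, 2.5, 'Hello\nworld', caplist) # leader becomes '1'
#   Caption().set(3.0, 4.2, 'Goodbye', caplist) # leader becomes '2'
#   srt_text = Caption.compose(caplist)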
class CaptionList:
"""TBD"""
CHARFIXES = {'¶': '♪'}
REGEX = [#r'\dx',
r'\.(com|net|org)\b', r'\bsync\b.*\b(fixed|corrected)\b',
r'\bair date\b', r'\bArt Subs\b', r'\bcaption',
r'\bHawkeye\d', r'\b[Nn]anban', r'\bsubtitle', r'\bTVShow\b',
r'\bwww\.', 'âª']
subshop_params = ConfigSubshop.get_params()
ad_params = subshop_params.ad_params
def __init__(self, srt_file):
"""Create a caption list from a open caption file."""
self.captions = []
self.anomalies = []
self.misnum_cnt = 0
self.code_option_score_lengths = False
self.purge_ads_cnt = 0
self.ads = []
self.fixed_char_cnt = 0
self.delay_cnt = 0
self.trans_table = str.maketrans(''.join(CaptionList.CHARFIXES.keys()),
''.join(CaptionList.CHARFIXES.values()))
self.limited_pats = [re.compile(pattern, re.IGNORECASE)
for pattern in CaptionList.ad_params.limited_regexes]
self.global_pats = [re.compile(pattern, re.IGNORECASE)
for pattern in CaptionList.ad_params.global_regexes]
# self.formulas = None # computed formulas
# self.lri = None # linear regression vs reference
if isinstance(srt_file, list):
self.captions = srt_file
return
if isinstance(srt_file, str):
with open(srt_file, 'r', encoding = 'utf-8', errors='ignore') as srt:
lines = srt.readlines()
else:
lines = srt_file.readlines()
while lines:
self._get_next(lines)
def is_updated(self):
"""TBD"""
return self.anomalies or self.purge_ads_cnt or self.fixed_char_cnt or self.delay_cnt
def _add_anomaly(self, *terms):
terms = [str(term) for term in terms]
self.anomalies.append(' '.join(terms))
def _fix_chars(self, line):
fixed = line.translate(self.trans_table)
self.fixed_char_cnt += 1 if fixed != line else 0
return fixed
def _get_next(self, lines):
"""Get next caption from remaining lines.
- Returns caption if another is found else None
"""
def to_ms(nums):
"""ms from [hr, min, sec, ms]"""
return int(nums[3]) + 1000 * (
int(nums[2]) + 60 * (int(nums[1]) + 60 * int(nums[0])))
caption = Caption()
while lines:
line = lines.pop(0).strip()
if caption.beg_ms is None:
mat = Caption.begin_re_matcher.match(line)
if mat:
caption.beg_ms = to_ms(mat.group(1, 2, 3, 4))
caption.end_ms = to_ms(mat.group(5, 6, 7, 8))
caption.lines = []
else:
caption.leader = line # last one wins
else:
if line:
caption.lines.append(self._fix_chars(line))
elif not caption.lines:
self._add_anomaly('empty caption:', str(caption))
caption.beg_ms = None # anomaly
else:
lg.tr9('caption:', vars(caption))
break
if caption.beg_ms is not None and caption.lines:
self.captions.append(caption)
def repair(self, verbose=False, title=None):
"""TBD"""
deletions = []
order_errs = 0
for idx, caption in enumerate(self.captions):
if not caption:
continue
if not re.match(r'\d+$', caption.leader):
pass # who cares
# self._add_anomaly('fix malformed leader:', caption.leader, caption)
elif int(caption.leader) != idx + 1:
self._add_anomaly('fix misnumbered:', caption.leader, 'not', idx + 1, caption)
self.misnum_cnt += 1
if caption.beg_ms < 0:
if caption.end_ms <= 0:
self._add_anomaly('rmv negative offset:', caption)
deletions.insert(0, idx)
else:
caption.beg_ms = 0
self._add_anomaly('adj negative offset:', caption)
continue
n_caption = None
if idx+1 < len(self.captions):
n_caption = self.captions[idx+1]
if caption.beg_ms > n_caption.beg_ms:
self._add_anomaly('out-of-order:', caption, '\n ', n_caption)
order_errs += 1
for deletion in deletions:
del self.captions[deletion]
lg.tr2('repair: #captions', len(self.captions))
if order_errs:
self.captions = sorted(self.captions, key=lambda x: x.beg_ms)
# fix overlaps and such in reverse order
prev_caption = None
for idx in range(len(self.captions) - 1, -1, -1):
caption = self.captions[idx]
n_caption, prev_caption = prev_caption, caption
if caption.end_ms <= caption.beg_ms:
if n_caption:
next_delta = n_caption.beg_ms - caption.end_ms
if next_delta < 200: # so short; force overlap to fix in next block
# report as overlap
caption.end_ms = n_caption.beg_ms + 1
else:
self._add_anomaly('fix non positive duration:', caption)
caption.end_ms = caption.beg_ms + min(2000, next_delta)
else:
self._add_anomaly('fix non positive duration:', caption)
caption.end_ms = caption.beg_ms + 2000 # arbitrary
if n_caption and caption.end_ms > n_caption.beg_ms:
self._add_anomaly('fix caption overlap:', caption, '\n ', n_caption)
# try to split fairly
duration = max(n_caption.end_ms, caption.end_ms) - caption.beg_ms
tot_lines = len(caption.lines) + len(n_caption.lines)
duration1 = int(round(duration * len(caption.lines) / tot_lines))
duration2 = duration - duration1
caption.end_ms = caption.beg_ms + duration1
n_caption.beg_ms = caption.end_ms
n_caption.end_ms = n_caption.beg_ms + duration2
if self.anomalies:
if title:
lg.pr(title)
lg.pr('------> Fixing', len(self.anomalies), 'anomalies')
if verbose:
misnumbered = 0
for anomaly in self.anomalies:
if 'misnumbered' in anomaly:
misnumbered += 1
else:
lg.pr('err:', anomaly)
if misnumbered:
lg.pr('err: AND fixed', misnumbered, 'misnumbered captions')
for idx, caption in enumerate(self.captions):
caption.leader = str(idx + 1)
def detect_ads(self, limit_s=None, use_config_pats=True, pattern=None):
"""TBD"""
lg.tr5('detect_ads(): limit_s:', limit_s, 'use_config_pats:',
use_config_pats, 'pattern=', pattern)
if not self.captions: # avoid exception if no subs
return
limit_ms = (self.ad_params.limit_s if limit_s is None else limit_s) * 1000
save_from_ms = self.captions[0].beg_ms + limit_ms
save_to_ms = self.captions[-1].end_ms - limit_ms
for idx, caption in enumerate(self.captions):
text = '\n'.join(caption.lines) + '\n'
matched = False
            # limited patterns apply only near the start or end of the subs
            if use_config_pats and (caption.beg_ms < save_from_ms
                                    or caption.beg_ms > save_to_ms):
for pat in self.limited_pats:
if pat.search(text):
lg.tr1('ad match', text)
self.ads.append((pat.pattern, idx))
matched = True
break
if use_config_pats and not matched:
for pat in self.global_pats:
if pat.search(text):
lg.tr1('ad match', text)
self.ads.append((pat.pattern, idx))
matched = True
break
if pattern and not matched:
if pattern.search(text):
lg.tr1('ad match', text)
self.ads.append((pattern.pattern, idx))
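    # Typical flow (a sketch using this class's own methods):
    #   caplist.detect_ads()  # record (pattern, caption-index) pairs in self.ads
    #   caplist.purge_ads()   # then delete those captions and renumber the rest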
def purge_ads(self):
"""NOTE: for this to work, all detect_ads() first."""
if self.ads:
deletions = [] # must be built in reverse order
for pair in self.ads:
_, idx = pair # pair is (regex, idx)
deletions.insert(0, idx)
for deletion in deletions:
del self.captions[deletion]
self.ads = [] # prevent double purge
self.purge_ads_cnt += len(deletions)
for idx, caption in enumerate(self.captions):
caption.leader = str(idx + 1)
def delay_subs(self, delay_ms):
"""TBD"""
deletions = []
for idx, caption in enumerate(self.captions):
caption.beg_ms += delay_ms
caption.end_ms += delay_ms
if caption.beg_ms < 0:
if caption.end_ms >= 0:
caption.beg_ms = 0
self.delay_cnt += 1
else:
self._add_anomaly('lost frame (negative time):', caption)
deletions.insert(0, idx)
else:
self.delay_cnt += 1
for deletion in deletions:
del self.captions[deletion]
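    # e.g. delay_subs(1500) shifts every caption 1.5s later; delay_subs(-1500)
    # shifts earlier, clamping partly-negative captions to 0 and dropping any
    # pushed wholly below time zero.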
@staticmethod
def linear_regression(x, y, b_rnd=3, m_rnd=5):
"""Compute linear regression.
Returns: intercept, slope, goodness-of-fit, description
"""
# pylint: disable=invalid-name
N = min(len(x), len(y))
x_mean = statistics.mean(x) if x else 0
y_mean = statistics.mean(y) if y else 0
B1_num, B1_den, xy_sum, xx_sum, x_sum, yy_sum, y_sum = 0, 0, 0, 0, 0, 0, 0
for idx in range(N):
X, Y = x[idx], y[idx]
B1_num += ((X - x_mean) * (Y - y_mean))
B1_den += ((X - x_mean)**2)
xy_sum += X*Y
x_sum += X
y_sum += Y
xx_sum += X*X
yy_sum += Y*Y
B1 = B1_num / B1_den if B1_den else 0
B0 = y_mean - (B1*x_mean)
stdev, squares_sum = 0.0, 0.0
for idx in range(N):
X, Y = x[idx], y[idx]
Ycalc = B1*X + B0
squares_sum += (Y-Ycalc)**2
if N:
stdev = math.sqrt(squares_sum / N)
num = (N * xy_sum) - (x_sum * y_sum)
den = math.sqrt((N * xx_sum - x_sum**2) * (N * yy_sum - y_sum**2))
R = num / den if den > 0 else 1.0
lri = SimpleNamespace()
lri.intercept = round(B0, b_rnd)
lri.slope = round(B1, m_rnd)
lri.x_left = x[0] if x else 0
lri.x_right = x[-1] if x else 0
lri.y_left = round(B0 + B1*lri.x_left, b_rnd)
lri.y_right = round(B0 + B1*lri.x_right, b_rnd)
lri.stdev = round(stdev, b_rnd+1)
lri.R = round(R, 4)
lri.RR = round(R*R, 4)
lri.N = N
lri.squares_sum = squares_sum
lri.line = f'y = {lri.intercept} + {lri.slope:.5f}*x'
return lri
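    # Illustrative use (made-up values -- a 100ms offset growing by 1%):
    #   lri = CaptionList.linear_regression([0, 1000, 2000], [100, 110, 120])
    #   # lri.intercept ~= 100, lri.slope ~= 0.01, lri.stdev == 0.0, lri.N == 3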
@staticmethod
def clear_text(cap):
"""Extract clear text from captions."""
rv = []
for line in cap.lines:
if '<' in line: # remove <b> </b> and the like
line = re.sub(r'<[^>]*>', '', line)
if r"{"'\\' in line: # remove {\an8}, {^G6} and the like
line = re.sub(r'{[^}]*}', '', line)
line = line.strip()
if line:
rv.append(line)
return rv
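    # e.g. clear_text(cap) with cap.lines == ['<i>Hello,</i>', '{\an8}there']
    # returns ['Hello,', 'there']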
@staticmethod
def hhmmss_str(seconds):
"""TBD"""
seconds = int(round(seconds))
hrs = seconds // 3600
mins = (seconds // 60) % 60
secs = seconds % 60
rv = '{}{:02d}:{:02d}'.format('{}:'.format(hrs) if hrs else '', mins, secs)
return re.sub(r'^0', r'', rv)
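    # e.g. hhmmss_str(3725) == '1:02:05' and hhmmss_str(125) == '2:05'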
def compare(self, orig_caplist, video_duration, do_lri=True):
"""Compare "original" captions to these captions.
Assumes:
- only time shifts and caption duration shifts are being done
- the original can have more frames and extras are at beginning
"""
# pylint: disable=too-many-locals,too-many-branches
def secs(millis):
return round(millis/1000, 3)
def skip_to(o_idx, o_caps, n_cap, limit):
n_clear_text = self.clear_text(n_cap)
for idx, o_cap in enumerate(o_caps[o_idx:]):
if self.clear_text(o_cap) == n_clear_text:
return o_idx + idx
if idx >= limit:
return None
return None
huge = 1000*1000*1000
min_offset, max_offset, sum_offset = huge, -huge, 0
min_delta, max_delta, sum_delta = huge, -huge, 0 # for caption durations
for caplist in (orig_caplist, self): # normalize the lists
caplist.detect_ads()
caplist.purge_ads()
lg.db('compare():', '#orig_caps:', len(orig_caplist.captions),
'#caps:', len(self.captions))
        orig_caplist.repair() # gotta repair the older one for a sane comparison and stats
o_caps = orig_caplist.captions
n_caps = self.captions
lg.tr1('compare(): (after repair)', '#orig_caps:', len(orig_caplist.captions),
'#caps:', len(self.captions))
caption_cnt = len(n_caps)
delta_caption_cnt = len(o_caps) - len(n_caps)
unfound_caption_cnt = 0
found_caption_cnt = 0
removed_where = []
o_millis, offsets, deltas, oo_caps, nn_caps = [], [], [], [], []
skip_max = abs(delta_caption_cnt) + 10 # how far ahead to look
n_idx, o_idx = -1, -1
while True:
n_idx, o_idx = n_idx+1, o_idx+1
if n_idx >= len(n_caps):
if o_idx < len(o_caps):
print('REMOVED {} caps at END:'.format(len(o_caps) - o_idx))
removed_where.append('END')
while o_idx < len(o_caps):
print(' ', o_caps[o_idx])
o_idx += 1
break
n_cap, o_cap = n_caps[n_idx], o_caps[o_idx]
lg.tr9('compare(): n_cap', n_cap)
o_found_idx = skip_to(o_idx, o_caps, n_cap, skip_max)
if o_found_idx is None:
unfound_caption_cnt += 1
o_idx -= 1 # don't advance the old captions
print('UNFOUND:', n_cap, '\n at ocap:', o_cap)
continue
if o_idx < o_found_idx:
where = 'pos{}/{}'.format(n_idx+1, len(n_caps)) if n_idx else 'START'
removed_where.append(where)
print('REMOVED {} caps at {}:'.format(o_found_idx - o_idx, where))
while o_idx < o_found_idx:
print(' ', o_cap)
o_idx += 1
o_cap = o_caps[o_idx]
found_caption_cnt += 1
oo_caps.append(o_cap)
nn_caps.append(n_cap)
o_millis.append(o_cap.beg_ms)
offset = n_cap.beg_ms - o_cap.beg_ms
offsets.append(offset)
min_offset = min(min_offset, offset)
max_offset = max(max_offset, offset)
sum_offset += offset
delta = (n_cap.end_ms - n_cap.beg_ms) - (o_cap.end_ms - o_cap.beg_ms)
deltas.append(delta)
min_delta = min(min_delta, delta)
max_delta = max(max_delta, delta)
sum_delta += delta
lg.tr8('compare(): offset:', offset, min_offset, max_offset,
'delta:', delta, min_delta, max_delta,
'\n ', o_cap, '\n ', n_cap)
result = ''
if do_lri:
lri = self.linear_regression(o_millis, offsets, b_rnd=0) if offsets else None
result = self.make_compare_str(lri)
if offsets:
if self.code_option_score_lengths:
avg_delta = secs(round(statistics.mean(deltas)))
range_delta = secs(max(avg_delta - min_delta, max_delta - avg_delta))
if abs(avg_delta) >= 0.050 or range_delta >= 0.050:
result += ' lengths by: {}s'.format(avg_delta)
if range_delta >= 0.050:
result += '+-{}'.format(range_delta)
else:
anomaly_cnt = len(self.anomalies) - self.misnum_cnt
if orig_caplist:
anomaly_cnt += len(orig_caplist.anomalies) - orig_caplist.misnum_cnt
if anomaly_cnt:
result += ' w {} anomalies'.format(anomaly_cnt)
for idx, o_milli in enumerate(o_millis):
n_milli = o_milli + offsets[idx] # recompute new milli
linear_milli = lri.intercept + (1+lri.slope)*o_milli
delta_ms = int(round(n_milli - linear_milli))
lg.tr4('intercept:', int(round(lri.intercept)),
'slope:', round(lri.slope, 4),
'o_ms:', o_milli, 'n_ms:', n_milli,
'linear_ms', int(round(linear_milli)),
'delta_ms:', delta_ms)
if DEBUG:
print('offset:', n_milli, 'delta:', delta_ms,
'EXCEPTION' if abs(delta_ms) >= 10 else '',
'\n o:', oo_caps[idx], '\n n:', nn_caps[idx])
if unfound_caption_cnt:
result += ' w {} unmatched'.format(unfound_caption_cnt)
        if delta_caption_cnt:
            result += ' w {} removed{}'.format(delta_caption_cnt,
                ' at {}'.format(removed_where[0]) if len(removed_where) == 1 else '')
        video_duration = int(round(video_duration)) if video_duration else 0
if video_duration and n_caps:
last_caption_secs = n_caps[-1].end_ms / 1000
delta = int(round(video_duration - last_caption_secs)) # how much longer is video?
if delta < 0:
late_captions = [ cap for cap in n_caps if cap.end_ms/1000 > video_duration]
print('{} CAPTIONS BEYOND VIDEO END at {}:'.format(len(late_captions),
self.hhmmss_str(video_duration)))
for cap in late_captions:
print(' ', cap)
lg.tr2('compare(): delta:', delta, last_caption_secs, video_duration)
if delta < -5:
result += ' short by {}s'.format(-delta)
if delta > 180:
result += ' long by {}s'.format(delta)
result += ' of {} captions'.format(caption_cnt)
if self.purge_ads_cnt: # won't find any
result += ' -{} ads'.format(self.purge_ads_cnt)
return result
@staticmethod
def make_compare_str(lri):
"""TBD"""
result = ''
if lri:
lg.tr1('make_compare_str(): regression:', vars(lri))
stdev = round(lri.stdev/1000, 2) # stdev in seconds
result += ' dev {:0.2f}s'.format(stdev)
intercept_s = round(lri.intercept/1000, 1) # intercept_s in seconds
if intercept_s >= 0.1:
result += f' shift {intercept_s}s'
slope_pct = round(lri.slope*100, 2) # slope_pct in percent
if abs(slope_pct) >= .01:
result += f' rate {slope_pct:0.2f}%'
# fit = round(fit*100, 2) # fit now in percent
# if fit < 99.995:
# result += ' fit {:0.2f}%'.format(fit)
result += f' pts {lri.N}'
return result
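    # Illustrative result (made-up regression values):
    #   ' dev 0.25s shift 1.2s rate 0.12% pts 450'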
class CaptionListAnalyzer(CaptionList):
"""This class enables comparing a CaptionList to a "Reference"
CaptionList (e.g., derived from speed-to-text automatically).
Normally, just call analyze() is called with the reference CapList.
"""
def __init__(self, srt_file):
super().__init__(srt_file)
self.xmcaps = None # list of matched caption objects
self.lri = None # linear regression of whole
self.formulas = None # formulas for correction to reference
self.point_cnt = 0 # should match len(self.xmcaps)
self.far_out_point_cnt = 0 # points with timing too dubious to use
self.verbosity = 0
def analyze(self, ref_caplist, video_duration, out_file=None,
verbosity=0, fallback_caplist=None):
"""Compare "reference" captions (conventionally with suffix .REF.srt) to these
captions PHRASE by PHRASE. How we do this:
- extract the "phrases" from the reference and studied subtitles
all the time of the reference subtitles
"""
def is_better(lri, nlri, min_deltadev=None):
if min_deltadev is None:
min_deltadev = lims.min_deltadev
if not lri:
return True
if not nlri:
return False
lg.tr8('is_better(): deltadev:', lri.stdev - nlri.stdev, min_deltadev)
lg.tr8('is_better(): deltaoffset:',
abs(nlri.intercept) - abs(lri.intercept),
lims.min_deltaoffset)
return bool(lri.stdev - nlri.stdev >= min_deltadev or
abs(lri.intercept) - abs(nlri.intercept)
>= lims.min_deltaoffset)
lims = self.subshop_params.sync_params
# pylint: disable=too-many-locals,too-many-branches
if out_file:
assert isinstance(out_file, str)
new_caplist = None
best_caplist, alt_caplist = self, None
decision = 'KEEP unadjusted subs'
devs = [-1, -1, -1] # for unadjusted, linear adj, rift adj
whynot = self.best_linear_fit(ref_caplist, 'unadjusted', verbosity)
best_lri = self.lri
devs[0] = int(round(best_lri.stdev)) if best_lri else 100000
if self.formulas and len(self.xmcaps) >= lims.min_ref_pts and (
abs(best_lri.intercept) >= lims.min_offset
or abs(best_lri.slope*100) >= lims.min_rate):
lg.pr(' <<<< Doing linear adjustment ... >>>>')
alt_caplist = CaptionListAnalyzer(copy.deepcopy(self.captions))
alt_caplist.replicate_xmcaps(self.xmcaps)
alt_caplist.adjust_by_formulas(self.formulas)
lg.tr3('whynot:', whynot, '#formulas:', len(alt_caplist.formulas)
if alt_caplist.formulas else None)
whynot = alt_caplist.best_rifts_fit(ref_caplist,
'linear-adjusted', verbosity)
lg.tr3('whynot:', whynot, '#formulas:', len(alt_caplist.formulas))
devs[1] = int(round(alt_caplist.lri.stdev)) if alt_caplist.lri else 100000
if is_better(best_lri, alt_caplist.lri, min_deltadev=20):
best_caplist, best_lri = alt_caplist, alt_caplist.lri
decision = 'PICK linear adjusted subs'
lg.tr2('#formulas:', len(alt_caplist.formulas))
rift_cnt = 0
if (alt_caplist and len(alt_caplist.formulas) > 1
and len(best_caplist.xmcaps) >= lims.min_ref_pts
and (abs(best_lri.intercept) >= lims.min_offset
or abs(best_lri.slope*100) >= lims.min_rate
or best_lri.stdev >= lims.min_dev)):
lg.pr(' <<<< Looking for rifts ... >>>>')
new_caplist = CaptionListAnalyzer(copy.deepcopy(alt_caplist.captions))
new_caplist.replicate_xmcaps(alt_caplist.xmcaps)
new_caplist.adjust_by_formulas(alt_caplist.formulas)
            rift_cnt = len(alt_caplist.formulas) - 1
whynot = new_caplist.best_linear_fit(ref_caplist,
'rift-adjusted', verbosity)
if whynot:
if best_caplist == self:
return whynot + ' [ADJUSTED SUBS]'
else:
devs[2] = int(round(new_caplist.lri.stdev))
if is_better(best_lri, new_caplist.lri):
best_caplist = new_caplist
decision = 'PICK rift adjusted subs'
elif best_caplist != self:
decision = 'PICK linear adjusted subs'
best_caplist.formulas = []
rift_cnt = 0
if best_caplist == self:
decision = 'KEEP unadjusted subs'
decision += f' {devs[0]}/{devs[1]}/{devs[2]}ms'
if fallback_caplist:
lg.pr(' <<<< Analyze fallback subs ... >>>>')
whynot = fallback_caplist.best_linear_fit(ref_caplist, 'unadjusted',
verbosity=-1)
best_lri, fb_lri = best_caplist.lri, fallback_caplist.lri
if not is_better(fb_lri, best_lri, min_deltadev=20):
if fb_lri and best_lri:
decision = 'KEEP fallback subs offs:{}/{}ms devs:{}/{}ms [fb/new]'.format(
int(fb_lri.intercept), int(best_lri.intercept),
int(fb_lri.stdev), int(best_lri.stdev))
elif fb_lri:
decision = 'KEEP fallback subs offs:{}ms devs:{}ms [fb]'.format(
int(fb_lri.intercept), int(fb_lri.stdev))
else:
decision = 'KEEP fallback subs'
best_caplist = fallback_caplist
rift_cnt = 0
if out_file:
lg.pr('=>', decision)
else:
decision = f'WOULD {decision}'
# print('recurse:', recurse, 'outf:', out_filename)
if best_caplist != fallback_caplist and out_file:
lg.db('compare(): writing:', out_file)
with open(out_file, 'w', encoding = 'utf-8', errors='ignore') as out:
outs = ''
for idx, caption in enumerate(best_caplist.captions):
outs += '\n' if idx else ''
outs += caption.to_str(idx+1)
wrcnt = out.write(outs)
# updated = True
lg.tr5('------> Wrote', wrcnt, 'bytes to', out_file)
result = best_caplist.make_resultstr(
None if best_caplist == fallback_caplist else self,
video_duration, rift_cnt)
rv = f'OK {result} [{decision}]'
# lg.pr('rv:', rv)
return rv
def make_resultstr(self, src_caplist, video_duration, rift_cnt):
"""TBD"""
result = self.make_compare_str(self.lri)
anomaly_cnt = len(self.anomalies)
if src_caplist and self != src_caplist:
anomaly_cnt += len(src_caplist.anomalies)
if anomaly_cnt > 0:
result += f' w {anomaly_cnt} anomalies'
if rift_cnt > 0:
result += f' w {rift_cnt} rifts'
return result
def best_linear_fit(self, ref_caplist, phase, verbosity=0):
"""Core analysis."""
self.lri, self.formulas = None, []
for caplist in (ref_caplist, self): # normalize the caption lists (e.g., remove ads)
caplist.detect_ads()
caplist.purge_ads()
caplist.repair(verbose=True,
title=f'=> Audit {phase if caplist == self else "reference"} subs')
xvals, yvals = self.make_xygrid(ref_caplist, verbosity)
if xvals and yvals:
lri = self.lri = self.linear_regression(xvals, yvals, b_rnd=0)
if verbosity >= 0:
self.print_lri(lri, prefix=f'=> Linear fit of {phase} to REF:')
else:
lri = None
lims = self.subshop_params.sync_params
lg.tr1('lims:', vars(lims))
whynot = None
if not lri:
whynot = f'cannot compute linear regression [pts={len(self.xmcaps)}]'
elif abs(lri.slope*100) > lims.max_rate:
whynot = f'rate-too-big(abs ({lri.slope*100})>{lims.max_rate})'
elif abs(lri.intercept) > lims.max_offset:
whynot = f'offset-too-big (abs({lri.intercept})>{lims.max_offset})'
elif lri.stdev > lims.max_dev:
whynot = f'dev-too-big ({lri.stdev}>{lims.max_dev})'
        elif self.point_cnt < lims.min_ref_pts:
            whynot = f'too-few-points ({self.point_cnt}<{lims.min_ref_pts})'
        if whynot: # analysis too poor to attempt adjustment
            return f'FAILED: analysis [{whynot}]'
self._add_formula(0, len(xvals), self.lri)
return whynot
def _add_formula(self, xbot, xtop, lri):
ns = SimpleNamespace(**{'xbot': xbot, 'xtop': xtop, 'lri': lri})
lg.tr5('add formula:', vars(ns))
self.formulas.append(ns)
def best_rifts_fit(self, ref_caplist, phase, verbosity=0):
"""Find the best fit using rifts (i.e., multiple linear fits)."""
whynot = self.best_linear_fit(ref_caplist, phase, verbosity)
if whynot:
return whynot
self.formulas = []
self.find_breaks(nominal_slope=self.lri.slope)
return None
def make_xygrid(self, ref_caplist, verbosity):
"""
Verbosity: 1=lots, 0=little, -1=minimal
"""
xwords = self.make_wordlist()
ywords = self.make_wordlist(ref_caplist)
xwords_keys = self.make_phrase_keys(xwords)
self.correlate_xy(ywords, xwords_keys, xwords, verbosity)
self.purge_outliers()
if verbosity > 0:
if self.xmcaps:
self.dump_xmcaps()
else:
lg.pr('WARNING: NO MATCHED CAPS')
xvals, yvals = self.make_xyvals()
return xvals, yvals
def make_wordlist(self, ref_caplist=None):
"""TBD"""
if ref_caplist:
captions = ref_caplist.captions
else:
captions = self.captions
self.init_xmcaps() # sparse ... one-to-one with captions
words = []
        max_word_ms = int(round((1000*60/100))) # 100 w/min minimum rate
        min_word_ms = int(round((1000*60/160))) # 160 w/min maximum rate
        fudge = 2 # how far out of expectation the caption length can be
# pylint: disable=too-many-nested-blocks
for capno, caption in enumerate(captions):
# for mcap in self.xmcaps:
mcap = None if ref_caplist else self.xmcaps[capno]
lg.tr8('make_wordlist(): caption:', caption)
raw_words, cooked_words = ' '.join(self.clear_text(caption)).lower().split(), []
for idx, word in enumerate(raw_words):
word = re.sub('^[^a-z]*', '', word)
word = re.sub('[^a-z]*$', '', word)
if word:
cooked_words.append(word)
if not cooked_words:
continue
ms_per_word = (caption.end_ms - caption.beg_ms) / len(cooked_words)
            if ms_per_word < min_word_ms/fudge or ms_per_word > max_word_ms*fudge:
continue
ms_per_word = min(ms_per_word, max_word_ms)
ms_per_word = max(ms_per_word, min_word_ms)
lg.tr9('make_wordlist(): ms_per_word', ms_per_word)
for idx, cooked_word in enumerate(cooked_words):
word = SimpleNamespace()
word.word = cooked_word
word.mcap = mcap
word.pos = idx
word.ms = int(round(caption.beg_ms + idx * ms_per_word))
words.append(word)
# if False:
# print('WORDSET:')
# for idx, word in enumerate(words):
# if True or idx < 10 or idx >= len(words) - 10:
# print('word:', idx, vars(word))
return words
@staticmethod
def get_phrase_words(words, wordno):
"""TBD"""
max_phrase_words = 16
phrase_words = [words[wordno]]
for idx in range(wordno+1, min(wordno+max_phrase_words, len(words))):
if words[idx].ms - words[idx-1].ms > 1000:
break
phrase_words.append(words[idx])
return phrase_words
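    # e.g. if there is a 1.2s pause after words[wordno+2], the result is
    # words[wordno:wordno+3] (and never more than 16 words)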
def make_phrase_keys(self, words):
"""TDB"""
tune = self.subshop_params.phrase_params
rv = {}
for idx in range(len(words)):
phrase_words = self.get_phrase_words(words, idx)
for cnt in range(1, len(phrase_words)):
phrase = ' '.join([w.word for w in phrase_words[0:cnt]])
max_word_len = max([len(w.word) for w in phrase_words[0:cnt]])
# - min phrase length is 8
# - min phrase length is 10 with one 5-letter word
# - if multiple hits, we 'None' it out to indicate multiple
# if len(phrase) >= 10 and max_word_len >= 5:
if len(phrase) >= tune.min_str_len and max_word_len >= tune.min_word_len:
wordno = rv.get(phrase, 'unfound')
rv[phrase] = idx if isinstance(wordno, str) else None
if False: # for initial debugging
ambig_cnt = 0
for phrase, wordno in rv.items():
if wordno is None:
                    lg.pr('phrase:', phrase, ':: [ambiguous] multiple hits')
ambig_cnt += 1
print('#keys:', len(rv), '#ambiguous:', ambig_cnt)
return rv
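    # e.g. the result might map 'hello there general kenobi' -> 42 (the index
    # of its first word) while any phrase seen twice maps to None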
def correlate_xy(self, ywords, xwords_keys, xwords, verbosity):
"""TBD"""
skip = 0
matched_cnt = 0
far_out_cnt = 0
        far_out_max = 10 # limit on how far into the captions matched words may be
        far_out_capnos = set() # the captions NOT matched because too far out
        matched_capnos = set() # the captions already matched
for idx in range(len(ywords)):
if skip > 0:
skip -= 1
continue
# get a list of words at idx of limited size and believed
# to be separated by less than a second or so
phrase_words = self.get_phrase_words(ywords, idx)
for cnt in range(len(phrase_words)-1, 0, -1):
phrase = ' '.join([w.word for w in phrase_words[0:cnt]])
xwordno = xwords_keys.get(phrase, None)
if not xwordno:
# lg.tr9('correlate_xy(): FAILED lookup:', phrase)
continue
for widx in range(cnt):
xword = xwords[xwordno + widx]
if xword.mcap.capno in matched_capnos:
continue # don't match same caption twice
if xword.mcap.capno in far_out_capnos:
continue # don't reject same caption twice
yword = ywords[idx + widx]
if xword.pos + yword.pos > far_out_max:
far_out_capnos.add(xword.mcap.capno)
continue
if not xword.mcap.matches:
matched_cnt += 1
matched_capnos.add(xword.mcap.capno)
xsubphrase = [xwd for xwd in xwords[widx:cnt]
if xwd.mcap.capno == xword.mcap.capno]
xword.mcap.matches.append(SimpleNamespace(**{'phrase': phrase,
'delta_ms': yword.ms - xword.ms,
'ypos': yword.pos, 'xpos': xword.pos,
'xlen': len(xsubphrase)}))
if xword.pos + yword.pos > far_out_max:
far_out_capnos.add(xword.mcap.capno)
continue
skip = cnt - 1
break # if longest sub-phrase is consumed, done
far_out_cnt = len(far_out_capnos - matched_capnos)
self.far_out_point_cnt = far_out_cnt
self.point_cnt = matched_cnt
if verbosity >= 1:
print('\n=> Matched', matched_cnt, 'xcaps of', len(self.xmcaps),
'less', far_out_cnt, 'questionable')
def dump_xmcaps(self):
"""TBD"""
# for idx in range(min(1000, len(xmcaps))):
for mcap in self.xmcaps:
output = 'xcap:{}\n'.format(str(mcap.caption))
for match in mcap.matches:
output += ' {:5.3f}s [x{}y{}] {}\n'.format(match.delta_ms/1000,
match.xpos, match.ypos, match.phrase)
lg.pr(output)
def replicate_xmcaps(self, o_xmcaps):
"""TBD"""
self.xmcaps = [SimpleNamespace(**{'caption': cap, 'capno': idx, 'matches': []})
for idx, cap in enumerate(self.captions)]
for omcap in o_xmcaps:
mcap = self.xmcaps[omcap.capno] # works because now one-to-one with captions
for match in omcap.matches:
mcap.matches.append(SimpleNamespace(**vars(match))) # deep copy does not work
self.squeeze_xmcaps()
def init_xmcaps(self):
"""Create a 'sparse' list of 'mcaps' from the captions; mcaps have both
caption info and correlation/match information."""
self.xmcaps = [SimpleNamespace(**{'caption': cap, 'capno': idx, 'matches': []})
for idx, cap in enumerate(self.captions)]
def squeeze_xmcaps(self):
"""TBD"""
xmcaps = [p for p in self.xmcaps if p.matches]
# ocnt = len(self.xmcaps)
# if False and ocnt > len(xmcaps):
# lg.pr('SQUEEZED xmcaps from', ocnt, 'to', len(xmcaps))
self.xmcaps = xmcaps
    def make_xyvals(self, bot=None, top=None):
        """Build parallel x (caption start ms) and y (match delta ms) lists
        from the matched captions in [bot, top)."""
        xvals, yvals = [], []
        bot = 0 if bot is None else bot
        top = len(self.xmcaps) if top is None else top
        for mcap in self.xmcaps[bot:top]: # honor the requested range
            for match in mcap.matches:
                xvals.append(mcap.caption.beg_ms)
                yvals.append(match.delta_ms)
        return xvals, yvals
def purge_outliers(self):
"""TBD"""
self.squeeze_xmcaps()
for dist in (5, 4, 3, 2):
self.remove_unordered(dist)
outlier_cnt = 1 # prime the pump
while outlier_cnt:
xvals, yvals = self.make_xyvals()
lri = self.linear_regression(xvals, yvals, b_rnd=0)
lg.tr1('1st linear regression:', vars(lri))
outlier_cnt = 0
for mcap in self.xmcaps:
ok_matches = []
for match in mcap.matches:
y_calc = lri.intercept + lri.slope * mcap.caption.beg_ms
if abs(match.delta_ms - y_calc) < 3 * lri.stdev:
ok_matches.append(match)
else:
outlier_cnt += 1
mcap.matches = ok_matches
lg.tr1('OUTLIER_CNT:', outlier_cnt)
self.squeeze_xmcaps()
# NOW, reduce matches to just the best match
for mcap in self.xmcaps:
if len(mcap.matches) < 2:
continue
over30s = [m for m in mcap.matches if len(m.phrase) >= 30]
closest_val, closest_match = 10000, None
candidates = over30s if over30s else mcap.matches
for match in candidates:
if closest_val > match.xpos + match.ypos:
closest_val = match.xpos + match.ypos
closest_match = match
mcap.matches = [closest_match]
def remove_unordered(self, dist):
"""Only for dist >= 2
TODO: check these ranges
"""
xmcaps = self.xmcaps
for idx in range(len(xmcaps)-1, dist-1, -1):
mcap = xmcaps[idx]
ok_matches = []
for match in mcap.matches:
cap_ms = mcap.caption.beg_ms + match.delta_ms
for oidx in range(1, dist+1):
omcap = xmcaps[idx-oidx]
ocaption, omatches = omcap.caption, omcap.matches
ocap_ms_min = ocaption.beg_ms + min([m.delta_ms for m in omatches])
if cap_ms > ocap_ms_min:
ok_matches.append(match)
break
mcap.matches = ok_matches
self.xmcaps = xmcaps
self.squeeze_xmcaps()
xmcaps = self.xmcaps
for idx in range(len(xmcaps) - dist):
mcap = xmcaps[idx]
ok_matches = []
for match in mcap.matches:
cap_ms = mcap.caption.beg_ms + match.delta_ms
for oidx in range(dist):
omcap = xmcaps[idx+oidx]
ocaption, omatches = omcap.caption, omcap.matches
ocap_ms_max = ocaption.beg_ms + max([m.delta_ms for m in omatches])
if cap_ms < ocap_ms_max:
ok_matches.append(match)
break
mcap.matches = ok_matches
self.xmcaps = xmcaps
self.squeeze_xmcaps()
def find_breaks(self, nominal_slope):
"""TBD"""
def best_break(self, xvals, yvals, bot, top, lri):
if lri.slope < 0:
yvals = [-y_ms for y_ms in yvals]
cur_bot, cur_top = bot, top
best_value, best_mid, gap = None, 0, None # no best gap < 0 will be acceptable
border_wid = (cur_top - cur_bot) // tune.border_div
floor = bot + border_wid
ceiling = top - border_wid
for mid in range(floor, ceiling):
left = max(min(mid - tune.pref_pts, bot), 0)
right = min(max(mid + tune.pref_pts, bot), len(xvals))
if mid-left < tune.min_pts or right-mid < tune.min_pts:
continue
l_lri = self.linear_regression(xvals[bot:mid], yvals[bot:mid], b_rnd=0)
if abs(l_lri.slope - nominal_slope) > tune.max_slope_delta:
continue
r_lri = self.linear_regression(xvals[mid:top], yvals[mid:top], b_rnd=0)
if abs(r_lri.slope - nominal_slope) > tune.max_slope_delta:
continue
value = math.sqrt((l_lri.squares_sum + r_lri.squares_sum)/(l_lri.N + r_lri.N))
if best_value is None or value < best_value:
best_value, best_mid = value, mid
if best_value:
mid = best_mid
l_lri = self.linear_regression(xvals[bot:mid], yvals[bot:mid], b_rnd=0)
r_lri = self.linear_regression(xvals[mid:top], yvals[mid:top], b_rnd=0)
joint_stdev = math.sqrt((l_lri.squares_sum + r_lri.squares_sum)
/(l_lri.N + r_lri.N))
if DBdumpLrAns or DEBUG:
self.print_lri(lri, indent=10)
self.print_lri(l_lri, indent=10)
self.print_lri(r_lri, indent=10)
y_range = abs(lri.y_left - lri.y_right)
if joint_stdev >= lri.stdev * tune.min_dev_frac:
if DBdumpLrAns or DEBUG:
                    print('NOPE: did not reduce std deviation enough [{:.1f}%]'.format(
                        round(joint_stdev / lri.stdev * 100, 1)))
if (l_lri.stdev > lri.stdev * tune.max_dev_frac
or r_lri.stdev > lri.stdev * tune.max_dev_frac):
if DBdumpLrAns or DEBUG:
                    print('NOPE: left/right brk dev too big [{:.0f}%, {:.0f}%]'.format(
                        100*l_lri.stdev/lri.stdev, 100*r_lri.stdev/lri.stdev))
best_value = None
elif abs(l_lri.slope - r_lri.slope) > tune.max_parallel_delta:
if DBdumpLrAns or DEBUG:
print('NOPE: slopes not parallel')
best_value = None
else:
if DBdumpLrAns or DEBUG:
print('NOPE: best_value not found', best_value, '/', y_range)
best_value = None
if best_value is not None:
if DBdumpLrAns or DEBUG:
print('YES: gap works', best_value, '/', y_range,
'[', bot, best_mid, top, ']')
x_ms = (xvals[best_mid-1] + xvals[best_mid])/2
lval = l_lri.intercept + l_lri.slope * x_ms
rval = r_lri.intercept + r_lri.slope * x_ms
gap = int(round(lval - rval))
if False:
print('found gap:', gap, 'at:', self.hhmmss_str(x_ms/1000),
'\n ', self.xmcaps[best_mid-1].caption,
'\n ', self.xmcaps[best_mid].caption,
'\n ', mid
)
return best_value, lri.stdev, best_mid, gap
tune = self.subshop_params.rift_params
# print('tune:', vars(tune))
xvals, yvals = self.make_xyvals()
gap_positions = [(0, 0, 0, 0)]
# ans_whole = self.linear_regression(xvals, yvals, b_rnd=0)
# self.print_lri(ans_whole, prefix='=== LINEAR FIT (WHOLE):')
break_cnt_hint = 1 + int(round(self.captions[-1].end_ms
/ ( tune.trial_mins*60*1000)))
break_cnt_hint = max(tune.min_trial_segs, break_cnt_hint)
section_len = (len(yvals) + break_cnt_hint - 1) // break_cnt_hint
bot, top = 0, section_len
while True:
if DBdumpLrAns or DEBUG:
print('bot:', bot, 'top:', top)
lri = self.linear_regression(xvals[bot:top], yvals[bot:top], b_rnd=0)
if abs(lri.y_right - lri.y_left) < 300: # don't bother trying
stdev, o_stdev, pos, gap = 0, 0, 0, None
else:
stdev, o_stdev, pos, gap = best_break(self, xvals, yvals, bot, top, lri)
# compute next bot
bot = bot + int(round(section_len
* (tune.border_div-3)/tune.border_div))
if stdev:
gap_positions.append((pos, gap, round(stdev, 0), round(o_stdev, 0)))
lg.tr1('gaps:', gap_positions[-1])
bot = max(bot, pos+1) # don't let next bot precede break
top = min(len(yvals), bot + section_len)
if top - bot < section_len / 2:
break
gap_positions.append((len(yvals), 0, 0, 0))
if DEBUG:
print('gaps:', gap_positions)
points, squares_sum = 0, 0
for idx, gapinfo in enumerate(gap_positions):
            pos, _, _, _ = gapinfo # gapinfo is (position, gap_ms, stdev, orig_stdev)
            if idx < len(gap_positions) - 1:
                next_pos = gap_positions[idx+1][0]
                bot, top = pos, next_pos
lri = self.linear_regression(xvals[bot:top], yvals[bot:top], b_rnd=0)
# self.print_lri(lri, prefix='SEGMENT:')
points += lri.N
squares_sum += lri.squares_sum
self._add_formula(bot, top, lri)
# if False and len(gap_positions) > 2:
# print('#segments:', len(gap_positions)-1, 'stdev:',
# int(round(math.sqrt(squares_sum/points))))
def adjust_by_formulas(self, formulas):
"""TBD"""
def adjust_ms(millis, lri):
return int(round(millis + lri.intercept + lri.slope*millis))
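        # e.g. with lri.intercept == 250 and lri.slope == 0.001:
        #      adjust_ms(10000, lri) == 10260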
def pick_best_rift1(bot, top):
nonlocal self
DB = False
if DB:
lg.pr('pick_best_rift:', bot, top)
best_gap_ms, best_capno = -100000, bot
ocaption = self.captions[bot] # advance for non-trite captions only
for idx in range(bot, top):
caption = self.captions[idx+1]
gap_ms = caption.beg_ms - ocaption.end_ms
if DB:
star = '*' if gap_ms > best_gap_ms else ' '
lg.pr(f'{star} {gap_ms}ms #{idx} {caption}')
if gap_ms > best_gap_ms:
best_gap_ms, best_capno = gap_ms, idx+1
for line in caption.lines:
if re.search(r'[a-zA-Z]{2,}', line):
ocaption = caption
return best_capno
        # firstly, find the nominal rifts as caption numbers
rifts = [0] * len(formulas)
rifts[-1] = len(self.captions)
for idx in range(0, len(formulas)-1):
formula, nformula = formulas[idx], formulas[idx+1]
low = self.xmcaps[formula.xtop-1].capno
high = self.xmcaps[nformula.xbot].capno
rifts[idx] = pick_best_rift1(low, high)
# finally, adjust the captions per the formula while honoring the rifts
rift_idx, formula = 0, formulas[0]
self.print_lri(formula.lri, prefix='=> ')
for idx, caption in enumerate(self.captions):
if idx >= rifts[rift_idx]:
rift_idx += 1
formula, oformula = formulas[rift_idx], formulas[rift_idx-1]
lri, olri = formula.lri, oformula.lri
delta_ms = (adjust_ms(caption.beg_ms, lri)
- adjust_ms(caption.beg_ms, olri))
caption.beg_ms = adjust_ms(caption.beg_ms, lri)
caption.end_ms = adjust_ms(caption.end_ms, lri)
cap_str = caption.mini_str()
lg.pr(f'=> {cap_str} <<{"="*max(0,58-len(cap_str))} {delta_ms}ms rift')
self.print_lri(lri, prefix='=> ')
else:
lri = formula.lri
caption.beg_ms = adjust_ms(caption.beg_ms, lri)
caption.end_ms = adjust_ms(caption.end_ms, lri)
def print_lri(self, lri, indent=0, prefix=None):
"""Print a summary of the linear regression result."""
print(' '*indent + (prefix + ' ' if prefix else ''),
'[{} to {}]'. format(
self.hhmmss_str(lri.x_left/1000),
self.hhmmss_str(lri.x_right/1000)),
# int(round(lri.stdev)), 'y-l/r:',
# int(round(lri.y_left)), int(round(lri.y_right)),
# int(round(lri.intercept)),
# '+m:', f'{lri.slope:.5f}',
lri.line,
'dev:', int(round(lri.stdev)),
# int(round(lri.intercept)),
'pts:', lri.N
)
class SubFixer:
"""Handler for scrubbing one SRT file at a time"""
def __init__(self, opts):
"""Accepts the namespace created by ArgumentParser OR the list
of command line arguments to give to ArgumentParser."""
if isinstance(opts, list):
opts = self.parse_args(opts)
self.opts = opts
self.caplist = None
@staticmethod
def parse_args(args=None):
"""Argument parsing."""
parser = argparse.ArgumentParser(
description = 'Scrub SRT files with optional offset.')
        parser.add_argument('-O', '--no-overwrite', action = "store_false", dest='overwrite',
                help = "do NOT overwrite the original file (leaves the temp file)")
parser.add_argument('-f', '--force', action = "store_true",
help = "write temp file even if unchanged")
parser.add_argument('-i', '--interactive', action = "store_true",
help = "prompt whether to remove ads, etc")
parser.add_argument('-T', '--temp-file', help="specify name of temp file")
parser.add_argument('-v', '--verbose', action = "store_true", default = False,
help = "choose whether show details")
parser.add_argument('-V', '--log-level', choices=lg.choices,
default='INFO', help='set logging/verbosity level [dflt=INFO]')
parser.add_argument('--compare', action="store_true",
help='compare orig_srt_file to synced_srt_file only')
parser.add_argument('--analyze', action="store_true",
help='word-by-word analysis of reference .srt to synced_srt_file only')
parser.add_argument('-d', '--duration', type=float, default=None,
help="specify video duration in seconds")
parser.add_argument('srt_files', nargs='+', help='list pairs of delay and SRT file')
return parser.parse_args(args)
def do_one_file(self, delay_ms, srt_file, make_analyzer=False):
"""TBD"""
lg.tr4(delay_ms, srt_file)
if not srt_file.lower().endswith('.srt') or not os.path.isfile(srt_file):
lg.err('invalid srt file:', srt_file)
return False
if self.opts.compare or self.opts.analyze:
out_filename = '/dev/null'
else:
out_filename = (self.opts.temp_file if self.opts.temp_file
else os.path.splitext(srt_file)[0] + '.TEMP.srt')
updated = False
with open(out_filename, 'w', encoding = 'utf-8', errors='ignore') as out:
with open(srt_file, 'r', encoding = 'utf-8', errors='ignore') as srt:
self.caplist = (CaptionListAnalyzer(srt) if make_analyzer
else CaptionList(srt))
if self.opts.compare or self.opts.analyze:
return False
self.caplist.repair(verbose=self.opts.verbose)
if delay_ms:
lg.db(f'delay_subs({delay_ms})')
self.caplist.delay_subs(delay_ms)
lg.tr9('detecting ads...')
self.caplist.detect_ads()
if self.caplist.ads:
lg.pr('\n------> Will remove these ads:')
for pair in self.caplist.ads:
regex, idx = pair
lg.pr(' [{}] {}'.format(regex, str(self.caplist.captions[idx])))
if self.opts.interactive:
lg.pr('------> OK? ', end='')
sys.stdout.flush()
response = getch().lower()
else:
response = 'y'
if response == 'y':
self.caplist.purge_ads()
if self.caplist.is_updated() or self.opts.force:
outs = ''
for idx, caption in enumerate(self.caplist.captions):
outs += '\n' if idx else ''
outs += caption.to_str(idx+1)
rv = out.write(outs)
updated = True
lg.pr('------> Wrote', rv, 'bytes')
else:
lg.pr('------> NO changes')
if updated:
if self.opts.overwrite:
shutil.move(out_filename, srt_file)
return True
else:
if self.opts.overwrite:
os.unlink(out_filename)
return False
def grep_one_file(self, srt_file, pattern=None, use_config_pats=False):
"""Run the grep function on one SRT file."""
if not pattern and not use_config_pats:
use_config_pats = True
with open(srt_file, 'r', encoding='utf-8', errors='ignore') as srt:
caplist = CaptionList(srt)
caplist.detect_ads(pattern=pattern, use_config_pats=use_config_pats)
if caplist.ads or self.opts.verbose:
print('\n=>', os.path.basename(srt_file),
"IN", os.path.dirname(srt_file))
for regex, idx in caplist.ads:
lg.pr(' [{}] {}'.format(regex,
caplist.captions[idx].delta_str(caplist)))
if caplist.ads and self.opts.force:
self.do_one_file(delay_ms=0, srt_file=srt_file)
return bool(caplist.ads)
def runner(argv):
"""
SubFixer.py [H] - fixes subtitle errors (e.g., overlaps), removes ads,
and syncs against a reference. Normally, use 'subshop ...' to implicitly
use this where needed. The runner() provides access to the primitive tool.
"""
opts = SubFixer.parse_args(argv)
fixer = SubFixer(opts)
lg.setup(level=opts.log_level)
delay_ms = 0
orig_caplist = None # any subs for compare() or reference subs for analyze()
for token in opts.srt_files:
if re.match(r'[\-\+]?\d+(|\.\d+)$', token):
offset = float(token)
if -50.0 < offset <= 50.0:
delay_ms = int(round(offset * 1000))
else:
delay_ms = int(round(offset))
else:
fixer.do_one_file(delay_ms, token,
make_analyzer=bool(orig_caplist and opts.analyze))
if opts.compare and orig_caplist:
compare_str = fixer.caplist.compare(orig_caplist, opts.duration)
lg.pr(compare_str)
sys.exit(0)
elif opts.analyze and orig_caplist:
compare_str = fixer.caplist.analyze(orig_caplist,
opts.duration, opts.temp_file,
verbosity=1 if opts.verbose else 0)
lg.pr(compare_str)
sys.exit(0)
elif opts.compare or opts.analyze:
orig_caplist = fixer.caplist
if opts.compare or opts.analyze:
lg.pr('Usage error: must provide {reference_srt_file} and {synced_srt_file}\n')
```
#### File: joedefen/subshop/setup.py
```python
import io
import os
from setuptools import setup
def read(file_name):
"""Read a text file and return the content as a string."""
pathname = os.path.join(os.path.dirname(__file__), file_name)
with io.open(pathname, encoding="utf-8") as fh:
return fh.read()
setup(
name='subshop',
version='0.1.3',
license='MIT',
description='Tools to download, clean, and synchronize subtitles',
long_description=read('README.md'),
long_description_content_type='text/markdown',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/joedefen/subshop',
download_url='https://github.com/joedefen/subshop/releases/tag/v0.1.3',
scripts=['subshop', 'video2srt', 'subshop-sys-deps'],
packages=['LibSub', 'LibGen'],
classifiers=[
'Development Status :: 4 - Beta',
'Operating System :: POSIX',
'Programming Language :: Python :: 3',
'Intended Audience :: End Users/Desktop',
],
install_requires=['requests', 'PlexAPI',
'Send2Trash', 'ruamel.yaml', 'pysigset', 'vosk']
)
``` |
{
"source": "joedenis/quantstats",
"score": 3
} |
#### File: quantstats/samples/all_praescire_ohlc.py
```python
import pandas as pd
from pathlib import Path
def read_excel_file(path=Path.home().joinpath('Dropbox', 'praescire_database', 'old_databases' )):
"""
    Read the OHLC data from Excel and save it as CSV.
"""
all_data = pd.read_excel(path / "all_years.xlsx", sheet_name=1)
all_data['Volume'] = 0
all_data['Adj Close'] = all_data['Close']
print(all_data)
all_data.to_csv(Path("/home/joe/PycharmProjects/quantstats_git/quantstats/data/all_praescire.csv"), index=False)
if __name__ == "__main__":
read_excel_file()
``` |
{
"source": "joedight/SourceIO",
"score": 3
} |
#### File: bsp/entities/base_entity_classes.py
```python
def parse_source_value(value):
if type(value) is str:
return float(value) if '.' in value else int(value)
else:
return value
def parse_int_vector(string):
return [parse_source_value(val) for val in string.replace(' ', ' ').split(' ')]
def parse_float_vector(string):
return [float(val) for val in string.replace(' ', ' ').split(' ')]
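# e.g. parse_source_value('1.5') == 1.5 and parse_source_value('2') == 2;
#      parse_int_vector('255 255 200') == [255, 255, 200];
#      parse_float_vector('0 0 0.5') == [0.0, 0.0, 0.5]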
class Base:
hammer_id_counter = 0
def __init__(self):
self.hammer_id = 0
self.class_name = 'ANY'
@classmethod
def new_hammer_id(cls):
new_id = cls.hammer_id_counter
cls.hammer_id_counter += 1
return new_id
@staticmethod
def from_dict(instance, entity_data: dict):
if 'hammerid' in entity_data:
instance.hammer_id = int(entity_data.get('hammerid'))
else: # Titanfall
instance.hammer_id = Base.new_hammer_id()
instance.class_name = entity_data.get('classname')
class Angles(Base):
def __init__(self):
super().__init__()
self.angles = [0.0, 0.0, 0.0] # Type: angle
@staticmethod
def from_dict(instance, entity_data: dict):
Base.from_dict(instance, entity_data)
instance.angles = parse_float_vector(entity_data.get('angles', "0 0 0")) # Type: angle
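# Typical use (a sketch; the keyvalues dict comes from the BSP entity lump):
#   ent = Angles()
#   Angles.from_dict(ent, {'classname': 'info_target', 'hammerid': '12',
#                          'angles': '0 90 0'})
#   # ent.angles == [0.0, 90.0, 0.0]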
class Origin(Base):
def __init__(self):
super().__init__()
self.origin = None # Type: origin
@staticmethod
def from_dict(instance, entity_data: dict):
Base.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0")) # Type: origin
class Studiomodel(Base):
def __init__(self):
super().__init__()
self.model = None # Type: studio
self.skin = None # Type: integer
self.modelscale = 1.0 # Type: float
self.disableshadows = None # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
Base.from_dict(instance, entity_data)
instance.model = entity_data.get('model', None) # Type: studio
instance.skin = parse_source_value(entity_data.get('skin', 0)) # Type: integer
instance.modelscale = float(entity_data.get('modelscale', 1.0)) # Type: float
instance.disableshadows = entity_data.get('disableshadows', None) # Type: choices
class BasePlat(Base):
def __init__(self):
super().__init__()
pass
@staticmethod
def from_dict(instance, entity_data: dict):
Base.from_dict(instance, entity_data)
class Targetname(Base):
def __init__(self):
super().__init__()
self.targetname = None # Type: target_source
@staticmethod
def from_dict(instance, entity_data: dict):
Base.from_dict(instance, entity_data)
instance.targetname = entity_data.get('targetname', None) # Type: target_source
class Parentname(Base):
def __init__(self):
super().__init__()
self.parentname = None # Type: target_destination
@staticmethod
def from_dict(instance, entity_data: dict):
Base.from_dict(instance, entity_data)
instance.parentname = entity_data.get('parentname', None) # Type: target_destination
class BaseBrush(Base):
def __init__(self):
super().__init__()
pass
@staticmethod
def from_dict(instance, entity_data: dict):
Base.from_dict(instance, entity_data)
class EnableDisable(Base):
def __init__(self):
super().__init__()
self.StartDisabled = None # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
Base.from_dict(instance, entity_data)
instance.StartDisabled = entity_data.get('startdisabled', None) # Type: choices
class RenderFxChoices(Base):
def __init__(self):
super().__init__()
self.renderfx = None # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
Base.from_dict(instance, entity_data)
instance.renderfx = entity_data.get('renderfx', None) # Type: choices
class Shadow(Base):
def __init__(self):
super().__init__()
self.disableshadows = None # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
Base.from_dict(instance, entity_data)
instance.disableshadows = entity_data.get('disableshadows', None) # Type: choices
class RenderFields(RenderFxChoices):
def __init__(self):
super(RenderFxChoices).__init__()
self.rendermode = None # Type: choices
self.renderamt = 255 # Type: integer
self.rendercolor = [255, 255, 255] # Type: color255
self.disablereceiveshadows = None # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
RenderFxChoices.from_dict(instance, entity_data)
instance.rendermode = entity_data.get('rendermode', None) # Type: choices
instance.renderamt = parse_source_value(entity_data.get('renderamt', 255)) # Type: integer
instance.rendercolor = parse_int_vector(entity_data.get('rendercolor', "255 255 255")) # Type: color255
instance.disablereceiveshadows = entity_data.get('disablereceiveshadows', None) # Type: choices
class DXLevelChoice(Base):
def __init__(self):
super().__init__()
self.mindxlevel = None # Type: choices
self.maxdxlevel = None # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
Base.from_dict(instance, entity_data)
instance.mindxlevel = entity_data.get('mindxlevel', None) # Type: choices
instance.maxdxlevel = entity_data.get('maxdxlevel', None) # Type: choices
class Inputfilter(Base):
def __init__(self):
super().__init__()
self.InputFilter = None # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
Base.from_dict(instance, entity_data)
instance.InputFilter = entity_data.get('inputfilter', None) # Type: choices
class Global(Base):
def __init__(self):
super().__init__()
self.globalname = None # Type: string
@staticmethod
def from_dict(instance, entity_data: dict):
Base.from_dict(instance, entity_data)
instance.globalname = entity_data.get('globalname', None) # Type: string
class EnvGlobal(Targetname):
def __init__(self):
super(Targetname).__init__()
self.initialstate = None # Type: choices
self.counter = None # Type: integer
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.initialstate = entity_data.get('initialstate', None) # Type: choices
instance.counter = parse_source_value(entity_data.get('counter', 0)) # Type: integer
class DamageFilter(Base):
def __init__(self):
super().__init__()
self.damagefilter = None # Type: target_destination
@staticmethod
def from_dict(instance, entity_data: dict):
Base.from_dict(instance, entity_data)
instance.damagefilter = entity_data.get('damagefilter', None) # Type: target_destination
class ResponseContext(Base):
def __init__(self):
super().__init__()
self.ResponseContext = None # Type: string
@staticmethod
def from_dict(instance, entity_data: dict):
Base.from_dict(instance, entity_data)
instance.ResponseContext = entity_data.get('responsecontext', None) # Type: string
class Breakable(Targetname, DamageFilter, Shadow):
def __init__(self):
super(Targetname).__init__()
super(DamageFilter).__init__()
super(Shadow).__init__()
self.ExplodeDamage = None # Type: float
self.ExplodeRadius = None # Type: float
self.PerformanceMode = None # Type: choices
self.BreakModelMessage = None # Type: string
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
DamageFilter.from_dict(instance, entity_data)
Shadow.from_dict(instance, entity_data)
instance.ExplodeDamage = float(entity_data.get('explodedamage', 0)) # Type: float
instance.ExplodeRadius = float(entity_data.get('exploderadius', 0)) # Type: float
instance.PerformanceMode = entity_data.get('performancemode', None) # Type: choices
instance.BreakModelMessage = entity_data.get('breakmodelmessage', None) # Type: string
class BreakableBrush(Parentname, Breakable, Global):
def __init__(self):
super(Breakable).__init__()
super(Parentname).__init__()
super(Global).__init__()
self.propdata = None # Type: choices
self.health = 1 # Type: integer
self.material = None # Type: choices
self.explosion = None # Type: choices
self.gibdir = [0.0, 0.0, 0.0] # Type: angle
self.nodamageforces = None # Type: choices
self.gibmodel = None # Type: string
self.spawnobject = None # Type: choices
self.explodemagnitude = None # Type: integer
self.pressuredelay = None # Type: float
@staticmethod
def from_dict(instance, entity_data: dict):
Parentname.from_dict(instance, entity_data)
Breakable.from_dict(instance, entity_data)
Global.from_dict(instance, entity_data)
instance.propdata = entity_data.get('propdata', None) # Type: choices
instance.health = parse_source_value(entity_data.get('health', 1)) # Type: integer
instance.material = entity_data.get('material', None) # Type: choices
instance.explosion = entity_data.get('explosion', None) # Type: choices
instance.gibdir = parse_float_vector(entity_data.get('gibdir', "0 0 0")) # Type: angle
instance.nodamageforces = entity_data.get('nodamageforces', None) # Type: choices
instance.gibmodel = entity_data.get('gibmodel', None) # Type: string
instance.spawnobject = entity_data.get('spawnobject', None) # Type: choices
instance.explodemagnitude = parse_source_value(entity_data.get('explodemagnitude', 0)) # Type: integer
instance.pressuredelay = float(entity_data.get('pressuredelay', 0)) # Type: float
class BreakableProp(Breakable):
def __init__(self):
super(Breakable).__init__()
self.pressuredelay = None # Type: float
@staticmethod
def from_dict(instance, entity_data: dict):
Breakable.from_dict(instance, entity_data)
instance.pressuredelay = float(entity_data.get('pressuredelay', 0)) # Type: float
class BaseNPC(DamageFilter, Angles, Shadow, RenderFields, ResponseContext, Targetname):
def __init__(self):
super(RenderFields).__init__()
super(DamageFilter).__init__()
super(Angles).__init__()
super(Shadow).__init__()
super(ResponseContext).__init__()
super(Targetname).__init__()
self.target = None # Type: target_destination
self.squadname = None # Type: string
self.hintgroup = None # Type: string
self.hintlimiting = None # Type: choices
self.sleepstate = None # Type: choices
self.wakeradius = None # Type: float
self.wakesquad = None # Type: choices
self.enemyfilter = None # Type: target_destination
self.ignoreunseenenemies = None # Type: choices
self.physdamagescale = 1.0 # Type: float
@staticmethod
def from_dict(instance, entity_data: dict):
DamageFilter.from_dict(instance, entity_data)
Angles.from_dict(instance, entity_data)
Shadow.from_dict(instance, entity_data)
RenderFields.from_dict(instance, entity_data)
ResponseContext.from_dict(instance, entity_data)
Targetname.from_dict(instance, entity_data)
instance.target = entity_data.get('target', None) # Type: target_destination
instance.squadname = entity_data.get('squadname', None) # Type: string
instance.hintgroup = entity_data.get('hintgroup', None) # Type: string
instance.hintlimiting = entity_data.get('hintlimiting', None) # Type: choices
instance.sleepstate = entity_data.get('sleepstate', None) # Type: choices
instance.wakeradius = float(entity_data.get('wakeradius', 0)) # Type: float
instance.wakesquad = entity_data.get('wakesquad', None) # Type: choices
instance.enemyfilter = entity_data.get('enemyfilter', None) # Type: target_destination
instance.ignoreunseenenemies = entity_data.get('ignoreunseenenemies', None) # Type: choices
instance.physdamagescale = float(entity_data.get('physdamagescale', 1.0)) # Type: float
class info_npc_spawn_destination(Targetname, Parentname, Angles):
icon_sprite = "editor/info_target.vmt"
def __init__(self):
super(Targetname).__init__()
super(Parentname).__init__()
super(Angles).__init__()
self.origin = [0, 0, 0]
self.ReuseDelay = 1 # Type: float
self.RenameNPC = None # Type: string
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
Angles.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.ReuseDelay = float(entity_data.get('reusedelay', 1)) # Type: float
instance.RenameNPC = entity_data.get('renamenpc', None) # Type: string
class BaseNPCMaker(Targetname, EnableDisable, Angles):
icon_sprite = "editor/npc_maker.vmt"
def __init__(self):
super(Targetname).__init__()
super(EnableDisable).__init__()
super(Angles).__init__()
self.MaxNPCCount = 1 # Type: integer
self.SpawnFrequency = "5" # Type: string
self.MaxLiveChildren = 5 # Type: integer
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
EnableDisable.from_dict(instance, entity_data)
Angles.from_dict(instance, entity_data)
instance.MaxNPCCount = parse_source_value(entity_data.get('maxnpccount', 1)) # Type: integer
instance.SpawnFrequency = entity_data.get('spawnfrequency', "5") # Type: string
instance.MaxLiveChildren = parse_source_value(entity_data.get('maxlivechildren', 5)) # Type: integer
class npc_template_maker(BaseNPCMaker):
icon_sprite = "editor/npc_maker.vmt"
def __init__(self):
super(BaseNPCMaker).__init__()
self.origin = [0, 0, 0]
self.TemplateName = None # Type: target_destination
self.Radius = 256 # Type: float
self.DestinationGroup = None # Type: target_destination
self.CriterionVisibility = "CHOICES NOT SUPPORTED" # Type: choices
self.CriterionDistance = "CHOICES NOT SUPPORTED" # Type: choices
self.MinSpawnDistance = None # Type: integer
@staticmethod
def from_dict(instance, entity_data: dict):
BaseNPCMaker.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.TemplateName = entity_data.get('templatename', None) # Type: target_destination
instance.Radius = float(entity_data.get('radius', 256)) # Type: float
instance.DestinationGroup = entity_data.get('destinationgroup', None) # Type: target_destination
instance.CriterionVisibility = entity_data.get('criterionvisibility', "CHOICES NOT SUPPORTED") # Type: choices
instance.CriterionDistance = entity_data.get('criteriondistance', "CHOICES NOT SUPPORTED") # Type: choices
instance.MinSpawnDistance = parse_source_value(entity_data.get('minspawndistance', 0)) # Type: integer
class BaseHelicopter(BaseNPC):
def __init__(self):
super(BaseNPC).__init__()
self.InitialSpeed = "0" # Type: string
@staticmethod
def from_dict(instance, entity_data: dict):
BaseNPC.from_dict(instance, entity_data)
instance.InitialSpeed = entity_data.get('initialspeed', "0") # Type: string
class PlayerClass(Base):
def __init__(self):
super().__init__()
pass
@staticmethod
def from_dict(instance, entity_data: dict):
Base.from_dict(instance, entity_data)
class Light(Base):
def __init__(self):
super().__init__()
self._light = [255, 255, 255, 200] # Type: color255
self._lightHDR = [-1, -1, -1, 1] # Type: color255
self._lightscaleHDR = 1 # Type: float
self.style = None # Type: choices
self.pattern = None # Type: string
self._constant_attn = "0" # Type: string
self._linear_attn = "0" # Type: string
self._quadratic_attn = "1" # Type: string
self._fifty_percent_distance = "0" # Type: string
self._zero_percent_distance = "0" # Type: string
self._hardfalloff = None # Type: integer
@staticmethod
def from_dict(instance, entity_data: dict):
Base.from_dict(instance, entity_data)
instance._light = parse_int_vector(entity_data.get('_light', "255 255 255 200")) # Type: color255
instance._lightHDR = parse_int_vector(entity_data.get('_lighthdr', "-1 -1 -1 1")) # Type: color255
instance._lightscaleHDR = float(entity_data.get('_lightscalehdr', 1)) # Type: float
instance.style = entity_data.get('style', None) # Type: choices
instance.pattern = entity_data.get('pattern', None) # Type: string
instance._constant_attn = entity_data.get('_constant_attn', "0") # Type: string
instance._linear_attn = entity_data.get('_linear_attn', "0") # Type: string
instance._quadratic_attn = entity_data.get('_quadratic_attn', "1") # Type: string
instance._fifty_percent_distance = entity_data.get('_fifty_percent_distance', "0") # Type: string
instance._zero_percent_distance = entity_data.get('_zero_percent_distance', "0") # Type: string
instance._hardfalloff = parse_source_value(entity_data.get('_hardfalloff', 0)) # Type: integer
class Node(Base):
def __init__(self):
super().__init__()
self.nodeid = None # Type: integer
@staticmethod
def from_dict(instance, entity_data: dict):
Base.from_dict(instance, entity_data)
instance.nodeid = parse_source_value(entity_data.get('nodeid', 0)) # Type: integer
class HintNode(Node):
def __init__(self):
super(Node).__init__()
self.hinttype = None # Type: choices
self.hintactivity = None # Type: string
self.nodeFOV = "CHOICES NOT SUPPORTED" # Type: choices
self.StartHintDisabled = None # Type: choices
self.Group = None # Type: string
self.TargetNode = -1 # Type: node_dest
self.IgnoreFacing = "CHOICES NOT SUPPORTED" # Type: choices
self.MinimumState = "CHOICES NOT SUPPORTED" # Type: choices
self.MaximumState = "CHOICES NOT SUPPORTED" # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
Node.from_dict(instance, entity_data)
instance.hinttype = entity_data.get('hinttype', None) # Type: choices
instance.hintactivity = entity_data.get('hintactivity', None) # Type: string
instance.nodeFOV = entity_data.get('nodefov', "CHOICES NOT SUPPORTED") # Type: choices
instance.StartHintDisabled = entity_data.get('starthintdisabled', None) # Type: choices
instance.Group = entity_data.get('group', None) # Type: string
instance.TargetNode = parse_source_value(entity_data.get('targetnode', -1)) # Type: node_dest
instance.IgnoreFacing = entity_data.get('ignorefacing', "CHOICES NOT SUPPORTED") # Type: choices
instance.MinimumState = entity_data.get('minimumstate', "CHOICES NOT SUPPORTED") # Type: choices
instance.MaximumState = entity_data.get('maximumstate', "CHOICES NOT SUPPORTED") # Type: choices
class TriggerOnce(Global, Origin, EnableDisable, Parentname, Targetname):
def __init__(self):
        Global.__init__(self)
        Origin.__init__(self)
        EnableDisable.__init__(self)
        Parentname.__init__(self)
        Targetname.__init__(self)
self.filtername = None # Type: filterclass
@staticmethod
def from_dict(instance, entity_data: dict):
Global.from_dict(instance, entity_data)
Origin.from_dict(instance, entity_data)
EnableDisable.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
Targetname.from_dict(instance, entity_data)
instance.filtername = entity_data.get('filtername', None) # Type: filterclass
class Trigger(TriggerOnce):
def __init__(self):
        super().__init__()
@staticmethod
def from_dict(instance, entity_data: dict):
        TriggerOnce.from_dict(instance, entity_data)
class worldbase(Base):
def __init__(self):
super().__init__()
self.message = None # Type: string
self.skyname = "sky_day01_01" # Type: string
self.chaptertitle = None # Type: string
self.startdark = None # Type: choices
self.gametitle = None # Type: choices
self.newunit = None # Type: choices
self.maxoccludeearea = 0 # Type: float
self.minoccluderarea = 0 # Type: float
self.maxoccludeearea_x360 = 0 # Type: float
self.minoccluderarea_x360 = 0 # Type: float
self.maxpropscreenwidth = -1 # Type: float
self.minpropscreenwidth = None # Type: float
self.detailvbsp = "detail.vbsp" # Type: string
self.detailmaterial = "detail/detailsprites" # Type: string
self.coldworld = None # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
Base.from_dict(instance, entity_data)
instance.message = entity_data.get('message', None) # Type: string
instance.skyname = entity_data.get('skyname', "sky_day01_01") # Type: string
instance.chaptertitle = entity_data.get('chaptertitle', None) # Type: string
instance.startdark = entity_data.get('startdark', None) # Type: choices
instance.gametitle = entity_data.get('gametitle', None) # Type: choices
instance.newunit = entity_data.get('newunit', None) # Type: choices
instance.maxoccludeearea = float(entity_data.get('maxoccludeearea', 0)) # Type: float
instance.minoccluderarea = float(entity_data.get('minoccluderarea', 0)) # Type: float
instance.maxoccludeearea_x360 = float(entity_data.get('maxoccludeearea_x360', 0)) # Type: float
instance.minoccluderarea_x360 = float(entity_data.get('minoccluderarea_x360', 0)) # Type: float
instance.maxpropscreenwidth = float(entity_data.get('maxpropscreenwidth', -1)) # Type: float
instance.minpropscreenwidth = float(entity_data.get('minpropscreenwidth', 0)) # Type: float
instance.detailvbsp = entity_data.get('detailvbsp', "detail.vbsp") # Type: string
instance.detailmaterial = entity_data.get('detailmaterial', "detail/detailsprites") # Type: string
instance.coldworld = entity_data.get('coldworld', None) # Type: choices
class worldspawn(Targetname, worldbase, ResponseContext):
def __init__(self):
        Targetname.__init__(self)
        worldbase.__init__(self)
        ResponseContext.__init__(self)
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
worldbase.from_dict(instance, entity_data)
ResponseContext.from_dict(instance, entity_data)
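# Deserialisation sketch: every class here follows the same two-step
# pattern -- construct for defaults, then from_dict with the raw
# key/value dict parsed from the map. Keys are assumed to already be
# lower-cased, which is why from_dict looks up 'skyname' etc.
#
#   world = worldspawn()
#   worldspawn.from_dict(world, {'skyname': 'sky_day02_01', 'message': 'intro'})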
class ambient_generic(Targetname):
icon_sprite = "editor/ambient_generic.vmt"
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.message = None # Type: sound
self.health = 10 # Type: integer
self.preset = None # Type: choices
self.volstart = None # Type: integer
self.fadeinsecs = None # Type: integer
self.fadeoutsecs = None # Type: integer
self.pitch = 100 # Type: integer
self.pitchstart = 100 # Type: integer
self.spinup = None # Type: integer
self.spindown = None # Type: integer
self.lfotype = None # Type: integer
self.lforate = None # Type: integer
self.lfomodpitch = None # Type: integer
self.lfomodvol = None # Type: integer
self.cspinup = None # Type: integer
self.radius = "1250" # Type: string
self.SourceEntityName = None # Type: target_destination
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.message = entity_data.get('message', None) # Type: sound
instance.health = parse_source_value(entity_data.get('health', 10)) # Type: integer
instance.preset = entity_data.get('preset', None) # Type: choices
instance.volstart = parse_source_value(entity_data.get('volstart', 0)) # Type: integer
instance.fadeinsecs = parse_source_value(entity_data.get('fadeinsecs', 0)) # Type: integer
instance.fadeoutsecs = parse_source_value(entity_data.get('fadeoutsecs', 0)) # Type: integer
instance.pitch = parse_source_value(entity_data.get('pitch', 100)) # Type: integer
instance.pitchstart = parse_source_value(entity_data.get('pitchstart', 100)) # Type: integer
instance.spinup = parse_source_value(entity_data.get('spinup', 0)) # Type: integer
instance.spindown = parse_source_value(entity_data.get('spindown', 0)) # Type: integer
instance.lfotype = parse_source_value(entity_data.get('lfotype', 0)) # Type: integer
instance.lforate = parse_source_value(entity_data.get('lforate', 0)) # Type: integer
instance.lfomodpitch = parse_source_value(entity_data.get('lfomodpitch', 0)) # Type: integer
instance.lfomodvol = parse_source_value(entity_data.get('lfomodvol', 0)) # Type: integer
instance.cspinup = parse_source_value(entity_data.get('cspinup', 0)) # Type: integer
instance.radius = entity_data.get('radius', "1250") # Type: string
instance.SourceEntityName = entity_data.get('sourceentityname', None) # Type: target_destination
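# Caveat (matches the FGD types above): 'radius' stays a string here, so
# callers that need a number must convert it themselves, e.g.
#
#   snd = ambient_generic()
#   ambient_generic.from_dict(snd, {'radius': '2500'})
#   radius = float(snd.radius)  # -> 2500.0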
class func_lod(Targetname):
def __init__(self):
        super().__init__()
self.DisappearDist = 2000 # Type: integer
self.Solid = None # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.DisappearDist = parse_source_value(entity_data.get('disappeardist', 2000)) # Type: integer
instance.Solid = entity_data.get('solid', None) # Type: choices
class env_zoom(Targetname):
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.Rate = 1.0 # Type: float
self.FOV = 75 # Type: integer
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.Rate = float(entity_data.get('rate', 1.0)) # Type: float
instance.FOV = parse_source_value(entity_data.get('fov', 75)) # Type: integer
class env_screenoverlay(Targetname):
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.OverlayName1 = None # Type: string
self.OverlayTime1 = 1.0 # Type: float
self.OverlayName2 = None # Type: string
self.OverlayTime2 = 1.0 # Type: float
self.OverlayName3 = None # Type: string
self.OverlayTime3 = 1.0 # Type: float
self.OverlayName4 = None # Type: string
self.OverlayTime4 = 1.0 # Type: float
self.OverlayName5 = None # Type: string
self.OverlayTime5 = 1.0 # Type: float
self.OverlayName6 = None # Type: string
self.OverlayTime6 = 1.0 # Type: float
self.OverlayName7 = None # Type: string
self.OverlayTime7 = 1.0 # Type: float
self.OverlayName8 = None # Type: string
self.OverlayTime8 = 1.0 # Type: float
self.OverlayName9 = None # Type: string
self.OverlayTime9 = 1.0 # Type: float
self.OverlayName10 = None # Type: string
self.OverlayTime10 = 1.0 # Type: float
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.OverlayName1 = entity_data.get('overlayname1', None) # Type: string
instance.OverlayTime1 = float(entity_data.get('overlaytime1', 1.0)) # Type: float
instance.OverlayName2 = entity_data.get('overlayname2', None) # Type: string
instance.OverlayTime2 = float(entity_data.get('overlaytime2', 1.0)) # Type: float
instance.OverlayName3 = entity_data.get('overlayname3', None) # Type: string
instance.OverlayTime3 = float(entity_data.get('overlaytime3', 1.0)) # Type: float
instance.OverlayName4 = entity_data.get('overlayname4', None) # Type: string
instance.OverlayTime4 = float(entity_data.get('overlaytime4', 1.0)) # Type: float
instance.OverlayName5 = entity_data.get('overlayname5', None) # Type: string
instance.OverlayTime5 = float(entity_data.get('overlaytime5', 1.0)) # Type: float
instance.OverlayName6 = entity_data.get('overlayname6', None) # Type: string
instance.OverlayTime6 = float(entity_data.get('overlaytime6', 1.0)) # Type: float
instance.OverlayName7 = entity_data.get('overlayname7', None) # Type: string
instance.OverlayTime7 = float(entity_data.get('overlaytime7', 1.0)) # Type: float
instance.OverlayName8 = entity_data.get('overlayname8', None) # Type: string
instance.OverlayTime8 = float(entity_data.get('overlaytime8', 1.0)) # Type: float
instance.OverlayName9 = entity_data.get('overlayname9', None) # Type: string
instance.OverlayTime9 = float(entity_data.get('overlaytime9', 1.0)) # Type: float
instance.OverlayName10 = entity_data.get('overlayname10', None) # Type: string
instance.OverlayTime10 = float(entity_data.get('overlaytime10', 1.0)) # Type: float
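# The ten numbered overlay slots could equally be read in a loop; a
# behaviour-equivalent sketch (not used by the generated code above):
#
#   for i in range(1, 11):
#       setattr(instance, 'OverlayName%d' % i,
#               entity_data.get('overlayname%d' % i, None))
#       setattr(instance, 'OverlayTime%d' % i,
#               float(entity_data.get('overlaytime%d' % i, 1.0)))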
class env_screeneffect(Targetname):
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.type = None # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.type = entity_data.get('type', None) # Type: choices
class env_texturetoggle(Targetname):
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.target = None # Type: target_destination
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.target = entity_data.get('target', None) # Type: target_destination
class env_splash(Targetname, Angles):
def __init__(self):
        Targetname.__init__(self)
        Angles.__init__(self)
self.origin = [0, 0, 0]
self.scale = 8.0 # Type: float
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Angles.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.scale = float(entity_data.get('scale', 8.0)) # Type: float
class env_particlelight(Parentname):
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.Color = [255, 0, 0] # Type: color255
self.Intensity = 5000 # Type: integer
self.directional = None # Type: choices
self.PSName = None # Type: string
@staticmethod
def from_dict(instance, entity_data: dict):
Parentname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.Color = parse_int_vector(entity_data.get('color', "255 0 0")) # Type: color255
instance.Intensity = parse_source_value(entity_data.get('intensity', 5000)) # Type: integer
instance.directional = entity_data.get('directional', None) # Type: choices
instance.PSName = entity_data.get('psname', None) # Type: string
class env_sun(Targetname, Angles):
def __init__(self):
        Targetname.__init__(self)
        Angles.__init__(self)
self.origin = [0, 0, 0]
self.target = None # Type: target_destination
self.use_angles = None # Type: choices
self.pitch = None # Type: integer
self.rendercolor = [100, 80, 80] # Type: color255
self.overlaycolor = [0, 0, 0] # Type: color255
self.size = 16 # Type: integer
self.overlaysize = -1 # Type: integer
self.material = "sprites/light_glow02_add_noz" # Type: sprite
self.overlaymaterial = "sprites/light_glow02_add_noz" # Type: sprite
self.HDRColorScale = 1.0 # Type: float
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Angles.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.target = entity_data.get('target', None) # Type: target_destination
instance.use_angles = entity_data.get('use_angles', None) # Type: choices
instance.pitch = parse_source_value(entity_data.get('pitch', 0)) # Type: integer
instance.rendercolor = parse_int_vector(entity_data.get('rendercolor', "100 80 80")) # Type: color255
instance.overlaycolor = parse_int_vector(entity_data.get('overlaycolor', "0 0 0")) # Type: color255
instance.size = parse_source_value(entity_data.get('size', 16)) # Type: integer
instance.overlaysize = parse_source_value(entity_data.get('overlaysize', -1)) # Type: integer
instance.material = entity_data.get('material', "sprites/light_glow02_add_noz") # Type: sprite
instance.overlaymaterial = entity_data.get('overlaymaterial', "sprites/light_glow02_add_noz") # Type: sprite
instance.HDRColorScale = float(entity_data.get('hdrcolorscale', 1.0)) # Type: float
class game_ragdoll_manager(Targetname):
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.MaxRagdollCount = -1 # Type: integer
self.MaxRagdollCountDX8 = -1 # Type: integer
self.SaveImportant = None # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.MaxRagdollCount = parse_source_value(entity_data.get('maxragdollcount', -1)) # Type: integer
instance.MaxRagdollCountDX8 = parse_source_value(entity_data.get('maxragdollcountdx8', -1)) # Type: integer
instance.SaveImportant = entity_data.get('saveimportant', None) # Type: choices
class game_gib_manager(Targetname):
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.maxpieces = -1 # Type: integer
self.maxpiecesdx8 = -1 # Type: integer
self.allownewgibs = None # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.maxpieces = parse_source_value(entity_data.get('maxpieces', -1)) # Type: integer
instance.maxpiecesdx8 = parse_source_value(entity_data.get('maxpiecesdx8', -1)) # Type: integer
instance.allownewgibs = entity_data.get('allownewgibs', None) # Type: choices
class env_lightglow(Parentname, Targetname, Angles):
model = "models/editor/axis_helper_thick.mdl"
def __init__(self):
        Parentname.__init__(self)
        Targetname.__init__(self)
        Angles.__init__(self)
self.origin = [0, 0, 0]
self.rendercolor = [255, 255, 255] # Type: color255
self.VerticalGlowSize = 30 # Type: integer
self.HorizontalGlowSize = 30 # Type: integer
self.MinDist = 500 # Type: integer
self.MaxDist = 2000 # Type: integer
self.OuterMaxDist = None # Type: integer
self.GlowProxySize = 2.0 # Type: float
self.HDRColorScale = 1.0 # Type: float
@staticmethod
def from_dict(instance, entity_data: dict):
Parentname.from_dict(instance, entity_data)
Targetname.from_dict(instance, entity_data)
Angles.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.rendercolor = parse_int_vector(entity_data.get('rendercolor', "255 255 255")) # Type: color255
instance.VerticalGlowSize = parse_source_value(entity_data.get('verticalglowsize', 30)) # Type: integer
instance.HorizontalGlowSize = parse_source_value(entity_data.get('horizontalglowsize', 30)) # Type: integer
instance.MinDist = parse_source_value(entity_data.get('mindist', 500)) # Type: integer
instance.MaxDist = parse_source_value(entity_data.get('maxdist', 2000)) # Type: integer
instance.OuterMaxDist = parse_source_value(entity_data.get('outermaxdist', 0)) # Type: integer
instance.GlowProxySize = float(entity_data.get('glowproxysize', 2.0)) # Type: float
instance.HDRColorScale = float(entity_data.get('hdrcolorscale', 1.0)) # Type: float
class env_smokestack(Parentname, Angles):
def __init__(self):
        Parentname.__init__(self)
        Angles.__init__(self)
self.origin = [0, 0, 0]
self.targetname = None # Type: target_source
self.InitialState = None # Type: choices
self.BaseSpread = 20 # Type: integer
self.SpreadSpeed = 15 # Type: integer
self.Speed = 30 # Type: integer
self.StartSize = 20 # Type: integer
self.EndSize = 30 # Type: integer
self.Rate = 20 # Type: integer
self.JetLength = 180 # Type: integer
self.WindAngle = None # Type: integer
self.WindSpeed = None # Type: integer
self.SmokeMaterial = "particle/SmokeStack.vmt" # Type: string
self.twist = None # Type: integer
self.roll = None # Type: float
self.rendercolor = [255, 255, 255] # Type: color255
self.renderamt = 255 # Type: integer
@staticmethod
def from_dict(instance, entity_data: dict):
Parentname.from_dict(instance, entity_data)
Angles.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.targetname = entity_data.get('targetname', None) # Type: target_source
instance.InitialState = entity_data.get('initialstate', None) # Type: choices
instance.BaseSpread = parse_source_value(entity_data.get('basespread', 20)) # Type: integer
instance.SpreadSpeed = parse_source_value(entity_data.get('spreadspeed', 15)) # Type: integer
instance.Speed = parse_source_value(entity_data.get('speed', 30)) # Type: integer
instance.StartSize = parse_source_value(entity_data.get('startsize', 20)) # Type: integer
instance.EndSize = parse_source_value(entity_data.get('endsize', 30)) # Type: integer
instance.Rate = parse_source_value(entity_data.get('rate', 20)) # Type: integer
instance.JetLength = parse_source_value(entity_data.get('jetlength', 180)) # Type: integer
instance.WindAngle = parse_source_value(entity_data.get('windangle', 0)) # Type: integer
instance.WindSpeed = parse_source_value(entity_data.get('windspeed', 0)) # Type: integer
instance.SmokeMaterial = entity_data.get('smokematerial', "particle/SmokeStack.vmt") # Type: string
instance.twist = parse_source_value(entity_data.get('twist', 0)) # Type: integer
instance.roll = float(entity_data.get('roll', 0)) # Type: float
instance.rendercolor = parse_int_vector(entity_data.get('rendercolor', "255 255 255")) # Type: color255
instance.renderamt = parse_source_value(entity_data.get('renderamt', 255)) # Type: integer
class env_fade(Targetname):
icon_sprite = "editor/env_fade"
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.duration = "2" # Type: string
self.holdtime = "0" # Type: string
self.renderamt = 255 # Type: integer
self.rendercolor = [0, 0, 0] # Type: color255
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.duration = entity_data.get('duration', "2") # Type: string
instance.holdtime = entity_data.get('holdtime', "0") # Type: string
instance.renderamt = parse_source_value(entity_data.get('renderamt', 255)) # Type: integer
instance.rendercolor = parse_int_vector(entity_data.get('rendercolor', "0 0 0")) # Type: color255
class env_player_surface_trigger(Targetname):
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.gamematerial = "CHOICES NOT SUPPORTED" # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.gamematerial = entity_data.get('gamematerial', "CHOICES NOT SUPPORTED") # Type: choices
class env_tonemap_controller(Targetname):
icon_sprite = "editor/env_tonemap_controller.vmt"
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
class func_useableladder(Targetname, Parentname):
def __init__(self):
        Targetname.__init__(self)
        Parentname.__init__(self)
self.origin = [0, 0, 0]
self.point0 = None # Type: vector
self.point1 = None # Type: vector
self.StartDisabled = None # Type: choices
self.ladderSurfaceProperties = None # Type: string
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.point0 = parse_float_vector(entity_data.get('point0', "0 0 0")) # Type: vector
instance.point1 = parse_float_vector(entity_data.get('point1', "0 0 0")) # Type: vector
instance.StartDisabled = entity_data.get('startdisabled', None) # Type: choices
instance.ladderSurfaceProperties = entity_data.get('laddersurfaceproperties', None) # Type: string
class func_ladderendpoint(Targetname, Parentname, Angles):
def __init__(self):
        Targetname.__init__(self)
        Parentname.__init__(self)
        Angles.__init__(self)
self.origin = [0, 0, 0]
self.target = None # Type: target_destination
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
Angles.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.target = entity_data.get('target', None) # Type: target_destination
class info_ladder_dismount(Parentname):
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.target = None # Type: target_destination
@staticmethod
def from_dict(instance, entity_data: dict):
Parentname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.target = entity_data.get('target', None) # Type: target_destination
class func_areaportalwindow(Targetname):
def __init__(self):
        super().__init__()
self.target = None # Type: target_destination
self.FadeStartDist = 128 # Type: integer
self.FadeDist = 512 # Type: integer
self.TranslucencyLimit = "0.2" # Type: string
self.BackgroundBModel = None # Type: string
self.PortalVersion = 1 # Type: integer
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.target = entity_data.get('target', None) # Type: target_destination
instance.FadeStartDist = parse_source_value(entity_data.get('fadestartdist', 128)) # Type: integer
instance.FadeDist = parse_source_value(entity_data.get('fadedist', 512)) # Type: integer
instance.TranslucencyLimit = entity_data.get('translucencylimit', "0.2") # Type: string
instance.BackgroundBModel = entity_data.get('backgroundbmodel', None) # Type: string
instance.PortalVersion = parse_source_value(entity_data.get('portalversion', 1)) # Type: integer
class func_wall(Targetname, RenderFields, Shadow, Global):
def __init__(self):
        Targetname.__init__(self)
        RenderFields.__init__(self)
        Shadow.__init__(self)
        Global.__init__(self)
self._minlight = None # Type: string
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
RenderFields.from_dict(instance, entity_data)
Shadow.from_dict(instance, entity_data)
Global.from_dict(instance, entity_data)
instance._minlight = entity_data.get('_minlight', None) # Type: string
class func_clip_vphysics(Targetname, EnableDisable):
def __init__(self):
        Targetname.__init__(self)
        EnableDisable.__init__(self)
self.filtername = None # Type: filterclass
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
EnableDisable.from_dict(instance, entity_data)
instance.filtername = entity_data.get('filtername', None) # Type: filterclass
class func_brush(Global, Inputfilter, Origin, Shadow, EnableDisable, Parentname, RenderFields, Targetname):
def __init__(self):
        Global.__init__(self)
        Inputfilter.__init__(self)
        Origin.__init__(self)
        Shadow.__init__(self)
        EnableDisable.__init__(self)
        Parentname.__init__(self)
        RenderFields.__init__(self)
        Targetname.__init__(self)
self._minlight = None # Type: string
self.Solidity = None # Type: choices
self.excludednpc = None # Type: string
self.invert_exclusion = None # Type: choices
self.solidbsp = None # Type: choices
self.vrad_brush_cast_shadows = None # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
Global.from_dict(instance, entity_data)
Inputfilter.from_dict(instance, entity_data)
Origin.from_dict(instance, entity_data)
Shadow.from_dict(instance, entity_data)
EnableDisable.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
RenderFields.from_dict(instance, entity_data)
Targetname.from_dict(instance, entity_data)
instance._minlight = entity_data.get('_minlight', None) # Type: string
instance.Solidity = entity_data.get('solidity', None) # Type: choices
instance.excludednpc = entity_data.get('excludednpc', None) # Type: string
instance.invert_exclusion = entity_data.get('invert_exclusion', None) # Type: choices
instance.solidbsp = entity_data.get('solidbsp', None) # Type: choices
instance.vrad_brush_cast_shadows = entity_data.get('vrad_brush_cast_shadows', None) # Type: choices
class vgui_screen_base(Targetname, Parentname, Angles):
def __init__(self):
        Targetname.__init__(self)
        Parentname.__init__(self)
        Angles.__init__(self)
self.panelname = None # Type: string
self.overlaymaterial = None # Type: string
self.width = 32 # Type: integer
self.height = 32 # Type: integer
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
Angles.from_dict(instance, entity_data)
instance.panelname = entity_data.get('panelname', None) # Type: string
instance.overlaymaterial = entity_data.get('overlaymaterial', None) # Type: string
instance.width = parse_source_value(entity_data.get('width', 32)) # Type: integer
instance.height = parse_source_value(entity_data.get('height', 32)) # Type: integer
class vgui_screen(vgui_screen_base):
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
@staticmethod
def from_dict(instance, entity_data: dict):
vgui_screen_base.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
class vgui_slideshow_display(Targetname, Parentname, Angles):
model = "models/editor/axis_helper_thick.mdl"
def __init__(self):
        Targetname.__init__(self)
        Parentname.__init__(self)
        Angles.__init__(self)
self.origin = [0, 0, 0]
self.displaytext = None # Type: string
self.directory = "slideshow" # Type: string
self.minslidetime = 0.5 # Type: float
self.maxslidetime = 0.5 # Type: float
self.cycletype = None # Type: choices
self.nolistrepeat = None # Type: choices
self.width = 256 # Type: integer
self.height = 128 # Type: integer
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
Angles.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.displaytext = entity_data.get('displaytext', None) # Type: string
instance.directory = entity_data.get('directory', "slideshow") # Type: string
instance.minslidetime = float(entity_data.get('minslidetime', 0.5)) # Type: float
instance.maxslidetime = float(entity_data.get('maxslidetime', 0.5)) # Type: float
instance.cycletype = entity_data.get('cycletype', None) # Type: choices
instance.nolistrepeat = entity_data.get('nolistrepeat', None) # Type: choices
instance.width = parse_source_value(entity_data.get('width', 256)) # Type: integer
instance.height = parse_source_value(entity_data.get('height', 128)) # Type: integer
class cycler(Angles, RenderFxChoices, Parentname, RenderFields, Targetname):
    def __init__(self):
        Angles.__init__(self)
        RenderFxChoices.__init__(self)
        Parentname.__init__(self)
        RenderFields.__init__(self)
        Targetname.__init__(self)
self.origin = [0, 0, 0]
self.model = None # Type: studio
self.skin = None # Type: integer
self.sequence = None # Type: integer
@staticmethod
def from_dict(instance, entity_data: dict):
Angles.from_dict(instance, entity_data)
RenderFxChoices.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
RenderFields.from_dict(instance, entity_data)
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.model = entity_data.get('model', None) # Type: studio
instance.skin = parse_source_value(entity_data.get('skin', 0)) # Type: integer
instance.sequence = parse_source_value(entity_data.get('sequence', 0)) # Type: integer
class gibshooterbase(Targetname, Parentname):
def __init__(self):
        Targetname.__init__(self)
        Parentname.__init__(self)
self.angles = "0 0 0" # Type: string
self.m_iGibs = 3 # Type: integer
self.delay = "0" # Type: string
self.gibangles = "0 0 0" # Type: string
self.gibanglevelocity = "0" # Type: string
self.m_flVelocity = 200 # Type: integer
self.m_flVariance = "0.15" # Type: string
self.m_flGibLife = "4" # Type: string
self.lightingorigin = None # Type: target_destination
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
instance.angles = entity_data.get('angles', "0 0 0") # Type: string
instance.m_iGibs = parse_source_value(entity_data.get('m_igibs', 3)) # Type: integer
instance.delay = entity_data.get('delay', "0") # Type: string
instance.gibangles = entity_data.get('gibangles', "0 0 0") # Type: string
instance.gibanglevelocity = entity_data.get('gibanglevelocity', "0") # Type: string
instance.m_flVelocity = parse_source_value(entity_data.get('m_flvelocity', 200)) # Type: integer
instance.m_flVariance = entity_data.get('m_flvariance', "0.15") # Type: string
instance.m_flGibLife = entity_data.get('m_flgiblife', "4") # Type: string
instance.lightingorigin = entity_data.get('lightingorigin', None) # Type: target_destination
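# gibshooterbase is only a shared field bag: the concrete shooters below
# (gibshooter, env_shooter, env_rotorshooter) chain to
# gibshooterbase.from_dict first and then layer their own keys on top,
# so a dict aimed at any subclass may freely mix base and subclass keys.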
class env_beam(Targetname, RenderFxChoices, Parentname):
def __init__(self):
        Targetname.__init__(self)
        RenderFxChoices.__init__(self)
        Parentname.__init__(self)
self.origin = [0, 0, 0]
self.renderamt = 100 # Type: integer
self.rendercolor = [255, 255, 255] # Type: color255
self.Radius = 256 # Type: integer
self.life = "1" # Type: string
self.BoltWidth = 2 # Type: float
self.NoiseAmplitude = None # Type: float
self.texture = "sprites/laserbeam.spr" # Type: sprite
self.TextureScroll = 35 # Type: integer
self.framerate = None # Type: integer
self.framestart = None # Type: integer
self.StrikeTime = "1" # Type: string
self.damage = "0" # Type: string
self.LightningStart = None # Type: target_destination
self.LightningEnd = None # Type: target_destination
self.decalname = "Bigshot" # Type: string
self.HDRColorScale = 1.0 # Type: float
self.TouchType = None # Type: choices
self.filtername = None # Type: filterclass
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
RenderFxChoices.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.renderamt = parse_source_value(entity_data.get('renderamt', 100)) # Type: integer
instance.rendercolor = parse_int_vector(entity_data.get('rendercolor', "255 255 255")) # Type: color255
instance.Radius = parse_source_value(entity_data.get('radius', 256)) # Type: integer
instance.life = entity_data.get('life', "1") # Type: string
instance.BoltWidth = float(entity_data.get('boltwidth', 2)) # Type: float
instance.NoiseAmplitude = float(entity_data.get('noiseamplitude', 0)) # Type: float
instance.texture = entity_data.get('texture', "sprites/laserbeam.spr") # Type: sprite
instance.TextureScroll = parse_source_value(entity_data.get('texturescroll', 35)) # Type: integer
instance.framerate = parse_source_value(entity_data.get('framerate', 0)) # Type: integer
instance.framestart = parse_source_value(entity_data.get('framestart', 0)) # Type: integer
instance.StrikeTime = entity_data.get('striketime', "1") # Type: string
instance.damage = entity_data.get('damage', "0") # Type: string
instance.LightningStart = entity_data.get('lightningstart', None) # Type: target_destination
instance.LightningEnd = entity_data.get('lightningend', None) # Type: target_destination
instance.decalname = entity_data.get('decalname', "Bigshot") # Type: string
instance.HDRColorScale = float(entity_data.get('hdrcolorscale', 1.0)) # Type: float
instance.TouchType = entity_data.get('touchtype', None) # Type: choices
instance.filtername = entity_data.get('filtername', None) # Type: filterclass
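# Wiring sketch: LightningStart/LightningEnd hold target names, not
# entity references, so resolving them is up to the caller. A minimal,
# hypothetical lookup over an already-parsed entity index:
#
#   beam = env_beam()
#   env_beam.from_dict(beam, raw)
#   start = entities_by_targetname.get(beam.LightningStart)
#   end = entities_by_targetname.get(beam.LightningEnd)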
class env_beverage(Targetname, Parentname):
def __init__(self):
        Targetname.__init__(self)
        Parentname.__init__(self)
self.origin = [0, 0, 0]
self.health = 10 # Type: integer
self.beveragetype = None # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.health = parse_source_value(entity_data.get('health', 10)) # Type: integer
instance.beveragetype = entity_data.get('beveragetype', None) # Type: choices
class env_embers(Targetname, Parentname, Angles):
def __init__(self):
        Targetname.__init__(self)
        Parentname.__init__(self)
        Angles.__init__(self)
self.particletype = None # Type: choices
self.density = 50 # Type: integer
self.lifetime = 4 # Type: integer
self.speed = 32 # Type: integer
self.rendercolor = [255, 255, 255] # Type: color255
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
Angles.from_dict(instance, entity_data)
instance.particletype = entity_data.get('particletype', None) # Type: choices
instance.density = parse_source_value(entity_data.get('density', 50)) # Type: integer
instance.lifetime = parse_source_value(entity_data.get('lifetime', 4)) # Type: integer
instance.speed = parse_source_value(entity_data.get('speed', 32)) # Type: integer
instance.rendercolor = parse_int_vector(entity_data.get('rendercolor', "255 255 255")) # Type: color255
class env_funnel(Targetname, Parentname):
def __init__(self):
        Targetname.__init__(self)
        Parentname.__init__(self)
self.origin = [0, 0, 0]
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
class env_blood(Targetname, Parentname):
def __init__(self):
        Targetname.__init__(self)
        Parentname.__init__(self)
self.origin = [0, 0, 0]
self.spraydir = [0.0, 0.0, 0.0] # Type: angle
self.color = None # Type: choices
self.amount = "100" # Type: string
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.spraydir = parse_float_vector(entity_data.get('spraydir', "0 0 0")) # Type: angle
instance.color = entity_data.get('color', None) # Type: choices
instance.amount = entity_data.get('amount', "100") # Type: string
class env_bubbles(Targetname, Parentname):
def __init__(self):
        Targetname.__init__(self)
        Parentname.__init__(self)
self.density = 2 # Type: integer
self.frequency = 2 # Type: integer
self.current = None # Type: integer
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
instance.density = parse_source_value(entity_data.get('density', 2)) # Type: integer
instance.frequency = parse_source_value(entity_data.get('frequency', 2)) # Type: integer
instance.current = parse_source_value(entity_data.get('current', 0)) # Type: integer
class env_explosion(Targetname, Parentname):
icon_sprite = "editor/env_explosion.vmt"
def __init__(self):
        Targetname.__init__(self)
        Parentname.__init__(self)
self.origin = [0, 0, 0]
self.iMagnitude = 100 # Type: integer
self.iRadiusOverride = None # Type: integer
self.fireballsprite = "sprites/zerogxplode.spr" # Type: sprite
self.rendermode = "CHOICES NOT SUPPORTED" # Type: choices
self.ignoredEntity = None # Type: target_destination
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.iMagnitude = parse_source_value(entity_data.get('imagnitude', 100)) # Type: integer
instance.iRadiusOverride = parse_source_value(entity_data.get('iradiusoverride', 0)) # Type: integer
instance.fireballsprite = entity_data.get('fireballsprite', "sprites/zerogxplode.spr") # Type: sprite
instance.rendermode = entity_data.get('rendermode', "CHOICES NOT SUPPORTED") # Type: choices
instance.ignoredEntity = entity_data.get('ignoredentity', None) # Type: target_destination
class env_smoketrail(Targetname, Parentname):
def __init__(self):
        Targetname.__init__(self)
        Parentname.__init__(self)
self.origin = [0, 0, 0]
self.opacity = 0.75 # Type: float
self.spawnrate = 20 # Type: float
self.lifetime = 5.0 # Type: float
self.startcolor = [192, 192, 192] # Type: color255
self.endcolor = [160, 160, 160] # Type: color255
self.emittime = 0 # Type: float
self.minspeed = 10 # Type: float
self.maxspeed = 20 # Type: float
self.mindirectedspeed = 0 # Type: float
self.maxdirectedspeed = 0 # Type: float
self.startsize = 15 # Type: float
self.endsize = 50 # Type: float
self.spawnradius = 15 # Type: float
self.firesprite = "sprites/firetrail.spr" # Type: sprite
self.smokesprite = "sprites/whitepuff.spr" # Type: sprite
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.opacity = float(entity_data.get('opacity', 0.75)) # Type: float
instance.spawnrate = float(entity_data.get('spawnrate', 20)) # Type: float
instance.lifetime = float(entity_data.get('lifetime', 5.0)) # Type: float
instance.startcolor = parse_int_vector(entity_data.get('startcolor', "192 192 192")) # Type: color255
instance.endcolor = parse_int_vector(entity_data.get('endcolor', "160 160 160")) # Type: color255
instance.emittime = float(entity_data.get('emittime', 0)) # Type: float
instance.minspeed = float(entity_data.get('minspeed', 10)) # Type: float
instance.maxspeed = float(entity_data.get('maxspeed', 20)) # Type: float
instance.mindirectedspeed = float(entity_data.get('mindirectedspeed', 0)) # Type: float
instance.maxdirectedspeed = float(entity_data.get('maxdirectedspeed', 0)) # Type: float
instance.startsize = float(entity_data.get('startsize', 15)) # Type: float
instance.endsize = float(entity_data.get('endsize', 50)) # Type: float
instance.spawnradius = float(entity_data.get('spawnradius', 15)) # Type: float
instance.firesprite = entity_data.get('firesprite', "sprites/firetrail.spr") # Type: sprite
instance.smokesprite = entity_data.get('smokesprite', "sprites/whitepuff.spr") # Type: sprite
class env_physexplosion(Targetname, Parentname):
icon_sprite = "editor/env_physexplosion.vmt"
def __init__(self):
        Targetname.__init__(self)
        Parentname.__init__(self)
self.origin = [0, 0, 0]
self.magnitude = "100" # Type: string
self.radius = "0" # Type: string
self.targetentityname = None # Type: target_destination
self.inner_radius = 0 # Type: float
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.magnitude = entity_data.get('magnitude', "100") # Type: string
instance.radius = entity_data.get('radius', "0") # Type: string
instance.targetentityname = entity_data.get('targetentityname', None) # Type: target_destination
instance.inner_radius = float(entity_data.get('inner_radius', 0)) # Type: float
class env_physimpact(Targetname, Parentname):
icon_sprite = "editor/env_physexplosion.vmt"
def __init__(self):
        Targetname.__init__(self)
        Parentname.__init__(self)
self.origin = [0, 0, 0]
self.angles = "0 0 0" # Type: string
self.magnitude = 100 # Type: integer
self.distance = None # Type: integer
self.directionentityname = None # Type: target_destination
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.angles = entity_data.get('angles', "0 0 0") # Type: string
instance.magnitude = parse_source_value(entity_data.get('magnitude', 100)) # Type: integer
instance.distance = parse_source_value(entity_data.get('distance', 0)) # Type: integer
instance.directionentityname = entity_data.get('directionentityname', None) # Type: target_destination
class env_fire(Targetname, Parentname, EnableDisable):
icon_sprite = "editor/env_fire"
def __init__(self):
        Targetname.__init__(self)
        Parentname.__init__(self)
        EnableDisable.__init__(self)
self.origin = [0, 0, 0]
self.health = 30 # Type: integer
self.firesize = 64 # Type: integer
self.fireattack = 4 # Type: integer
self.firetype = None # Type: choices
self.ignitionpoint = 32 # Type: float
self.damagescale = 1.0 # Type: float
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
EnableDisable.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.health = parse_source_value(entity_data.get('health', 30)) # Type: integer
instance.firesize = parse_source_value(entity_data.get('firesize', 64)) # Type: integer
instance.fireattack = parse_source_value(entity_data.get('fireattack', 4)) # Type: integer
instance.firetype = entity_data.get('firetype', None) # Type: choices
instance.ignitionpoint = float(entity_data.get('ignitionpoint', 32)) # Type: float
instance.damagescale = float(entity_data.get('damagescale', 1.0)) # Type: float
class env_firesource(Targetname, Parentname):
icon_sprite = "editor/env_firesource"
def __init__(self):
        Targetname.__init__(self)
        Parentname.__init__(self)
self.origin = [0, 0, 0]
self.fireradius = 128 # Type: float
self.firedamage = 10 # Type: float
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.fireradius = float(entity_data.get('fireradius', 128)) # Type: float
instance.firedamage = float(entity_data.get('firedamage', 10)) # Type: float
class env_firesensor(Targetname, Parentname):
def __init__(self):
        Targetname.__init__(self)
        Parentname.__init__(self)
self.origin = [0, 0, 0]
self.fireradius = 128 # Type: float
self.heatlevel = 32 # Type: float
self.heattime = None # Type: float
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.fireradius = float(entity_data.get('fireradius', 128)) # Type: float
instance.heatlevel = float(entity_data.get('heatlevel', 32)) # Type: float
instance.heattime = float(entity_data.get('heattime', 0)) # Type: float
class env_entity_igniter(Targetname):
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.target = None # Type: target_destination
self.lifetime = 10 # Type: float
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.target = entity_data.get('target', None) # Type: target_destination
instance.lifetime = float(entity_data.get('lifetime', 10)) # Type: float
class env_fog_controller(Targetname, DXLevelChoice, Angles):
icon_sprite = "editor/fog_controller.vmt"
def __init__(self):
        Targetname.__init__(self)
        DXLevelChoice.__init__(self)
        Angles.__init__(self)
self.origin = [0, 0, 0]
self.fogenable = None # Type: choices
self.fogblend = None # Type: choices
self.use_angles = None # Type: choices
self.fogcolor = [255, 255, 255] # Type: color255
self.fogcolor2 = [255, 255, 255] # Type: color255
self.fogdir = "1 0 0" # Type: string
self.fogstart = "500.0" # Type: string
self.fogend = "2000.0" # Type: string
self.fogmaxdensity = 1 # Type: float
self.foglerptime = 0 # Type: float
self.farz = "-1" # Type: string
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
DXLevelChoice.from_dict(instance, entity_data)
Angles.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.fogenable = entity_data.get('fogenable', None) # Type: choices
instance.fogblend = entity_data.get('fogblend', None) # Type: choices
instance.use_angles = entity_data.get('use_angles', None) # Type: choices
instance.fogcolor = parse_int_vector(entity_data.get('fogcolor', "255 255 255")) # Type: color255
instance.fogcolor2 = parse_int_vector(entity_data.get('fogcolor2', "255 255 255")) # Type: color255
instance.fogdir = entity_data.get('fogdir', "1 0 0") # Type: string
instance.fogstart = entity_data.get('fogstart', "500.0") # Type: string
instance.fogend = entity_data.get('fogend', "2000.0") # Type: string
instance.fogmaxdensity = float(entity_data.get('fogmaxdensity', 1)) # Type: float
instance.foglerptime = float(entity_data.get('foglerptime', 0)) # Type: float
instance.farz = entity_data.get('farz', "-1") # Type: string
class env_steam(Targetname, Parentname, Angles):
viewport_model = "models/editor/spot_cone.mdl"
def __init__(self):
        Targetname.__init__(self)
        Parentname.__init__(self)
        Angles.__init__(self)
self.origin = [0, 0, 0]
self.InitialState = None # Type: choices
self.type = None # Type: choices
self.SpreadSpeed = 15 # Type: integer
self.Speed = 120 # Type: integer
self.StartSize = 10 # Type: integer
self.EndSize = 25 # Type: integer
self.Rate = 26 # Type: integer
self.rendercolor = [255, 255, 255] # Type: color255
self.JetLength = 80 # Type: integer
self.renderamt = 255 # Type: integer
self.rollspeed = 8 # Type: float
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
Angles.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.InitialState = entity_data.get('initialstate', None) # Type: choices
instance.type = entity_data.get('type', None) # Type: choices
instance.SpreadSpeed = parse_source_value(entity_data.get('spreadspeed', 15)) # Type: integer
instance.Speed = parse_source_value(entity_data.get('speed', 120)) # Type: integer
instance.StartSize = parse_source_value(entity_data.get('startsize', 10)) # Type: integer
instance.EndSize = parse_source_value(entity_data.get('endsize', 25)) # Type: integer
instance.Rate = parse_source_value(entity_data.get('rate', 26)) # Type: integer
instance.rendercolor = parse_int_vector(entity_data.get('rendercolor', "255 255 255")) # Type: color255
instance.JetLength = parse_source_value(entity_data.get('jetlength', 80)) # Type: integer
instance.renderamt = parse_source_value(entity_data.get('renderamt', 255)) # Type: integer
instance.rollspeed = float(entity_data.get('rollspeed', 8)) # Type: float
class env_laser(Targetname, RenderFxChoices, Parentname):
def __init__(self):
        Targetname.__init__(self)
        RenderFxChoices.__init__(self)
        Parentname.__init__(self)
self.origin = [0, 0, 0]
self.LaserTarget = None # Type: target_destination
self.renderamt = 100 # Type: integer
self.rendercolor = [255, 255, 255] # Type: color255
self.width = 2 # Type: float
self.NoiseAmplitude = None # Type: integer
self.texture = "sprites/laserbeam.spr" # Type: sprite
self.EndSprite = None # Type: sprite
self.TextureScroll = 35 # Type: integer
self.framestart = None # Type: integer
self.damage = "100" # Type: string
self.dissolvetype = "CHOICES NOT SUPPORTED" # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
RenderFxChoices.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.LaserTarget = entity_data.get('lasertarget', None) # Type: target_destination
instance.renderamt = parse_source_value(entity_data.get('renderamt', 100)) # Type: integer
instance.rendercolor = parse_int_vector(entity_data.get('rendercolor', "255 255 255")) # Type: color255
instance.width = float(entity_data.get('width', 2)) # Type: float
instance.NoiseAmplitude = parse_source_value(entity_data.get('noiseamplitude', 0)) # Type: integer
instance.texture = entity_data.get('texture', "sprites/laserbeam.spr") # Type: sprite
instance.EndSprite = entity_data.get('endsprite', None) # Type: sprite
instance.TextureScroll = parse_source_value(entity_data.get('texturescroll', 35)) # Type: integer
instance.framestart = parse_source_value(entity_data.get('framestart', 0)) # Type: integer
instance.damage = entity_data.get('damage', "100") # Type: string
instance.dissolvetype = entity_data.get('dissolvetype', "CHOICES NOT SUPPORTED") # Type: choices
class env_message(Targetname):
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.message = None # Type: string
self.messagesound = None # Type: sound
self.messagevolume = "10" # Type: string
self.messageattenuation = None # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.message = entity_data.get('message', None) # Type: string
instance.messagesound = entity_data.get('messagesound', None) # Type: sound
instance.messagevolume = entity_data.get('messagevolume', "10") # Type: string
instance.messageattenuation = entity_data.get('messageattenuation', None) # Type: choices
class env_hudhint(Targetname):
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.message = None # Type: string
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.message = entity_data.get('message', None) # Type: string
class env_shake(Targetname, Parentname):
icon_sprite = "editor/env_shake.vmt"
def __init__(self):
        Targetname.__init__(self)
        Parentname.__init__(self)
self.origin = [0, 0, 0]
self.amplitude = 4 # Type: float
self.radius = 500 # Type: float
self.duration = 1 # Type: float
self.frequency = 2.5 # Type: float
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.amplitude = float(entity_data.get('amplitude', 4)) # Type: float
instance.radius = float(entity_data.get('radius', 500)) # Type: float
instance.duration = float(entity_data.get('duration', 1)) # Type: float
instance.frequency = float(entity_data.get('frequency', 2.5)) # Type: float
class env_viewpunch(Targetname, Parentname):
def __init__(self):
        Targetname.__init__(self)
        Parentname.__init__(self)
self.origin = [0, 0, 0]
self.punchangle = [0.0, 0.0, 90.0] # Type: angle
self.radius = 500 # Type: float
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.punchangle = parse_float_vector(entity_data.get('punchangle', "0 0 90")) # Type: angle
instance.radius = float(entity_data.get('radius', 500)) # Type: float
class env_rotorwash_emitter(Targetname, Parentname):
def __init__(self):
        Targetname.__init__(self)
        Parentname.__init__(self)
self.origin = [0, 0, 0]
self.altitude = 1024 # Type: float
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.altitude = float(entity_data.get('altitude', 1024)) # Type: float
class gibshooter(gibshooterbase):
icon_sprite = "editor/gibshooter.vmt"
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
@staticmethod
def from_dict(instance, entity_data: dict):
gibshooterbase.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
class env_shooter(gibshooterbase, RenderFields):
icon_sprite = "editor/env_shooter.vmt"
def __init__(self):
        gibshooterbase.__init__(self)
        RenderFields.__init__(self)
self.origin = [0, 0, 0]
self.shootmodel = None # Type: studio
self.shootsounds = "CHOICES NOT SUPPORTED" # Type: choices
self.simulation = None # Type: choices
self.skin = None # Type: integer
self.nogibshadows = None # Type: choices
self.gibgravityscale = 1 # Type: float
self.massoverride = 0 # Type: float
@staticmethod
def from_dict(instance, entity_data: dict):
gibshooterbase.from_dict(instance, entity_data)
RenderFields.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.shootmodel = entity_data.get('shootmodel', None) # Type: studio
instance.shootsounds = entity_data.get('shootsounds', "CHOICES NOT SUPPORTED") # Type: choices
instance.simulation = entity_data.get('simulation', None) # Type: choices
instance.skin = parse_source_value(entity_data.get('skin', 0)) # Type: integer
instance.nogibshadows = entity_data.get('nogibshadows', None) # Type: choices
instance.gibgravityscale = float(entity_data.get('gibgravityscale', 1)) # Type: float
instance.massoverride = float(entity_data.get('massoverride', 0)) # Type: float
class env_rotorshooter(gibshooterbase, RenderFields):
icon_sprite = "editor/env_shooter.vmt"
def __init__(self):
        gibshooterbase.__init__(self)
        RenderFields.__init__(self)
self.origin = [0, 0, 0]
self.shootmodel = None # Type: studio
self.shootsounds = "CHOICES NOT SUPPORTED" # Type: choices
self.simulation = None # Type: choices
self.skin = None # Type: integer
self.rotortime = 1 # Type: float
self.rotortimevariance = 0.3 # Type: float
@staticmethod
def from_dict(instance, entity_data: dict):
gibshooterbase.from_dict(instance, entity_data)
RenderFields.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.shootmodel = entity_data.get('shootmodel', None) # Type: studio
instance.shootsounds = entity_data.get('shootsounds', "CHOICES NOT SUPPORTED") # Type: choices
instance.simulation = entity_data.get('simulation', None) # Type: choices
instance.skin = parse_source_value(entity_data.get('skin', 0)) # Type: integer
instance.rotortime = float(entity_data.get('rotortime', 1)) # Type: float
instance.rotortimevariance = float(entity_data.get('rotortimevariance', 0.3)) # Type: float
class env_soundscape_proxy(Targetname, Parentname):
icon_sprite = "editor/env_soundscape.vmt"
def __init__(self):
        Targetname.__init__(self)
        Parentname.__init__(self)
self.origin = [0, 0, 0]
self.MainSoundscapeName = None # Type: target_destination
self.radius = 128 # Type: integer
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.MainSoundscapeName = entity_data.get('mainsoundscapename', None) # Type: target_destination
instance.radius = parse_source_value(entity_data.get('radius', 128)) # Type: integer
class env_soundscape(Targetname, Parentname, EnableDisable):
icon_sprite = "editor/env_soundscape.vmt"
def __init__(self):
        Targetname.__init__(self)
        Parentname.__init__(self)
        EnableDisable.__init__(self)
self.origin = [0, 0, 0]
self.radius = 128 # Type: integer
self.soundscape = "CHOICES NOT SUPPORTED" # Type: choices
self.position0 = None # Type: target_destination
self.position1 = None # Type: target_destination
self.position2 = None # Type: target_destination
self.position3 = None # Type: target_destination
self.position4 = None # Type: target_destination
self.position5 = None # Type: target_destination
self.position6 = None # Type: target_destination
self.position7 = None # Type: target_destination
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
EnableDisable.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.radius = parse_source_value(entity_data.get('radius', 128)) # Type: integer
instance.soundscape = entity_data.get('soundscape', "CHOICES NOT SUPPORTED") # Type: choices
instance.position0 = entity_data.get('position0', None) # Type: target_destination
instance.position1 = entity_data.get('position1', None) # Type: target_destination
instance.position2 = entity_data.get('position2', None) # Type: target_destination
instance.position3 = entity_data.get('position3', None) # Type: target_destination
instance.position4 = entity_data.get('position4', None) # Type: target_destination
instance.position5 = entity_data.get('position5', None) # Type: target_destination
instance.position6 = entity_data.get('position6', None) # Type: target_destination
instance.position7 = entity_data.get('position7', None) # Type: target_destination
class env_soundscape_triggerable(env_soundscape):
icon_sprite = "editor/env_soundscape.vmt"
def __init__(self):
        env_soundscape.__init__(self)
self.origin = [0, 0, 0]
@staticmethod
def from_dict(instance, entity_data: dict):
env_soundscape.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
class env_spark(Targetname, Parentname, Angles):
icon_sprite = "editor/env_spark.vmt"
def __init__(self):
        Targetname.__init__(self)
        Parentname.__init__(self)
        Angles.__init__(self)
self.origin = [0, 0, 0]
self.MaxDelay = "0" # Type: string
self.Magnitude = "CHOICES NOT SUPPORTED" # Type: choices
self.TrailLength = "CHOICES NOT SUPPORTED" # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
Angles.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.MaxDelay = entity_data.get('maxdelay', "0") # Type: string
instance.Magnitude = entity_data.get('magnitude', "CHOICES NOT SUPPORTED") # Type: choices
instance.TrailLength = entity_data.get('traillength', "CHOICES NOT SUPPORTED") # Type: choices
class env_sprite(Targetname, Parentname, RenderFields, DXLevelChoice):
def __init__(self):
        RenderFields.__init__(self)
        Targetname.__init__(self)
        Parentname.__init__(self)
        DXLevelChoice.__init__(self)
self.origin = [0, 0, 0]
self.framerate = "10.0" # Type: string
self.model = "sprites/glow01.spr" # Type: sprite
self.scale = None # Type: string
self.GlowProxySize = 2.0 # Type: float
self.HDRColorScale = 1.0 # Type: float
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
RenderFields.from_dict(instance, entity_data)
DXLevelChoice.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.framerate = entity_data.get('framerate', "10.0") # Type: string
instance.model = entity_data.get('model', "sprites/glow01.spr") # Type: sprite
instance.scale = entity_data.get('scale', None) # Type: string
instance.GlowProxySize = float(entity_data.get('glowproxysize', 2.0)) # Type: float
instance.HDRColorScale = float(entity_data.get('hdrcolorscale', 1.0)) # Type: float
class env_sprite_oriented(Angles, env_sprite):
def __init__(self):
        env_sprite.__init__(self)
        Angles.__init__(self)
self.origin = [0, 0, 0]
@staticmethod
def from_dict(instance, entity_data: dict):
Angles.from_dict(instance, entity_data)
env_sprite.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
class env_wind(Targetname, Angles):
icon_sprite = "editor/env_wind.vmt"
def __init__(self):
        Targetname.__init__(self)
        Angles.__init__(self)
self.origin = [0, 0, 0]
self.minwind = 20 # Type: integer
self.maxwind = 50 # Type: integer
self.mingust = 100 # Type: integer
self.maxgust = 250 # Type: integer
self.mingustdelay = 10 # Type: integer
self.maxgustdelay = 20 # Type: integer
self.gustduration = 5 # Type: integer
self.gustdirchange = 20 # Type: integer
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Angles.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.minwind = parse_source_value(entity_data.get('minwind', 20)) # Type: integer
instance.maxwind = parse_source_value(entity_data.get('maxwind', 50)) # Type: integer
instance.mingust = parse_source_value(entity_data.get('mingust', 100)) # Type: integer
instance.maxgust = parse_source_value(entity_data.get('maxgust', 250)) # Type: integer
instance.mingustdelay = parse_source_value(entity_data.get('mingustdelay', 10)) # Type: integer
instance.maxgustdelay = parse_source_value(entity_data.get('maxgustdelay', 20)) # Type: integer
instance.gustduration = parse_source_value(entity_data.get('gustduration', 5)) # Type: integer
instance.gustdirchange = parse_source_value(entity_data.get('gustdirchange', 20)) # Type: integer
class sky_camera(Angles):
def __init__(self):
        Angles.__init__(self)
self.origin = [0, 0, 0]
self.scale = 16 # Type: integer
self.fogenable = None # Type: choices
self.fogblend = None # Type: choices
self.use_angles = None # Type: choices
self.fogcolor = [255, 255, 255] # Type: color255
self.fogcolor2 = [255, 255, 255] # Type: color255
self.fogdir = "1 0 0" # Type: string
self.fogstart = "500.0" # Type: string
self.fogend = "2000.0" # Type: string
@staticmethod
def from_dict(instance, entity_data: dict):
Angles.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.scale = parse_source_value(entity_data.get('scale', 16)) # Type: integer
instance.fogenable = entity_data.get('fogenable', None) # Type: choices
instance.fogblend = entity_data.get('fogblend', None) # Type: choices
instance.use_angles = entity_data.get('use_angles', None) # Type: choices
instance.fogcolor = parse_int_vector(entity_data.get('fogcolor', "255 255 255")) # Type: color255
instance.fogcolor2 = parse_int_vector(entity_data.get('fogcolor2', "255 255 255")) # Type: color255
instance.fogdir = entity_data.get('fogdir', "1 0 0") # Type: string
instance.fogstart = entity_data.get('fogstart', "500.0") # Type: string
instance.fogend = entity_data.get('fogend', "2000.0") # Type: string
class BaseSpeaker(Targetname, ResponseContext):
def __init__(self):
        Targetname.__init__(self)
        ResponseContext.__init__(self)
self.delaymin = "15" # Type: string
self.delaymax = "135" # Type: string
self.rulescript = None # Type: string
self.concept = None # Type: string
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
ResponseContext.from_dict(instance, entity_data)
instance.delaymin = entity_data.get('delaymin', "15") # Type: string
instance.delaymax = entity_data.get('delaymax', "135") # Type: string
instance.rulescript = entity_data.get('rulescript', None) # Type: string
instance.concept = entity_data.get('concept', None) # Type: string
class game_weapon_manager(Targetname):
def __init__(self):
        Targetname.__init__(self)
self.origin = [0, 0, 0]
self.weaponname = None # Type: string
self.maxpieces = None # Type: integer
self.ammomod = 1 # Type: float
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.weaponname = entity_data.get('weaponname', None) # Type: string
instance.maxpieces = parse_source_value(entity_data.get('maxpieces', 0)) # Type: integer
instance.ammomod = float(entity_data.get('ammomod', 1)) # Type: float
class game_end(Targetname):
icon_sprite = "editor/game_end.vmt"
def __init__(self):
        Targetname.__init__(self)
self.origin = [0, 0, 0]
self.master = None # Type: string
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.master = entity_data.get('master', None) # Type: string
class game_player_equip(Targetname):
def __init__(self):
        Targetname.__init__(self)
self.origin = [0, 0, 0]
self.master = None # Type: string
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.master = entity_data.get('master', None) # Type: string
class game_player_team(Targetname):
def __init__(self):
        Targetname.__init__(self)
self.origin = [0, 0, 0]
self.target = None # Type: string
self.master = None # Type: string
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.target = entity_data.get('target', None) # Type: string
instance.master = entity_data.get('master', None) # Type: string
class game_score(Targetname):
def __init__(self):
        Targetname.__init__(self)
self.origin = [0, 0, 0]
self.points = 1 # Type: integer
self.master = None # Type: string
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.points = parse_source_value(entity_data.get('points', 1)) # Type: integer
instance.master = entity_data.get('master', None) # Type: string
class game_text(Targetname):
icon_sprite = "editor/game_text.vmt"
def __init__(self):
        Targetname.__init__(self)
self.origin = [0, 0, 0]
self.message = None # Type: string
self.x = "-1" # Type: string
self.y = "-1" # Type: string
self.effect = None # Type: choices
self.color = [100, 100, 100] # Type: color255
self.color2 = [240, 110, 0] # Type: color255
self.fadein = "1.5" # Type: string
self.fadeout = "0.5" # Type: string
self.holdtime = "1.2" # Type: string
self.fxtime = "0.25" # Type: string
self.channel = "CHOICES NOT SUPPORTED" # Type: choices
self.master = None # Type: string
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.message = entity_data.get('message', None) # Type: string
instance.x = entity_data.get('x', "-1") # Type: string
instance.y = entity_data.get('y', "-1") # Type: string
instance.effect = entity_data.get('effect', None) # Type: choices
instance.color = parse_int_vector(entity_data.get('color', "100 100 100")) # Type: color255
instance.color2 = parse_int_vector(entity_data.get('color2', "240 110 0")) # Type: color255
instance.fadein = entity_data.get('fadein', "1.5") # Type: string
instance.fadeout = entity_data.get('fadeout', "0.5") # Type: string
instance.holdtime = entity_data.get('holdtime', "1.2") # Type: string
instance.fxtime = entity_data.get('fxtime', "0.25") # Type: string
instance.channel = entity_data.get('channel', "CHOICES NOT SUPPORTED") # Type: choices
instance.master = entity_data.get('master', None) # Type: string
class point_enable_motion_fixup(Parentname, Angles):
def __init__(self):
        Parentname.__init__(self)
        Angles.__init__(self)
self.origin = [0, 0, 0]
@staticmethod
def from_dict(instance, entity_data: dict):
Parentname.from_dict(instance, entity_data)
Angles.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
class point_message(Targetname, Parentname):
def __init__(self):
        Targetname.__init__(self)
        Parentname.__init__(self)
self.origin = [0, 0, 0]
self.message = None # Type: string
self.radius = 128 # Type: integer
self.developeronly = None # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.message = entity_data.get('message', None) # Type: string
instance.radius = parse_source_value(entity_data.get('radius', 128)) # Type: integer
instance.developeronly = entity_data.get('developeronly', None) # Type: choices
class point_spotlight(Angles, DXLevelChoice, Parentname, RenderFields, Targetname):
model = "models/editor/cone_helper.mdl"
def __init__(self):
        RenderFields.__init__(self)
        Angles.__init__(self)
        DXLevelChoice.__init__(self)
        Parentname.__init__(self)
        Targetname.__init__(self)
self.origin = [0, 0, 0]
self.IgnoreSolid = None # Type: choices
self.spotlightlength = 500 # Type: integer
self.spotlightwidth = 50 # Type: integer
self.HDRColorScale = 1.0 # Type: float
@staticmethod
def from_dict(instance, entity_data: dict):
Angles.from_dict(instance, entity_data)
DXLevelChoice.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
RenderFields.from_dict(instance, entity_data)
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.IgnoreSolid = entity_data.get('ignoresolid', None) # Type: choices
instance.spotlightlength = parse_source_value(entity_data.get('spotlightlength', 500)) # Type: integer
instance.spotlightwidth = parse_source_value(entity_data.get('spotlightwidth', 50)) # Type: integer
instance.HDRColorScale = float(entity_data.get('hdrcolorscale', 1.0)) # Type: float
class point_tesla(Targetname, Parentname):
def __init__(self):
        Targetname.__init__(self)
        Parentname.__init__(self)
self.origin = [0, 0, 0]
self.m_SourceEntityName = None # Type: string
self.m_SoundName = "DoSpark" # Type: string
self.texture = "sprites/physbeam.vmt" # Type: sprite
self.m_Color = [255, 255, 255] # Type: color255
self.m_flRadius = 200 # Type: integer
self.beamcount_min = 6 # Type: integer
self.beamcount_max = 8 # Type: integer
self.thick_min = "4" # Type: string
self.thick_max = "5" # Type: string
self.lifetime_min = "0.3" # Type: string
self.lifetime_max = "0.3" # Type: string
self.interval_min = "0.5" # Type: string
self.interval_max = "2" # Type: string
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.m_SourceEntityName = entity_data.get('m_sourceentityname', None) # Type: string
instance.m_SoundName = entity_data.get('m_soundname', "DoSpark") # Type: string
instance.texture = entity_data.get('texture', "sprites/physbeam.vmt") # Type: sprite
instance.m_Color = parse_int_vector(entity_data.get('m_color', "255 255 255")) # Type: color255
instance.m_flRadius = parse_source_value(entity_data.get('m_flradius', 200)) # Type: integer
instance.beamcount_min = parse_source_value(entity_data.get('beamcount_min', 6)) # Type: integer
instance.beamcount_max = parse_source_value(entity_data.get('beamcount_max', 8)) # Type: integer
instance.thick_min = entity_data.get('thick_min', "4") # Type: string
instance.thick_max = entity_data.get('thick_max', "5") # Type: string
instance.lifetime_min = entity_data.get('lifetime_min', "0.3") # Type: string
instance.lifetime_max = entity_data.get('lifetime_max', "0.3") # Type: string
instance.interval_min = entity_data.get('interval_min', "0.5") # Type: string
instance.interval_max = entity_data.get('interval_max', "2") # Type: string
class point_clientcommand(Targetname):
def __init__(self):
        Targetname.__init__(self)
self.origin = [0, 0, 0]
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
class point_servercommand(Targetname):
def __init__(self):
        Targetname.__init__(self)
self.origin = [0, 0, 0]
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
class point_bonusmaps_accessor(Targetname):
def __init__(self):
        Targetname.__init__(self)
self.origin = [0, 0, 0]
self.filename = None # Type: string
self.mapname = None # Type: string
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.filename = entity_data.get('filename', None) # Type: string
instance.mapname = entity_data.get('mapname', None) # Type: string
class game_ui(Targetname):
def __init__(self):
        Targetname.__init__(self)
self.origin = [0, 0, 0]
self.FieldOfView = -1.0 # Type: float
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.FieldOfView = float(entity_data.get('fieldofview', -1.0)) # Type: float
class game_zone_player(Targetname, Parentname):
def __init__(self):
        Targetname.__init__(self)
        Parentname.__init__(self)
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
class infodecal(Targetname):
model = "models/editor/axis_helper_thick.mdl"
def __init__(self):
        Targetname.__init__(self)
self.origin = [0, 0, 0]
self.texture = None # Type: decal
self.LowPriority = None # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.texture = entity_data.get('texture', None) # Type: decal
instance.LowPriority = entity_data.get('lowpriority', None) # Type: choices
class info_projecteddecal(Angles, Targetname):
model = "models/editor/axis_helper_thick.mdl"
def __init__(self):
        Angles.__init__(self)
        Targetname.__init__(self)
self.origin = [0, 0, 0]
self.texture = None # Type: decal
self.Distance = 64 # Type: float
@staticmethod
def from_dict(instance, entity_data: dict):
Angles.from_dict(instance, entity_data)
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.texture = entity_data.get('texture', None) # Type: decal
instance.Distance = float(entity_data.get('distance', 64)) # Type: float
class info_no_dynamic_shadow(Base):
def __init__(self):
super().__init__()
self.origin = [0, 0, 0]
self.sides = None # Type: sidelist
@staticmethod
def from_dict(instance, entity_data: dict):
Base.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.sides = entity_data.get('sides', None) # Type: sidelist
class info_player_start(Angles, PlayerClass):
model = "models/editor/playerstart.mdl"
def __init__(self):
        Angles.__init__(self)
        PlayerClass.__init__(self)
self.origin = [0, 0, 0]
@staticmethod
def from_dict(instance, entity_data: dict):
Angles.from_dict(instance, entity_data)
PlayerClass.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
class info_overlay(Targetname):
model = "models/editor/overlay_helper.mdl"
def __init__(self):
        Targetname.__init__(self)
self.origin = [0, 0, 0]
self.material = None # Type: material
self.sides = None # Type: sidelist
self.RenderOrder = None # Type: integer
self.StartU = 0.0 # Type: float
self.EndU = 1.0 # Type: float
self.StartV = 0.0 # Type: float
self.EndV = 1.0 # Type: float
self.BasisOrigin = None # Type: vector
self.BasisU = None # Type: vector
self.BasisV = None # Type: vector
self.BasisNormal = None # Type: vector
self.uv0 = None # Type: vector
self.uv1 = None # Type: vector
self.uv2 = None # Type: vector
self.uv3 = None # Type: vector
self.fademindist = -1 # Type: float
self.fademaxdist = None # Type: float
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.material = entity_data.get('material', None) # Type: material
instance.sides = entity_data.get('sides', None) # Type: sidelist
instance.RenderOrder = parse_source_value(entity_data.get('renderorder', 0)) # Type: integer
instance.StartU = float(entity_data.get('startu', 0.0)) # Type: float
instance.EndU = float(entity_data.get('endu', 1.0)) # Type: float
instance.StartV = float(entity_data.get('startv', 0.0)) # Type: float
instance.EndV = float(entity_data.get('endv', 1.0)) # Type: float
instance.BasisOrigin = parse_float_vector(entity_data.get('basisorigin', "0 0 0")) # Type: vector
instance.BasisU = parse_float_vector(entity_data.get('basisu', "0 0 0")) # Type: vector
instance.BasisV = parse_float_vector(entity_data.get('basisv', "0 0 0")) # Type: vector
instance.BasisNormal = parse_float_vector(entity_data.get('basisnormal', "0 0 0")) # Type: vector
instance.uv0 = parse_float_vector(entity_data.get('uv0', "0 0 0")) # Type: vector
instance.uv1 = parse_float_vector(entity_data.get('uv1', "0 0 0")) # Type: vector
instance.uv2 = parse_float_vector(entity_data.get('uv2', "0 0 0")) # Type: vector
instance.uv3 = parse_float_vector(entity_data.get('uv3', "0 0 0")) # Type: vector
instance.fademindist = float(entity_data.get('fademindist', -1)) # Type: float
instance.fademaxdist = float(entity_data.get('fademaxdist', 0)) # Type: float
class info_overlay_transition(Base):
def __init__(self):
super().__init__()
self.origin = [0, 0, 0]
self.material = None # Type: material
self.sides = None # Type: sidelist
self.sides2 = None # Type: sidelist
self.LengthTexcoordStart = 0.0 # Type: float
self.LengthTexcoordEnd = 1.0 # Type: float
self.WidthTexcoordStart = 0.0 # Type: float
self.WidthTexcoordEnd = 1.0 # Type: float
self.Width1 = 25.0 # Type: float
self.Width2 = 25.0 # Type: float
self.DebugDraw = None # Type: integer
@staticmethod
def from_dict(instance, entity_data: dict):
Base.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.material = entity_data.get('material', None) # Type: material
instance.sides = entity_data.get('sides', None) # Type: sidelist
instance.sides2 = entity_data.get('sides2', None) # Type: sidelist
instance.LengthTexcoordStart = float(entity_data.get('lengthtexcoordstart', 0.0)) # Type: float
instance.LengthTexcoordEnd = float(entity_data.get('lengthtexcoordend', 1.0)) # Type: float
instance.WidthTexcoordStart = float(entity_data.get('widthtexcoordstart', 0.0)) # Type: float
instance.WidthTexcoordEnd = float(entity_data.get('widthtexcoordend', 1.0)) # Type: float
instance.Width1 = float(entity_data.get('width1', 25.0)) # Type: float
instance.Width2 = float(entity_data.get('width2', 25.0)) # Type: float
instance.DebugDraw = parse_source_value(entity_data.get('debugdraw', 0)) # Type: integer
class info_intermission(Base):
def __init__(self):
super().__init__()
self.origin = [0, 0, 0]
self.target = None # Type: target_destination
@staticmethod
def from_dict(instance, entity_data: dict):
Base.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.target = entity_data.get('target', None) # Type: target_destination
class info_landmark(Targetname):
icon_sprite = "editor/info_landmark"
def __init__(self):
        Targetname.__init__(self)
self.origin = [0, 0, 0]
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
class info_null(Targetname):
def __init__(self):
        Targetname.__init__(self)
self.origin = [0, 0, 0]
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
class info_target(Targetname, Parentname, Angles):
icon_sprite = "editor/info_target.vmt"
def __init__(self):
        Targetname.__init__(self)
        Parentname.__init__(self)
        Angles.__init__(self)
self.origin = [0, 0, 0]
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
Angles.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
class info_particle_system(Targetname, Parentname, Angles):
model = "models/editor/cone_helper.mdl"
def __init__(self):
        Targetname.__init__(self)
        Parentname.__init__(self)
        Angles.__init__(self)
self.origin = [0, 0, 0]
self.effect_name = None # Type: string
self.start_active = None # Type: choices
self.flag_as_weather = None # Type: choices
        # Control points cpoint1..cpoint63  # Type: target_destination
        for i in range(1, 64):
            setattr(self, f'cpoint{i}', None)
        # Parent indices for the first seven control points  # Type: integer
        for i in range(1, 8):
            setattr(self, f'cpoint{i}_parent', None)
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
Angles.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.effect_name = entity_data.get('effect_name', None) # Type: string
instance.start_active = entity_data.get('start_active', None) # Type: choices
instance.flag_as_weather = entity_data.get('flag_as_weather', None) # Type: choices
        for i in range(1, 64):
            setattr(instance, f'cpoint{i}',
                    entity_data.get(f'cpoint{i}', None))  # Type: target_destination
        for i in range(1, 8):
            setattr(instance, f'cpoint{i}_parent',
                    parse_source_value(entity_data.get(f'cpoint{i}_parent', 0)))  # Type: integer
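# The loop-assigned control points remain ordinary instance attributes, so
# both inst.cpoint5 and getattr(inst, f'cpoint{n}') work as before.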
class phys_ragdollmagnet(Targetname, Parentname, EnableDisable, Angles):
icon_sprite = "editor/info_target.vmt"
def __init__(self):
        Targetname.__init__(self)
        Parentname.__init__(self)
        EnableDisable.__init__(self)
        Angles.__init__(self)
self.origin = [0, 0, 0]
self.axis = None # Type: vecline
self.radius = 512 # Type: float
self.force = 5000 # Type: float
self.target = None # Type: string
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
EnableDisable.from_dict(instance, entity_data)
Angles.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.axis = entity_data.get('axis', None) # Type: vecline
instance.radius = float(entity_data.get('radius', 512)) # Type: float
instance.force = float(entity_data.get('force', 5000)) # Type: float
instance.target = entity_data.get('target', None) # Type: string
class info_lighting(Targetname):
icon_sprite = "editor/info_lighting.vmt"
def __init__(self):
        Targetname.__init__(self)
self.origin = [0, 0, 0]
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
class info_teleport_destination(Targetname, Parentname, PlayerClass, Angles):
model = "models/editor/playerstart.mdl"
def __init__(self):
        Targetname.__init__(self)
        Parentname.__init__(self)
        PlayerClass.__init__(self)
        Angles.__init__(self)
self.origin = [0, 0, 0]
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
PlayerClass.from_dict(instance, entity_data)
Angles.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
class info_node(Node):
model = "models/editor/ground_node.mdl"
def __init__(self):
        Node.__init__(self)
self.origin = [0, 0, 0]
@staticmethod
def from_dict(instance, entity_data: dict):
Node.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
class info_node_hint(Targetname, HintNode, Angles):
model = "models/editor/ground_node_hint.mdl"
def __init__(self):
        HintNode.__init__(self)
        Targetname.__init__(self)
        Angles.__init__(self)
self.origin = [0, 0, 0]
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
HintNode.from_dict(instance, entity_data)
Angles.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
class info_node_air(Node):
model = "models/editor/air_node.mdl"
def __init__(self):
        Node.__init__(self)
self.origin = [0, 0, 0]
self.nodeheight = None # Type: integer
@staticmethod
def from_dict(instance, entity_data: dict):
Node.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.nodeheight = parse_source_value(entity_data.get('nodeheight', 0)) # Type: integer
class info_node_air_hint(Angles, HintNode, Targetname):
model = "models/editor/air_node_hint.mdl"
def __init__(self):
        HintNode.__init__(self)
        Angles.__init__(self)
        Targetname.__init__(self)
self.origin = [0, 0, 0]
self.nodeheight = None # Type: integer
@staticmethod
def from_dict(instance, entity_data: dict):
Angles.from_dict(instance, entity_data)
HintNode.from_dict(instance, entity_data)
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.nodeheight = parse_source_value(entity_data.get('nodeheight', 0)) # Type: integer
class info_hint(Targetname, HintNode, Angles):
model = "models/editor/node_hint.mdl"
def __init__(self):
        HintNode.__init__(self)
        Targetname.__init__(self)
        Angles.__init__(self)
self.origin = [0, 0, 0]
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
HintNode.from_dict(instance, entity_data)
Angles.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
class info_node_link(Targetname):
def __init__(self):
        Targetname.__init__(self)
self.origin = [0, 0, 0]
self.StartNode = None # Type: node_dest
self.EndNode = None # Type: node_dest
self.initialstate = "CHOICES NOT SUPPORTED" # Type: choices
self.linktype = "CHOICES NOT SUPPORTED" # Type: choices
self.AllowUse = None # Type: string
self.InvertAllow = None # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.StartNode = parse_source_value(entity_data.get('startnode', 0)) # Type: node_dest
instance.EndNode = parse_source_value(entity_data.get('endnode', 0)) # Type: node_dest
instance.initialstate = entity_data.get('initialstate', "CHOICES NOT SUPPORTED") # Type: choices
instance.linktype = entity_data.get('linktype', "CHOICES NOT SUPPORTED") # Type: choices
instance.AllowUse = entity_data.get('allowuse', None) # Type: string
instance.InvertAllow = entity_data.get('invertallow', None) # Type: choices
class info_node_link_controller(Targetname):
def __init__(self):
        Targetname.__init__(self)
self.origin = [0, 0, 0]
self.mins = [-8.0, -32.0, -36.0] # Type: vector
self.maxs = [8.0, 32.0, 36.0] # Type: vector
self.initialstate = "CHOICES NOT SUPPORTED" # Type: choices
self.useairlinkradius = None # Type: choices
self.AllowUse = None # Type: string
self.InvertAllow = None # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.mins = parse_float_vector(entity_data.get('mins', "-8 -32 -36")) # Type: vector
instance.maxs = parse_float_vector(entity_data.get('maxs', "8 32 36")) # Type: vector
instance.initialstate = entity_data.get('initialstate', "CHOICES NOT SUPPORTED") # Type: choices
instance.useairlinkradius = entity_data.get('useairlinkradius', None) # Type: choices
instance.AllowUse = entity_data.get('allowuse', None) # Type: string
instance.InvertAllow = entity_data.get('invertallow', None) # Type: choices
class info_radial_link_controller(Targetname, Parentname):
def __init__(self):
        Targetname.__init__(self)
        Parentname.__init__(self)
self.origin = [0, 0, 0]
self.radius = 120 # Type: float
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.radius = float(entity_data.get('radius', 120)) # Type: float
class info_node_climb(Targetname, HintNode, Angles):
model = "models/editor/climb_node.mdl"
def __init__(self):
        HintNode.__init__(self)
        Targetname.__init__(self)
        Angles.__init__(self)
self.origin = [0, 0, 0]
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
HintNode.from_dict(instance, entity_data)
Angles.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
class light(Targetname, Light):
icon_sprite = "editor/light.vmt"
def __init__(self):
        Targetname.__init__(self)
        Light.__init__(self)
self.origin = [0, 0, 0]
self.target = None # Type: target_destination
self._distance = None # Type: integer
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Light.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.target = entity_data.get('target', None) # Type: target_destination
instance._distance = parse_source_value(entity_data.get('_distance', 0)) # Type: integer
class light_environment(Angles):
icon_sprite = "editor/light_env.vmt"
def __init__(self):
        Angles.__init__(self)
self.origin = [0, 0, 0]
self.pitch = None # Type: integer
self._light = [255, 255, 255, 200] # Type: color255
self._ambient = [255, 255, 255, 20] # Type: color255
self._lightHDR = [-1, -1, -1, 1] # Type: color255
self._lightscaleHDR = 1 # Type: float
self._ambientHDR = [-1, -1, -1, 1] # Type: color255
self._AmbientScaleHDR = 1 # Type: float
self.SunSpreadAngle = None # Type: float
@staticmethod
def from_dict(instance, entity_data: dict):
Angles.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.pitch = parse_source_value(entity_data.get('pitch', 0)) # Type: integer
instance._light = parse_int_vector(entity_data.get('_light', "255 255 255 200")) # Type: color255
instance._ambient = parse_int_vector(entity_data.get('_ambient', "255 255 255 20")) # Type: color255
instance._lightHDR = parse_int_vector(entity_data.get('_lighthdr', "-1 -1 -1 1")) # Type: color255
instance._lightscaleHDR = float(entity_data.get('_lightscalehdr', 1)) # Type: float
instance._ambientHDR = parse_int_vector(entity_data.get('_ambienthdr', "-1 -1 -1 1")) # Type: color255
instance._AmbientScaleHDR = float(entity_data.get('_ambientscalehdr', 1)) # Type: float
instance.SunSpreadAngle = float(entity_data.get('sunspreadangle', 0)) # Type: float
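# Note: "-1 -1 -1 1" is the stock Source convention for _lightHDR/_ambientHDR
# meaning "fall back to the LDR _light/_ambient value"; this class only stores
# the raw values and leaves that interpretation to the consumer.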
class light_spot(Targetname, Light, Angles):
def __init__(self):
        Targetname.__init__(self)
        Light.__init__(self)
        Angles.__init__(self)
self.origin = [0, 0, 0]
self.target = None # Type: target_destination
self._inner_cone = 30 # Type: integer
self._cone = 45 # Type: integer
self._exponent = 1 # Type: integer
self._distance = None # Type: integer
self.pitch = -90 # Type: angle_negative_pitch
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Light.from_dict(instance, entity_data)
Angles.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.target = entity_data.get('target', None) # Type: target_destination
instance._inner_cone = parse_source_value(entity_data.get('_inner_cone', 30)) # Type: integer
instance._cone = parse_source_value(entity_data.get('_cone', 45)) # Type: integer
instance._exponent = parse_source_value(entity_data.get('_exponent', 1)) # Type: integer
instance._distance = parse_source_value(entity_data.get('_distance', 0)) # Type: integer
instance.pitch = float(entity_data.get('pitch', -90)) # Type: angle_negative_pitch
class light_dynamic(Targetname, Parentname, Angles):
icon_sprite = "editor/light.vmt"
def __init__(self):
        Targetname.__init__(self)
        Parentname.__init__(self)
        Angles.__init__(self)
self.origin = [0, 0, 0]
self.target = None # Type: target_destination
self._light = [255, 255, 255, 200] # Type: color255
self.brightness = None # Type: integer
self._inner_cone = 30 # Type: integer
self._cone = 45 # Type: integer
self.pitch = -90 # Type: integer
self.distance = 120 # Type: float
self.spotlight_radius = 80 # Type: float
self.style = None # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
Angles.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.target = entity_data.get('target', None) # Type: target_destination
instance._light = parse_int_vector(entity_data.get('_light', "255 255 255 200")) # Type: color255
instance.brightness = parse_source_value(entity_data.get('brightness', 0)) # Type: integer
instance._inner_cone = parse_source_value(entity_data.get('_inner_cone', 30)) # Type: integer
instance._cone = parse_source_value(entity_data.get('_cone', 45)) # Type: integer
instance.pitch = parse_source_value(entity_data.get('pitch', -90)) # Type: integer
instance.distance = float(entity_data.get('distance', 120)) # Type: float
instance.spotlight_radius = float(entity_data.get('spotlight_radius', 80)) # Type: float
instance.style = entity_data.get('style', None) # Type: choices
class shadow_control(Targetname):
icon_sprite = "editor/shadow_control.vmt"
def __init__(self):
        Targetname.__init__(self)
self.origin = [0, 0, 0]
self.angles = "80 30 0" # Type: string
self.color = [128, 128, 128] # Type: color255
self.distance = 75 # Type: float
self.disableallshadows = None # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.angles = entity_data.get('angles', "80 30 0") # Type: string
instance.color = parse_int_vector(entity_data.get('color', "128 128 128")) # Type: color255
instance.distance = float(entity_data.get('distance', 75)) # Type: float
instance.disableallshadows = entity_data.get('disableallshadows', None) # Type: choices
class color_correction(Targetname, EnableDisable):
icon_sprite = "editor/color_correction.vmt"
def __init__(self):
        Targetname.__init__(self)
        EnableDisable.__init__(self)
self.origin = [0, 0, 0]
self.minfalloff = 0.0 # Type: float
self.maxfalloff = 200.0 # Type: float
self.maxweight = 1.0 # Type: float
self.filename = None # Type: string
self.fadeInDuration = 0.0 # Type: float
self.fadeOutDuration = 0.0 # Type: float
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
EnableDisable.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.minfalloff = float(entity_data.get('minfalloff', 0.0)) # Type: float
instance.maxfalloff = float(entity_data.get('maxfalloff', 200.0)) # Type: float
instance.maxweight = float(entity_data.get('maxweight', 1.0)) # Type: float
instance.filename = entity_data.get('filename', None) # Type: string
instance.fadeInDuration = float(entity_data.get('fadeinduration', 0.0)) # Type: float
instance.fadeOutDuration = float(entity_data.get('fadeoutduration', 0.0)) # Type: float
class color_correction_volume(Targetname, EnableDisable):
def __init__(self):
        Targetname.__init__(self)
        EnableDisable.__init__(self)
self.fadeDuration = 10.0 # Type: float
self.maxweight = 1.0 # Type: float
self.filename = None # Type: string
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
EnableDisable.from_dict(instance, entity_data)
instance.fadeDuration = float(entity_data.get('fadeduration', 10.0)) # Type: float
instance.maxweight = float(entity_data.get('maxweight', 1.0)) # Type: float
instance.filename = entity_data.get('filename', None) # Type: string
class KeyFrame(Base):
def __init__(self):
super().__init__()
self.NextKey = None # Type: target_destination
self.MoveSpeed = 64 # Type: integer
@staticmethod
def from_dict(instance, entity_data: dict):
Base.from_dict(instance, entity_data)
instance.NextKey = entity_data.get('nextkey', None) # Type: target_destination
instance.MoveSpeed = parse_source_value(entity_data.get('movespeed', 64)) # Type: integer
class Mover(Base):
def __init__(self):
super().__init__()
self.PositionInterpolator = None # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
Base.from_dict(instance, entity_data)
instance.PositionInterpolator = entity_data.get('positioninterpolator', None) # Type: choices
class func_movelinear(Targetname, Parentname, RenderFields, Origin):
def __init__(self):
        RenderFields.__init__(self)
        Targetname.__init__(self)
        Parentname.__init__(self)
        Origin.__init__(self)
self.movedir = [0.0, 0.0, 0.0] # Type: angle
self.startposition = None # Type: float
self.speed = 100 # Type: integer
self.movedistance = 100 # Type: float
self.blockdamage = None # Type: float
self.startsound = None # Type: sound
self.stopsound = None # Type: sound
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
RenderFields.from_dict(instance, entity_data)
Origin.from_dict(instance, entity_data)
instance.movedir = parse_float_vector(entity_data.get('movedir', "0 0 0")) # Type: angle
instance.startposition = float(entity_data.get('startposition', 0)) # Type: float
instance.speed = parse_source_value(entity_data.get('speed', 100)) # Type: integer
instance.movedistance = float(entity_data.get('movedistance', 100)) # Type: float
instance.blockdamage = float(entity_data.get('blockdamage', 0)) # Type: float
instance.startsound = entity_data.get('startsound', None) # Type: sound
instance.stopsound = entity_data.get('stopsound', None) # Type: sound
class func_water_analog(Targetname, Parentname, Origin):
def __init__(self):
        Targetname.__init__(self)
        Parentname.__init__(self)
        Origin.__init__(self)
self.movedir = [0.0, 0.0, 0.0] # Type: angle
self.startposition = None # Type: float
self.speed = 100 # Type: integer
self.movedistance = 100 # Type: float
self.startsound = None # Type: sound
self.stopsound = None # Type: sound
self.WaveHeight = "3.0" # Type: string
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
Origin.from_dict(instance, entity_data)
instance.movedir = parse_float_vector(entity_data.get('movedir', "0 0 0")) # Type: angle
instance.startposition = float(entity_data.get('startposition', 0)) # Type: float
instance.speed = parse_source_value(entity_data.get('speed', 100)) # Type: integer
instance.movedistance = float(entity_data.get('movedistance', 100)) # Type: float
instance.startsound = entity_data.get('startsound', None) # Type: sound
instance.stopsound = entity_data.get('stopsound', None) # Type: sound
instance.WaveHeight = entity_data.get('waveheight', "3.0") # Type: string
class func_rotating(Angles, Origin, Shadow, Parentname, RenderFields, Targetname):
def __init__(self):
        RenderFields.__init__(self)
        Angles.__init__(self)
        Origin.__init__(self)
        Shadow.__init__(self)
        Parentname.__init__(self)
        Targetname.__init__(self)
self.maxspeed = 100 # Type: integer
self.fanfriction = 20 # Type: integer
self.message = None # Type: sound
self.volume = 10 # Type: integer
self._minlight = None # Type: string
self.dmg = None # Type: integer
self.solidbsp = None # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
Angles.from_dict(instance, entity_data)
Origin.from_dict(instance, entity_data)
Shadow.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
RenderFields.from_dict(instance, entity_data)
Targetname.from_dict(instance, entity_data)
instance.maxspeed = parse_source_value(entity_data.get('maxspeed', 100)) # Type: integer
instance.fanfriction = parse_source_value(entity_data.get('fanfriction', 20)) # Type: integer
instance.message = entity_data.get('message', None) # Type: sound
instance.volume = parse_source_value(entity_data.get('volume', 10)) # Type: integer
instance._minlight = entity_data.get('_minlight', None) # Type: string
instance.dmg = parse_source_value(entity_data.get('dmg', 0)) # Type: integer
instance.solidbsp = entity_data.get('solidbsp', None) # Type: choices
class func_platrot(Angles, Origin, Shadow, BasePlat, Parentname, RenderFields, Targetname):
def __init__(self):
        RenderFields.__init__(self)
        Angles.__init__(self)
        Origin.__init__(self)
        Shadow.__init__(self)
        BasePlat.__init__(self)
        Parentname.__init__(self)
        Targetname.__init__(self)
self.noise1 = None # Type: sound
self.noise2 = None # Type: sound
self.speed = 50 # Type: integer
self.height = None # Type: integer
self.rotation = None # Type: integer
self._minlight = None # Type: string
@staticmethod
def from_dict(instance, entity_data: dict):
Angles.from_dict(instance, entity_data)
Origin.from_dict(instance, entity_data)
Shadow.from_dict(instance, entity_data)
BasePlat.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
RenderFields.from_dict(instance, entity_data)
Targetname.from_dict(instance, entity_data)
instance.noise1 = entity_data.get('noise1', None) # Type: sound
instance.noise2 = entity_data.get('noise2', None) # Type: sound
instance.speed = parse_source_value(entity_data.get('speed', 50)) # Type: integer
instance.height = parse_source_value(entity_data.get('height', 0)) # Type: integer
instance.rotation = parse_source_value(entity_data.get('rotation', 0)) # Type: integer
instance._minlight = entity_data.get('_minlight', None) # Type: string
class keyframe_track(Targetname, KeyFrame, Parentname, Angles):
def __init__(self):
        Targetname.__init__(self)
        KeyFrame.__init__(self)
        Parentname.__init__(self)
        Angles.__init__(self)
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
KeyFrame.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
Angles.from_dict(instance, entity_data)
class move_keyframed(Targetname, KeyFrame, Parentname, Mover):
def __init__(self):
        Targetname.__init__(self)
        KeyFrame.__init__(self)
        Parentname.__init__(self)
        Mover.__init__(self)
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
KeyFrame.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
Mover.from_dict(instance, entity_data)
class move_track(Targetname, KeyFrame, Parentname, Mover):
def __init__(self):
        Targetname.__init__(self)
        KeyFrame.__init__(self)
        Parentname.__init__(self)
        Mover.__init__(self)
self.WheelBaseLength = 50 # Type: integer
self.Damage = None # Type: integer
self.NoRotate = None # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
KeyFrame.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
Mover.from_dict(instance, entity_data)
instance.WheelBaseLength = parse_source_value(entity_data.get('wheelbaselength', 50)) # Type: integer
instance.Damage = parse_source_value(entity_data.get('damage', 0)) # Type: integer
instance.NoRotate = entity_data.get('norotate', None) # Type: choices
class RopeKeyFrame(DXLevelChoice):
def __init__(self):
        DXLevelChoice.__init__(self)
self.Slack = 25 # Type: integer
self.Type = None # Type: choices
self.Subdiv = 2 # Type: integer
self.Barbed = None # Type: choices
self.Width = "2" # Type: string
self.TextureScale = "1" # Type: string
self.Collide = None # Type: choices
self.Dangling = None # Type: choices
self.Breakable = None # Type: choices
self.RopeMaterial = "cable/cable.vmt" # Type: material
self.NoWind = None # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
DXLevelChoice.from_dict(instance, entity_data)
instance.Slack = parse_source_value(entity_data.get('slack', 25)) # Type: integer
instance.Type = entity_data.get('type', None) # Type: choices
instance.Subdiv = parse_source_value(entity_data.get('subdiv', 2)) # Type: integer
instance.Barbed = entity_data.get('barbed', None) # Type: choices
instance.Width = entity_data.get('width', "2") # Type: string
instance.TextureScale = entity_data.get('texturescale', "1") # Type: string
instance.Collide = entity_data.get('collide', None) # Type: choices
instance.Dangling = entity_data.get('dangling', None) # Type: choices
instance.Breakable = entity_data.get('breakable', None) # Type: choices
instance.RopeMaterial = entity_data.get('ropematerial', "cable/cable.vmt") # Type: material
instance.NoWind = entity_data.get('nowind', None) # Type: choices
class keyframe_rope(Targetname, KeyFrame, Parentname, RopeKeyFrame):
model = "models/editor/axis_helper_thick.mdl"
def __init__(self):
        RopeKeyFrame.__init__(self)
        Targetname.__init__(self)
        KeyFrame.__init__(self)
        Parentname.__init__(self)
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
KeyFrame.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
RopeKeyFrame.from_dict(instance, entity_data)
class move_rope(Targetname, KeyFrame, Parentname, RopeKeyFrame):
model = "models/editor/axis_helper.mdl"
def __init__(self):
        RopeKeyFrame.__init__(self)
        Targetname.__init__(self)
        KeyFrame.__init__(self)
        Parentname.__init__(self)
self.PositionInterpolator = "CHOICES NOT SUPPORTED" # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
KeyFrame.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
RopeKeyFrame.from_dict(instance, entity_data)
instance.PositionInterpolator = entity_data.get('positioninterpolator', "CHOICES NOT SUPPORTED") # Type: choices
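# Hedged helper (an addition for consumers, not part of the generated schema):
# several choices-typed defaults in this module are emitted as the literal
# sentinel "CHOICES NOT SUPPORTED" (e.g. move_rope.PositionInterpolator above).
# A caller can normalize the sentinel away; the fallback argument is illustrative.
def _choice_value(value, fallback=None):
    return fallback if value == "CHOICES NOT SUPPORTED" else value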
class Button(Base):
def __init__(self):
super().__init__()
@staticmethod
def from_dict(instance, entity_data: dict):
Base.from_dict(instance, entity_data)
class func_button(DamageFilter, Origin, Button, Parentname, RenderFields, Targetname):
def __init__(self):
        RenderFields.__init__(self)
        DamageFilter.__init__(self)
        Origin.__init__(self)
        Button.__init__(self)
        Parentname.__init__(self)
        Targetname.__init__(self)
self.movedir = [0.0, 0.0, 0.0] # Type: angle
self.speed = 5 # Type: integer
self.health = None # Type: integer
self.lip = None # Type: integer
self.master = None # Type: string
self.sounds = None # Type: choices
self.wait = 3 # Type: integer
self.locked_sound = None # Type: choices
self.unlocked_sound = None # Type: choices
self.locked_sentence = None # Type: choices
self.unlocked_sentence = None # Type: choices
self._minlight = None # Type: string
@staticmethod
def from_dict(instance, entity_data: dict):
DamageFilter.from_dict(instance, entity_data)
Origin.from_dict(instance, entity_data)
Button.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
RenderFields.from_dict(instance, entity_data)
Targetname.from_dict(instance, entity_data)
instance.movedir = parse_float_vector(entity_data.get('movedir', "0 0 0")) # Type: angle
instance.speed = parse_source_value(entity_data.get('speed', 5)) # Type: integer
instance.health = parse_source_value(entity_data.get('health', 0)) # Type: integer
instance.lip = parse_source_value(entity_data.get('lip', 0)) # Type: integer
instance.master = entity_data.get('master', None) # Type: string
instance.sounds = entity_data.get('sounds', None) # Type: choices
instance.wait = parse_source_value(entity_data.get('wait', 3)) # Type: integer
instance.locked_sound = entity_data.get('locked_sound', None) # Type: choices
instance.unlocked_sound = entity_data.get('unlocked_sound', None) # Type: choices
instance.locked_sentence = entity_data.get('locked_sentence', None) # Type: choices
instance.unlocked_sentence = entity_data.get('unlocked_sentence', None) # Type: choices
instance._minlight = entity_data.get('_minlight', None) # Type: string
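# Minimal usage sketch: every entity class in this module follows the same
# two-step pattern -- construct an instance to get the FGD defaults, then
# overlay the raw keyvalues with the static from_dict(). The keyvalues below
# are illustrative, and the results noted in the comment assume that
# parse_source_value / parse_float_vector coerce numeric strings, as their
# use throughout this module suggests.
def _example_func_button():
    raw = {'classname': 'func_button', 'speed': '5', 'wait': '-1', 'movedir': '0 90 0'}
    button = func_button()
    func_button.from_dict(button, raw)
    return button  # button.speed -> 5, button.movedir -> [0.0, 90.0, 0.0]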
class func_rot_button(Global, Angles, Origin, Button, EnableDisable, Parentname, Targetname):
def __init__(self):
        Global.__init__(self)
        Angles.__init__(self)
        Origin.__init__(self)
        Button.__init__(self)
        EnableDisable.__init__(self)
        Parentname.__init__(self)
        Targetname.__init__(self)
self.master = None # Type: string
self.speed = 50 # Type: integer
self.health = None # Type: integer
self.sounds = "CHOICES NOT SUPPORTED" # Type: choices
self.wait = 3 # Type: integer
self.distance = 90 # Type: integer
self._minlight = None # Type: string
@staticmethod
def from_dict(instance, entity_data: dict):
Global.from_dict(instance, entity_data)
Angles.from_dict(instance, entity_data)
Origin.from_dict(instance, entity_data)
Button.from_dict(instance, entity_data)
EnableDisable.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
Targetname.from_dict(instance, entity_data)
instance.master = entity_data.get('master', None) # Type: string
instance.speed = parse_source_value(entity_data.get('speed', 50)) # Type: integer
instance.health = parse_source_value(entity_data.get('health', 0)) # Type: integer
instance.sounds = entity_data.get('sounds', "CHOICES NOT SUPPORTED") # Type: choices
instance.wait = parse_source_value(entity_data.get('wait', 3)) # Type: integer
instance.distance = parse_source_value(entity_data.get('distance', 90)) # Type: integer
instance._minlight = entity_data.get('_minlight', None) # Type: string
class momentary_rot_button(Angles, Origin, Parentname, RenderFields, Targetname):
def __init__(self):
        RenderFields.__init__(self)
        Angles.__init__(self)
        Origin.__init__(self)
        Parentname.__init__(self)
        Targetname.__init__(self)
self.speed = 50 # Type: integer
self.master = None # Type: string
self.sounds = None # Type: choices
self.distance = 90 # Type: integer
self.returnspeed = None # Type: integer
self._minlight = None # Type: string
self.startposition = None # Type: float
self.startdirection = "CHOICES NOT SUPPORTED" # Type: choices
self.solidbsp = None # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
Angles.from_dict(instance, entity_data)
Origin.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
RenderFields.from_dict(instance, entity_data)
Targetname.from_dict(instance, entity_data)
instance.speed = parse_source_value(entity_data.get('speed', 50)) # Type: integer
instance.master = entity_data.get('master', None) # Type: string
instance.sounds = entity_data.get('sounds', None) # Type: choices
instance.distance = parse_source_value(entity_data.get('distance', 90)) # Type: integer
instance.returnspeed = parse_source_value(entity_data.get('returnspeed', 0)) # Type: integer
instance._minlight = entity_data.get('_minlight', None) # Type: string
instance.startposition = float(entity_data.get('startposition', 0)) # Type: float
instance.startdirection = entity_data.get('startdirection', "CHOICES NOT SUPPORTED") # Type: choices
instance.solidbsp = entity_data.get('solidbsp', None) # Type: choices
class Door(Global, Shadow, Parentname, RenderFields, Targetname):
def __init__(self):
        RenderFields.__init__(self)
        Global.__init__(self)
        Shadow.__init__(self)
        Parentname.__init__(self)
        Targetname.__init__(self)
self.speed = 100 # Type: integer
self.master = None # Type: string
self.noise1 = None # Type: sound
self.noise2 = None # Type: sound
self.startclosesound = None # Type: sound
self.closesound = None # Type: sound
self.wait = 4 # Type: integer
self.lip = None # Type: integer
self.dmg = None # Type: integer
self.forceclosed = None # Type: choices
self.ignoredebris = None # Type: choices
self.message = None # Type: string
self.health = None # Type: integer
self.locked_sound = None # Type: sound
self.unlocked_sound = None # Type: sound
self.spawnpos = None # Type: choices
self.locked_sentence = None # Type: choices
self.unlocked_sentence = None # Type: choices
self._minlight = None # Type: string
self.loopmovesound = None # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
Global.from_dict(instance, entity_data)
Shadow.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
RenderFields.from_dict(instance, entity_data)
Targetname.from_dict(instance, entity_data)
instance.speed = parse_source_value(entity_data.get('speed', 100)) # Type: integer
instance.master = entity_data.get('master', None) # Type: string
instance.noise1 = entity_data.get('noise1', None) # Type: sound
instance.noise2 = entity_data.get('noise2', None) # Type: sound
instance.startclosesound = entity_data.get('startclosesound', None) # Type: sound
instance.closesound = entity_data.get('closesound', None) # Type: sound
instance.wait = parse_source_value(entity_data.get('wait', 4)) # Type: integer
instance.lip = parse_source_value(entity_data.get('lip', 0)) # Type: integer
instance.dmg = parse_source_value(entity_data.get('dmg', 0)) # Type: integer
instance.forceclosed = entity_data.get('forceclosed', None) # Type: choices
instance.ignoredebris = entity_data.get('ignoredebris', None) # Type: choices
instance.message = entity_data.get('message', None) # Type: string
instance.health = parse_source_value(entity_data.get('health', 0)) # Type: integer
instance.locked_sound = entity_data.get('locked_sound', None) # Type: sound
instance.unlocked_sound = entity_data.get('unlocked_sound', None) # Type: sound
instance.spawnpos = entity_data.get('spawnpos', None) # Type: choices
instance.locked_sentence = entity_data.get('locked_sentence', None) # Type: choices
instance.unlocked_sentence = entity_data.get('unlocked_sentence', None) # Type: choices
instance._minlight = entity_data.get('_minlight', None) # Type: string
instance.loopmovesound = entity_data.get('loopmovesound', None) # Type: choices
class func_door(Door, Origin):
def __init__(self):
        Door.__init__(self)
        Origin.__init__(self)
self.movedir = [0.0, 0.0, 0.0] # Type: angle
self.filtername = None # Type: filterclass
@staticmethod
def from_dict(instance, entity_data: dict):
Door.from_dict(instance, entity_data)
Origin.from_dict(instance, entity_data)
instance.movedir = parse_float_vector(entity_data.get('movedir', "0 0 0")) # Type: angle
instance.filtername = entity_data.get('filtername', None) # Type: filterclass
class func_door_rotating(Door, Origin, Angles):
def __init__(self):
        Door.__init__(self)
        Origin.__init__(self)
        Angles.__init__(self)
self.distance = 90 # Type: integer
self.solidbsp = None # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
Door.from_dict(instance, entity_data)
Origin.from_dict(instance, entity_data)
Angles.from_dict(instance, entity_data)
instance.distance = parse_source_value(entity_data.get('distance', 90)) # Type: integer
instance.solidbsp = entity_data.get('solidbsp', None) # Type: choices
class prop_door_rotating(Global, Angles, Studiomodel, Parentname, Targetname):
def __init__(self):
        Global.__init__(self)
        Angles.__init__(self)
        Studiomodel.__init__(self)
        Parentname.__init__(self)
        Targetname.__init__(self)
self.origin = [0, 0, 0]
self.slavename = None # Type: target_destination
self.hardware = "CHOICES NOT SUPPORTED" # Type: choices
self.ajarangles = [0.0, 0.0, 0.0] # Type: angle
self.spawnpos = None # Type: choices
self.axis = None # Type: axis
self.distance = 90 # Type: float
self.speed = 100 # Type: integer
self.soundopenoverride = None # Type: sound
self.soundcloseoverride = None # Type: sound
self.soundmoveoverride = None # Type: sound
self.returndelay = -1 # Type: integer
self.dmg = None # Type: integer
self.health = None # Type: integer
self.soundlockedoverride = None # Type: sound
self.soundunlockedoverride = None # Type: sound
self.forceclosed = None # Type: choices
self.opendir = None # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
Global.from_dict(instance, entity_data)
Angles.from_dict(instance, entity_data)
Studiomodel.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.slavename = entity_data.get('slavename', None) # Type: target_destination
instance.hardware = entity_data.get('hardware', "CHOICES NOT SUPPORTED") # Type: choices
instance.ajarangles = parse_float_vector(entity_data.get('ajarangles', "0 0 0")) # Type: angle
instance.spawnpos = entity_data.get('spawnpos', None) # Type: choices
instance.axis = entity_data.get('axis', None) # Type: axis
instance.distance = float(entity_data.get('distance', 90)) # Type: float
instance.speed = parse_source_value(entity_data.get('speed', 100)) # Type: integer
instance.soundopenoverride = entity_data.get('soundopenoverride', None) # Type: sound
instance.soundcloseoverride = entity_data.get('soundcloseoverride', None) # Type: sound
instance.soundmoveoverride = entity_data.get('soundmoveoverride', None) # Type: sound
instance.returndelay = parse_source_value(entity_data.get('returndelay', -1)) # Type: integer
instance.dmg = parse_source_value(entity_data.get('dmg', 0)) # Type: integer
instance.health = parse_source_value(entity_data.get('health', 0)) # Type: integer
instance.soundlockedoverride = entity_data.get('soundlockedoverride', None) # Type: sound
instance.soundunlockedoverride = entity_data.get('soundunlockedoverride', None) # Type: sound
instance.forceclosed = entity_data.get('forceclosed', None) # Type: choices
instance.opendir = entity_data.get('opendir', None) # Type: choices
class env_cubemap(Base):
icon_sprite = "editor/env_cubemap.vmt"
def __init__(self):
super().__init__()
self.origin = [0, 0, 0]
self.cubemapsize = None # Type: choices
self.sides = None # Type: sidelist
@staticmethod
def from_dict(instance, entity_data: dict):
Base.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.cubemapsize = entity_data.get('cubemapsize', None) # Type: choices
instance.sides = entity_data.get('sides', None) # Type: sidelist
class BModelParticleSpawner(Base):
def __init__(self):
super().__init__()
self.StartDisabled = None # Type: choices
self.Color = [255, 255, 255] # Type: color255
self.SpawnRate = 40 # Type: integer
self.SpeedMax = "13" # Type: string
self.LifetimeMin = "3" # Type: string
self.LifetimeMax = "5" # Type: string
self.DistMax = 1024 # Type: integer
self.Frozen = None # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
Base.from_dict(instance, entity_data)
instance.StartDisabled = entity_data.get('startdisabled', None) # Type: choices
instance.Color = parse_int_vector(entity_data.get('color', "255 255 255")) # Type: color255
instance.SpawnRate = parse_source_value(entity_data.get('spawnrate', 40)) # Type: integer
instance.SpeedMax = entity_data.get('speedmax', "13") # Type: string
instance.LifetimeMin = entity_data.get('lifetimemin', "3") # Type: string
instance.LifetimeMax = entity_data.get('lifetimemax', "5") # Type: string
instance.DistMax = parse_source_value(entity_data.get('distmax', 1024)) # Type: integer
instance.Frozen = entity_data.get('frozen', None) # Type: choices
class func_dustmotes(Targetname, BModelParticleSpawner):
def __init__(self):
        Targetname.__init__(self)
        BModelParticleSpawner.__init__(self)
self.SizeMin = "10" # Type: string
self.SizeMax = "20" # Type: string
self.Alpha = 255 # Type: integer
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
BModelParticleSpawner.from_dict(instance, entity_data)
instance.SizeMin = entity_data.get('sizemin', "10") # Type: string
instance.SizeMax = entity_data.get('sizemax', "20") # Type: string
instance.Alpha = parse_source_value(entity_data.get('alpha', 255)) # Type: integer
class func_smokevolume(Targetname):
def __init__(self):
        super().__init__()
self.Color1 = [255, 255, 255] # Type: color255
self.Color2 = [255, 255, 255] # Type: color255
self.material = "particle/particle_smokegrenade" # Type: material
self.ParticleDrawWidth = 120 # Type: float
self.ParticleSpacingDistance = 80 # Type: float
self.DensityRampSpeed = 1 # Type: float
self.RotationSpeed = 10 # Type: float
self.MovementSpeed = 10 # Type: float
self.Density = 1 # Type: float
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.Color1 = parse_int_vector(entity_data.get('color1', "255 255 255")) # Type: color255
instance.Color2 = parse_int_vector(entity_data.get('color2', "255 255 255")) # Type: color255
instance.material = entity_data.get('material', "particle/particle_smokegrenade") # Type: material
instance.ParticleDrawWidth = float(entity_data.get('particledrawwidth', 120)) # Type: float
instance.ParticleSpacingDistance = float(entity_data.get('particlespacingdistance', 80)) # Type: float
instance.DensityRampSpeed = float(entity_data.get('densityrampspeed', 1)) # Type: float
instance.RotationSpeed = float(entity_data.get('rotationspeed', 10)) # Type: float
instance.MovementSpeed = float(entity_data.get('movementspeed', 10)) # Type: float
instance.Density = float(entity_data.get('density', 1)) # Type: float
class func_dustcloud(Targetname, BModelParticleSpawner):
def __init__(self):
        Targetname.__init__(self)
        BModelParticleSpawner.__init__(self)
self.Alpha = 30 # Type: integer
self.SizeMin = "100" # Type: string
self.SizeMax = "200" # Type: string
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
BModelParticleSpawner.from_dict(instance, entity_data)
instance.Alpha = parse_source_value(entity_data.get('alpha', 30)) # Type: integer
instance.SizeMin = entity_data.get('sizemin', "100") # Type: string
instance.SizeMax = entity_data.get('sizemax', "200") # Type: string
class env_dustpuff(Targetname, Parentname, Angles):
def __init__(self):
        Targetname.__init__(self)
        Parentname.__init__(self)
        Angles.__init__(self)
self.origin = [0, 0, 0]
self.scale = 8 # Type: float
self.speed = 16 # Type: float
self.color = [128, 128, 128] # Type: color255
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
Angles.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.scale = float(entity_data.get('scale', 8)) # Type: float
instance.speed = float(entity_data.get('speed', 16)) # Type: float
instance.color = parse_int_vector(entity_data.get('color', "128 128 128")) # Type: color255
class env_particlescript(Targetname, Parentname, Angles):
def __init__(self):
        Targetname.__init__(self)
        Parentname.__init__(self)
        Angles.__init__(self)
self.origin = [0, 0, 0]
self.model = "models/Ambient_citadel_paths.mdl" # Type: studio
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
Angles.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.model = entity_data.get('model', "models/Ambient_citadel_paths.mdl") # Type: studio
class env_effectscript(Targetname, Parentname, Angles):
def __init__(self):
        Targetname.__init__(self)
        Parentname.__init__(self)
        Angles.__init__(self)
self.origin = [0, 0, 0]
self.model = "models/Effects/teleporttrail.mdl" # Type: studio
self.scriptfile = "scripts/effects/testeffect.txt" # Type: string
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
Angles.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.model = entity_data.get('model', "models/Effects/teleporttrail.mdl") # Type: studio
instance.scriptfile = entity_data.get('scriptfile', "scripts/effects/testeffect.txt") # Type: string
class logic_auto(Base):
icon_sprite = "editor/logic_auto.vmt"
def __init__(self):
super().__init__()
self.origin = [0, 0, 0]
self.globalstate = None # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
Base.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.globalstate = entity_data.get('globalstate', None) # Type: choices
class point_viewcontrol(Targetname, Parentname, Angles):
viewport_model = "models/editor/camera.mdl"
def __init__(self):
        Targetname.__init__(self)
        Parentname.__init__(self)
        Angles.__init__(self)
self.origin = [0, 0, 0]
self.target = None # Type: target_destination
self.targetattachment = None # Type: string
self.wait = 10 # Type: integer
self.moveto = None # Type: target_destination
self.interpolatepositiontoplayer = None # Type: choices
self.speed = "0" # Type: string
self.acceleration = "500" # Type: string
self.deceleration = "500" # Type: string
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
Angles.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.target = entity_data.get('target', None) # Type: target_destination
instance.targetattachment = entity_data.get('targetattachment', None) # Type: string
instance.wait = parse_source_value(entity_data.get('wait', 10)) # Type: integer
instance.moveto = entity_data.get('moveto', None) # Type: target_destination
instance.interpolatepositiontoplayer = entity_data.get('interpolatepositiontoplayer', None) # Type: choices
instance.speed = entity_data.get('speed', "0") # Type: string
instance.acceleration = entity_data.get('acceleration', "500") # Type: string
instance.deceleration = entity_data.get('deceleration', "500") # Type: string
class point_posecontroller(Targetname):
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.PropName = None # Type: string
self.PoseParameterName = None # Type: string
self.PoseValue = 0.0 # Type: float
self.InterpolationTime = 0.0 # Type: float
self.InterpolationWrap = None # Type: choices
self.CycleFrequency = 0.0 # Type: float
self.FModulationType = None # Type: choices
self.FModTimeOffset = 0.0 # Type: float
self.FModRate = 0.0 # Type: float
self.FModAmplitude = 0.0 # Type: float
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.PropName = entity_data.get('propname', None) # Type: string
instance.PoseParameterName = entity_data.get('poseparametername', None) # Type: string
instance.PoseValue = float(entity_data.get('posevalue', 0.0)) # Type: float
instance.InterpolationTime = float(entity_data.get('interpolationtime', 0.0)) # Type: float
instance.InterpolationWrap = entity_data.get('interpolationwrap', None) # Type: choices
instance.CycleFrequency = float(entity_data.get('cyclefrequency', 0.0)) # Type: float
instance.FModulationType = entity_data.get('fmodulationtype', None) # Type: choices
instance.FModTimeOffset = float(entity_data.get('fmodtimeoffset', 0.0)) # Type: float
instance.FModRate = float(entity_data.get('fmodrate', 0.0)) # Type: float
instance.FModAmplitude = float(entity_data.get('fmodamplitude', 0.0)) # Type: float
class logic_compare(Targetname):
icon_sprite = "editor/logic_compare.vmt"
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.InitialValue = None # Type: integer
self.CompareValue = None # Type: integer
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.InitialValue = parse_source_value(entity_data.get('initialvalue', 0)) # Type: integer
instance.CompareValue = parse_source_value(entity_data.get('comparevalue', 0)) # Type: integer
class logic_branch(Targetname):
icon_sprite = "editor/logic_branch.vmt"
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.InitialValue = None # Type: integer
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.InitialValue = parse_source_value(entity_data.get('initialvalue', 0)) # Type: integer
class logic_branch_listener(Targetname):
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.Branch01 = None # Type: target_destination
self.Branch02 = None # Type: target_destination
self.Branch03 = None # Type: target_destination
self.Branch04 = None # Type: target_destination
self.Branch05 = None # Type: target_destination
self.Branch06 = None # Type: target_destination
self.Branch07 = None # Type: target_destination
self.Branch08 = None # Type: target_destination
self.Branch09 = None # Type: target_destination
self.Branch10 = None # Type: target_destination
self.Branch11 = None # Type: target_destination
self.Branch12 = None # Type: target_destination
self.Branch13 = None # Type: target_destination
self.Branch14 = None # Type: target_destination
self.Branch15 = None # Type: target_destination
self.Branch16 = None # Type: target_destination
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.Branch01 = entity_data.get('branch01', None) # Type: target_destination
instance.Branch02 = entity_data.get('branch02', None) # Type: target_destination
instance.Branch03 = entity_data.get('branch03', None) # Type: target_destination
instance.Branch04 = entity_data.get('branch04', None) # Type: target_destination
instance.Branch05 = entity_data.get('branch05', None) # Type: target_destination
instance.Branch06 = entity_data.get('branch06', None) # Type: target_destination
instance.Branch07 = entity_data.get('branch07', None) # Type: target_destination
instance.Branch08 = entity_data.get('branch08', None) # Type: target_destination
instance.Branch09 = entity_data.get('branch09', None) # Type: target_destination
instance.Branch10 = entity_data.get('branch10', None) # Type: target_destination
instance.Branch11 = entity_data.get('branch11', None) # Type: target_destination
instance.Branch12 = entity_data.get('branch12', None) # Type: target_destination
instance.Branch13 = entity_data.get('branch13', None) # Type: target_destination
instance.Branch14 = entity_data.get('branch14', None) # Type: target_destination
instance.Branch15 = entity_data.get('branch15', None) # Type: target_destination
instance.Branch16 = entity_data.get('branch16', None) # Type: target_destination
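# Hedged convenience sketch: logic_branch_listener (above) and the later
# logic_case, point_template and hammer_updateignorelist all expose sixteen
# numbered keyvalues. Collecting the populated ones via getattr avoids sixteen
# hand-written accesses; the prefix/count arguments are assumptions matching
# the attribute names generated in this module,
# e.g. _numbered_values(listener, 'Branch') -> the non-empty branch targets.
def _numbered_values(instance, prefix, count=16):
    values = (getattr(instance, '{}{:02d}'.format(prefix, i)) for i in range(1, count + 1))
    return [value for value in values if value is not None]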
class logic_case(Targetname):
icon_sprite = "editor/logic_case.vmt"
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.Case01 = None # Type: string
self.Case02 = None # Type: string
self.Case03 = None # Type: string
self.Case04 = None # Type: string
self.Case05 = None # Type: string
self.Case06 = None # Type: string
self.Case07 = None # Type: string
self.Case08 = None # Type: string
self.Case09 = None # Type: string
self.Case10 = None # Type: string
self.Case11 = None # Type: string
self.Case12 = None # Type: string
self.Case13 = None # Type: string
self.Case14 = None # Type: string
self.Case15 = None # Type: string
self.Case16 = None # Type: string
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.Case01 = entity_data.get('case01', None) # Type: string
instance.Case02 = entity_data.get('case02', None) # Type: string
instance.Case03 = entity_data.get('case03', None) # Type: string
instance.Case04 = entity_data.get('case04', None) # Type: string
instance.Case05 = entity_data.get('case05', None) # Type: string
instance.Case06 = entity_data.get('case06', None) # Type: string
instance.Case07 = entity_data.get('case07', None) # Type: string
instance.Case08 = entity_data.get('case08', None) # Type: string
instance.Case09 = entity_data.get('case09', None) # Type: string
instance.Case10 = entity_data.get('case10', None) # Type: string
instance.Case11 = entity_data.get('case11', None) # Type: string
instance.Case12 = entity_data.get('case12', None) # Type: string
instance.Case13 = entity_data.get('case13', None) # Type: string
instance.Case14 = entity_data.get('case14', None) # Type: string
instance.Case15 = entity_data.get('case15', None) # Type: string
instance.Case16 = entity_data.get('case16', None) # Type: string
class logic_multicompare(Targetname):
icon_sprite = "editor/logic_multicompare.vmt"
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.IntegerValue = None # Type: integer
self.ShouldComparetoValue = None # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.IntegerValue = parse_source_value(entity_data.get('integervalue', 0)) # Type: integer
instance.ShouldComparetoValue = entity_data.get('shouldcomparetovalue', None) # Type: choices
class logic_relay(Targetname, EnableDisable):
icon_sprite = "editor/logic_relay.vmt"
def __init__(self):
        Targetname.__init__(self)
        EnableDisable.__init__(self)
self.origin = [0, 0, 0]
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
EnableDisable.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
class logic_timer(Targetname, EnableDisable):
icon_sprite = "editor/logic_timer.vmt"
def __init__(self):
        Targetname.__init__(self)
        EnableDisable.__init__(self)
self.origin = [0, 0, 0]
self.UseRandomTime = None # Type: choices
self.LowerRandomBound = None # Type: string
self.UpperRandomBound = None # Type: string
self.RefireTime = None # Type: string
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
EnableDisable.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.UseRandomTime = entity_data.get('userandomtime', None) # Type: choices
instance.LowerRandomBound = entity_data.get('lowerrandombound', None) # Type: string
instance.UpperRandomBound = entity_data.get('upperrandombound', None) # Type: string
instance.RefireTime = entity_data.get('refiretime', None) # Type: string
class hammer_updateignorelist(Targetname):
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.IgnoredName01 = None # Type: target_destination
self.IgnoredName02 = None # Type: target_destination
self.IgnoredName03 = None # Type: target_destination
self.IgnoredName04 = None # Type: target_destination
self.IgnoredName05 = None # Type: target_destination
self.IgnoredName06 = None # Type: target_destination
self.IgnoredName07 = None # Type: target_destination
self.IgnoredName08 = None # Type: target_destination
self.IgnoredName09 = None # Type: target_destination
self.IgnoredName10 = None # Type: target_destination
self.IgnoredName11 = None # Type: target_destination
self.IgnoredName12 = None # Type: target_destination
self.IgnoredName13 = None # Type: target_destination
self.IgnoredName14 = None # Type: target_destination
self.IgnoredName15 = None # Type: target_destination
self.IgnoredName16 = None # Type: target_destination
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.IgnoredName01 = entity_data.get('ignoredname01', None) # Type: target_destination
instance.IgnoredName02 = entity_data.get('ignoredname02', None) # Type: target_destination
instance.IgnoredName03 = entity_data.get('ignoredname03', None) # Type: target_destination
instance.IgnoredName04 = entity_data.get('ignoredname04', None) # Type: target_destination
instance.IgnoredName05 = entity_data.get('ignoredname05', None) # Type: target_destination
instance.IgnoredName06 = entity_data.get('ignoredname06', None) # Type: target_destination
instance.IgnoredName07 = entity_data.get('ignoredname07', None) # Type: target_destination
instance.IgnoredName08 = entity_data.get('ignoredname08', None) # Type: target_destination
instance.IgnoredName09 = entity_data.get('ignoredname09', None) # Type: target_destination
instance.IgnoredName10 = entity_data.get('ignoredname10', None) # Type: target_destination
instance.IgnoredName11 = entity_data.get('ignoredname11', None) # Type: target_destination
instance.IgnoredName12 = entity_data.get('ignoredname12', None) # Type: target_destination
instance.IgnoredName13 = entity_data.get('ignoredname13', None) # Type: target_destination
instance.IgnoredName14 = entity_data.get('ignoredname14', None) # Type: target_destination
instance.IgnoredName15 = entity_data.get('ignoredname15', None) # Type: target_destination
instance.IgnoredName16 = entity_data.get('ignoredname16', None) # Type: target_destination
class logic_collision_pair(Targetname):
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.attach1 = None # Type: target_destination
self.attach2 = None # Type: target_destination
self.startdisabled = "CHOICES NOT SUPPORTED" # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.attach1 = entity_data.get('attach1', None) # Type: target_destination
instance.attach2 = entity_data.get('attach2', None) # Type: target_destination
instance.startdisabled = entity_data.get('startdisabled', "CHOICES NOT SUPPORTED") # Type: choices
class env_microphone(Targetname, Parentname, EnableDisable):
icon_sprite = "editor/env_microphone.vmt"
def __init__(self):
        Targetname.__init__(self)
        Parentname.__init__(self)
        EnableDisable.__init__(self)
self.origin = [0, 0, 0]
self.target = None # Type: target_destination
self.SpeakerName = None # Type: target_destination
self.ListenFilter = None # Type: filterclass
self.speaker_dsp_preset = None # Type: choices
self.Sensitivity = 1 # Type: float
self.SmoothFactor = None # Type: float
self.MaxRange = 240 # Type: float
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
EnableDisable.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.target = entity_data.get('target', None) # Type: target_destination
instance.SpeakerName = entity_data.get('speakername', None) # Type: target_destination
instance.ListenFilter = entity_data.get('listenfilter', None) # Type: filterclass
instance.speaker_dsp_preset = entity_data.get('speaker_dsp_preset', None) # Type: choices
instance.Sensitivity = float(entity_data.get('sensitivity', 1)) # Type: float
instance.SmoothFactor = float(entity_data.get('smoothfactor', 0)) # Type: float
instance.MaxRange = float(entity_data.get('maxrange', 240)) # Type: float
class math_remap(Targetname, EnableDisable):
def __init__(self):
        Targetname.__init__(self)
        EnableDisable.__init__(self)
self.origin = [0, 0, 0]
self.in1 = None # Type: integer
self.in2 = 1 # Type: integer
self.out1 = None # Type: integer
self.out2 = None # Type: integer
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
EnableDisable.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.in1 = parse_source_value(entity_data.get('in1', 0)) # Type: integer
instance.in2 = parse_source_value(entity_data.get('in2', 1)) # Type: integer
instance.out1 = parse_source_value(entity_data.get('out1', 0)) # Type: integer
instance.out2 = parse_source_value(entity_data.get('out2', 0)) # Type: integer
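# Sketch of the linear remap math_remap represents: an input in [in1, in2] is
# mapped onto [out1, out2]. The clamp is an assumption about in-game behaviour;
# this mirrors the standard remap formula, not decompiled engine code.
def _remap(value, in1, in2, out1, out2):
    if in1 == in2:
        return out1  # degenerate input range, avoid division by zero
    value = max(min(value, max(in1, in2)), min(in1, in2))  # clamp into input range
    return out1 + (value - in1) * (out2 - out1) / (in2 - in1)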
class math_colorblend(Targetname):
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.inmin = None # Type: integer
self.inmax = 1 # Type: integer
self.colormin = [0, 0, 0] # Type: color255
self.colormax = [255, 255, 255] # Type: color255
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.inmin = parse_source_value(entity_data.get('inmin', 0)) # Type: integer
instance.inmax = parse_source_value(entity_data.get('inmax', 1)) # Type: integer
instance.colormin = parse_int_vector(entity_data.get('colormin', "0 0 0")) # Type: color255
instance.colormax = parse_int_vector(entity_data.get('colormax', "255 255 255")) # Type: color255
class math_counter(Targetname, EnableDisable):
icon_sprite = "editor/math_counter.vmt"
def __init__(self):
        Targetname.__init__(self)
        EnableDisable.__init__(self)
self.origin = [0, 0, 0]
self.startvalue = None # Type: integer
self.min = None # Type: integer
self.max = None # Type: integer
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
EnableDisable.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.startvalue = parse_source_value(entity_data.get('startvalue', 0)) # Type: integer
instance.min = parse_source_value(entity_data.get('min', 0)) # Type: integer
instance.max = parse_source_value(entity_data.get('max', 0)) # Type: integer
class logic_lineto(Targetname):
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.source = None # Type: target_destination
self.target = None # Type: target_destination
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.source = entity_data.get('source', None) # Type: target_destination
instance.target = entity_data.get('target', None) # Type: target_destination
class logic_navigation(Targetname):
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
        self.target = None # Set to None due to a bug in the BlackMesa base.fgd file # Type: target_destination
self.navprop = "CHOICES NOT SUPPORTED" # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
        instance.target = entity_data.get('target', None) # Set to None due to a bug in the BlackMesa base.fgd file # Type: target_destination
instance.navprop = entity_data.get('navprop', "CHOICES NOT SUPPORTED") # Type: choices
class logic_autosave(Targetname):
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.NewLevelUnit = None # Type: choices
self.MinimumHitPoints = None # Type: integer
self.MinHitPointsToCommit = None # Type: integer
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.NewLevelUnit = entity_data.get('newlevelunit', None) # Type: choices
instance.MinimumHitPoints = parse_source_value(entity_data.get('minimumhitpoints', 0)) # Type: integer
instance.MinHitPointsToCommit = parse_source_value(entity_data.get('minhitpointstocommit', 0)) # Type: integer
class logic_active_autosave(Targetname):
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.MinimumHitPoints = 30 # Type: integer
self.TriggerHitPoints = 75 # Type: integer
self.TimeToTrigget = None # Type: float
self.DangerousTime = 10 # Type: float
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.MinimumHitPoints = parse_source_value(entity_data.get('minimumhitpoints', 30)) # Type: integer
instance.TriggerHitPoints = parse_source_value(entity_data.get('triggerhitpoints', 75)) # Type: integer
instance.TimeToTrigget = float(entity_data.get('timetotrigget', 0)) # Type: float
instance.DangerousTime = float(entity_data.get('dangeroustime', 10)) # Type: float
class point_template(Targetname):
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.Template01 = None # Type: target_destination
self.Template02 = None # Type: target_destination
self.Template03 = None # Type: target_destination
self.Template04 = None # Type: target_destination
self.Template05 = None # Type: target_destination
self.Template06 = None # Type: target_destination
self.Template07 = None # Type: target_destination
self.Template08 = None # Type: target_destination
self.Template09 = None # Type: target_destination
self.Template10 = None # Type: target_destination
self.Template11 = None # Type: target_destination
self.Template12 = None # Type: target_destination
self.Template13 = None # Type: target_destination
self.Template14 = None # Type: target_destination
self.Template15 = None # Type: target_destination
self.Template16 = None # Type: target_destination
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.Template01 = entity_data.get('template01', None) # Type: target_destination
instance.Template02 = entity_data.get('template02', None) # Type: target_destination
instance.Template03 = entity_data.get('template03', None) # Type: target_destination
instance.Template04 = entity_data.get('template04', None) # Type: target_destination
instance.Template05 = entity_data.get('template05', None) # Type: target_destination
instance.Template06 = entity_data.get('template06', None) # Type: target_destination
instance.Template07 = entity_data.get('template07', None) # Type: target_destination
instance.Template08 = entity_data.get('template08', None) # Type: target_destination
instance.Template09 = entity_data.get('template09', None) # Type: target_destination
instance.Template10 = entity_data.get('template10', None) # Type: target_destination
instance.Template11 = entity_data.get('template11', None) # Type: target_destination
instance.Template12 = entity_data.get('template12', None) # Type: target_destination
instance.Template13 = entity_data.get('template13', None) # Type: target_destination
instance.Template14 = entity_data.get('template14', None) # Type: target_destination
instance.Template15 = entity_data.get('template15', None) # Type: target_destination
instance.Template16 = entity_data.get('template16', None) # Type: target_destination
class env_entity_maker(Targetname, Parentname, Angles):
def __init__(self):
        Targetname.__init__(self)
        Parentname.__init__(self)
        Angles.__init__(self)
self.origin = [0, 0, 0]
self.EntityTemplate = None # Type: target_destination
self.PostSpawnSpeed = 0 # Type: float
self.PostSpawnDirection = [0.0, 0.0, 0.0] # Type: angle
self.PostSpawnDirectionVariance = 0.15 # Type: float
self.PostSpawnInheritAngles = None # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
Angles.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.EntityTemplate = entity_data.get('entitytemplate', None) # Type: target_destination
instance.PostSpawnSpeed = float(entity_data.get('postspawnspeed', 0)) # Type: float
instance.PostSpawnDirection = parse_float_vector(entity_data.get('postspawndirection', "0 0 0")) # Type: angle
instance.PostSpawnDirectionVariance = float(entity_data.get('postspawndirectionvariance', 0.15)) # Type: float
instance.PostSpawnInheritAngles = entity_data.get('postspawninheritangles', None) # Type: choices
class BaseFilter(Targetname):
def __init__(self):
        super().__init__()
self.Negated = "CHOICES NOT SUPPORTED" # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.Negated = entity_data.get('negated', "CHOICES NOT SUPPORTED") # Type: choices
class filter_multi(BaseFilter):
icon_sprite = "editor/filter_multiple.vmt"
def __init__(self):
        super().__init__()
self.filtertype = None # Type: choices
self.Filter01 = None # Type: filterclass
self.Filter02 = None # Type: filterclass
self.Filter03 = None # Type: filterclass
self.Filter04 = None # Type: filterclass
self.Filter05 = None # Type: filterclass
@staticmethod
def from_dict(instance, entity_data: dict):
BaseFilter.from_dict(instance, entity_data)
instance.filtertype = entity_data.get('filtertype', None) # Type: choices
instance.Filter01 = entity_data.get('filter01', None) # Type: filterclass
instance.Filter02 = entity_data.get('filter02', None) # Type: filterclass
instance.Filter03 = entity_data.get('filter03', None) # Type: filterclass
instance.Filter04 = entity_data.get('filter04', None) # Type: filterclass
instance.Filter05 = entity_data.get('filter05', None) # Type: filterclass
class filter_activator_name(BaseFilter):
icon_sprite = "editor/filter_name.vmt"
def __init__(self):
        super().__init__()
self.filtername = None # Type: target_destination
@staticmethod
def from_dict(instance, entity_data: dict):
BaseFilter.from_dict(instance, entity_data)
instance.filtername = entity_data.get('filtername', None) # Type: target_destination
class filter_activator_class(BaseFilter):
icon_sprite = "editor/filter_class.vmt"
def __init__(self):
        super().__init__()
self.filterclass = None # Type: string
@staticmethod
def from_dict(instance, entity_data: dict):
BaseFilter.from_dict(instance, entity_data)
instance.filterclass = entity_data.get('filterclass', None) # Type: string
class filter_activator_mass_greater(BaseFilter):
icon_sprite = "editor/filter_class.vmt"
def __init__(self):
        super().__init__()
self.filtermass = None # Type: float
@staticmethod
def from_dict(instance, entity_data: dict):
BaseFilter.from_dict(instance, entity_data)
instance.filtermass = float(entity_data.get('filtermass', 0)) # Type: float
class filter_damage_type(BaseFilter):
def __init__(self):
        super().__init__()
self.damagetype = "CHOICES NOT SUPPORTED" # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
BaseFilter.from_dict(instance, entity_data)
instance.damagetype = entity_data.get('damagetype', "CHOICES NOT SUPPORTED") # Type: choices
class filter_enemy(BaseFilter):
icon_sprite = "editor/filter_class.vmt"
def __init__(self):
        super().__init__()
self.filtername = None # Type: string
self.filter_radius = None # Type: float
self.filter_outer_radius = None # Type: float
self.filter_max_per_enemy = None # Type: integer
@staticmethod
def from_dict(instance, entity_data: dict):
BaseFilter.from_dict(instance, entity_data)
instance.filtername = entity_data.get('filtername', None) # Type: string
instance.filter_radius = float(entity_data.get('filter_radius', 0)) # Type: float
instance.filter_outer_radius = float(entity_data.get('filter_outer_radius', 0)) # Type: float
instance.filter_max_per_enemy = parse_source_value(entity_data.get('filter_max_per_enemy', 0)) # Type: integer
class point_anglesensor(Targetname, Parentname, EnableDisable):
def __init__(self):
        Targetname.__init__(self)
        Parentname.__init__(self)
        EnableDisable.__init__(self)
self.origin = [0, 0, 0]
self.target = None # Type: target_destination
self.lookatname = None # Type: target_destination
self.duration = None # Type: float
self.tolerance = None # Type: integer
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
EnableDisable.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.target = entity_data.get('target', None) # Type: target_destination
instance.lookatname = entity_data.get('lookatname', None) # Type: target_destination
instance.duration = float(entity_data.get('duration', 0)) # Type: float
instance.tolerance = parse_source_value(entity_data.get('tolerance', 0)) # Type: integer
class point_angularvelocitysensor(Targetname):
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.target = None # Type: target_destination
self.threshold = None # Type: float
self.fireinterval = 0.2 # Type: float
self.axis = None # Type: vecline
self.usehelper = None # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.target = entity_data.get('target', None) # Type: target_destination
instance.threshold = float(entity_data.get('threshold', 0)) # Type: float
instance.fireinterval = float(entity_data.get('fireinterval', 0.2)) # Type: float
instance.axis = entity_data.get('axis', None) # Type: vecline
instance.usehelper = entity_data.get('usehelper', None) # Type: choices
class point_velocitysensor(Targetname):
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.target = None # Type: target_destination
self.axis = None # Type: vecline
self.enabled = "CHOICES NOT SUPPORTED" # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.target = entity_data.get('target', None) # Type: target_destination
instance.axis = entity_data.get('axis', None) # Type: vecline
instance.enabled = entity_data.get('enabled', "CHOICES NOT SUPPORTED") # Type: choices
class point_proximity_sensor(Targetname, Parentname, EnableDisable, Angles):
def __init__(self):
        Targetname.__init__(self)
        Parentname.__init__(self)
        EnableDisable.__init__(self)
        Angles.__init__(self)
self.origin = [0, 0, 0]
self.target = None # Type: target_destination
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
EnableDisable.from_dict(instance, entity_data)
Angles.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.target = entity_data.get('target', None) # Type: target_destination
class point_teleport(Targetname, Angles):
def __init__(self):
        Targetname.__init__(self)
        Angles.__init__(self)
self.origin = [0, 0, 0]
self.target = None # Type: target_destination
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Angles.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.target = entity_data.get('target', None) # Type: target_destination
class point_hurt(Targetname):
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.DamageTarget = None # Type: string
self.DamageRadius = 256 # Type: float
self.Damage = 5 # Type: integer
self.DamageDelay = 1 # Type: float
self.DamageType = None # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.DamageTarget = entity_data.get('damagetarget', None) # Type: string
instance.DamageRadius = float(entity_data.get('damageradius', 256)) # Type: float
instance.Damage = parse_source_value(entity_data.get('damage', 5)) # Type: integer
instance.DamageDelay = float(entity_data.get('damagedelay', 1)) # Type: float
instance.DamageType = entity_data.get('damagetype', None) # Type: choices
class point_playermoveconstraint(Targetname):
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.radius = 256 # Type: float
self.width = 75.0 # Type: float
self.speedfactor = 0.15 # Type: float
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.radius = float(entity_data.get('radius', 256)) # Type: float
instance.width = float(entity_data.get('width', 75.0)) # Type: float
instance.speedfactor = float(entity_data.get('speedfactor', 0.15)) # Type: float
class func_physbox(Origin, RenderFields, BreakableBrush):
def __init__(self):
        BreakableBrush.__init__(self)
        RenderFields.__init__(self)
        Shadow.__init__(self)
        Origin.__init__(self)
        Targetname.__init__(self)
self._minlight = None # Type: string
self.Damagetype = None # Type: choices
self.massScale = 0 # Type: float
self.overridescript = None # Type: string
self.damagetoenablemotion = None # Type: integer
self.forcetoenablemotion = None # Type: float
self.preferredcarryangles = [0.0, 0.0, 0.0] # Type: vector
self.notsolid = None # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
Shadow.from_dict(instance, entity_data)
Origin.from_dict(instance, entity_data)
RenderFields.from_dict(instance, entity_data)
Targetname.from_dict(instance, entity_data)
BreakableBrush.from_dict(instance, entity_data)
instance._minlight = entity_data.get('_minlight', None) # Type: string
instance.Damagetype = entity_data.get('damagetype', None) # Type: choices
instance.massScale = float(entity_data.get('massscale', 0)) # Type: float
instance.overridescript = entity_data.get('overridescript', None) # Type: string
instance.damagetoenablemotion = parse_source_value(entity_data.get('damagetoenablemotion', 0)) # Type: integer
instance.forcetoenablemotion = float(entity_data.get('forcetoenablemotion', 0)) # Type: float
instance.preferredcarryangles = parse_float_vector(entity_data.get('preferredcarryangles', "0 0 0")) # Type: vector
instance.notsolid = entity_data.get('notsolid', None) # Type: choices
class TwoObjectPhysics(Targetname):
def __init__(self):
        super().__init__()
self.attach1 = None # Type: target_destination
self.attach2 = None # Type: target_destination
self.constraintsystem = None # Type: target_destination
self.forcelimit = 0 # Type: float
self.torquelimit = 0 # Type: float
self.breaksound = None # Type: sound
self.teleportfollowdistance = 0 # Type: float
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.attach1 = entity_data.get('attach1', None) # Type: target_destination
instance.attach2 = entity_data.get('attach2', None) # Type: target_destination
instance.constraintsystem = entity_data.get('constraintsystem', None) # Type: target_destination
instance.forcelimit = float(entity_data.get('forcelimit', 0)) # Type: float
instance.torquelimit = float(entity_data.get('torquelimit', 0)) # Type: float
instance.breaksound = entity_data.get('breaksound', None) # Type: sound
instance.teleportfollowdistance = float(entity_data.get('teleportfollowdistance', 0)) # Type: float
class phys_constraintsystem(Targetname):
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.additionaliterations = None # Type: integer
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.additionaliterations = parse_source_value(entity_data.get('additionaliterations', 0)) # Type: integer
class phys_keepupright(Targetname, Angles):
def __init__(self):
        Targetname.__init__(self)
        Angles.__init__(self)
self.origin = [0, 0, 0]
self.attach1 = None # Type: target_destination
self.angularlimit = 15 # Type: float
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Angles.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.attach1 = entity_data.get('attach1', None) # Type: target_destination
instance.angularlimit = float(entity_data.get('angularlimit', 15)) # Type: float
class physics_cannister(Targetname, Angles):
def __init__(self):
        Targetname.__init__(self)
        Angles.__init__(self)
self.origin = [0, 0, 0]
self.model = "models/fire_equipment/w_weldtank.mdl" # Type: studio
self.expdamage = "200.0" # Type: string
self.expradius = "250.0" # Type: string
self.health = 25 # Type: integer
self.thrust = "3000.0" # Type: string
self.fuel = "12.0" # Type: string
self.rendercolor = [255, 255, 255] # Type: color255
self.renderamt = 128 # Type: integer
self.gassound = "ambient/objects/cannister_loop.wav" # Type: sound
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Angles.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.model = entity_data.get('model', "models/fire_equipment/w_weldtank.mdl") # Type: studio
instance.expdamage = entity_data.get('expdamage', "200.0") # Type: string
instance.expradius = entity_data.get('expradius', "250.0") # Type: string
instance.health = parse_source_value(entity_data.get('health', 25)) # Type: integer
instance.thrust = entity_data.get('thrust', "3000.0") # Type: string
instance.fuel = entity_data.get('fuel', "12.0") # Type: string
instance.rendercolor = parse_int_vector(entity_data.get('rendercolor', "255 255 255")) # Type: color255
instance.renderamt = parse_source_value(entity_data.get('renderamt', 128)) # Type: integer
instance.gassound = entity_data.get('gassound', "ambient/objects/cannister_loop.wav") # Type: sound
class info_constraint_anchor(Targetname, Parentname):
def __init__(self):
        Targetname.__init__(self)
        Parentname.__init__(self)
self.origin = [0, 0, 0]
self.massScale = 1 # Type: float
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.massScale = float(entity_data.get('massscale', 1)) # Type: float
class info_mass_center(Base):
def __init__(self):
super().__init__()
self.origin = [0, 0, 0]
self.target = None # Type: target_destination
@staticmethod
def from_dict(instance, entity_data: dict):
Base.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.target = entity_data.get('target', None) # Type: target_destination
class phys_spring(Targetname):
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.attach1 = None # Type: target_destination
self.attach2 = None # Type: target_destination
self.springaxis = None # Type: vecline
self.length = "0" # Type: string
self.constant = "50" # Type: string
self.damping = "2.0" # Type: string
self.relativedamping = "0.1" # Type: string
self.breaklength = "0" # Type: string
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.attach1 = entity_data.get('attach1', None) # Type: target_destination
instance.attach2 = entity_data.get('attach2', None) # Type: target_destination
instance.springaxis = entity_data.get('springaxis', None) # Type: vecline
instance.length = entity_data.get('length', "0") # Type: string
instance.constant = entity_data.get('constant', "50") # Type: string
instance.damping = entity_data.get('damping', "2.0") # Type: string
instance.relativedamping = entity_data.get('relativedamping', "0.1") # Type: string
instance.breaklength = entity_data.get('breaklength', "0") # Type: string
class phys_hinge(TwoObjectPhysics):
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.hingefriction = 0 # Type: float
self.hingeaxis = None # Type: vecline
self.SystemLoadScale = 1 # Type: float
self.minSoundThreshold = 6 # Type: float
self.maxSoundThreshold = 80 # Type: float
self.slidesoundfwd = None # Type: sound
self.slidesoundback = None # Type: sound
self.reversalsoundthresholdSmall = 0 # Type: float
self.reversalsoundthresholdMedium = 0 # Type: float
self.reversalsoundthresholdLarge = 0 # Type: float
self.reversalsoundSmall = None # Type: sound
self.reversalsoundMedium = None # Type: sound
self.reversalsoundLarge = None # Type: sound
@staticmethod
def from_dict(instance, entity_data: dict):
TwoObjectPhysics.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.hingefriction = float(entity_data.get('hingefriction', 0)) # Type: float
instance.hingeaxis = entity_data.get('hingeaxis', None) # Type: vecline
instance.SystemLoadScale = float(entity_data.get('systemloadscale', 1)) # Type: float
instance.minSoundThreshold = float(entity_data.get('minsoundthreshold', 6)) # Type: float
instance.maxSoundThreshold = float(entity_data.get('maxsoundthreshold', 80)) # Type: float
instance.slidesoundfwd = entity_data.get('slidesoundfwd', None) # Type: sound
instance.slidesoundback = entity_data.get('slidesoundback', None) # Type: sound
instance.reversalsoundthresholdSmall = float(entity_data.get('reversalsoundthresholdsmall', 0)) # Type: float
instance.reversalsoundthresholdMedium = float(entity_data.get('reversalsoundthresholdmedium', 0)) # Type: float
instance.reversalsoundthresholdLarge = float(entity_data.get('reversalsoundthresholdlarge', 0)) # Type: float
instance.reversalsoundSmall = entity_data.get('reversalsoundsmall', None) # Type: sound
instance.reversalsoundMedium = entity_data.get('reversalsoundmedium', None) # Type: sound
instance.reversalsoundLarge = entity_data.get('reversalsoundlarge', None) # Type: sound
class phys_ballsocket(TwoObjectPhysics):
icon_sprite = "editor/phys_ballsocket.vmt"
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
@staticmethod
def from_dict(instance, entity_data: dict):
TwoObjectPhysics.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
class phys_constraint(TwoObjectPhysics):
model = "models/editor/axis_helper.mdl"
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
@staticmethod
def from_dict(instance, entity_data: dict):
TwoObjectPhysics.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
class phys_pulleyconstraint(TwoObjectPhysics):
model = "models/editor/axis_helper.mdl"
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.addlength = 0 # Type: float
self.gearratio = 1 # Type: float
self.position2 = None # Type: vecline
@staticmethod
def from_dict(instance, entity_data: dict):
TwoObjectPhysics.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.addlength = float(entity_data.get('addlength', 0)) # Type: float
instance.gearratio = float(entity_data.get('gearratio', 1)) # Type: float
instance.position2 = entity_data.get('position2', None) # Type: vecline
class phys_slideconstraint(TwoObjectPhysics):
model = "models/editor/axis_helper.mdl"
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.slideaxis = None # Type: vecline
self.slidefriction = 0 # Type: float
self.SystemLoadScale = 1 # Type: float
self.minSoundThreshold = 6 # Type: float
self.maxSoundThreshold = 80 # Type: float
self.slidesoundfwd = None # Type: sound
self.slidesoundback = None # Type: sound
self.reversalsoundthresholdSmall = 0 # Type: float
self.reversalsoundthresholdMedium = 0 # Type: float
self.reversalsoundthresholdLarge = 0 # Type: float
self.reversalsoundSmall = None # Type: sound
self.reversalsoundMedium = None # Type: sound
self.reversalsoundLarge = None # Type: sound
@staticmethod
def from_dict(instance, entity_data: dict):
TwoObjectPhysics.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.slideaxis = entity_data.get('slideaxis', None) # Type: vecline
instance.slidefriction = float(entity_data.get('slidefriction', 0)) # Type: float
instance.SystemLoadScale = float(entity_data.get('systemloadscale', 1)) # Type: float
instance.minSoundThreshold = float(entity_data.get('minsoundthreshold', 6)) # Type: float
instance.maxSoundThreshold = float(entity_data.get('maxsoundthreshold', 80)) # Type: float
instance.slidesoundfwd = entity_data.get('slidesoundfwd', None) # Type: sound
instance.slidesoundback = entity_data.get('slidesoundback', None) # Type: sound
instance.reversalsoundthresholdSmall = float(entity_data.get('reversalsoundthresholdsmall', 0)) # Type: float
instance.reversalsoundthresholdMedium = float(entity_data.get('reversalsoundthresholdmedium', 0)) # Type: float
instance.reversalsoundthresholdLarge = float(entity_data.get('reversalsoundthresholdlarge', 0)) # Type: float
instance.reversalsoundSmall = entity_data.get('reversalsoundsmall', None) # Type: sound
instance.reversalsoundMedium = entity_data.get('reversalsoundmedium', None) # Type: sound
instance.reversalsoundLarge = entity_data.get('reversalsoundlarge', None) # Type: sound
class phys_lengthconstraint(TwoObjectPhysics):
model = "models/editor/axis_helper.mdl"
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.addlength = 0 # Type: float
self.minlength = 0 # Type: float
        self.attachpoint = None # Set to None due to a bug in the BlackMesa base.fgd file # Type: vecline
@staticmethod
def from_dict(instance, entity_data: dict):
TwoObjectPhysics.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.addlength = float(entity_data.get('addlength', 0)) # Type: float
instance.minlength = float(entity_data.get('minlength', 0)) # Type: float
        instance.attachpoint = entity_data.get('attachpoint', None) # Set to None due to a bug in the BlackMesa base.fgd file # Type: vecline
class phys_ragdollconstraint(TwoObjectPhysics):
model = "models/editor/axis_helper.mdl"
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.xmin = -90 # Type: float
self.xmax = 90 # Type: float
self.ymin = 0 # Type: float
self.ymax = 0 # Type: float
self.zmin = 0 # Type: float
self.zmax = 0 # Type: float
self.xfriction = 0 # Type: float
self.yfriction = 0 # Type: float
self.zfriction = 0 # Type: float
@staticmethod
def from_dict(instance, entity_data: dict):
TwoObjectPhysics.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.xmin = float(entity_data.get('xmin', -90)) # Type: float
instance.xmax = float(entity_data.get('xmax', 90)) # Type: float
instance.ymin = float(entity_data.get('ymin', 0)) # Type: float
instance.ymax = float(entity_data.get('ymax', 0)) # Type: float
instance.zmin = float(entity_data.get('zmin', 0)) # Type: float
instance.zmax = float(entity_data.get('zmax', 0)) # Type: float
instance.xfriction = float(entity_data.get('xfriction', 0)) # Type: float
instance.yfriction = float(entity_data.get('yfriction', 0)) # Type: float
instance.zfriction = float(entity_data.get('zfriction', 0)) # Type: float
class phys_convert(Targetname):
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.target = None # Type: target_destination
self.swapmodel = None # Type: string
self.massoverride = 0 # Type: float
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.target = entity_data.get('target', None) # Type: target_destination
instance.swapmodel = entity_data.get('swapmodel', None) # Type: string
instance.massoverride = float(entity_data.get('massoverride', 0)) # Type: float
class ForceController(Targetname):
def __init__(self):
        super().__init__()
self.attach1 = None # Type: target_destination
self.forcetime = "0" # Type: string
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.attach1 = entity_data.get('attach1', None) # Type: target_destination
instance.forcetime = entity_data.get('forcetime', "0") # Type: string
class phys_thruster(Angles, ForceController):
def __init__(self):
        Angles.__init__(self)
        ForceController.__init__(self)
self.origin = [0, 0, 0]
self.force = "0" # Type: string
@staticmethod
def from_dict(instance, entity_data: dict):
Angles.from_dict(instance, entity_data)
ForceController.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.force = entity_data.get('force', "0") # Type: string
class phys_torque(ForceController):
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.force = "0" # Type: string
self.axis = None # Type: vecline
@staticmethod
def from_dict(instance, entity_data: dict):
ForceController.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.force = entity_data.get('force', "0") # Type: string
instance.axis = entity_data.get('axis', None) # Type: vecline
class phys_motor(Targetname):
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.speed = "0" # Type: string
self.spinup = "1" # Type: string
self.inertiafactor = 1.0 # Type: float
self.axis = None # Type: vecline
self.attach1 = None # Type: target_destination
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.speed = entity_data.get('speed', "0") # Type: string
instance.spinup = entity_data.get('spinup', "1") # Type: string
instance.inertiafactor = float(entity_data.get('inertiafactor', 1.0)) # Type: float
instance.axis = entity_data.get('axis', None) # Type: vecline
instance.attach1 = entity_data.get('attach1', None) # Type: target_destination
class phys_magnet(Targetname, Parentname, Studiomodel, Angles):
def __init__(self):
        Targetname.__init__(self)
        Parentname.__init__(self)
        Studiomodel.__init__(self)
        Angles.__init__(self)
self.origin = [0, 0, 0]
self.forcelimit = 0 # Type: float
self.torquelimit = 0 # Type: float
self.massScale = 0 # Type: float
self.overridescript = None # Type: string
self.maxobjects = None # Type: integer
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
Studiomodel.from_dict(instance, entity_data)
Angles.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.forcelimit = float(entity_data.get('forcelimit', 0)) # Type: float
instance.torquelimit = float(entity_data.get('torquelimit', 0)) # Type: float
instance.massScale = float(entity_data.get('massscale', 0)) # Type: float
instance.overridescript = entity_data.get('overridescript', None) # Type: string
instance.maxobjects = parse_source_value(entity_data.get('maxobjects', 0)) # Type: integer
class prop_detail_base(Base):
def __init__(self):
super().__init__()
self.model = None # Type: studio
@staticmethod
def from_dict(instance, entity_data: dict):
Base.from_dict(instance, entity_data)
instance.model = entity_data.get('model', None) # Type: studio
class prop_static_base(Angles, DXLevelChoice):
def __init__(self):
        Angles.__init__(self)
        DXLevelChoice.__init__(self)
self.model = None # Type: studio
self.skin = None # Type: integer
self.solid = "CHOICES NOT SUPPORTED" # Type: choices
self.disableshadows = None # Type: choices
self.screenspacefade = None # Type: choices
self.fademindist = -1 # Type: float
self.fademaxdist = None # Type: float
self.fadescale = 1 # Type: float
self.lightingorigin = None # Type: target_destination
self.disablevertexlighting = None # Type: choices
self.disableselfshadowing = None # Type: choices
self.ignorenormals = None # Type: choices
self.generatelightmaps = None # Type: choices
self.lightmapresolutionx = 32 # Type: integer
self.lightmapresolutiony = 32 # Type: integer
@staticmethod
def from_dict(instance, entity_data: dict):
Angles.from_dict(instance, entity_data)
DXLevelChoice.from_dict(instance, entity_data)
instance.model = entity_data.get('model', None) # Type: studio
instance.skin = parse_source_value(entity_data.get('skin', 0)) # Type: integer
instance.solid = entity_data.get('solid', "CHOICES NOT SUPPORTED") # Type: choices
instance.disableshadows = entity_data.get('disableshadows', None) # Type: choices
instance.screenspacefade = entity_data.get('screenspacefade', None) # Type: choices
instance.fademindist = float(entity_data.get('fademindist', -1)) # Type: float
instance.fademaxdist = float(entity_data.get('fademaxdist', 0)) # Type: float
instance.fadescale = float(entity_data.get('fadescale', 1)) # Type: float
instance.lightingorigin = entity_data.get('lightingorigin', None) # Type: target_destination
instance.disablevertexlighting = entity_data.get('disablevertexlighting', None) # Type: choices
instance.disableselfshadowing = entity_data.get('disableselfshadowing', None) # Type: choices
instance.ignorenormals = entity_data.get('ignorenormals', None) # Type: choices
instance.generatelightmaps = entity_data.get('generatelightmaps', None) # Type: choices
instance.lightmapresolutionx = parse_source_value(entity_data.get('lightmapresolutionx', 32)) # Type: integer
instance.lightmapresolutiony = parse_source_value(entity_data.get('lightmapresolutiony', 32)) # Type: integer
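# "CHOICES NOT SUPPORTED" appears to be the FGD-to-Python generator's
# placeholder for choices-type defaults it could not translate; treat it as
# "unset" rather than as a meaningful value when consuming these fields.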
class BaseFadeProp(Base):
def __init__(self):
super().__init__()
self.fademindist = -1 # Type: float
self.fademaxdist = None # Type: float
self.fadescale = 1 # Type: float
@staticmethod
def from_dict(instance, entity_data: dict):
Base.from_dict(instance, entity_data)
instance.fademindist = float(entity_data.get('fademindist', -1)) # Type: float
instance.fademaxdist = float(entity_data.get('fademaxdist', 0)) # Type: float
instance.fadescale = float(entity_data.get('fadescale', 1)) # Type: float
class prop_dynamic_base(Global, Angles, Studiomodel, DXLevelChoice, Parentname, BreakableProp, BaseFadeProp, RenderFields):
def __init__(self):
        Global.__init__(self)
        Angles.__init__(self)
        Studiomodel.__init__(self)
        DXLevelChoice.__init__(self)
        Parentname.__init__(self)
        BreakableProp.__init__(self)
        BaseFadeProp.__init__(self)
        RenderFields.__init__(self)
self.solid = "CHOICES NOT SUPPORTED" # Type: choices
self.DefaultAnim = None # Type: string
self.RandomAnimation = None # Type: choices
self.MinAnimTime = 5 # Type: float
self.MaxAnimTime = 10 # Type: float
self.SetBodyGroup = None # Type: integer
self.DisableBoneFollowers = None # Type: choices
self.lightingorigin = None # Type: target_destination
@staticmethod
def from_dict(instance, entity_data: dict):
Global.from_dict(instance, entity_data)
Angles.from_dict(instance, entity_data)
Studiomodel.from_dict(instance, entity_data)
DXLevelChoice.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
BreakableProp.from_dict(instance, entity_data)
BaseFadeProp.from_dict(instance, entity_data)
RenderFields.from_dict(instance, entity_data)
instance.solid = entity_data.get('solid', "CHOICES NOT SUPPORTED") # Type: choices
instance.DefaultAnim = entity_data.get('defaultanim', None) # Type: string
instance.RandomAnimation = entity_data.get('randomanimation', None) # Type: choices
instance.MinAnimTime = float(entity_data.get('minanimtime', 5)) # Type: float
instance.MaxAnimTime = float(entity_data.get('maxanimtime', 10)) # Type: float
instance.SetBodyGroup = parse_source_value(entity_data.get('setbodygroup', 0)) # Type: integer
instance.DisableBoneFollowers = entity_data.get('disablebonefollowers', None) # Type: choices
instance.lightingorigin = entity_data.get('lightingorigin', None) # Type: target_destination
class prop_detail(prop_detail_base):
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
@staticmethod
def from_dict(instance, entity_data: dict):
prop_detail_base.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
class prop_static(prop_static_base):
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
@staticmethod
def from_dict(instance, entity_data: dict):
prop_static_base.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
class prop_dynamic(EnableDisable, prop_dynamic_base):
def __init__(self):
        EnableDisable.__init__(self)
        prop_dynamic_base.__init__(self)
self.origin = [0, 0, 0]
@staticmethod
def from_dict(instance, entity_data: dict):
EnableDisable.from_dict(instance, entity_data)
prop_dynamic_base.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
class prop_dynamic_override(prop_dynamic_base):
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.health = None # Type: integer
@staticmethod
def from_dict(instance, entity_data: dict):
prop_dynamic_base.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.health = parse_source_value(entity_data.get('health', 0)) # Type: integer
class BasePropPhysics(Global, Angles, Studiomodel, DXLevelChoice, BreakableProp, BaseFadeProp):
def __init__(self):
        Global.__init__(self)
        Angles.__init__(self)
        Studiomodel.__init__(self)
        DXLevelChoice.__init__(self)
        BreakableProp.__init__(self)
        BaseFadeProp.__init__(self)
        Targetname.__init__(self)  # not a declared base; mirrors from_dict below
self.minhealthdmg = None # Type: integer
self.shadowcastdist = None # Type: integer
self.physdamagescale = 0.1 # Type: float
self.Damagetype = None # Type: choices
self.nodamageforces = None # Type: choices
self.inertiaScale = 1.0 # Type: float
self.massScale = 0 # Type: float
self.overridescript = None # Type: string
self.damagetoenablemotion = None # Type: integer
self.forcetoenablemotion = None # Type: float
self.puntsound = None # Type: sound
@staticmethod
def from_dict(instance, entity_data: dict):
Global.from_dict(instance, entity_data)
Angles.from_dict(instance, entity_data)
Studiomodel.from_dict(instance, entity_data)
DXLevelChoice.from_dict(instance, entity_data)
BreakableProp.from_dict(instance, entity_data)
BaseFadeProp.from_dict(instance, entity_data)
Targetname.from_dict(instance, entity_data)
instance.minhealthdmg = parse_source_value(entity_data.get('minhealthdmg', 0)) # Type: integer
instance.shadowcastdist = parse_source_value(entity_data.get('shadowcastdist', 0)) # Type: integer
instance.physdamagescale = float(entity_data.get('physdamagescale', 0.1)) # Type: float
instance.Damagetype = entity_data.get('damagetype', None) # Type: choices
instance.nodamageforces = entity_data.get('nodamageforces', None) # Type: choices
instance.inertiaScale = float(entity_data.get('inertiascale', 1.0)) # Type: float
instance.massScale = float(entity_data.get('massscale', 0)) # Type: float
instance.overridescript = entity_data.get('overridescript', None) # Type: string
instance.damagetoenablemotion = parse_source_value(entity_data.get('damagetoenablemotion', 0)) # Type: integer
instance.forcetoenablemotion = float(entity_data.get('forcetoenablemotion', 0)) # Type: float
instance.puntsound = entity_data.get('puntsound', None) # Type: sound
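# BasePropPhysics collects the keyvalues shared by the prop_physics variants
# below. Targetname is not in its declared base list, yet both __init__ and
# from_dict invoke it explicitly, so physics props still receive name fields;
# the explicit per-base calls keep hydration order identical to the FGD
# inheritance chain.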
class prop_physics_override(BasePropPhysics):
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.health = None # Type: integer
@staticmethod
def from_dict(instance, entity_data: dict):
BasePropPhysics.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.health = parse_source_value(entity_data.get('health', 0)) # Type: integer
class prop_physics(RenderFields, BasePropPhysics):
def __init__(self):
        RenderFields.__init__(self)
        BasePropPhysics.__init__(self)
self.origin = [0, 0, 0]
@staticmethod
def from_dict(instance, entity_data: dict):
RenderFields.from_dict(instance, entity_data)
BasePropPhysics.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
class prop_physics_multiplayer(prop_physics):
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.physicsmode = None # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
prop_physics.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.physicsmode = entity_data.get('physicsmode', None) # Type: choices
class prop_ragdoll(Angles, DXLevelChoice, Studiomodel, EnableDisable, BaseFadeProp, Targetname):
def __init__(self):
        Angles.__init__(self)
        DXLevelChoice.__init__(self)
        Studiomodel.__init__(self)
        EnableDisable.__init__(self)
        BaseFadeProp.__init__(self)
        Targetname.__init__(self)
self.origin = [0, 0, 0]
self.angleOverride = None # Type: string
@staticmethod
def from_dict(instance, entity_data: dict):
Angles.from_dict(instance, entity_data)
DXLevelChoice.from_dict(instance, entity_data)
Studiomodel.from_dict(instance, entity_data)
EnableDisable.from_dict(instance, entity_data)
BaseFadeProp.from_dict(instance, entity_data)
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.angleOverride = entity_data.get('angleoverride', None) # Type: string
class prop_dynamic_ornament(prop_dynamic_base):
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.InitialOwner = None # Type: string
@staticmethod
def from_dict(instance, entity_data: dict):
prop_dynamic_base.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.InitialOwner = entity_data.get('initialowner', None) # Type: string
class func_areaportal(Targetname):
def __init__(self):
        super().__init__()
self.target = None # Type: target_destination
self.StartOpen = "CHOICES NOT SUPPORTED" # Type: choices
self.PortalVersion = 1 # Type: integer
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.target = entity_data.get('target', None) # Type: target_destination
instance.StartOpen = entity_data.get('startopen', "CHOICES NOT SUPPORTED") # Type: choices
instance.PortalVersion = parse_source_value(entity_data.get('portalversion', 1)) # Type: integer
class func_occluder(Targetname):
def __init__(self):
        super().__init__()
self.StartActive = "CHOICES NOT SUPPORTED" # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.StartActive = entity_data.get('startactive', "CHOICES NOT SUPPORTED") # Type: choices
class func_breakable(BreakableBrush, RenderFields, Origin):
def __init__(self):
        BreakableBrush.__init__(self)
        Shadow.__init__(self)  # not a declared base; mirrors from_dict below
        RenderFields.__init__(self)
        Origin.__init__(self)
self.minhealthdmg = None # Type: integer
self._minlight = None # Type: string
self.physdamagescale = 1.0 # Type: float
@staticmethod
def from_dict(instance, entity_data: dict):
BreakableBrush.from_dict(instance, entity_data)
Shadow.from_dict(instance, entity_data)
RenderFields.from_dict(instance, entity_data)
Origin.from_dict(instance, entity_data)
instance.minhealthdmg = parse_source_value(entity_data.get('minhealthdmg', 0)) # Type: integer
instance._minlight = entity_data.get('_minlight', None) # Type: string
instance.physdamagescale = float(entity_data.get('physdamagescale', 1.0)) # Type: float
class func_breakable_surf(BreakableBrush, RenderFields):
def __init__(self):
        BreakableBrush.__init__(self)
        RenderFields.__init__(self)
        Shadow.__init__(self)  # not a declared base; mirrors from_dict below
self.fragility = 100 # Type: integer
self.surfacetype = None # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
BreakableBrush.from_dict(instance, entity_data)
RenderFields.from_dict(instance, entity_data)
Shadow.from_dict(instance, entity_data)
instance.fragility = parse_source_value(entity_data.get('fragility', 100)) # Type: integer
instance.surfacetype = entity_data.get('surfacetype', None) # Type: choices
class func_conveyor(Targetname, Parentname, RenderFields, Shadow):
def __init__(self):
        Targetname.__init__(self)
        Parentname.__init__(self)
        RenderFields.__init__(self)
        Shadow.__init__(self)
self.movedir = [0.0, 0.0, 0.0] # Type: angle
self.speed = "100" # Type: string
self._minlight = None # Type: string
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
RenderFields.from_dict(instance, entity_data)
Shadow.from_dict(instance, entity_data)
instance.movedir = parse_float_vector(entity_data.get('movedir', "0 0 0")) # Type: angle
instance.speed = entity_data.get('speed', "100") # Type: string
instance._minlight = entity_data.get('_minlight', None) # Type: string
class func_detail(DXLevelChoice):
def __init__(self):
        super().__init__()
@staticmethod
def from_dict(instance, entity_data: dict):
DXLevelChoice.from_dict(instance, entity_data)
class func_viscluster(Base):
def __init__(self):
super().__init__()
@staticmethod
def from_dict(instance, entity_data: dict):
Base.from_dict(instance, entity_data)
class func_illusionary(Origin, Shadow, Parentname, RenderFields, Targetname):
def __init__(self):
        Origin.__init__(self)
        Shadow.__init__(self)
        Parentname.__init__(self)
        RenderFields.__init__(self)
        Targetname.__init__(self)
self._minlight = None # Type: string
@staticmethod
def from_dict(instance, entity_data: dict):
Origin.from_dict(instance, entity_data)
Shadow.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
RenderFields.from_dict(instance, entity_data)
Targetname.from_dict(instance, entity_data)
instance._minlight = entity_data.get('_minlight', None) # Type: string
class func_precipitation(Targetname, Parentname):
def __init__(self):
        Targetname.__init__(self)
        Parentname.__init__(self)
self.renderamt = 5 # Type: integer
self.rendercolor = [100, 100, 100] # Type: color255
self.preciptype = None # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
instance.renderamt = parse_source_value(entity_data.get('renderamt', 5)) # Type: integer
instance.rendercolor = parse_int_vector(entity_data.get('rendercolor', "100 100 100")) # Type: color255
instance.preciptype = entity_data.get('preciptype', None) # Type: choices
class func_wall_toggle(func_wall):
def __init__(self):
        super().__init__()
@staticmethod
def from_dict(instance, entity_data: dict):
func_wall.from_dict(instance, entity_data)
class func_guntarget(Targetname, Parentname, RenderFields, Global):
def __init__(self):
        Targetname.__init__(self)
        Parentname.__init__(self)
        RenderFields.__init__(self)
        Global.__init__(self)
self.speed = 100 # Type: integer
self.target = None # Type: target_destination
self.health = None # Type: integer
self._minlight = None # Type: string
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
RenderFields.from_dict(instance, entity_data)
Global.from_dict(instance, entity_data)
instance.speed = parse_source_value(entity_data.get('speed', 100)) # Type: integer
instance.target = entity_data.get('target', None) # Type: target_destination
instance.health = parse_source_value(entity_data.get('health', 0)) # Type: integer
instance._minlight = entity_data.get('_minlight', None) # Type: string
class func_fish_pool(Base):
def __init__(self):
super().__init__()
self.origin = [0, 0, 0]
self.model = "models/Junkola.mdl" # Type: studio
self.fish_count = 10 # Type: integer
self.max_range = 150 # Type: float
@staticmethod
def from_dict(instance, entity_data: dict):
Base.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.model = entity_data.get('model', "models/Junkola.mdl") # Type: studio
instance.fish_count = parse_source_value(entity_data.get('fish_count', 10)) # Type: integer
instance.max_range = float(entity_data.get('max_range', 150)) # Type: float
class PlatSounds(Base):
def __init__(self):
super().__init__()
self.movesnd = None # Type: choices
self.stopsnd = None # Type: choices
self.volume = "0.85" # Type: string
@staticmethod
def from_dict(instance, entity_data: dict):
Base.from_dict(instance, entity_data)
instance.movesnd = entity_data.get('movesnd', None) # Type: choices
instance.stopsnd = entity_data.get('stopsnd', None) # Type: choices
instance.volume = entity_data.get('volume', "0.85") # Type: string
class Trackchange(Global, PlatSounds, Parentname, RenderFields, Targetname):
def __init__(self):
        Global.__init__(self)
        PlatSounds.__init__(self)
        Parentname.__init__(self)
        RenderFields.__init__(self)
        Targetname.__init__(self)
self.height = None # Type: integer
self.rotation = None # Type: integer
self.train = None # Type: target_destination
self.toptrack = None # Type: target_destination
self.bottomtrack = None # Type: target_destination
self.speed = None # Type: integer
@staticmethod
def from_dict(instance, entity_data: dict):
Global.from_dict(instance, entity_data)
PlatSounds.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
RenderFields.from_dict(instance, entity_data)
Targetname.from_dict(instance, entity_data)
instance.height = parse_source_value(entity_data.get('height', 0)) # Type: integer
instance.rotation = parse_source_value(entity_data.get('rotation', 0)) # Type: integer
instance.train = entity_data.get('train', None) # Type: target_destination
instance.toptrack = entity_data.get('toptrack', None) # Type: target_destination
instance.bottomtrack = entity_data.get('bottomtrack', None) # Type: target_destination
instance.speed = parse_source_value(entity_data.get('speed', 0)) # Type: integer
class BaseTrain(Global, Origin, Shadow, Parentname, RenderFields, Targetname):
def __init__(self):
        Global.__init__(self)
        Origin.__init__(self)
        Shadow.__init__(self)
        Parentname.__init__(self)
        RenderFields.__init__(self)
        Targetname.__init__(self)
self.target = None # Type: target_destination
self.startspeed = 100 # Type: integer
self.speed = None # Type: integer
self.velocitytype = None # Type: choices
self.orientationtype = "CHOICES NOT SUPPORTED" # Type: choices
self.wheels = 50 # Type: integer
self.height = 4 # Type: integer
self.bank = "0" # Type: string
self.dmg = None # Type: integer
self._minlight = None # Type: string
self.MoveSound = None # Type: sound
self.MovePingSound = None # Type: sound
self.StartSound = None # Type: sound
self.StopSound = None # Type: sound
self.volume = 10 # Type: integer
self.MoveSoundMinPitch = 60 # Type: integer
self.MoveSoundMaxPitch = 200 # Type: integer
self.MoveSoundMinTime = None # Type: float
self.MoveSoundMaxTime = None # Type: float
@staticmethod
def from_dict(instance, entity_data: dict):
Global.from_dict(instance, entity_data)
Origin.from_dict(instance, entity_data)
Shadow.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
RenderFields.from_dict(instance, entity_data)
Targetname.from_dict(instance, entity_data)
instance.target = entity_data.get('target', None) # Type: target_destination
instance.startspeed = parse_source_value(entity_data.get('startspeed', 100)) # Type: integer
instance.speed = parse_source_value(entity_data.get('speed', 0)) # Type: integer
instance.velocitytype = entity_data.get('velocitytype', None) # Type: choices
instance.orientationtype = entity_data.get('orientationtype', "CHOICES NOT SUPPORTED") # Type: choices
instance.wheels = parse_source_value(entity_data.get('wheels', 50)) # Type: integer
instance.height = parse_source_value(entity_data.get('height', 4)) # Type: integer
instance.bank = entity_data.get('bank', "0") # Type: string
instance.dmg = parse_source_value(entity_data.get('dmg', 0)) # Type: integer
instance._minlight = entity_data.get('_minlight', None) # Type: string
instance.MoveSound = entity_data.get('movesound', None) # Type: sound
instance.MovePingSound = entity_data.get('movepingsound', None) # Type: sound
instance.StartSound = entity_data.get('startsound', None) # Type: sound
instance.StopSound = entity_data.get('stopsound', None) # Type: sound
instance.volume = parse_source_value(entity_data.get('volume', 10)) # Type: integer
instance.MoveSoundMinPitch = parse_source_value(entity_data.get('movesoundminpitch', 60)) # Type: integer
instance.MoveSoundMaxPitch = parse_source_value(entity_data.get('movesoundmaxpitch', 200)) # Type: integer
instance.MoveSoundMinTime = float(entity_data.get('movesoundmintime', 0)) # Type: float
instance.MoveSoundMaxTime = float(entity_data.get('movesoundmaxtime', 0)) # Type: float
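# BaseTrain consolidates the movement and sound keyvalues shared by the
# track-based movers that follow (func_trackautochange, func_trackchange,
# func_tracktrain, func_tanktrain). A small sketch of the intended flow,
# using a hypothetical minimal keyvalue dict (field names are taken from the
# class above; all raw values arrive as strings and are coerced by the
# parse_* helpers):
def _tracktrain_sketch():
    train = func_tracktrain()
    func_tracktrain.from_dict(train, {
        'target': 'path_start',  # first path_track node to move toward
        'startspeed': '100',
        'height': '4',
    })
    return train.startspeed, train.height  # expected: (100, 4)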
class func_trackautochange(Trackchange):
def __init__(self):
        super().__init__()
self._minlight = None # Type: string
@staticmethod
def from_dict(instance, entity_data: dict):
Trackchange.from_dict(instance, entity_data)
instance._minlight = entity_data.get('_minlight', None) # Type: string
class func_trackchange(Trackchange):
def __init__(self):
        super().__init__()
self._minlight = None # Type: string
@staticmethod
def from_dict(instance, entity_data: dict):
Trackchange.from_dict(instance, entity_data)
instance._minlight = entity_data.get('_minlight', None) # Type: string
class func_tracktrain(BaseTrain):
def __init__(self):
        super().__init__()
self.ManualSpeedChanges = None # Type: choices
self.ManualAccelSpeed = None # Type: float
self.ManualDecelSpeed = None # Type: float
@staticmethod
def from_dict(instance, entity_data: dict):
BaseTrain.from_dict(instance, entity_data)
instance.ManualSpeedChanges = entity_data.get('manualspeedchanges', None) # Type: choices
instance.ManualAccelSpeed = float(entity_data.get('manualaccelspeed', 0)) # Type: float
instance.ManualDecelSpeed = float(entity_data.get('manualdecelspeed', 0)) # Type: float
class func_tanktrain(BaseTrain):
def __init__(self):
        super().__init__()
self.health = 100 # Type: integer
@staticmethod
def from_dict(instance, entity_data: dict):
BaseTrain.from_dict(instance, entity_data)
instance.health = parse_source_value(entity_data.get('health', 100)) # Type: integer
class func_traincontrols(Parentname, Global):
def __init__(self):
        Parentname.__init__(self)
        Global.__init__(self)
self.target = None # Type: target_destination
@staticmethod
def from_dict(instance, entity_data: dict):
Parentname.from_dict(instance, entity_data)
Global.from_dict(instance, entity_data)
instance.target = entity_data.get('target', None) # Type: target_destination
class tanktrain_aitarget(Targetname):
icon_sprite = "editor/tanktrain_aitarget.vmt"
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.target = None # Type: target_destination
self.newtarget = None # Type: target_destination
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.target = entity_data.get('target', None) # Type: target_destination
instance.newtarget = entity_data.get('newtarget', None) # Type: target_destination
class tanktrain_ai(Targetname):
icon_sprite = "editor/tanktrain_ai.vmt"
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.target = None # Type: target_destination
self.startsound = "vehicles/diesel_start1.wav" # Type: sound
self.enginesound = "vehicles/diesel_turbo_loop1.wav" # Type: sound
self.movementsound = "vehicles/tank_treads_loop1.wav" # Type: sound
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.target = entity_data.get('target', None) # Type: target_destination
instance.startsound = entity_data.get('startsound', "vehicles/diesel_start1.wav") # Type: sound
instance.enginesound = entity_data.get('enginesound', "vehicles/diesel_turbo_loop1.wav") # Type: sound
instance.movementsound = entity_data.get('movementsound', "vehicles/tank_treads_loop1.wav") # Type: sound
class path_track(Targetname, Parentname, Angles):
def __init__(self):
        Targetname.__init__(self)
        Parentname.__init__(self)
        Angles.__init__(self)
self.origin = [0, 0, 0]
self.target = None # Type: target_destination
self.altpath = None # Type: target_destination
self.speed = None # Type: float
self.radius = None # Type: float
self.orientationtype = "CHOICES NOT SUPPORTED" # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
Angles.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.target = entity_data.get('target', None) # Type: target_destination
instance.altpath = entity_data.get('altpath', None) # Type: target_destination
instance.speed = float(entity_data.get('speed', 0)) # Type: float
instance.radius = float(entity_data.get('radius', 0)) # Type: float
instance.orientationtype = entity_data.get('orientationtype', "CHOICES NOT SUPPORTED") # Type: choices
class test_traceline(Angles):
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
@staticmethod
def from_dict(instance, entity_data: dict):
Angles.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
class trigger_autosave(Targetname):
def __init__(self):
        super().__init__()
self.master = None # Type: string
self.NewLevelUnit = None # Type: choices
self.DangerousTimer = None # Type: float
self.MinimumHitPoints = None # Type: integer
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.master = entity_data.get('master', None) # Type: string
instance.NewLevelUnit = entity_data.get('newlevelunit', None) # Type: choices
instance.DangerousTimer = float(entity_data.get('dangeroustimer', 0)) # Type: float
instance.MinimumHitPoints = parse_source_value(entity_data.get('minimumhitpoints', 0)) # Type: integer
class trigger_changelevel(EnableDisable):
def __init__(self):
        super().__init__()
self.targetname = None # Type: target_source
self.map = None # Type: string
self.landmark = None # Type: target_destination
@staticmethod
def from_dict(instance, entity_data: dict):
EnableDisable.from_dict(instance, entity_data)
instance.targetname = entity_data.get('targetname', None) # Type: target_source
instance.map = entity_data.get('map', None) # Type: string
instance.landmark = entity_data.get('landmark', None) # Type: target_destination
class trigger_gravity(Trigger):
def __init__(self):
        super().__init__()
self.gravity = 1 # Type: integer
@staticmethod
def from_dict(instance, entity_data: dict):
Trigger.from_dict(instance, entity_data)
instance.gravity = parse_source_value(entity_data.get('gravity', 1)) # Type: integer
class trigger_playermovement(Trigger):
def __init__(self):
        super().__init__()
@staticmethod
def from_dict(instance, entity_data: dict):
Trigger.from_dict(instance, entity_data)
class trigger_soundscape(Trigger):
def __init__(self):
        super().__init__()
self.soundscape = None # Type: target_source
@staticmethod
def from_dict(instance, entity_data: dict):
Trigger.from_dict(instance, entity_data)
instance.soundscape = entity_data.get('soundscape', None) # Type: target_source
class trigger_hurt(Trigger):
def __init__(self):
        Trigger.__init__(self)
        Targetname.__init__(self)  # not a declared base; mirrors from_dict below
self.master = None # Type: string
self.damage = 10 # Type: integer
self.damagecap = 20 # Type: integer
self.damagetype = None # Type: choices
self.damagemodel = None # Type: choices
self.nodmgforce = None # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
Trigger.from_dict(instance, entity_data)
Targetname.from_dict(instance, entity_data)
instance.master = entity_data.get('master', None) # Type: string
instance.damage = parse_source_value(entity_data.get('damage', 10)) # Type: integer
instance.damagecap = parse_source_value(entity_data.get('damagecap', 20)) # Type: integer
instance.damagetype = entity_data.get('damagetype', None) # Type: choices
instance.damagemodel = entity_data.get('damagemodel', None) # Type: choices
instance.nodmgforce = entity_data.get('nodmgforce', None) # Type: choices
class trigger_remove(Trigger):
def __init__(self):
        Trigger.__init__(self)
        Targetname.__init__(self)  # not a declared base; mirrors from_dict below
@staticmethod
def from_dict(instance, entity_data: dict):
Trigger.from_dict(instance, entity_data)
Targetname.from_dict(instance, entity_data)
class trigger_multiple(Trigger):
def __init__(self):
        super().__init__()
self.wait = 1 # Type: integer
@staticmethod
def from_dict(instance, entity_data: dict):
Trigger.from_dict(instance, entity_data)
instance.wait = parse_source_value(entity_data.get('wait', 1)) # Type: integer
class trigger_once(TriggerOnce):
def __init__(self):
        super().__init__()
@staticmethod
def from_dict(instance, entity_data: dict):
TriggerOnce.from_dict(instance, entity_data)
class trigger_look(Trigger):
def __init__(self):
        super().__init__()
self.target = None # Type: target_destination
self.LookTime = "0.5" # Type: string
self.FieldOfView = "0.9" # Type: string
self.Timeout = 0 # Type: float
@staticmethod
def from_dict(instance, entity_data: dict):
Trigger.from_dict(instance, entity_data)
instance.target = entity_data.get('target', None) # Type: target_destination
instance.LookTime = entity_data.get('looktime', "0.5") # Type: string
instance.FieldOfView = entity_data.get('fieldofview', "0.9") # Type: string
instance.Timeout = float(entity_data.get('timeout', 0)) # Type: float
class trigger_push(Trigger):
def __init__(self):
        super().__init__()
self.pushdir = [0.0, 0.0, 0.0] # Type: angle
self.speed = 40 # Type: integer
self.alternateticksfix = 0 # Type: float
@staticmethod
def from_dict(instance, entity_data: dict):
Trigger.from_dict(instance, entity_data)
instance.pushdir = parse_float_vector(entity_data.get('pushdir', "0 0 0")) # Type: angle
instance.speed = parse_source_value(entity_data.get('speed', 40)) # Type: integer
instance.alternateticksfix = float(entity_data.get('alternateticksfix', 0)) # Type: float
class trigger_wind(Trigger, Angles):
def __init__(self):
        Trigger.__init__(self)
        Angles.__init__(self)
self.Speed = 200 # Type: integer
self.SpeedNoise = None # Type: integer
self.DirectionNoise = 10 # Type: integer
self.HoldTime = None # Type: integer
self.HoldNoise = None # Type: integer
@staticmethod
def from_dict(instance, entity_data: dict):
Trigger.from_dict(instance, entity_data)
Angles.from_dict(instance, entity_data)
instance.Speed = parse_source_value(entity_data.get('speed', 200)) # Type: integer
instance.SpeedNoise = parse_source_value(entity_data.get('speednoise', 0)) # Type: integer
instance.DirectionNoise = parse_source_value(entity_data.get('directionnoise', 10)) # Type: integer
instance.HoldTime = parse_source_value(entity_data.get('holdtime', 0)) # Type: integer
instance.HoldNoise = parse_source_value(entity_data.get('holdnoise', 0)) # Type: integer
class trigger_impact(Targetname, Origin, Angles):
def __init__(self):
        Targetname.__init__(self)
        Origin.__init__(self)
        Angles.__init__(self)
self.Magnitude = 200 # Type: float
self.noise = 0.1 # Type: float
self.viewkick = 0.05 # Type: float
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Origin.from_dict(instance, entity_data)
Angles.from_dict(instance, entity_data)
instance.Magnitude = float(entity_data.get('magnitude', 200)) # Type: float
instance.noise = float(entity_data.get('noise', 0.1)) # Type: float
instance.viewkick = float(entity_data.get('viewkick', 0.05)) # Type: float
class trigger_proximity(Trigger):
def __init__(self):
        super().__init__()
self.measuretarget = None # Type: target_destination
self.radius = "256" # Type: string
@staticmethod
def from_dict(instance, entity_data: dict):
Trigger.from_dict(instance, entity_data)
instance.measuretarget = entity_data.get('measuretarget', None) # Type: target_destination
instance.radius = entity_data.get('radius', "256") # Type: string
class trigger_teleport(Trigger):
def __init__(self):
        super().__init__()
self.target = None # Type: target_destination
self.landmark = None # Type: target_destination
@staticmethod
def from_dict(instance, entity_data: dict):
Trigger.from_dict(instance, entity_data)
instance.target = entity_data.get('target', None) # Type: target_destination
instance.landmark = entity_data.get('landmark', None) # Type: target_destination
class trigger_teleport_relative(Trigger):
def __init__(self):
        super().__init__()
self.teleportoffset = [0.0, 0.0, 0.0] # Type: vector
@staticmethod
def from_dict(instance, entity_data: dict):
Trigger.from_dict(instance, entity_data)
instance.teleportoffset = parse_float_vector(entity_data.get('teleportoffset', "0 0 0")) # Type: vector
class trigger_transition(Targetname):
def __init__(self):
        super().__init__()
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
class trigger_serverragdoll(Targetname):
def __init__(self):
        super().__init__()
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
class ai_speechfilter(Targetname, EnableDisable, ResponseContext):
def __init__(self):
        Targetname.__init__(self)
        EnableDisable.__init__(self)
        ResponseContext.__init__(self)
self.origin = [0, 0, 0]
self.subject = None # Type: target_destination
self.IdleModifier = 1.0 # Type: float
self.NeverSayHello = None # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
EnableDisable.from_dict(instance, entity_data)
ResponseContext.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.subject = entity_data.get('subject', None) # Type: target_destination
instance.IdleModifier = float(entity_data.get('idlemodifier', 1.0)) # Type: float
instance.NeverSayHello = entity_data.get('neversayhello', None) # Type: choices
class water_lod_control(Targetname):
icon_sprite = "editor/waterlodcontrol.vmt"
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.cheapwaterstartdistance = 1000 # Type: float
self.cheapwaterenddistance = 2000 # Type: float
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.cheapwaterstartdistance = float(entity_data.get('cheapwaterstartdistance', 1000)) # Type: float
instance.cheapwaterenddistance = float(entity_data.get('cheapwaterenddistance', 2000)) # Type: float
class info_camera_link(Targetname):
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.target = None # Type: target_destination
self.PointCamera = None # Type: target_destination
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.target = entity_data.get('target', None) # Type: target_destination
instance.PointCamera = entity_data.get('pointcamera', None) # Type: target_destination
class logic_measure_movement(Targetname):
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.MeasureTarget = None # Type: target_destination
self.MeasureReference = None # Type: target_destination
self.Target = None # Type: target_destination
self.TargetReference = None # Type: target_destination
self.TargetScale = 1 # Type: float
self.MeasureType = None # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.MeasureTarget = entity_data.get('measuretarget', None) # Type: target_destination
instance.MeasureReference = entity_data.get('measurereference', None) # Type: target_destination
instance.Target = entity_data.get('target', None) # Type: target_destination
instance.TargetReference = entity_data.get('targetreference', None) # Type: target_destination
instance.TargetScale = float(entity_data.get('targetscale', 1)) # Type: float
instance.MeasureType = entity_data.get('measuretype', None) # Type: choices
class npc_furniture(Parentname, BaseNPC):
def __init__(self):
        Parentname.__init__(self)
        BaseNPC.__init__(self)
self.origin = [0, 0, 0]
self.model = None # Type: studio
@staticmethod
def from_dict(instance, entity_data: dict):
Parentname.from_dict(instance, entity_data)
BaseNPC.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.model = entity_data.get('model', None) # Type: studio
class env_credits(Targetname):
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
class material_modify_control(Parentname, Targetname):
def __init__(self):
        Parentname.__init__(self)
        Targetname.__init__(self)
self.origin = [0, 0, 0]
self.materialName = None # Type: string
self.materialVar = None # Type: string
@staticmethod
def from_dict(instance, entity_data: dict):
Parentname.from_dict(instance, entity_data)
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.materialName = entity_data.get('materialname', None) # Type: string
instance.materialVar = entity_data.get('materialvar', None) # Type: string
class point_devshot_camera(Angles):
viewport_model = "models/editor/camera.mdl"
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.cameraname = None # Type: string
self.FOV = 75 # Type: integer
@staticmethod
def from_dict(instance, entity_data: dict):
Angles.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.cameraname = entity_data.get('cameraname', None) # Type: string
instance.FOV = parse_source_value(entity_data.get('fov', 75)) # Type: integer
class logic_playerproxy(Targetname, DamageFilter):
def __init__(self):
        Targetname.__init__(self)
        DamageFilter.__init__(self)
self.origin = [0, 0, 0]
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
DamageFilter.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
class env_spritetrail(Parentname, Targetname):
def __init__(self):
        Parentname.__init__(self)
        Targetname.__init__(self)
self.origin = [0, 0, 0]
self.lifetime = 0.5 # Type: float
self.startwidth = 8.0 # Type: float
self.endwidth = 1.0 # Type: float
self.spritename = "sprites/bluelaser1.vmt" # Type: string
self.renderamt = 255 # Type: integer
self.rendercolor = [255, 255, 255] # Type: color255
self.rendermode = "CHOICES NOT SUPPORTED" # Type: choices
@staticmethod
def from_dict(instance, entity_data: dict):
Parentname.from_dict(instance, entity_data)
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.lifetime = float(entity_data.get('lifetime', 0.5)) # Type: float
instance.startwidth = float(entity_data.get('startwidth', 8.0)) # Type: float
instance.endwidth = float(entity_data.get('endwidth', 1.0)) # Type: float
instance.spritename = entity_data.get('spritename', "sprites/bluelaser1.vmt") # Type: string
instance.renderamt = parse_source_value(entity_data.get('renderamt', 255)) # Type: integer
instance.rendercolor = parse_int_vector(entity_data.get('rendercolor', "255 255 255")) # Type: color255
instance.rendermode = entity_data.get('rendermode', "CHOICES NOT SUPPORTED") # Type: choices
class env_projectedtexture(Targetname, Parentname, Angles):
def __init__(self):
        Targetname.__init__(self)
        Parentname.__init__(self)
        Angles.__init__(self)
self.origin = [0, 0, 0]
self.target = None # Type: target_destination
self.lightfov = 90.0 # Type: float
self.nearz = 4.0 # Type: float
self.farz = 750.0 # Type: float
self.enableshadows = None # Type: choices
self.shadowquality = "CHOICES NOT SUPPORTED" # Type: choices
self.lightonlytarget = None # Type: choices
self.lightworld = "CHOICES NOT SUPPORTED" # Type: choices
self.lightcolor = [255, 255, 255, 200] # Type: color255
self.cameraspace = None # Type: integer
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
Angles.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.target = entity_data.get('target', None) # Type: target_destination
instance.lightfov = float(entity_data.get('lightfov', 90.0)) # Type: float
instance.nearz = float(entity_data.get('nearz', 4.0)) # Type: float
instance.farz = float(entity_data.get('farz', 750.0)) # Type: float
instance.enableshadows = entity_data.get('enableshadows', None) # Type: choices
instance.shadowquality = entity_data.get('shadowquality', "CHOICES NOT SUPPORTED") # Type: choices
instance.lightonlytarget = entity_data.get('lightonlytarget', None) # Type: choices
instance.lightworld = entity_data.get('lightworld', "CHOICES NOT SUPPORTED") # Type: choices
instance.lightcolor = parse_int_vector(entity_data.get('lightcolor', "255 255 255 200")) # Type: color255
instance.cameraspace = parse_source_value(entity_data.get('cameraspace', 0)) # Type: integer
class func_reflective_glass(func_brush):
def __init__(self):
        super().__init__()
@staticmethod
def from_dict(instance, entity_data: dict):
func_brush.from_dict(instance, entity_data)
class env_particle_performance_monitor(Targetname):
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
class npc_puppet(Studiomodel, Parentname, BaseNPC):
def __init__(self):
        BaseNPC.__init__(self)
        Targetname.__init__(self)
        Studiomodel.__init__(self)
        Parentname.__init__(self)
self.origin = [0, 0, 0]
self.animationtarget = None # Type: target_source
self.attachmentname = None # Type: string
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
Studiomodel.from_dict(instance, entity_data)
Parentname.from_dict(instance, entity_data)
BaseNPC.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.animationtarget = entity_data.get('animationtarget', None) # Type: target_source
instance.attachmentname = entity_data.get('attachmentname', None) # Type: string
class point_gamestats_counter(Targetname, EnableDisable, Origin):
def __init__(self):
        Targetname.__init__(self)
        EnableDisable.__init__(self)
        Origin.__init__(self)
self.origin = [0, 0, 0]
self.Name = None # Type: string
@staticmethod
def from_dict(instance, entity_data: dict):
Targetname.from_dict(instance, entity_data)
EnableDisable.from_dict(instance, entity_data)
Origin.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.Name = entity_data.get('name', None) # Type: string
class func_instance(Angles):
def __init__(self):
        super().__init__()
self.origin = [0, 0, 0]
self.targetname = None # Type: target_source
self.file = None # Type: instance_file
self.fixup_style = None # Type: choices
self.replace01 = None # Type: instance_variable
self.replace02 = None # Type: instance_variable
self.replace03 = None # Type: instance_variable
self.replace04 = None # Type: instance_variable
self.replace05 = None # Type: instance_variable
self.replace06 = None # Type: instance_variable
self.replace07 = None # Type: instance_variable
self.replace08 = None # Type: instance_variable
self.replace09 = None # Type: instance_variable
self.replace10 = None # Type: instance_variable
@staticmethod
def from_dict(instance, entity_data: dict):
Angles.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.targetname = entity_data.get('targetname', None) # Type: target_source
instance.file = entity_data.get('file', None) # Type: instance_file
instance.fixup_style = entity_data.get('fixup_style', None) # Type: choices
instance.replace01 = entity_data.get('replace01', None) # Type: instance_variable
instance.replace02 = entity_data.get('replace02', None) # Type: instance_variable
instance.replace03 = entity_data.get('replace03', None) # Type: instance_variable
instance.replace04 = entity_data.get('replace04', None) # Type: instance_variable
instance.replace05 = entity_data.get('replace05', None) # Type: instance_variable
instance.replace06 = entity_data.get('replace06', None) # Type: instance_variable
instance.replace07 = entity_data.get('replace07', None) # Type: instance_variable
instance.replace08 = entity_data.get('replace08', None) # Type: instance_variable
instance.replace09 = entity_data.get('replace09', None) # Type: instance_variable
instance.replace10 = entity_data.get('replace10', None) # Type: instance_variable
class func_instance_parms(Base):
def __init__(self):
super().__init__()
self.origin = [0, 0, 0]
self.parm1 = None # Type: instance_parm
self.parm2 = None # Type: instance_parm
self.parm3 = None # Type: instance_parm
self.parm4 = None # Type: instance_parm
self.parm5 = None # Type: instance_parm
self.parm6 = None # Type: instance_parm
self.parm7 = None # Type: instance_parm
self.parm8 = None # Type: instance_parm
self.parm9 = None # Type: instance_parm
self.parm10 = None # Type: instance_parm
@staticmethod
def from_dict(instance, entity_data: dict):
Base.from_dict(instance, entity_data)
instance.origin = parse_float_vector(entity_data.get('origin', "0 0 0"))
instance.parm1 = entity_data.get('parm1', None) # Type: instance_parm
instance.parm2 = entity_data.get('parm2', None) # Type: instance_parm
instance.parm3 = entity_data.get('parm3', None) # Type: instance_parm
instance.parm4 = entity_data.get('parm4', None) # Type: instance_parm
instance.parm5 = entity_data.get('parm5', None) # Type: instance_parm
instance.parm6 = entity_data.get('parm6', None) # Type: instance_parm
instance.parm7 = entity_data.get('parm7', None) # Type: instance_parm
instance.parm8 = entity_data.get('parm8', None) # Type: instance_parm
instance.parm9 = entity_data.get('parm9', None) # Type: instance_parm
instance.parm10 = entity_data.get('parm10', None) # Type: instance_parm
class trigger_apply_impulse(Trigger):
def __init__(self):
        super().__init__()
self.impulse_dir = [0.0, 0.0, 0.0] # Type: angle
self.force = 300 # Type: float
@staticmethod
def from_dict(instance, entity_data: dict):
Trigger.from_dict(instance, entity_data)
instance.impulse_dir = parse_float_vector(entity_data.get('impulse_dir', "0 0 0")) # Type: angle
instance.force = float(entity_data.get('force', 300)) # Type: float
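# Registry mapping FGD classnames (and shared base classes) to their Python
# handlers; parsed entity dictionaries can be dispatched through this table.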
entity_class_handle = {
'Angles': Angles,
'Origin': Origin,
'Studiomodel': Studiomodel,
'BasePlat': BasePlat,
'Targetname': Targetname,
'Parentname': Parentname,
'BaseBrush': BaseBrush,
'EnableDisable': EnableDisable,
'RenderFxChoices': RenderFxChoices,
'Shadow': Shadow,
'RenderFields': RenderFields,
'DXLevelChoice': DXLevelChoice,
'Inputfilter': Inputfilter,
'Global': Global,
'EnvGlobal': EnvGlobal,
'DamageFilter': DamageFilter,
'ResponseContext': ResponseContext,
'Breakable': Breakable,
'BreakableBrush': BreakableBrush,
'BreakableProp': BreakableProp,
'BaseNPC': BaseNPC,
'info_npc_spawn_destination': info_npc_spawn_destination,
'BaseNPCMaker': BaseNPCMaker,
'npc_template_maker': npc_template_maker,
'BaseHelicopter': BaseHelicopter,
'PlayerClass': PlayerClass,
'Light': Light,
'Node': Node,
'HintNode': HintNode,
'TriggerOnce': TriggerOnce,
'Trigger': Trigger,
'worldbase': worldbase,
'worldspawn': worldspawn,
'ambient_generic': ambient_generic,
'func_lod': func_lod,
'env_zoom': env_zoom,
'env_screenoverlay': env_screenoverlay,
'env_screeneffect': env_screeneffect,
'env_texturetoggle': env_texturetoggle,
'env_splash': env_splash,
'env_particlelight': env_particlelight,
'env_sun': env_sun,
'game_ragdoll_manager': game_ragdoll_manager,
'game_gib_manager': game_gib_manager,
'env_lightglow': env_lightglow,
'env_smokestack': env_smokestack,
'env_fade': env_fade,
'env_player_surface_trigger': env_player_surface_trigger,
'env_tonemap_controller': env_tonemap_controller,
'func_useableladder': func_useableladder,
'func_ladderendpoint': func_ladderendpoint,
'info_ladder_dismount': info_ladder_dismount,
'func_areaportalwindow': func_areaportalwindow,
'func_wall': func_wall,
'func_clip_vphysics': func_clip_vphysics,
'func_brush': func_brush,
'vgui_screen_base': vgui_screen_base,
'vgui_screen': vgui_screen,
'vgui_slideshow_display': vgui_slideshow_display,
'cycler': cycler,
'gibshooterbase': gibshooterbase,
'env_beam': env_beam,
'env_beverage': env_beverage,
'env_embers': env_embers,
'env_funnel': env_funnel,
'env_blood': env_blood,
'env_bubbles': env_bubbles,
'env_explosion': env_explosion,
'env_smoketrail': env_smoketrail,
'env_physexplosion': env_physexplosion,
'env_physimpact': env_physimpact,
'env_fire': env_fire,
'env_firesource': env_firesource,
'env_firesensor': env_firesensor,
'env_entity_igniter': env_entity_igniter,
'env_fog_controller': env_fog_controller,
'env_steam': env_steam,
'env_laser': env_laser,
'env_message': env_message,
'env_hudhint': env_hudhint,
'env_shake': env_shake,
'env_viewpunch': env_viewpunch,
'env_rotorwash_emitter': env_rotorwash_emitter,
'gibshooter': gibshooter,
'env_shooter': env_shooter,
'env_rotorshooter': env_rotorshooter,
'env_soundscape_proxy': env_soundscape_proxy,
'env_soundscape': env_soundscape,
'env_soundscape_triggerable': env_soundscape_triggerable,
'env_spark': env_spark,
'env_sprite': env_sprite,
'env_sprite_oriented': env_sprite_oriented,
'env_wind': env_wind,
'sky_camera': sky_camera,
'BaseSpeaker': BaseSpeaker,
'game_weapon_manager': game_weapon_manager,
'game_end': game_end,
'game_player_equip': game_player_equip,
'game_player_team': game_player_team,
'game_score': game_score,
'game_text': game_text,
'point_enable_motion_fixup': point_enable_motion_fixup,
'point_message': point_message,
'point_spotlight': point_spotlight,
'point_tesla': point_tesla,
'point_clientcommand': point_clientcommand,
'point_servercommand': point_servercommand,
'point_bonusmaps_accessor': point_bonusmaps_accessor,
'game_ui': game_ui,
'game_zone_player': game_zone_player,
'infodecal': infodecal,
'info_projecteddecal': info_projecteddecal,
'info_no_dynamic_shadow': info_no_dynamic_shadow,
'info_player_start': info_player_start,
'info_overlay': info_overlay,
'info_overlay_transition': info_overlay_transition,
'info_intermission': info_intermission,
'info_landmark': info_landmark,
'info_null': info_null,
'info_target': info_target,
'info_particle_system': info_particle_system,
'phys_ragdollmagnet': phys_ragdollmagnet,
'info_lighting': info_lighting,
'info_teleport_destination': info_teleport_destination,
'info_node': info_node,
'info_node_hint': info_node_hint,
'info_node_air': info_node_air,
'info_node_air_hint': info_node_air_hint,
'info_hint': info_hint,
'info_node_link': info_node_link,
'info_node_link_controller': info_node_link_controller,
'info_radial_link_controller': info_radial_link_controller,
'info_node_climb': info_node_climb,
'light': light,
'light_environment': light_environment,
'light_spot': light_spot,
'light_dynamic': light_dynamic,
'shadow_control': shadow_control,
'color_correction': color_correction,
'color_correction_volume': color_correction_volume,
'KeyFrame': KeyFrame,
'Mover': Mover,
'func_movelinear': func_movelinear,
'func_water_analog': func_water_analog,
'func_rotating': func_rotating,
'func_platrot': func_platrot,
'keyframe_track': keyframe_track,
'move_keyframed': move_keyframed,
'move_track': move_track,
'RopeKeyFrame': RopeKeyFrame,
'keyframe_rope': keyframe_rope,
'move_rope': move_rope,
'Button': Button,
'func_button': func_button,
'func_rot_button': func_rot_button,
'momentary_rot_button': momentary_rot_button,
'Door': Door,
'func_door': func_door,
'func_door_rotating': func_door_rotating,
'prop_door_rotating': prop_door_rotating,
'env_cubemap': env_cubemap,
'BModelParticleSpawner': BModelParticleSpawner,
'func_dustmotes': func_dustmotes,
'func_smokevolume': func_smokevolume,
'func_dustcloud': func_dustcloud,
'env_dustpuff': env_dustpuff,
'env_particlescript': env_particlescript,
'env_effectscript': env_effectscript,
'logic_auto': logic_auto,
'point_viewcontrol': point_viewcontrol,
'point_posecontroller': point_posecontroller,
'logic_compare': logic_compare,
'logic_branch': logic_branch,
'logic_branch_listener': logic_branch_listener,
'logic_case': logic_case,
'logic_multicompare': logic_multicompare,
'logic_relay': logic_relay,
'logic_timer': logic_timer,
'hammer_updateignorelist': hammer_updateignorelist,
'logic_collision_pair': logic_collision_pair,
'env_microphone': env_microphone,
'math_remap': math_remap,
'math_colorblend': math_colorblend,
'math_counter': math_counter,
'logic_lineto': logic_lineto,
'logic_navigation': logic_navigation,
'logic_autosave': logic_autosave,
'logic_active_autosave': logic_active_autosave,
'point_template': point_template,
'env_entity_maker': env_entity_maker,
'BaseFilter': BaseFilter,
'filter_multi': filter_multi,
'filter_activator_name': filter_activator_name,
'filter_activator_class': filter_activator_class,
'filter_activator_mass_greater': filter_activator_mass_greater,
'filter_damage_type': filter_damage_type,
'filter_enemy': filter_enemy,
'point_anglesensor': point_anglesensor,
'point_angularvelocitysensor': point_angularvelocitysensor,
'point_velocitysensor': point_velocitysensor,
'point_proximity_sensor': point_proximity_sensor,
'point_teleport': point_teleport,
'point_hurt': point_hurt,
'point_playermoveconstraint': point_playermoveconstraint,
'func_physbox': func_physbox,
'TwoObjectPhysics': TwoObjectPhysics,
'phys_constraintsystem': phys_constraintsystem,
'phys_keepupright': phys_keepupright,
'physics_cannister': physics_cannister,
'info_constraint_anchor': info_constraint_anchor,
'info_mass_center': info_mass_center,
'phys_spring': phys_spring,
'phys_hinge': phys_hinge,
'phys_ballsocket': phys_ballsocket,
'phys_constraint': phys_constraint,
'phys_pulleyconstraint': phys_pulleyconstraint,
'phys_slideconstraint': phys_slideconstraint,
'phys_lengthconstraint': phys_lengthconstraint,
'phys_ragdollconstraint': phys_ragdollconstraint,
'phys_convert': phys_convert,
'ForceController': ForceController,
'phys_thruster': phys_thruster,
'phys_torque': phys_torque,
'phys_motor': phys_motor,
'phys_magnet': phys_magnet,
'prop_detail_base': prop_detail_base,
'prop_static_base': prop_static_base,
'BaseFadeProp': BaseFadeProp,
'prop_dynamic_base': prop_dynamic_base,
'prop_detail': prop_detail,
'prop_static': prop_static,
'prop_dynamic': prop_dynamic,
'prop_dynamic_override': prop_dynamic_override,
'BasePropPhysics': BasePropPhysics,
'prop_physics_override': prop_physics_override,
'prop_physics': prop_physics,
'prop_physics_multiplayer': prop_physics_multiplayer,
'prop_ragdoll': prop_ragdoll,
'prop_dynamic_ornament': prop_dynamic_ornament,
'func_areaportal': func_areaportal,
'func_occluder': func_occluder,
'func_breakable': func_breakable,
'func_breakable_surf': func_breakable_surf,
'func_conveyor': func_conveyor,
'func_detail': func_detail,
'func_viscluster': func_viscluster,
'func_illusionary': func_illusionary,
'func_precipitation': func_precipitation,
'func_wall_toggle': func_wall_toggle,
'func_guntarget': func_guntarget,
'func_fish_pool': func_fish_pool,
'PlatSounds': PlatSounds,
'Trackchange': Trackchange,
'BaseTrain': BaseTrain,
'func_trackautochange': func_trackautochange,
'func_trackchange': func_trackchange,
'func_tracktrain': func_tracktrain,
'func_tanktrain': func_tanktrain,
'func_traincontrols': func_traincontrols,
'tanktrain_aitarget': tanktrain_aitarget,
'tanktrain_ai': tanktrain_ai,
'path_track': path_track,
'test_traceline': test_traceline,
'trigger_autosave': trigger_autosave,
'trigger_changelevel': trigger_changelevel,
'trigger_gravity': trigger_gravity,
'trigger_playermovement': trigger_playermovement,
'trigger_soundscape': trigger_soundscape,
'trigger_hurt': trigger_hurt,
'trigger_remove': trigger_remove,
'trigger_multiple': trigger_multiple,
'trigger_once': trigger_once,
'trigger_look': trigger_look,
'trigger_push': trigger_push,
'trigger_wind': trigger_wind,
'trigger_impact': trigger_impact,
'trigger_proximity': trigger_proximity,
'trigger_teleport': trigger_teleport,
'trigger_teleport_relative': trigger_teleport_relative,
'trigger_transition': trigger_transition,
'trigger_serverragdoll': trigger_serverragdoll,
'ai_speechfilter': ai_speechfilter,
'water_lod_control': water_lod_control,
'info_camera_link': info_camera_link,
'logic_measure_movement': logic_measure_movement,
'npc_furniture': npc_furniture,
'env_credits': env_credits,
'material_modify_control': material_modify_control,
'point_devshot_camera': point_devshot_camera,
'logic_playerproxy': logic_playerproxy,
'env_spritetrail': env_spritetrail,
'env_projectedtexture': env_projectedtexture,
'func_reflective_glass': func_reflective_glass,
'env_particle_performance_monitor': env_particle_performance_monitor,
'npc_puppet': npc_puppet,
'point_gamestats_counter': point_gamestats_counter,
'func_instance': func_instance,
'func_instance_parms': func_instance_parms,
'trigger_apply_impulse': trigger_apply_impulse,
}
```
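The `entity_class_handle` registry above suggests a simple dispatch pattern for turning parsed keyvalue dictionaries into typed entity objects. A minimal sketch, assuming `entity_data` comes from an entity-lump or VMF parser and carries a `classname` key (the `make_entity` helper is hypothetical, not part of the module):
```python
def make_entity(entity_data: dict):
    """Instantiate and populate the handler class for one parsed entity."""
    handler = entity_class_handle.get(entity_data.get('classname'))
    if handler is None:
        return None  # unknown classname; the caller can keep the raw dict
    instance = handler()
    handler.from_dict(instance, entity_data)
    return instance

# Hypothetical entity straight from an entity lump:
trail = make_entity({'classname': 'env_spritetrail', 'origin': '16 32 0', 'lifetime': '1.5'})
```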
#### File: source1/mdl/mdl_file.py
```python
import math
import traceback
from typing import List
import numpy as np
from ...utilities.byte_io_mdl import ByteIO
from ...source_shared.base import Base
from .flex_expressions import *
from .structs.header import Header
from .structs.bone import Bone
from .structs.texture import Material
from .structs.flex import FlexController, FlexRule, FlexControllerUI, FlexOpType
from .structs.anim_desc import AnimDesc
from .structs.sequence import Sequence
from .structs.attachment import Attachment
from .structs.bodygroup import BodyPart
class _AnimBlocks:
def __init__(self):
self.name = ''
self.blocks = []
class Mdl(Base):
def __init__(self, filepath):
self.store_value("MDL", self)
self.reader = ByteIO(filepath)
self.header = Header()
self.bones = [] # type: List[Bone]
self.skin_groups = [] # type: List[List[str]]
self.materials = [] # type: List[Material]
self.materials_paths = []
self.flex_names = [] # type:List[str]
self.flex_controllers = [] # type:List[FlexController]
self.flex_ui_controllers = [] # type:List[FlexControllerUI]
self.flex_rules = [] # type:List[FlexRule]
self.body_parts = [] # type:List[BodyPart]
self.attachments = [] # type:List[Attachment]
self.anim_descs = [] # type:List[AnimDesc]
self.sequences = [] # type:List[Sequence]
self.anim_block = _AnimBlocks()
self.bone_table_by_name = []
@staticmethod
def calculate_crc(buffer):
correct_buffer_size = math.ceil(len(buffer) / 4) * 4
buffer += b'\x00' * (correct_buffer_size - len(buffer))
buffer: np.ndarray = np.frombuffer(buffer, np.uint32).copy()
orig_checksum = buffer[2]
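        # Zero out the header fields that are excluded from the checksum
        # (the stored checksum itself plus several mutable header regions).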
buffer[8 // 4] = 0
buffer[76 // 4] = 0
buffer[1432 // 4:1432 // 4 + 2] = 0
buffer[1520 // 4:(1520 + 36) // 4] = 0
buffer[1604 // 4] = 0
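        # Debug dump of the zeroed buffer, left over from development.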
with open('shit.bin', 'wb') as f:
f.write(buffer.tobytes())
new_checksum = 0
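        # Rolling checksum: add bit 27 of the running value to each dword,
        # then accumulate as (tmp + 2 * checksum), truncated to 32 bits.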
for i in range(buffer.shape[0]):
tmp = buffer[i] + (new_checksum >> 27 & 1)
new_checksum = (tmp & 0xFFFFFFFF) + ((2 * new_checksum) & 0xFFFFFFFF)
new_checksum &= 0xFFFFFFFF
print(f'{i * 4 + 4}: {new_checksum:08x} : {new_checksum}')
buffer[2] = new_checksum
print(orig_checksum, new_checksum)
def read(self):
self.header.read(self.reader)
self.reader.seek(self.header.bone_offset)
for _ in range(self.header.bone_count):
bone = Bone()
bone.read(self.reader)
self.bones.append(bone)
self.reader.seek(self.header.texture_offset)
for _ in range(self.header.texture_count):
texture = Material()
texture.read(self.reader)
self.materials.append(texture)
self.reader.seek(self.header.texture_path_offset)
for _ in range(self.header.texture_path_count):
self.materials_paths.append(self.reader.read_source1_string(0))
self.reader.seek(self.header.skin_family_offset)
for _ in range(self.header.skin_family_count):
skin_group = []
for _ in range(self.header.skin_reference_count):
texture_index = self.reader.read_uint16()
skin_group.append(self.materials[texture_index].name)
self.skin_groups.append(skin_group)
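        # Keep only the leading materials that actually differ between skin
        # groups; entries from the first shared material onward are trimmed.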
diff_start = 0
for skin_info in self.skin_groups[1:]:
for n, (a, b) in enumerate(zip(self.skin_groups[0], skin_info)):
if a == b:
diff_start = max(n, diff_start)
break
for n, skin_info in enumerate(self.skin_groups):
self.skin_groups[n] = skin_info[:diff_start]
self.reader.seek(self.header.flex_desc_offset)
for _ in range(self.header.flex_desc_count):
self.flex_names.append(self.reader.read_source1_string(self.reader.tell()))
self.reader.seek(self.header.flex_controller_offset)
for _ in range(self.header.flex_controller_count):
controller = FlexController()
controller.read(self.reader)
self.flex_controllers.append(controller)
self.reader.seek(self.header.flex_rule_offset)
for _ in range(self.header.flex_rule_count):
rule = FlexRule()
rule.read(self.reader)
self.flex_rules.append(rule)
self.reader.seek(self.header.local_attachment_offset)
for _ in range(self.header.local_attachment_count):
attachment = Attachment()
attachment.read(self.reader)
self.attachments.append(attachment)
self.reader.seek(self.header.flex_controller_ui_offset)
for _ in range(self.header.flex_controller_ui_count):
flex_controller = FlexControllerUI()
flex_controller.read(self.reader)
self.flex_ui_controllers.append(flex_controller)
self.reader.seek(self.header.body_part_offset)
for _ in range(self.header.body_part_count):
body_part = BodyPart()
body_part.read(self.reader)
self.body_parts.append(body_part)
# self.reader.seek(self.header.local_animation_offset)
# for _ in range(self.header.local_animation_count):
# anim_desc = AnimDesc()
# anim_desc.read(self.reader)
# self.anim_descs.append(anim_desc)
#
# self.reader.seek(self.header.local_sequence_offset)
# for _ in range(self.header.local_sequence_count):
# seq = Sequence()
# seq.read(self.reader)
# self.sequences.append(seq)
# self.anim_block.name = self.reader.read_from_offset(self.header.anim_block_name_offset,
# self.reader.read_ascii_string)
# self.reader.seek(self.header.anim_block_offset)
# for _ in range(self.header.anim_block_count):
# self.anim_block.blocks.append(self.reader.read_fmt('2i'))
#
# if self.header.bone_table_by_name_offset and self.bones:
# self.reader.seek(self.header.bone_table_by_name_offset)
# self.bone_table_by_name = [self.reader.read_uint8() for _ in range(len(self.bones))]
# for anim
def rebuild_flex_rules(self):
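        # Each flex rule is a small postfix (RPN) program; replay its ops
        # symbolically, pushing expression nodes from flex_expressions onto a stack.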
rules = {}
for rule in self.flex_rules:
stack = []
try:
for op in rule.flex_ops:
flex_op = op.op
if flex_op == FlexOpType.CONST:
stack.append(Value(op.value))
elif flex_op == FlexOpType.FETCH1:
stack.append(FetchController(self.flex_controllers[op.index].name))
elif flex_op == FlexOpType.FETCH2:
stack.append(FetchFlex(self.flex_names[op.index]))
elif flex_op == FlexOpType.ADD:
right = stack.pop(-1)
left = stack.pop(-1)
stack.append(Add(left, right))
elif flex_op == FlexOpType.SUB:
right = stack.pop(-1)
left = stack.pop(-1)
stack.append(Sub(left, right))
elif flex_op == FlexOpType.MUL:
right = stack.pop(-1)
left = stack.pop(-1)
stack.append(Mul(left, right))
elif flex_op == FlexOpType.DIV:
right = stack.pop(-1)
left = stack.pop(-1)
stack.append(Div(left, right))
elif flex_op == FlexOpType.NEG:
stack.append(Neg(stack.pop(-1)))
elif flex_op == FlexOpType.MAX:
right = stack.pop(-1)
left = stack.pop(-1)
stack.append(Max(left, right))
elif flex_op == FlexOpType.MIN:
right = stack.pop(-1)
left = stack.pop(-1)
stack.append(Min(left, right))
elif flex_op == FlexOpType.COMBO:
count = op.index
values = [stack.pop(-1) for _ in range(count)]
combo = Combo(*values)
stack.append(combo)
elif flex_op == FlexOpType.DOMINATE:
count = op.index + 1
values = [stack.pop(-1) for _ in range(count)]
dom = Dominator(*values)
stack.append(dom)
elif flex_op == FlexOpType.TWO_WAY_0:
mx = Max(Add(FetchController(self.flex_controllers[op.index].name), Value(1.0)), Value(0.0))
mn = Min(mx, Value(1.0))
                        res = Sub(Value(1.0), mn)
stack.append(res)
elif flex_op == FlexOpType.TWO_WAY_1:
mx = Max(FetchController(self.flex_controllers[op.index].name), Value(0.0))
mn = Min(mx, Value(1.0))
stack.append(mn)
elif flex_op == FlexOpType.NWAY:
flex_cnt_value = int(stack.pop(-1).value)
flex_cnt = FetchController(self.flex_controllers[flex_cnt_value].name)
f_w = stack.pop(-1)
f_z = stack.pop(-1)
f_y = stack.pop(-1)
f_x = stack.pop(-1)
gtx = Min(Value(1.0), Neg(Min(Value(0.0), Sub(f_x, flex_cnt))))
lty = Min(Value(1.0), Neg(Min(Value(0.0), Sub(flex_cnt, f_y))))
remap_x = Min(Max(Div(Sub(flex_cnt, f_x), (Sub(f_y, f_x))), Value(0.0)), Value(1.0))
gtey = Neg(Sub(Min(Value(1.0), Neg(Min(Value(0.0), Sub(flex_cnt, f_y)))), Value(1.0)))
ltez = Neg(Sub(Min(Value(1.0), Neg(Min(Value(0.0), Sub(f_z, flex_cnt)))), Value(1.0)))
gtz = Min(Value(1.0), Neg(Min(Value(0.0), Sub(f_z, flex_cnt))))
ltw = Min(Value(1.0), Neg(Min(Value(0.0), Sub(flex_cnt, f_w))))
remap_z = Sub(Value(1.0),
Min(Max(Div(Sub(flex_cnt, f_z), (Sub(f_w, f_z))), Value(0.0)), Value(1.0)))
final_expr = Add(Add(Mul(Mul(gtx, lty), remap_x), Mul(gtey, ltez)), Mul(Mul(gtz, ltw), remap_z))
final_expr = Mul(final_expr, FetchController(self.flex_controllers[op.index].name))
stack.append(final_expr)
elif flex_op == FlexOpType.DME_UPPER_EYELID:
stack.pop(-1)
stack.pop(-1)
stack.pop(-1)
stack.append(Value(1.0))
elif flex_op == FlexOpType.DME_LOWER_EYELID:
stack.pop(-1)
stack.pop(-1)
stack.pop(-1)
stack.append(Value(1.0))
else:
print("Unknown OP", op)
if len(stack) > 1 or not stack:
print(f"failed to parse ({self.flex_names[rule.flex_index]}) flex rule")
print(stack)
continue
final_expr = stack.pop(-1)
# name = self.get_value('stereo_flexes').get(rule.flex_index, self.flex_names[rule.flex_index])
name = self.flex_names[rule.flex_index]
rules[name] = final_expr
except Exception as ex:
traceback.print_exc()
print(f"failed to parse ({self.flex_names[rule.flex_index]}) flex rule")
print(stack)
return rules
```
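A short usage sketch for the `Mdl` reader above; the model path is hypothetical, and `rebuild_flex_rules()` yields symbolic expression trees built from `flex_expressions`:
```python
mdl = Mdl('models/example/head.mdl')  # hypothetical path
mdl.read()
print(f'{len(mdl.bones)} bones, {len(mdl.materials)} materials, '
      f'{len(mdl.flex_rules)} flex rules')
for flex_name, expr in mdl.rebuild_flex_rules().items():
    print(flex_name, '=', expr)
```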
#### File: source2/resouce_types/valve_model.py
```python
from pathlib import Path
# noinspection PyUnresolvedReferences
import bpy
# noinspection PyUnresolvedReferences
from mathutils import Vector, Matrix, Quaternion, Euler
import math
from .valve_material import ValveCompiledMaterial
from .vavle_morph import ValveCompiledMorph
from ..utils.decode_animations import parse_anim_data
from ..blocks.vbib_block import VertexBuffer
from ..common import convert_normals
from ..source2 import ValveCompiledFile
import numpy as np
from ...bpy_utilities.utils import get_material, get_or_create_collection, get_new_unique_collection
from ...source_shared.content_manager import ContentManager
class ValveCompiledModel(ValveCompiledFile):
def __init__(self, path_or_file, re_use_meshes=False):
super().__init__(path_or_file)
if isinstance(path_or_file, (Path, str)):
ContentManager().scan_for_content(path_or_file)
self.re_use_meshes = re_use_meshes
self.strip_from_name = ''
self.lod_collections = {}
self.objects = []
data_block = self.get_data_block(block_name='DATA')
assert len(data_block) == 1
data_block = data_block[0]
self.name = data_block.data['m_name']
self.main_collection = None
self.armature = None
self.materials = []
def load_mesh(self, invert_uv, strip_from_name='',
parent_collection: bpy.types.Collection = None):
self.strip_from_name = strip_from_name
name = self.name.replace(self.strip_from_name, "")
self.main_collection = get_or_create_collection(name, parent_collection or bpy.context.scene.collection)
data_block = self.get_data_block(block_name='DATA')[0]
model_skeleton = data_block.data['m_modelSkeleton']
bone_names = model_skeleton['m_boneName']
if bone_names:
self.armature = self.build_armature()
self.objects.append(self.armature)
self.build_meshes(self.main_collection, self.armature, invert_uv)
self.load_materials()
def build_meshes(self, collection, armature, invert_uv: bool = True):
content_manager = ContentManager()
data_block = self.get_data_block(block_name='DATA')[0]
use_external_meshes = len(self.get_data_block(block_name='CTRL')) == 0
if use_external_meshes:
for mesh_index, mesh_ref in enumerate(data_block.data['m_refMeshes']):
if data_block.data['m_refLODGroupMasks'][mesh_index] & 1 == 0:
continue
mesh_ref_path = self.available_resources.get(mesh_ref, None) # type:Path
if mesh_ref_path:
mesh_ref_file = content_manager.find_file(mesh_ref_path)
if mesh_ref_file:
mesh = ValveCompiledFile(mesh_ref_file)
self.available_resources.update(mesh.available_resources)
mesh.read_block_info()
mesh.check_external_resources()
mesh_data_block = mesh.get_data_block(block_name="DATA")[0]
buffer_block = mesh.get_data_block(block_name="VBIB")[0]
name = mesh_ref_path.stem
vmorf_actual_path = mesh.available_resources.get(mesh_data_block.data['m_morphSet'],
None) # type:Path
morph_block = None
if vmorf_actual_path:
vmorf_path = content_manager.find_file(vmorf_actual_path)
if vmorf_path is not None:
morph = ValveCompiledMorph(vmorf_path)
morph.read_block_info()
morph.check_external_resources()
morph_block = morph.get_data_block(block_name="DATA")[0]
self.build_mesh(name, armature, collection,
mesh_data_block, buffer_block, data_block, morph_block,
invert_uv, mesh_index)
else:
control_block = self.get_data_block(block_name="CTRL")[0]
e_meshes = control_block.data['embedded_meshes']
for e_mesh in e_meshes:
name = e_mesh['name']
name = name.replace(self.strip_from_name, "")
data_block_index = e_mesh['data_block']
mesh_index = e_mesh['mesh_index']
if data_block.data['m_refLODGroupMasks'][mesh_index] & 1 == 0:
continue
buffer_block_index = e_mesh['vbib_block']
morph_block_index = e_mesh['morph_block']
mesh_data_block = self.get_data_block(block_id=data_block_index)
buffer_block = self.get_data_block(block_id=buffer_block_index)
morph_block = self.get_data_block(block_id=morph_block_index)
self.build_mesh(name, armature, collection,
mesh_data_block, buffer_block, data_block, morph_block,
invert_uv, mesh_index)
    # noinspection PyTypeChecker,PyUnresolvedReferences
def build_mesh(self, name, armature, collection,
mesh_data_block, buffer_block, data_block, morph_block,
invert_uv,
mesh_index):
morphs_available = morph_block is not None and morph_block.read_morphs()
if morphs_available:
flex_trunc = bpy.data.texts.get(f"{name}_flexes", None) or bpy.data.texts.new(f"{name}_flexes")
for flex in morph_block.data['m_morphDatas']:
if flex['m_name']:
flex_trunc.write(f"{flex['m_name'][:63]}->{flex['m_name']}\n")
for scene in mesh_data_block.data["m_sceneObjects"]:
draw_calls = scene["m_drawCalls"]
global_vertex_offset = 0
for draw_call in draw_calls:
self.materials.append(draw_call['m_material'])
material_name = Path(draw_call['m_material']).stem
model_name = name + "_" + material_name
used_copy = False
mesh_obj = None
if self.re_use_meshes:
mesh_obj_original = bpy.data.objects.get(model_name, None)
mesh_data_original = bpy.data.meshes.get(f'{model_name}_mesh', False)
if mesh_obj_original and mesh_data_original:
model_mesh = mesh_data_original.copy()
mesh_obj = mesh_obj_original.copy()
mesh_obj['skin_groups'] = mesh_obj_original['skin_groups']
mesh_obj['active_skin'] = mesh_obj_original['active_skin']
mesh_obj['model_type'] = 'S2'
mesh_obj.data = model_mesh
used_copy = True
if not self.re_use_meshes or not used_copy:
model_mesh = bpy.data.meshes.new(f'{model_name}_mesh')
mesh_obj = bpy.data.objects.new(f'{model_name}', model_mesh)
if data_block.data['m_materialGroups']:
default_skin = data_block.data['m_materialGroups'][0]
if draw_call['m_material'] in default_skin['m_materials']:
mat_id = default_skin['m_materials'].index(draw_call['m_material'])
mat_groups = {}
for skin_group in data_block.data['m_materialGroups']:
mat_groups[skin_group['m_name']] = skin_group['m_materials'][mat_id]
mesh_obj['active_skin'] = 'default'
mesh_obj['skin_groups'] = mat_groups
else:
mesh_obj['active_skin'] = 'default'
mesh_obj['skin_groups'] = []
material_name = Path(draw_call['m_material']).stem
mesh = mesh_obj.data # type:bpy.types.Mesh
self.objects.append(mesh_obj)
collection.objects.link(mesh_obj)
if armature:
modifier = mesh_obj.modifiers.new(
type="ARMATURE", name="Armature")
modifier.object = armature
if used_copy:
continue
get_material(material_name, mesh_obj)
base_vertex = draw_call['m_nBaseVertex']
vertex_count = draw_call['m_nVertexCount']
start_index = draw_call['m_nStartIndex'] // 3
index_count = draw_call['m_nIndexCount'] // 3
index_buffer = buffer_block.index_buffer[draw_call['m_indexBuffer']['m_hBuffer']]
vertex_buffer: VertexBuffer = buffer_block.vertex_buffer[draw_call['m_vertexBuffers'][0]['m_hBuffer']]
used_range = slice(base_vertex, base_vertex + vertex_count)
used_vertices = vertex_buffer.vertexes['POSITION'][used_range]
normals = vertex_buffer.vertexes['NORMAL'][used_range]
if normals.dtype.char == 'B' and normals.shape[1] == 4:
normals = convert_normals(normals)
mesh.from_pydata(used_vertices, [],
index_buffer.indexes[start_index:start_index + index_count].tolist())
mesh.update()
n = 0
for attrib in vertex_buffer.attributes:
if 'TEXCOORD' in attrib.name.upper():
uv_layer = vertex_buffer.vertexes[attrib.name].copy()
if uv_layer.shape[1] != 2:
continue
if invert_uv:
uv_layer[:, 1] = np.subtract(1, uv_layer[:, 1])
uv_data = mesh.uv_layers.new(name=attrib.name).data
vertex_indices = np.zeros((len(mesh.loops, )), dtype=np.uint32)
mesh.loops.foreach_get('vertex_index', vertex_indices)
new_uv_data = uv_layer[used_range][vertex_indices]
uv_data.foreach_set('uv', new_uv_data.flatten())
n += 1
if armature:
model_skeleton = data_block.data['m_modelSkeleton']
bone_names = model_skeleton['m_boneName']
remap_table = data_block.data['m_remappingTable']
remap_table_starts = data_block.data['m_remappingTableStarts']
remaps_start = remap_table_starts[mesh_index]
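                # Each mesh gets its own slice of the global bone remap table;
                # BLENDINDICES index into that slice, not the skeleton's bone list.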
new_bone_names = bone_names.copy()
weight_groups = {bone: mesh_obj.vertex_groups.new(name=bone) for bone in new_bone_names}
if 'BLENDWEIGHT' in vertex_buffer.attribute_names and 'BLENDINDICES' in vertex_buffer.attribute_names:
weights_array = vertex_buffer.vertexes["BLENDWEIGHT"] / 255
indices_array = vertex_buffer.vertexes["BLENDINDICES"]
else:
weights_array = []
indices_array = []
for n, bone_indices in enumerate(indices_array):
if len(weights_array) > 0:
weights = weights_array[n]
for bone_index, weight in zip(bone_indices, weights):
if weight > 0:
bone_name = new_bone_names[remap_table[remaps_start:][int(bone_index)]]
weight_groups[bone_name].add([n], weight, 'REPLACE')
else:
for bone_index in bone_indices:
bone_name = new_bone_names[remap_table[remaps_start:][int(bone_index)]]
weight_groups[bone_name].add([n], 1.0, 'REPLACE')
mesh.polygons.foreach_set("use_smooth", np.ones(len(mesh.polygons)))
mesh.normals_split_custom_set_from_vertices(normals)
mesh.use_auto_smooth = True
if morphs_available:
mesh_obj.shape_key_add(name='base')
                    # list.index() raises ValueError when the bundle type is missing,
                    # so guard with a membership test instead of comparing against -1.
                    if 'MORPH_BUNDLE_TYPE_POSITION_SPEED' in morph_block.data['m_bundleTypes']:
                        bundle_id = morph_block.data['m_bundleTypes'].index('MORPH_BUNDLE_TYPE_POSITION_SPEED')
for n, (flex_name, flex_data) in enumerate(morph_block.flex_data.items()):
print(f"Importing {flex_name} {n + 1}/{len(morph_block.flex_data)}")
if flex_name is None:
continue
shape = mesh_obj.shape_key_add(name=flex_name)
vertices = np.zeros((len(mesh.vertices) * 3,), dtype=np.float32)
mesh.vertices.foreach_get('co', vertices)
vertices = vertices.reshape((-1, 3))
pre_computed_data = np.add(
flex_data[bundle_id][global_vertex_offset:global_vertex_offset + vertex_count][:, :3],
vertices)
shape.data.foreach_set("co", pre_computed_data.reshape((-1,)))
global_vertex_offset += vertex_count
# noinspection PyUnresolvedReferences
def build_armature(self):
data_block = self.get_data_block(block_name='DATA')[0]
model_skeleton = data_block.data['m_modelSkeleton']
bone_names = model_skeleton['m_boneName']
bone_positions = model_skeleton['m_bonePosParent']
bone_rotations = model_skeleton['m_boneRotParent']
bone_parents = model_skeleton['m_nParent']
armature_obj = bpy.data.objects.new(self.name + "_ARM", bpy.data.armatures.new(self.name + "_ARM_DATA"))
armature_obj.show_in_front = True
self.main_collection.objects.link(armature_obj)
bpy.ops.object.select_all(action="DESELECT")
armature_obj.select_set(True)
bpy.context.view_layer.objects.active = armature_obj
armature_obj.rotation_euler = Euler([math.radians(180), 0, math.radians(90)])
armature = armature_obj.data
bpy.ops.object.mode_set(mode='EDIT')
bones = []
for bone_name in bone_names:
bl_bone = armature.edit_bones.new(name=bone_name)
bl_bone.tail = Vector([0, 0, 1]) + bl_bone.head
bones.append((bl_bone, bone_name))
for n, bone_name in enumerate(bone_names):
bl_bone = armature.edit_bones.get(bone_name)
parent_id = bone_parents[n]
if parent_id != -1:
bl_parent, parent = bones[parent_id]
bl_bone.parent = bl_parent
bpy.ops.object.mode_set(mode='POSE')
for n, (bl_bone, bone_name) in enumerate(bones):
pose_bone = armature_obj.pose.bones.get(bone_name)
            if pose_bone is None:
                print("Missing", bone_name, 'bone')
                continue
parent_id = bone_parents[n]
bone_pos = bone_positions[n]
bone_rot = bone_rotations[n]
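            # Swizzle position and quaternion from Source 2's coordinate
            # convention into Blender's.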
bone_pos = Vector([bone_pos[1], bone_pos[0], -bone_pos[2]])
bone_rot = Quaternion([-bone_rot[3], -bone_rot[1], -bone_rot[0], bone_rot[2]])
mat = (Matrix.Translation(bone_pos) @ bone_rot.to_matrix().to_4x4())
pose_bone.matrix_basis.identity()
if parent_id != -1:
parent_bone = armature_obj.pose.bones.get(bone_names[parent_id])
pose_bone.matrix = parent_bone.matrix @ mat
else:
pose_bone.matrix = mat
bpy.ops.pose.armature_apply()
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.select_all(action="DESELECT")
armature_obj.select_set(True)
bpy.context.view_layer.objects.active = armature_obj
bpy.ops.object.transform_apply(location=True, rotation=True, scale=False)
return armature_obj
def load_attachments(self):
all_attachment = {}
for block in self.get_data_block(block_name="MDAT"):
for attachment in block.data['m_attachments']:
if attachment['key'] not in all_attachment:
all_attachment[attachment['key']] = attachment['value']
attachment_collection = bpy.data.collections.get('ATTACHMENTS', None) or bpy.data.collections.new('ATTACHMENTS')
if attachment_collection.name not in self.main_collection.children:
self.main_collection.children.link(attachment_collection)
for name, attachment in all_attachment.items():
empty = bpy.data.objects.new(name, None)
attachment_collection.objects.link(empty)
pos = attachment['m_vInfluenceOffsets'][0]
rot = Quaternion(attachment['m_vInfluenceRotations'][0])
empty.matrix_basis.identity()
if attachment['m_influenceNames'][0]:
empty.parent = self.armature
empty.parent_type = 'BONE'
empty.parent_bone = attachment['m_influenceNames'][0]
empty.location = Vector([pos[1], pos[0], pos[2]])
empty.rotation_quaternion = rot
def load_animations(self):
if not self.get_data_block(block_name='CTRL'):
return
if self.armature:
if not self.armature.animation_data:
self.armature.animation_data_create()
bpy.ops.object.select_all(action="DESELECT")
self.armature.select_set(True)
bpy.context.view_layer.objects.active = self.armature
bpy.ops.object.mode_set(mode='POSE')
ctrl_block = self.get_data_block(block_name='CTRL')[0]
embedded_anim = ctrl_block.data['embedded_animation']
agrp = self.get_data_block(block_id=embedded_anim['group_data_block'])
anim_data = self.get_data_block(block_id=embedded_anim['anim_data_block'])
animations = parse_anim_data(anim_data.data, agrp.data)
bone_array = agrp.data['m_decodeKey']['m_boneArray']
for animation in animations:
print(f"Loading animation {animation.name}")
action = bpy.data.actions.new(animation.name)
self.armature.animation_data.action = action
curve_per_bone = {}
for bone in bone_array:
bone_string = f'pose.bones["{bone["m_name"]}"].'
group = action.groups.new(name=bone['m_name'])
pos_curves = []
rot_curves = []
for i in range(3):
pos_curve = action.fcurves.new(data_path=bone_string + "location", index=i)
pos_curve.keyframe_points.add(len(animation.frames))
pos_curves.append(pos_curve)
pos_curve.group = group
for i in range(4):
rot_curve = action.fcurves.new(data_path=bone_string + "rotation_quaternion", index=i)
rot_curve.keyframe_points.add(len(animation.frames))
rot_curves.append(rot_curve)
rot_curve.group = group
curve_per_bone[bone['m_name']] = pos_curves, rot_curves
for n, frame in enumerate(animation.frames):
for bone_name, bone_data in frame.bone_data.items():
bone_data = frame.bone_data[bone_name]
pos_curves, rot_curves = curve_per_bone[bone_name]
pos_type, pos = bone_data['Position']
rot_type, rot = bone_data['Angle']
bone_pos = Vector([pos[1], pos[0], -pos[2]])
bone_rot = Quaternion([-rot[3], -rot[1], -rot[0], rot[2]])
bone = self.armature.pose.bones[bone_name]
# mat = (Matrix.Translation(bone_pos) @ bone_rot.to_matrix().to_4x4())
if 'Position' in bone_data:
if pos_type in ['CCompressedFullVector3',
'CCompressedAnimVector3',
'CCompressedStaticFullVector3']:
translation_mat = Matrix.Translation(bone_pos)
# elif pos_type == "CCompressedDeltaVector3":
# 'CCompressedStaticVector3',
# a, b, c = decompose(mat)
# a += bone_pos
# translation_mat = compose(a, b, c)
else:
translation_mat = Matrix.Identity(4)
pass
if 'Angle' in bone_data:
if rot_type in ['CCompressedAnimQuaternion',
'CCompressedFullQuaternion',
'CCompressedStaticQuaternion']:
rotation_mat = bone_rot.to_matrix().to_4x4()
else:
rotation_mat = Matrix.Identity(4)
mat = translation_mat @ rotation_mat
if bone.parent:
bone.matrix = bone.parent.matrix @ mat
else:
bone.matrix = bone.matrix @ mat
if 'Position' in bone_data:
for i in range(3):
pos_curves[i].keyframe_points.add(1)
pos_curves[i].keyframe_points[-1].co = (n, bone.location[i])
if 'Angle' in bone_data:
for i in range(4):
rot_curves[i].keyframe_points.add(1)
rot_curves[i].keyframe_points[-1].co = (n, bone.rotation_quaternion[i])
def load_materials(self):
content_manager = ContentManager()
for material in self.materials:
print(f'Loading {material}')
file = self.available_resources.get(material, None)
if file:
file = content_manager.find_file(file)
                if file:  # the material path resolved to an actual file on disk
material = ValveCompiledMaterial(file)
material.load()
``` |
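A usage sketch for `ValveCompiledModel`; it only runs inside Blender's embedded Python (it touches `bpy` state), and the `.vmdl_c` path is hypothetical:
```python
model = ValveCompiledModel('models/example/prop.vmdl_c', re_use_meshes=True)
model.load_mesh(invert_uv=True)
model.load_attachments()
model.load_animations()
print([obj.name for obj in model.objects])
```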
{
"source": "joe-doe/flask-gunicorn-logging",
"score": 2
} |
#### File: joe-doe/flask-gunicorn-logging/main.py
```python
from flask import Flask
import logger
import logging
default_logger = logging.getLogger()
my_logger = logging.getLogger('my.package')
app = Flask(__name__)
default_logger.error("Error !!!!")
default_logger.info("Info !!!!")
my_logger.info('Info !!!')
my_logger.error('Error !!!')
@app.route('/')
def index():
my_logger.info('Visited index')
return 'OK'
``` |
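The locally imported `logger` module is not shown in this file. Under gunicorn, a common pattern is to reuse gunicorn's error-log handlers for the application's loggers so all output shares one destination; the sketch below is an assumption about what such a module could contain, not the repository's actual code:
```python
# logger.py (hypothetical contents)
import logging

gunicorn_logger = logging.getLogger('gunicorn.error')

# Route the root logger through gunicorn's handlers so module-level
# loggers (including 'my.package') inherit them.
root = logging.getLogger()
root.handlers = gunicorn_logger.handlers
root.setLevel(gunicorn_logger.level or logging.INFO)
```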
{
"source": "joedomino874/hummingbot",
"score": 2
} |
#### File: derivative/binance_perpetual/binance_perpetual_user_stream_tracker.py
```python
import asyncio
import logging
import hummingbot.connector.derivative.binance_perpetual.constants as CONSTANTS
from typing import Optional
from hummingbot.core.api_throttler.async_throttler import AsyncThrottler
from hummingbot.core.data_type.user_stream_tracker import UserStreamTracker
from hummingbot.core.data_type.user_stream_tracker_data_source import UserStreamTrackerDataSource
from hummingbot.core.utils.async_utils import safe_gather, safe_ensure_future
from hummingbot.logger import HummingbotLogger
from hummingbot.connector.derivative.binance_perpetual.binance_perpetual_user_stream_data_source import \
BinancePerpetualUserStreamDataSource
class BinancePerpetualUserStreamTracker(UserStreamTracker):
_bpust_logger: Optional[HummingbotLogger] = None
@classmethod
def logger(cls) -> HummingbotLogger:
        if cls._bpust_logger is None:
            cls._bpust_logger = logging.getLogger(__name__)
        return cls._bpust_logger
@classmethod
def _get_throttler_instance(cls) -> AsyncThrottler:
return AsyncThrottler(CONSTANTS.RATE_LIMITS)
def __init__(self, api_key: str, domain: str = CONSTANTS.DOMAIN, throttler: Optional[AsyncThrottler] = None):
super().__init__()
self._api_key: str = api_key
self._ev_loop: asyncio.events.AbstractEventLoop = asyncio.get_event_loop()
self._data_source: Optional[UserStreamTrackerDataSource] = None
self._user_stream_tracking_task: Optional[asyncio.Task] = None
self._domain = domain
self._throttler = throttler
@property
def exchange_name(self) -> str:
return self._domain
@property
def data_source(self) -> UserStreamTrackerDataSource:
if self._data_source is None:
self._data_source = BinancePerpetualUserStreamDataSource(api_key=self._api_key, domain=self._domain, throttler=self._throttler)
return self._data_source
async def start(self):
self._user_stream_tracking_task = safe_ensure_future(
self.data_source.listen_for_user_stream(self._ev_loop, self._user_stream)
)
await safe_gather(self._user_stream_tracking_task)
```
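A minimal asyncio driver for the tracker above; the API key is a placeholder, and it assumes the `UserStreamTracker` base class exposes its event queue as `user_stream`, as elsewhere in Hummingbot:
```python
import asyncio

async def main():
    tracker = BinancePerpetualUserStreamTracker(api_key="YOUR_API_KEY")  # placeholder
    listen_task = asyncio.ensure_future(tracker.start())
    try:
        while True:
            event = await tracker.user_stream.get()
            print(event)
    finally:
        listen_task.cancel()

asyncio.run(main())
```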
#### File: exchange/bitfinex/bitfinex_api_order_book_data_source.py
```python
from collections import namedtuple
import logging
import time
import aiohttp
import asyncio
import ujson
import pandas as pd
from typing import (
Any,
AsyncIterable,
Dict,
List,
Optional,
)
import websockets
from websockets.exceptions import ConnectionClosed
from hummingbot.core.data_type.order_book import OrderBook
from hummingbot.core.data_type.order_book_row import OrderBookRow
from hummingbot.core.data_type.order_book_tracker_data_source import OrderBookTrackerDataSource
from hummingbot.core.data_type.order_book_tracker_entry import (
OrderBookTrackerEntry
)
from hummingbot.core.data_type.order_book_message import (
OrderBookMessage,
OrderBookMessageType,
)
from hummingbot.core.utils.async_utils import safe_gather
from hummingbot.logger import HummingbotLogger
from hummingbot.connector.exchange.bitfinex import (
BITFINEX_REST_URL,
BITFINEX_WS_URI,
ContentEventType,
)
from hummingbot.connector.exchange.bitfinex.bitfinex_utils import (
join_paths,
convert_to_exchange_trading_pair,
convert_from_exchange_trading_pair,
)
from hummingbot.connector.exchange.bitfinex.bitfinex_active_order_tracker import BitfinexActiveOrderTracker
from hummingbot.connector.exchange.bitfinex.bitfinex_order_book import BitfinexOrderBook
from hummingbot.connector.exchange.bitfinex.bitfinex_order_book_message import \
BitfinexOrderBookMessage
from hummingbot.connector.exchange.bitfinex.bitfinex_order_book_tracker_entry import \
BitfinexOrderBookTrackerEntry
BOOK_RET_TYPE = List[Dict[str, Any]]
RESPONSE_SUCCESS = 200
NaN = float("nan")
MAIN_FIAT = ("USD", "USDC", "USDS", "DAI", "PAX", "TUSD", "USDT")
Ticker = namedtuple(
"Ticker",
"bid bid_size ask ask_size daily_change daily_change_percent last_price volume high low"
)
BookStructure = namedtuple("Book", "price count amount")
TradeStructure = namedtuple("Trade", "id mts amount price")
# n0-n9 are undocumented; their meaning is unknown. The exchange may document them later.
ConfStructure = namedtuple("Conf", "n0 n1 n2 min max n5 n6 n7 n8 n9")
class BitfinexAPIOrderBookDataSource(OrderBookTrackerDataSource):
MESSAGE_TIMEOUT = 30.0
STEP_TIME_SLEEP = 1.0
REQUEST_TTL = 60 * 30
TIME_SLEEP_BETWEEN_REQUESTS = 5.0
CACHE_SIZE = 1
SNAPSHOT_LIMIT_SIZE = 100
_logger: Optional[HummingbotLogger] = None
@classmethod
def logger(cls) -> HummingbotLogger:
if cls._logger is None:
cls._logger = logging.getLogger(__name__)
return cls._logger
def __init__(self, trading_pairs: Optional[List[str]] = None):
super().__init__(trading_pairs)
self._trading_pairs: Optional[List[str]] = trading_pairs
        # Maps order IDs to book entries (price, amount, and update_id, which is
        # usually the timestamp), matching how Hummingbot stores order book rows.
self._tracked_book_entries: Dict[int, OrderBookRow] = {}
@staticmethod
async def fetch_trading_pairs() -> List[str]:
try:
async with aiohttp.ClientSession() as client:
async with client.get("https://api-pub.bitfinex.com/v2/conf/pub:list:pair:exchange", timeout=10) as response:
if response.status == 200:
data = await response.json()
trading_pair_list: List[str] = []
for trading_pair in data[0]:
# change the following line accordingly
converted_trading_pair: Optional[str] = \
convert_from_exchange_trading_pair(trading_pair)
if converted_trading_pair is not None:
trading_pair_list.append(converted_trading_pair)
else:
logging.getLogger(__name__).info(f"Could not parse the trading pair "
f"{trading_pair}, skipping it...")
return trading_pair_list
except Exception:
# Do nothing if the request fails -- there will be no autocomplete available
pass
return []
@staticmethod
def _convert_volume(raw_prices: Dict[str, Any]) -> BOOK_RET_TYPE:
converters = {}
prices = []
for price in [v for v in raw_prices.values() if v["quoteAsset"] in MAIN_FIAT]:
raw_symbol = f"{price['baseAsset']}-{price['quoteAsset']}"
symbol = f"{price['baseAsset']}{price['quoteAsset']}"
prices.append(
{
**price,
"symbol": symbol,
"USDVolume": price["volume"] * price["price"]
}
)
converters[price["baseAsset"]] = price["price"]
del raw_prices[raw_symbol]
for raw_symbol, item in raw_prices.items():
symbol = f"{item['baseAsset']}{item['quoteAsset']}"
if item["baseAsset"] in converters:
prices.append(
{
**item,
"symbol": symbol,
"USDVolume": item["volume"] * converters[item["baseAsset"]]
}
)
if item["quoteAsset"] not in converters:
converters[item["quoteAsset"]] = item["price"] / converters[item["baseAsset"]]
continue
if item["quoteAsset"] in converters:
prices.append(
{
**item,
"symbol": symbol,
"USDVolume": item["volume"] * item["price"] * converters[item["quoteAsset"]]
}
)
if item["baseAsset"] not in converters:
converters[item["baseAsset"]] = item["price"] * converters[item["quoteAsset"]]
continue
prices.append({
**item,
"symbol": symbol,
"volume": NaN})
return prices
@staticmethod
def _prepare_snapshot(pair: str, raw_snapshot: List[BookStructure]) -> Dict[str, Any]:
"""
Return structure of three elements:
symbol: traded pair symbol
bids: List of OrderBookRow for bids
asks: List of OrderBookRow for asks
"""
update_id = time.time()
bids = [OrderBookRow(i.price, i.amount, update_id) for i in raw_snapshot if i.amount > 0]
asks = [OrderBookRow(i.price, abs(i.amount), update_id) for i in raw_snapshot if i.amount < 0]
return {
"symbol": pair,
"bids": bids,
"asks": asks,
}
def _prepare_trade(self, raw_response: str) -> Optional[Dict[str, Any]]:
*_, content = ujson.loads(raw_response)
if content == ContentEventType.HEART_BEAT:
return None
try:
trade = TradeStructure(*content)
except Exception as err:
self.logger().error(err)
self.logger().error(raw_response)
else:
return {
"id": trade.id,
"mts": trade.mts,
"amount": trade.amount,
"price": trade.price,
}
async def _get_response(self, ws: websockets.WebSocketClientProtocol) -> AsyncIterable[str]:
try:
while True:
msg: str = await asyncio.wait_for(ws.recv(), timeout=self.MESSAGE_TIMEOUT)
yield msg
except asyncio.TimeoutError:
self.logger().warning("WebSocket ping timed out. Going to reconnect...")
return
except ConnectionClosed:
return
finally:
await ws.close()
    def _generate_delete_message(self, symbol: str, price: float, amount: float):
side_key = "bids" if amount == 1 else "asks"
timestamp = time.time()
msg = {
"symbol": symbol,
side_key: OrderBookRow(price, 0, timestamp), # 0 amount will force the order to be deleted
"update_id": time.time() # Assume every update is incremental
}
return BitfinexOrderBookMessage(
message_type=OrderBookMessageType.DIFF,
content=msg,
timestamp=timestamp)
def _generate_add_message(self, symbol: str, price: float, amount: float):
side_key = "bids" if amount > 0 else "asks"
timestamp = time.time()
msg = {
"symbol": symbol,
side_key: OrderBookRow(price, abs(amount), timestamp),
"update_id": timestamp # Assume every update is incremental
}
return BitfinexOrderBookMessage(
message_type=OrderBookMessageType.DIFF,
content=msg,
timestamp=timestamp)
def _parse_raw_update(self, pair: str, raw_response: str) -> OrderBookMessage:
"""
        Parses a raw book update; a count of 0 removes the price level, otherwise
        the level is added or updated.
        Returns an OrderBookMessage, or None if the payload is not an update.
"""
*_, content = ujson.loads(raw_response)
if isinstance(content, list) and len(content) == 3:
price = content[0]
count = content[1]
amount = content[2]
if count > 0:
return self._generate_add_message(pair, price, amount)
else:
return self._generate_delete_message(pair, price, amount)
return None
@classmethod
async def get_last_traded_prices(cls, trading_pairs: List[str]) -> Dict[str, float]:
tasks = [cls.get_last_traded_price(t_pair) for t_pair in trading_pairs]
results = await safe_gather(*tasks)
return {t_pair: result for t_pair, result in zip(trading_pairs, results)}
@classmethod
async def get_last_traded_price(cls, trading_pair: str) -> float:
async with aiohttp.ClientSession() as client:
# https://api-pub.bitfinex.com/v2/ticker/tBTCUSD
ticker_url: str = join_paths(BITFINEX_REST_URL, f"ticker/{convert_to_exchange_trading_pair(trading_pair)}")
resp = await client.get(ticker_url)
resp_json = await resp.json()
ticker = Ticker(*resp_json)
return float(ticker.last_price)
async def get_trading_pairs(self) -> List[str]:
"""
Get a list of active trading pairs
(if the market class already specifies a list of trading pairs,
returns that list instead of all active trading pairs)
:returns: A list of trading pairs defined by the market class,
or all active trading pairs from the rest API
"""
if not self._trading_pairs:
try:
self._trading_pairs = await self.fetch_trading_pairs()
except Exception:
msg = "Error getting active exchange information. Check network connection."
self._trading_pairs = []
self.logger().network(
"Error getting active exchange information.",
exc_info=True,
app_warning_msg=msg
)
return self._trading_pairs
async def get_snapshot(self, client: aiohttp.ClientSession, trading_pair: str) -> Dict[str, Any]:
request_url: str = f"{BITFINEX_REST_URL}/book/{convert_to_exchange_trading_pair(trading_pair)}/P0"
        # The default depth is 50 (25 bids + 25 asks); request 100 of each instead.
        # The exchange only accepts len values of 1, 25, or 100.
params = {
"len": self.SNAPSHOT_LIMIT_SIZE
}
async with client.get(request_url, params=params) as response:
response: aiohttp.ClientResponse = response
if response.status != RESPONSE_SUCCESS:
raise IOError(f"Error fetching Bitfinex market snapshot for {trading_pair}. "
f"HTTP status is {response.status}.")
raw_data: Dict[str, Any] = await response.json()
return self._prepare_snapshot(trading_pair, [BookStructure(*i) for i in raw_data])
async def get_new_order_book(self, trading_pair: str) -> OrderBook:
async with aiohttp.ClientSession() as client:
snapshot: Dict[str, any] = await self.get_snapshot(client, trading_pair)
snapshot_timestamp: float = time.time()
snapshot_msg: OrderBookMessage = BitfinexOrderBook.snapshot_message_from_exchange(
snapshot,
snapshot_timestamp
)
active_order_tracker: BitfinexActiveOrderTracker = BitfinexActiveOrderTracker()
bids, asks = active_order_tracker.convert_snapshot_message_to_order_book_row(snapshot_msg)
order_book = self.order_book_create_function()
order_book.apply_snapshot(bids, asks, snapshot_msg.update_id)
return order_book
async def get_tracking_pairs(self) -> Dict[str, OrderBookTrackerEntry]:
result: Dict[str, OrderBookTrackerEntry] = {}
trading_pairs: List[str] = await self.get_trading_pairs()
number_of_pairs: int = len(trading_pairs)
async with aiohttp.ClientSession() as client:
for idx, trading_pair in enumerate(trading_pairs):
try:
snapshot: Dict[str, Any] = await self.get_snapshot(client, trading_pair)
snapshot_timestamp: float = time.time()
snapshot_msg: OrderBookMessage = BitfinexOrderBook.snapshot_message_from_exchange(
snapshot,
snapshot_timestamp
)
order_book: OrderBook = self.order_book_create_function()
active_order_tracker: BitfinexActiveOrderTracker = BitfinexActiveOrderTracker()
order_book.apply_snapshot(
snapshot_msg.bids,
snapshot_msg.asks,
snapshot_msg.update_id
)
result[trading_pair] = BitfinexOrderBookTrackerEntry(
trading_pair, snapshot_timestamp, order_book, active_order_tracker
)
self.logger().info(
f"Initialized order book for {trading_pair}. "
f"{idx+1}/{number_of_pairs} completed."
)
await asyncio.sleep(self.STEP_TIME_SLEEP)
except IOError:
self.logger().network(
f"Error getting snapshot for {trading_pair}.",
exc_info=True,
app_warning_msg=f"Error getting snapshot for {trading_pair}. "
"Check network connection."
)
except Exception:
self.logger().error(
f"Error initializing order book for {trading_pair}. ",
exc_info=True
)
return result
async def listen_for_trades(self, ev_loop: asyncio.BaseEventLoop, output: asyncio.Queue):
while True:
try:
trading_pairs: List[str] = await self.get_trading_pairs()
for trading_pair in trading_pairs:
async with websockets.connect(BITFINEX_WS_URI) as ws:
payload: Dict[str, Any] = {
"event": "subscribe",
"channel": "trades",
"symbol": convert_to_exchange_trading_pair(trading_pair),
}
await ws.send(ujson.dumps(payload))
await asyncio.wait_for(ws.recv(), timeout=self.MESSAGE_TIMEOUT) # response
await asyncio.wait_for(ws.recv(), timeout=self.MESSAGE_TIMEOUT) # subscribe info
await asyncio.wait_for(ws.recv(), timeout=self.MESSAGE_TIMEOUT) # snapshot
async for raw_msg in self._get_response(ws):
msg = self._prepare_trade(raw_msg)
if msg:
msg_book: OrderBookMessage = BitfinexOrderBook.trade_message_from_exchange(
msg,
metadata={"symbol": f"{trading_pair}"}
)
output.put_nowait(msg_book)
except Exception as err:
self.logger().error(err)
self.logger().network(
"Unexpected error with WebSocket connection.",
exc_info=True,
app_warning_msg="Unexpected error with WebSocket connection. "
f"Retrying in {int(self.MESSAGE_TIMEOUT)} seconds. "
"Check network connection."
)
await asyncio.sleep(5)
async def listen_for_order_book_diffs(self,
ev_loop: asyncio.BaseEventLoop,
output: asyncio.Queue):
while True:
try:
trading_pairs: List[str] = await self.get_trading_pairs()
for trading_pair in trading_pairs:
async with websockets.connect(BITFINEX_WS_URI) as ws:
payload: Dict[str, Any] = {
"event": "subscribe",
"channel": "book",
"prec": "P0",
"symbol": convert_to_exchange_trading_pair(trading_pair),
}
await ws.send(ujson.dumps(payload))
await asyncio.wait_for(ws.recv(), timeout=self.MESSAGE_TIMEOUT) # response
await asyncio.wait_for(ws.recv(), timeout=self.MESSAGE_TIMEOUT) # subscribe info
raw_snapshot = await asyncio.wait_for(ws.recv(), timeout=self.MESSAGE_TIMEOUT) # snapshot
snapshot = self._prepare_snapshot(trading_pair, [BookStructure(*i) for i in ujson.loads(raw_snapshot)[1]])
snapshot_timestamp: float = time.time()
snapshot_msg: OrderBookMessage = BitfinexOrderBook.snapshot_message_from_exchange(
snapshot,
snapshot_timestamp
)
output.put_nowait(snapshot_msg)
async for raw_msg in self._get_response(ws):
msg = self._parse_raw_update(trading_pair, raw_msg)
if msg is not None:
output.put_nowait(msg)
except Exception as err:
self.logger().error(err)
self.logger().network(
"Unexpected error with WebSocket connection.",
exc_info=True,
app_warning_msg="Unexpected error with WebSocket connection. "
f"Retrying in {int(self.MESSAGE_TIMEOUT)} seconds. "
"Check network connection."
)
await asyncio.sleep(5)
async def listen_for_order_book_snapshots(self,
ev_loop: asyncio.BaseEventLoop,
output: asyncio.Queue):
while True:
trading_pairs: List[str] = await self.get_trading_pairs()
try:
async with aiohttp.ClientSession() as client:
for trading_pair in trading_pairs:
try:
snapshot: Dict[str, Any] = await self.get_snapshot(client, trading_pair)
snapshot_timestamp: float = time.time()
snapshot_msg: OrderBookMessage = BitfinexOrderBook.snapshot_message_from_exchange(
snapshot,
snapshot_timestamp
)
output.put_nowait(snapshot_msg)
self.logger().debug(f"Saved order book snapshot for {trading_pair}")
await asyncio.sleep(self.TIME_SLEEP_BETWEEN_REQUESTS)
except asyncio.CancelledError:
raise
except Exception as err:
self.logger().error("Listening snapshots", err)
self.logger().network(
"Unexpected error with HTTP connection.",
exc_info=True,
app_warning_msg="Unexpected error with HTTP connection. "
f"Retrying in {self.TIME_SLEEP_BETWEEN_REQUESTS} sec."
"Check network connection."
)
await asyncio.sleep(self.TIME_SLEEP_BETWEEN_REQUESTS)
this_hour: pd.Timestamp = pd.Timestamp.utcnow().replace(
minute=0, second=0, microsecond=0
)
next_hour: pd.Timestamp = this_hour + pd.Timedelta(hours=1)
delta: float = next_hour.timestamp() - time.time()
await asyncio.sleep(delta)
except asyncio.CancelledError:
raise
except Exception as err:
self.logger().error("Listening snapshots", err)
self.logger().error("Unexpected error", exc_info=True)
await asyncio.sleep(self.TIME_SLEEP_BETWEEN_REQUESTS)
```
#### File: exchange/bitfinex/bitfinex_api_user_stream_data_source.py
```python
import asyncio
import logging
import time
from typing import Optional, List
from hummingbot.core.data_type.user_stream_tracker_data_source import \
UserStreamTrackerDataSource
from hummingbot.logger import HummingbotLogger
from hummingbot.connector.exchange.bitfinex.bitfinex_order_book import BitfinexOrderBook
from hummingbot.connector.exchange.bitfinex.bitfinex_websocket import BitfinexWebsocket
from hummingbot.connector.exchange.bitfinex.bitfinex_auth import BitfinexAuth
from hummingbot.connector.exchange.bitfinex.bitfinex_order_book_message import \
BitfinexOrderBookMessage
class BitfinexAPIUserStreamDataSource(UserStreamTrackerDataSource):
MESSAGE_TIMEOUT = 30.0
_logger: Optional[HummingbotLogger] = None
@classmethod
def logger(cls) -> HummingbotLogger:
if cls._logger is None:
cls._logger = logging.getLogger(__name__)
return cls._logger
def __init__(self, bitfinex_auth: BitfinexAuth, trading_pairs: Optional[List[str]] = None):
if trading_pairs is None:
trading_pairs = []
self._bitfinex_auth: BitfinexAuth = bitfinex_auth
self._trading_pairs = trading_pairs
self._current_listen_key = None
self._listen_for_user_stream_task = None
self._last_recv_time: float = 0
super().__init__()
@property
def order_book_class(self):
return BitfinexOrderBook
@property
def last_recv_time(self) -> float:
return self._last_recv_time
async def listen_for_user_stream(self, ev_loop: asyncio.BaseEventLoop, output: asyncio.Queue):
while True:
try:
ws = await BitfinexWebsocket(self._bitfinex_auth).connect()
await ws.authenticate()
async for msg in ws.messages():
transformed_msg: BitfinexOrderBookMessage = self._transform_message_from_exchange(msg)
if transformed_msg is None:
continue
else:
output.put_nowait(transformed_msg)
except asyncio.CancelledError:
raise
except Exception:
self.logger().error(
"Unexpected error with Bitfinex WebSocket connection. " "Retrying after 30 seconds...",
exc_info=True,
)
await asyncio.sleep(self.MESSAGE_TIMEOUT)
def _transform_message_from_exchange(self, msg) -> Optional[BitfinexOrderBookMessage]:
order_book_message: BitfinexOrderBookMessage = BitfinexOrderBook.diff_message_from_exchange(msg, time.time())
if any([
order_book_message.type_heartbeat,
order_book_message.event_auth,
order_book_message.event_info,
]):
# skip unneeded events and types
return
return order_book_message
```
#### File: exchange/coinbase_pro/coinbase_pro_order_book_message.py
```python
import pandas as pd
from typing import (
Dict,
List,
Optional,
)
from hummingbot.core.data_type.order_book_row import OrderBookRow
from hummingbot.core.data_type.order_book_message import (
OrderBookMessage,
OrderBookMessageType,
)
class CoinbaseProOrderBookMessage(OrderBookMessage):
def __new__(
cls,
message_type: OrderBookMessageType,
content: Dict[str, any],
timestamp: Optional[float] = None,
*args,
**kwargs,
):
if timestamp is None:
if message_type is OrderBookMessageType.SNAPSHOT:
raise ValueError("timestamp must not be None when initializing snapshot messages.")
timestamp = pd.Timestamp(content["time"], tz="UTC").timestamp()
return super(CoinbaseProOrderBookMessage, cls).__new__(
cls, message_type, content, timestamp=timestamp, *args, **kwargs
)
@property
def update_id(self) -> int:
if self.type in [OrderBookMessageType.DIFF, OrderBookMessageType.SNAPSHOT]:
return int(self.content["sequence"])
else:
return -1
@property
def trade_id(self) -> int:
if self.type is OrderBookMessageType.TRADE:
return int(self.content["sequence"])
return -1
@property
def trading_pair(self) -> str:
if "product_id" in self.content:
return self.content["product_id"]
elif "symbol" in self.content:
return self.content["symbol"]
@property
def asks(self) -> List[OrderBookRow]:
raise NotImplementedError("Coinbase Pro order book messages have different semantics.")
@property
def bids(self) -> List[OrderBookRow]:
raise NotImplementedError("Coinbase Pro order book messages have different semantics.")
```
#### File: web_assistant/connections/data_types.py
```python
from dataclasses import dataclass
from enum import Enum
from typing import Any, Mapping, Optional
import aiohttp
class RESTMethod(Enum):
GET = "GET"
POST = "POST"
PUT = "PUT"
DELETE = "DELETE"
def __str__(self):
obj_str = repr(self)
return obj_str
def __repr__(self):
return self.value
@dataclass
class RESTRequest:
method: RESTMethod
url: Optional[str] = None
params: Optional[Mapping[str, str]] = None
data: Any = None
headers: Optional[Mapping[str, str]] = None
is_auth_required: bool = False
throttler_limit_id: Optional[str] = None
@dataclass(init=False)
class RESTResponse:
url: str
method: RESTMethod
status: int
headers: Optional[Mapping[str, str]]
def __init__(self, aiohttp_response: aiohttp.ClientResponse):
self._aiohttp_response = aiohttp_response
@property
def url(self) -> str:
url_str = str(self._aiohttp_response.url)
return url_str
@property
def method(self) -> RESTMethod:
method_ = RESTMethod[self._aiohttp_response.method.upper()]
return method_
@property
def status(self) -> int:
status_ = int(self._aiohttp_response.status)
return status_
@property
def headers(self) -> Optional[Mapping[str, str]]:
headers_ = self._aiohttp_response.headers
return headers_
async def json(self) -> Any:
json_ = await self._aiohttp_response.json()
return json_
async def text(self) -> str:
text_ = await self._aiohttp_response.text()
return text_
@dataclass
class WSRequest:
payload: Mapping[str, Any]
throttler_limit_id: Optional[str] = None
@dataclass
class WSResponse:
data: Any
```
#### File: core/web_assistant/rest_pre_processors.py
```python
import abc
from hummingbot.core.web_assistant.connections.data_types import RESTRequest
class RESTPreProcessorBase(abc.ABC):
@abc.abstractmethod
async def pre_process(self, request: RESTRequest) -> RESTRequest:
...
```
#### File: core/web_assistant/ws_pre_processors.py
```python
import abc
from hummingbot.core.web_assistant.connections.data_types import WSRequest
class WSPreProcessorBase(abc.ABC):
@abc.abstractmethod
async def pre_process(self, request: WSRequest) -> WSRequest:
...
```
#### File: derivative/binance_perpetual/test_binance_perpetual_api_order_book_data_source.py
```python
import asyncio
import re
import ujson
import unittest
import hummingbot.connector.derivative.binance_perpetual.constants as CONSTANTS
from aioresponses.core import aioresponses
from typing import (
Any,
Awaitable,
Dict,
)
from unittest.mock import AsyncMock, patch
from hummingbot.core.api_throttler.async_throttler import AsyncThrottler
from hummingbot.core.data_type.order_book import OrderBook
from hummingbot.core.data_type.order_book_message import OrderBookMessage, OrderBookMessageType
from hummingbot.connector.derivative.binance_perpetual import binance_perpetual_utils as utils
from hummingbot.connector.derivative.binance_perpetual.binance_perpetual_api_order_book_data_source import BinancePerpetualAPIOrderBookDataSource
from test.hummingbot.connector.network_mocking_assistant import NetworkMockingAssistant
class BinancePerpetualAPIOrderBookDataSourceUnitTests(unittest.TestCase):
# logging.Level required to receive logs from the data source logger
level = 0
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
cls.ev_loop = asyncio.get_event_loop()
cls.base_asset = "COINALPHA"
cls.quote_asset = "HBOT"
cls.trading_pair = f"{cls.base_asset}-{cls.quote_asset}"
cls.ex_trading_pair = f"{cls.base_asset}{cls.quote_asset}"
cls.domain = "binance_perpetual_testnet"
def setUp(self) -> None:
super().setUp()
self.log_records = []
self.listening_task = None
self.data_source = BinancePerpetualAPIOrderBookDataSource(trading_pairs=[self.trading_pair],
domain=self.domain,
)
self.data_source.logger().setLevel(1)
self.data_source.logger().addHandler(self)
self.mocking_assistant = NetworkMockingAssistant()
self.resume_test_event = asyncio.Event()
def tearDown(self) -> None:
self.listening_task and self.listening_task.cancel()
super().tearDown()
def handle(self, record):
self.log_records.append(record)
def async_run_with_timeout(self, coroutine: Awaitable, timeout: float = 1):
ret = self.ev_loop.run_until_complete(asyncio.wait_for(coroutine, timeout))
return ret
def resume_test_callback(self, *_, **__):
self.resume_test_event.set()
return None
def _is_logged(self, log_level: str, message: str) -> bool:
return any(record.levelname == log_level and record.getMessage() == message
for record in self.log_records)
def _raise_exception(self, exception_class):
raise exception_class
def _orderbook_update_event(self):
resp = {
"stream": f"{self.ex_trading_pair.lower()}@depth",
"data": {
"e": "depthUpdate",
"E": 1631591424198,
"T": 1631591424189,
"s": self.ex_trading_pair,
"U": 752409354963,
"u": 752409360466,
"pu": 752409354901,
"b": [
[
"43614.31",
"0.000"
],
],
"a": [
[
"45277.14",
"0.257"
],
]
}
}
return resp
def _orderbook_trade_event(self):
resp = {
"stream": f"{self.ex_trading_pair.lower()}@aggTrade",
"data": {
"e": "aggTrade",
"E": 1631594403486,
"a": 817295132,
"s": self.ex_trading_pair,
"p": "45266.16",
"q": "2.206",
"f": 1437689393,
"l": 1437689407,
"T": 1631594403330,
"m": False
}
}
return resp
@aioresponses()
def test_get_last_traded_prices(self, mock_api):
url = utils.rest_url(path_url=CONSTANTS.TICKER_PRICE_CHANGE_URL, domain=self.domain)
regex_url = re.compile(f"^{url}".replace(".", r"\.").replace("?", r"\?"))
mock_response: Dict[str, Any] = {
# Truncated responses
"lastPrice": "10.0",
}
mock_api.get(regex_url, body=ujson.dumps(mock_response))
result: Dict[str, Any] = self.async_run_with_timeout(
self.data_source.get_last_traded_prices(trading_pairs=[self.trading_pair], domain=self.domain)
)
self.assertTrue(self.trading_pair in result)
self.assertEqual(10.0, result[self.trading_pair])
def test_get_throttler_instance(self):
self.assertTrue(isinstance(self.data_source._get_throttler_instance(), AsyncThrottler))
@aioresponses()
def test_fetch_trading_pairs_failure(self, mock_api):
url = utils.rest_url(path_url=CONSTANTS.EXCHANGE_INFO_URL, domain=self.domain)
regex_url = re.compile(f"^{url}".replace(".", r"\.").replace("?", r"\?"))
mock_api.get(regex_url, status=400, body=ujson.dumps({"ERROR"}))
result: Dict[str, Any] = self.async_run_with_timeout(
self.data_source.fetch_trading_pairs(domain=self.domain)
)
self.assertEqual(0, len(result))
@aioresponses()
@patch("hummingbot.connector.derivative.binance_perpetual.binance_perpetual_utils.convert_from_exchange_trading_pair")
def test_fetch_trading_pairs_successful(self, mock_api, mock_utils):
mock_utils.return_value = self.trading_pair
url = utils.rest_url(path_url=CONSTANTS.EXCHANGE_INFO_URL, domain=self.domain)
regex_url = re.compile(f"^{url}".replace(".", r"\.").replace("?", r"\?"))
mock_response: Dict[str, Any] = {
# Truncated Responses
"symbols": [
{
"symbol": self.ex_trading_pair,
"pair": self.ex_trading_pair,
"baseAsset": self.base_asset,
"quoteAsset": self.quote_asset,
"status": "TRADING",
},
{
"symbol": "INACTIVEMARKET",
"status": "INACTIVE"
}
],
}
mock_api.get(regex_url, status=200, body=ujson.dumps(mock_response))
result: Dict[str, Any] = self.async_run_with_timeout(
self.data_source.fetch_trading_pairs(domain=self.domain)
)
self.assertEqual(1, len(result))
@aioresponses()
def test_get_snapshot_exception_raised(self, mock_api):
url = utils.rest_url(CONSTANTS.SNAPSHOT_REST_URL, domain=self.domain)
regex_url = re.compile(f"^{url}".replace(".", r"\.").replace("?", r"\?"))
mock_api.get(regex_url, status=400, body=ujson.dumps({"ERROR"}))
with self.assertRaises(IOError) as context:
self.async_run_with_timeout(
self.data_source.get_snapshot(trading_pair=self.trading_pair, domain=self.domain)
)
self.assertEqual(str(context.exception), f"Error fetching Binance market snapshot for {self.trading_pair}.")
@aioresponses()
def test_get_snapshot_successful(self, mock_api):
url = utils.rest_url(CONSTANTS.SNAPSHOT_REST_URL, domain=self.domain)
regex_url = re.compile(f"^{url}".replace(".", r"\.").replace("?", r"\?"))
mock_response = {
"lastUpdateId": 1027024,
"E": 1589436922972,
"T": 1589436922959,
"bids": [
[
"10",
"1"
]
],
"asks": [
[
"11",
"1"
]
]
}
mock_api.get(regex_url, status=200, body=ujson.dumps(mock_response))
result: Dict[str, Any] = self.async_run_with_timeout(
self.data_source.get_snapshot(trading_pair=self.trading_pair, domain=self.domain)
)
self.assertEqual(mock_response, result)
@aioresponses()
def test_get_new_order_book(self, mock_api):
url = utils.rest_url(CONSTANTS.SNAPSHOT_REST_URL, domain=self.domain)
regex_url = re.compile(f"^{url}".replace(".", r"\.").replace("?", r"\?"))
mock_response = {
"lastUpdateId": 1027024,
"E": 1589436922972,
"T": 1589436922959,
"bids": [
[
"10",
"1"
]
],
"asks": [
[
"11",
"1"
]
]
}
mock_api.get(regex_url, status=200, body=ujson.dumps(mock_response))
result = self.async_run_with_timeout(
self.data_source.get_new_order_book(trading_pair=self.trading_pair)
)
self.assertIsInstance(result, OrderBook)
self.assertEqual(1027024, result.snapshot_uid)
@patch("aiohttp.ClientSession.ws_connect")
def test_create_websocket_connection_cancelled_when_connecting(self, mock_ws):
mock_ws.side_effect = asyncio.CancelledError
with self.assertRaises(asyncio.CancelledError):
self.async_run_with_timeout(
self.data_source._create_websocket_connection()
)
@patch("aiohttp.ClientSession.ws_connect")
def test_create_websocket_connection_exception_raised(self, mock_ws):
mock_ws.side_effect = Exception("TEST ERROR.")
with self.assertRaises(Exception):
self.async_run_with_timeout(
self.data_source._create_websocket_connection()
)
self.assertTrue(self._is_logged("NETWORK",
"Unexpected error occured when connecting to WebSocket server. Error: TEST ERROR."))
@patch("aiohttp.ClientSession.ws_connect", new_callable=AsyncMock)
@patch("hummingbot.core.data_type.order_book_tracker_data_source.OrderBookTrackerDataSource._sleep")
def test_listen_for_order_book_diffs_cancelled_when_connecting(self, _, mock_ws):
msg_queue: asyncio.Queue = asyncio.Queue()
mock_ws.side_effect = asyncio.CancelledError
with self.assertRaises(asyncio.CancelledError):
self.listening_task = self.ev_loop.create_task(
self.data_source.listen_for_order_book_diffs(self.ev_loop, msg_queue)
)
self.async_run_with_timeout(self.listening_task)
self.assertEqual(msg_queue.qsize(), 0)
@patch("hummingbot.core.data_type.order_book_tracker_data_source.OrderBookTrackerDataSource._sleep")
@patch("hummingbot.connector.derivative.binance_perpetual.binance_perpetual_utils.convert_from_exchange_trading_pair")
@patch("aiohttp.ClientSession.ws_connect", new_callable=AsyncMock)
def test_listen_for_order_book_diffs_logs_exception(self, mock_ws, mock_utils, *_):
mock_utils.return_value = self.trading_pair
msg_queue: asyncio.Queue = asyncio.Queue()
mock_ws.return_value = self.mocking_assistant.create_websocket_mock()
mock_ws.close.return_value = None
incomplete_resp = {
"m": 1,
"i": 2,
}
self.mocking_assistant.add_websocket_json_message(mock_ws.return_value, incomplete_resp)
self.mocking_assistant.add_websocket_json_message(mock_ws.return_value, self._orderbook_update_event())
self.listening_task = self.ev_loop.create_task(
self.data_source.listen_for_order_book_diffs(self.ev_loop, msg_queue)
)
self.mocking_assistant.run_until_all_json_messages_delivered(mock_ws.return_value)
self.assertTrue(self._is_logged("ERROR", "Unexpected error with Websocket connection. Retrying after 30 seconds..."))
@patch("aiohttp.ClientSession.ws_connect", new_callable=AsyncMock)
@patch("hummingbot.connector.derivative.binance_perpetual.binance_perpetual_utils.convert_from_exchange_trading_pair")
def test_listen_for_order_book_diffs_successful(self, mock_utils, mock_ws):
mock_utils.return_value = self.trading_pair
msg_queue: asyncio.Queue = asyncio.Queue()
mock_ws.return_value = self.mocking_assistant.create_websocket_mock()
mock_ws.close.return_value = None
self.mocking_assistant.add_websocket_json_message(mock_ws.return_value, self._orderbook_update_event())
self.listening_task = self.ev_loop.create_task(
self.data_source.listen_for_order_book_diffs(self.ev_loop, msg_queue)
)
result: OrderBookMessage = self.async_run_with_timeout(msg_queue.get())
self.assertIsInstance(result, OrderBookMessage)
self.assertEqual(OrderBookMessageType.DIFF, result.type)
self.assertTrue(result.has_update_id)
self.assertEqual(result.update_id, 752409360466)
self.assertEqual(self.trading_pair, result.content["trading_pair"])
self.assertEqual(1, len(result.content["bids"]))
self.assertEqual(1, len(result.content["asks"]))
@patch("aiohttp.ClientSession.ws_connect", new_callable=AsyncMock)
def test_listen_for_trades_cancelled_error_raised(self, mock_ws):
msg_queue: asyncio.Queue = asyncio.Queue()
mock_ws.return_value = self.mocking_assistant.create_websocket_mock()
mock_ws.return_value.receive_json.side_effect = lambda: (
self._raise_exception(asyncio.CancelledError)
)
with self.assertRaises(asyncio.CancelledError):
self.listening_task = self.ev_loop.create_task(
self.data_source.listen_for_trades(self.ev_loop, msg_queue)
)
self.async_run_with_timeout(self.listening_task)
self.assertEqual(msg_queue.qsize(), 0)
@patch("aiohttp.ClientSession.ws_connect", new_callable=AsyncMock)
@patch("hummingbot.core.data_type.order_book_tracker_data_source.OrderBookTrackerDataSource._sleep")
@patch("hummingbot.connector.derivative.binance_perpetual.binance_perpetual_utils.convert_from_exchange_trading_pair")
def test_listen_for_trades_logs_exception(self, mock_utils, _, mock_ws):
mock_utils.return_value = self.trading_pair
msg_queue: asyncio.Queue = asyncio.Queue()
mock_ws.return_value = self.mocking_assistant.create_websocket_mock()
mock_ws.close.return_value = None
incomplete_resp = {
"m": 1,
"i": 2,
}
self.mocking_assistant.add_websocket_json_message(mock_ws.return_value, incomplete_resp)
self.mocking_assistant.add_websocket_json_message(mock_ws.return_value, self._orderbook_trade_event())
self.listening_task = self.ev_loop.create_task(
self.data_source.listen_for_trades(self.ev_loop, msg_queue)
)
self.async_run_with_timeout(msg_queue.get())
self.assertTrue(self._is_logged("ERROR", "Unexpected error with Websocket connection. Retrying after 30 seconds..."))
@patch("aiohttp.ClientSession.ws_connect", new_callable=AsyncMock)
@patch("hummingbot.connector.derivative.binance_perpetual.binance_perpetual_utils.convert_from_exchange_trading_pair")
def test_listen_for_trades_successful(self, mock_utils, mock_ws):
mock_utils.return_value = self.trading_pair
msg_queue: asyncio.Queue = asyncio.Queue()
mock_ws.return_value = self.mocking_assistant.create_websocket_mock()
mock_ws.close.return_value = None
self.mocking_assistant.add_websocket_json_message(mock_ws.return_value, self._orderbook_trade_event())
self.listening_task = self.ev_loop.create_task(
self.data_source.listen_for_trades(self.ev_loop, msg_queue)
)
result: OrderBookMessage = self.async_run_with_timeout(msg_queue.get())
self.assertIsInstance(result, OrderBookMessage)
self.assertEqual(OrderBookMessageType.TRADE, result.type)
self.assertTrue(result.has_trade_id)
self.assertEqual(result.trade_id, 817295132)
self.assertEqual(self.trading_pair, result.content["trading_pair"])
@aioresponses()
def test_listen_for_order_book_snapshots_cancelled_error_raised(self, mock_api):
url = utils.rest_url(CONSTANTS.SNAPSHOT_REST_URL, domain=self.domain)
regex_url = re.compile(f"^{url}".replace(".", r"\.").replace("?", r"\?"))
mock_api.get(regex_url, exception=asyncio.CancelledError)
msg_queue: asyncio.Queue = asyncio.Queue()
with self.assertRaises(asyncio.CancelledError):
self.listening_task = self.ev_loop.create_task(
self.data_source.listen_for_order_book_snapshots(self.ev_loop, msg_queue)
)
self.async_run_with_timeout(self.listening_task)
self.assertEqual(0, msg_queue.qsize())
@aioresponses()
def test_listen_for_order_book_snapshots_logs_exception_error_with_response(self, mock_api):
url = utils.rest_url(CONSTANTS.SNAPSHOT_REST_URL, domain=self.domain)
regex_url = re.compile(f"^{url}".replace(".", r"\.").replace("?", r"\?"))
mock_response = {
"m": 1,
"i": 2,
}
mock_api.get(regex_url, body=ujson.dumps(mock_response), callback=self.resume_test_callback)
msg_queue: asyncio.Queue = asyncio.Queue()
self.listening_task = self.ev_loop.create_task(
self.data_source.listen_for_order_book_snapshots(self.ev_loop, msg_queue)
)
self.async_run_with_timeout(self.resume_test_event.wait())
self.assertTrue(self._is_logged("ERROR", "Unexpected error occurred fetching orderbook snapshots. Retrying in 5 seconds..."))
@aioresponses()
def test_listen_for_order_book_snapshots_successful(self, mock_api):
url = utils.rest_url(CONSTANTS.SNAPSHOT_REST_URL, domain=self.domain)
regex_url = re.compile(f"^{url}".replace(".", r"\.").replace("?", r"\?"))
mock_response = {
"lastUpdateId": 1027024,
"E": 1589436922972,
"T": 1589436922959,
"bids": [
[
"10",
"1"
]
],
"asks": [
[
"11",
"1"
]
]
}
mock_api.get(regex_url, body=ujson.dumps(mock_response))
msg_queue: asyncio.Queue = asyncio.Queue()
self.listening_task = self.ev_loop.create_task(
self.data_source.listen_for_order_book_snapshots(self.ev_loop, msg_queue)
)
result = self.async_run_with_timeout(msg_queue.get())
self.assertIsInstance(result, OrderBookMessage)
self.assertEqual(OrderBookMessageType.SNAPSHOT, result.type)
self.assertTrue(result.has_update_id)
self.assertEqual(result.update_id, 1027024)
self.assertEqual(self.trading_pair, result.content["trading_pair"])
``` |
{
"source": "joedougherty/buildaspider",
"score": 3
} |
#### File: buildaspider/buildaspider/spiderconfig.py
```python
import configparser
import os
class SpiderConfig(object):
"""
This class provides config file values for use in an instance of `Spider`.
    :param abs_path_to_config_file: Absolute path to the configuration file
    :type abs_path_to_config_file: str
"""
def __init__(self, abs_path_to_config_file):
self.CONFIG_PATH = abs_path_to_config_file
config = configparser.RawConfigParser()
config.read(self.CONFIG_PATH)
self.cfg = config["buildaspider"]
self.login = self.cfg.getboolean("login", None)
self.username = self.cfg.get("username", None)
self.password = self.cfg.get("password", None)
self.login_url = self.cfg.get("login_url", None)
self.log_dir = self.set_log_dir()
self.include_patterns = self.extract_patterns("include_patterns")
self.exclude_patterns = self.extract_patterns("exclude_patterns")
self.seed_urls = self.extract_patterns("seed_urls")
self.max_num_retries = self.cfg.getint("max_num_retries", 5)
def extract_patterns(self, config_section):
"""
Returns a list of values from the section of config file
specified by `config_section`.
:param config_section: The name of the section of the config file
:type config_section: str
:return: A list of the values specified in `config_section`
:rtype: list
"""
raw_val = self.cfg.get(config_section, None)
if not raw_val:
raise ValueError(
f"""Please ensure that "{config_section}" contains at least one value!"""
)
return [p for p in raw_val.split("\n") if p not in ("", " ")]
def set_log_dir(self):
"""
Ensures that the desired log directory already exists on the file system.
:return: the log directory
:rtype: str
"""
log_dir = self.cfg.get("log_dir", None)
if not log_dir:
raise ValueError("Please ensure that log_dir exists and is set correctly in the config.")
if not os.path.exists(log_dir):
raise FileNotFoundError(
f"The log directory does not exist: {log_dir}"
)
return log_dir
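
# A minimal cfg.ini sketch for reference. This is a hedged illustration: the
# section name and option names come from the class above, but every value
# below is an assumption, not from the source.
#
# [buildaspider]
# login = no
# log_dir = /tmp/spider_logs
# max_num_retries = 5
# seed_urls =
#     https://example.com
# include_patterns =
#     example.com
# exclude_patterns =
#     /logout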
```
#### File: buildaspider/examples/myspider.py
```python
from buildaspider import Spider, append_line_to_log
broken_links_logpath = 'broken_links.log'
class MySpider(Spider):
# Override this method to login if required!
def login(self):
pass
# Override this method to extend logging!
def log_broken_link(self, link):
append_line_to_log(
broken_links_logpath,
f'{link} :: {link.http_code}'
)
myspider = MySpider('cfg.ini')
myspider.weave()
``` |
{
"source": "joedougherty/earthquake_etl",
"score": 4
} |
#### File: joedougherty/earthquake_etl/eq_database.py
```python
from collections import namedtuple
import pandas as pd
import sqlite3
"""
Data types and any add'l metadata from:
http://earthquake.usgs.gov/earthquakes/feed/v1.0/glossary.php
"""
def get_db(db_name):
"""
    Create a DB connection to the database passed in by db_name.
Returns a namedtuple containing the connection object and the DB cursor.
"""
db_conn_mgr = namedtuple('db_conn_mgr', ['conn', 'cursor'])
conn = sqlite3.connect(db_name)
cursor = conn.cursor()
return db_conn_mgr(conn, cursor)
def create_eq_table(csv_file, db_location):
dataframe = pd.read_csv(csv_file)
db = get_db(db_location)
    # Careful! This will replace an existing table.
dataframe.to_sql('all_earthquakes', db.conn, if_exists='replace', index=False)
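
# Hedged usage sketch (the file and database names below are assumptions):
#
# create_eq_table('all_month.csv', 'earthquakes.db')
# db = get_db('earthquakes.db')
# db.cursor.execute('SELECT COUNT(*) FROM all_earthquakes')
# print(db.cursor.fetchone())
# db.conn.close()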
``` |
{
"source": "joedougherty/mjForth",
"score": 3
} |
#### File: joedougherty/mjForth/test_properties.py
```python
import pytest
from core import *
from mjForth import run
from hypothesis import given
from hypothesis.strategies import integers, floats, one_of
nums = one_of(
integers(),
floats(allow_nan=False, allow_infinity=False)
)
@given(nums, nums)
def test_stack_addition(x, y):
tiny = Stack()
tiny.push(x)
tiny.push(y)
add(Data=tiny)
assert tiny.height() == 1
assert tiny.peek() == x + y
assert tiny.peek() == y + x
@given(nums, nums)
def test_stack_subtraction(x, y):
tiny = Stack()
tiny.push(x)
tiny.push(y)
subtract(Data=tiny)
assert tiny.height() == 1
assert tiny.peek() == x - y
@given(nums, nums)
def test_addition_interpreter(x, y):
code = f'''{x} {y} +'''
run(code)
top_of_stack = Data.peek()
assert top_of_stack == x + y
@given(nums, nums)
def test_subtraction_interpreter(x, y):
code = f'''{x} {y} - '''
run(code)
top_of_stack = Data.peek()
assert top_of_stack == x - y
@given(nums, nums)
def test_swap_interpreter(x, y):
code = f'''{x} {y} swap'''
run(code)
top_of_stack, just_below = Data.peek(), Data.peek(-2)
assert top_of_stack == x
assert just_below == y
``` |
{
"source": "joedougherty/tinyetl",
"score": 3
} |
#### File: tinyetl/tinyetl/__init__.py
```python
from functools import wraps
import sys
import os
from datetime import datetime
import logging
import requests
class TinyETL:
"""Manages facts about an ETL Process.
Provides a consistent interface for storing log location,
temporary data locations, and a way to facilitate dry-run
and logging on Fabric-based ETL scripts.
USAGE:
=====
etl = TinyETL(
'an_etl_job',
long_desc,
env=env, # This `env` will be provided by Fabric. [from fabric.api import env]
log_dir="/path/to/a/log/directory",
tmpdata_dir="/path/to/tmpdata/directory",
# Optionally, Create additional runtime attributes here
another_relevant_dir="path/to/relevant/dir"
)
Instantiating this object will alter the behavior of your fabfile.py.
Specifically, fab will require you to set the `dry_run` parameter explicitly
if you'll be invoking a task.
`fab --list` will work as expected.
    `fab main_task` will complain that `dry_run` has not been explicitly set.
INVOCATION:
==========
`fab main_task --set dry_run=True`
LOG DECORATOR:
=============
This also provides a decorator for any tasks you want to log.
Apply `@etl.log` as the innermost decorator to a task and it
will be logged.
"""
def __init__(self, name, long_desc, env, log_dir, tmpdata_dir, **kwargs):
"""
name [str] -> Short name to ETL task. Used in creating logfile names.
long_desc [str] -> Docstring description of this task.
env [env object] -> The env object provided by Fabric.
log_dir [str] -> Absolute path to the directory to store logs in.
tmpdata_dir [str] -> Absolute path to the directory to store temp data in.
"""
# If there are no tasks to be run at invocation,
# don't bother with the rest of the object __init__
if env.tasks == []:
return
self.name = name
self.long_desc = long_desc
self.dry_run = self._this_is_a_dry_run(env)
self.log_dir = log_dir
self.tmpdata_dir = tmpdata_dir
if not self.dry_run:
self.logname = "{}_{}".format(self.name, datetime.now().strftime('%Y-%m-%d_%H:%M:%S'))
self.logfile = os.path.join(self.log_dir, self.logname + '.log')
self.logger = self._create_logger()
# This allows the user to store relevant data on the
# object they've created, without needing to anticipate
# every possible type of value a user may want to store.
self.__dict__.update(kwargs)
def usage(self):
msg = "Please provide either 'True' or 'False' to dry_run.\n"
msg += "Usage: fab <tasks> --set dry_run=[True|False]"
raise SystemExit(msg)
def _this_is_a_dry_run(self, env):
""" Determines if this is a dry run. """
try:
dry_run = env.dry_run
except AttributeError:
self.usage()
if dry_run not in ('True', 'False'):
self.usage()
else:
# Convert the passed-in string val to a bool before returning
return {'True': True, 'False': False}.get(dry_run)
def _create_logger(self):
# See https://wingware.com/psupport/python-manual/2.3/lib/node304.html
logger = logging.getLogger(self.name)
hdlr = logging.FileHandler(self.logfile)
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.DEBUG)
return logger
def log(self, f):
@wraps(f)
def logwrapper(*args, **kwargs):
if self.dry_run:
print('[DRY RUN] :: {}()'.format(f.__name__))
else:
current_info = "Running {}".format(f.__name__)
print(current_info)
self.logger.info(current_info)
try:
return f(*args, **kwargs)
except Exception:
self.logger.exception("ETL Error")
raise
return logwrapper
def timestamp(self):
return datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
def download_file(self, endpoint, file_to_write_to):
r = requests.get(endpoint)
if r.status_code != 200:
self.logger.error("Attempt to download {} failed with code {}.".format(endpoint, r.status_code))
else:
with open(file_to_write_to, "wb") as f:
f.write(r.content)
def __str__(self):
info = """
Standard Attributes:
===================
ETL Name: {}
Long Description: {}
Log location: {}
Temp data location: {}
""".format(self.name, self.long_desc, self.log_dir, self.tmpdata_dir)
standard = ('name', 'long_desc', 'log_dir', 'tmpdata_dir', 'logger', 'dry_run')
user_defined_attrs = ""
        for k, v in self.__dict__.items():
if k not in standard:
user_defined_attrs += "{}: {}\n".format(k.title(), v)
if user_defined_attrs == "":
return info
else:
user_defined_attrs = "\nUser-defined Attributes:\n" + "=======================\n\n" + user_defined_attrs
return info + user_defined_attrs
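
# Hedged usage sketch: applying the @etl.log decorator to a Fabric task.
# The paths, task body, and endpoint below are assumptions for illustration:
#
# from fabric.api import env
#
# etl = TinyETL('demo_job', 'Fetch a demo CSV.', env=env,
#               log_dir='/tmp/logs', tmpdata_dir='/tmp/tmpdata')
#
# @etl.log
# def fetch_source_data():
#     etl.download_file('https://example.com/data.csv',
#                       os.path.join(etl.tmpdata_dir, 'data.csv'))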
``` |
{
"source": "Joe-Doyle/moos-ivp-agent",
"score": 4
} |
#### File: trained/the-attack-of-jaylan/environment.py
```python
import os
class State:
"""------------------------------------------------------------------------
Structure to hold information about a state parameter to be passed on to the rest
of the codebase
self.index -> index of parameter in state representation
self.type -> whether it is "binary", "distance", "angle", or "raw" (other inputs such as position)
self.range -> the range of values that it can go between (not applicable for binary)
    self.bucket -> the size of blocks that you are discretizing this parameter into. Set
    to 1 if not discretized
    self.var -> the object that this parameter is relative to. Used by distance and theta.
    For example, if the parameter type is distance and var is "flag", then the parameter
defines the distance to the flag. Possibilities include "flag", "team", "has_flag",
"tagged". Of these, only "flag" can be used with non-binary values (distance and angle).
In addition, you can get raw "x", "y", and "heading" values by setting type to "raw" (only for
the robot that is running the behavior.)
self.var_mod -> not fully implemented yet. Goal is to have additional specifiers to self.var
such as "self", "enemy", "felix", "evan" that would allow you to specify which vehicle/team's
information you want to access. The "self" and "enemy" specifiers do work for things like distance
and heading to flag
self.standardized -> the value is standardized when used in the neural net to be
a value between 0 and 1
------------------------------------------------------------------------"""
def __init__(self, index=0, typ="binary", rang=(0, 200), bucket=1, var="flag", var_mod="self", standardized=False, vehicle="self"):
self.index=index
self.type=typ
self.range=rang
self.bucket=bucket
self.var=var
self.var_mod=var_mod
self.standardized=standardized
self.vehicle=vehicle
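
# A hedged example of the structure above (it mirrors the "flag_dist" entry
# built in Constants below): a distance parameter at index 1, measuring 0-200
# units to the enemy flag, with no discretization:
#
#   State(index=1, typ="distance", rang=(0, 200), var="flag", var_mod="enemy")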
class Constants:
def __init__(self):
#Define number of terms in state and index of each term
"""---------------------------------------------------------------------
NOTE: If changing state definition, BHV_Input will automatically adjust
state definition accordingly
---------------------------------------------------------------------"""
self.state={}
self.state["out"]= State(index=0, typ="binary", var="tagged", var_mod="self")
self.state["flag_dist"]= State(index=1, typ="distance", rang=(0, 200), var= "flag", var_mod="enemy")
self.state["flag_theta"]= State(index=2, typ="angle", rang=(0, 360), var= "flag", var_mod="enemy")
self.state["heading"]=State(index=3, typ="raw", var="heading", rang=(0, 360))
self.state["color"]= State(index=4, typ="binary", var="team", var_mod="self")
self.state["leftBound"] = State(index=5, typ="distance", var="leftBound", var_mod="self")
self.state["rightBound"] = State(index=6, typ="distance", var="rightBound", var_mod="self")
self.state["upperBound"] = State(index=7, typ="distance", var="upperBound", var_mod="self")
self.state["lowerBound"] = State(index=8, typ="distance", var="lowerBound", var_mod="self")
self.state["enemy_dist"]= State(index=9, typ="distance", var="player", rang=(0, 200), vehicle="evan")
self.state["enemy_angle"]=State(index=10, typ="angle", var="player", rang=(0,360), vehicle="evan")
self.state["enemy_heading"]=State(index=11, typ="raw", var="heading", rang=(0, 360), vehicle="evan")
#maybe add speed info
self.num_states=len(self.state)
#define learning parameters to be passed to learning function
"""---------------------------------------------------------------------
self.num_layers -> number of layers in the neural network
self.num_units -> number of units per layer in network
self.num_traj -> number of times a simulation is run per iteration to
make dataset
self.iters -> number of iterations for training (can be arbitrarily high
since model is saved on each iteration and training can be stopped any time)
self.lr -> learning rate for the Adam optimizer used to train the neural net
self.training_type -> whether trained with "stochastic" gradient descent or "batch" gradient descent
self.eps_min -> minimum rate at which actions are picked at random when generating table
self.eps_init -> the starting rate at which actions are picked at random when generating table
self.eps_decay -> the rate at which the randomness of actions diminishes per iteration
self.epochs -> how many epochs of training the network goes through to train
each neural net
self.batch_size -> the number of examples picked from memory when training with DQN
self.batches_per_training -> how many times we pick out and train on batch of size self.batch_size per iteration
self.alg_type -> selects which algorithm to use, can be "DQL" or "fitted"
----------------------------------------------------------------------"""
self.num_layers= 2
self.num_units = 10
self.activation_function = "relu"
self.num_traj = 1
self.iters = 200
self.lr = .005
self.training_type = "batch"
self.eps_min = .01
self.eps_init = 1
self.eps_decay = .98
self.epochs = 2
self.batch_size = 600
self.batches_per_training = 3
self.epoch_batch_size = 1
self.alg_type = "fitted"
#define constants/defaults to be used
"""-----------------------------------------------------------------------
self.speeds -> possible speeds that actions can have
self.relative -> switch to actions that add or subtract from heading rather
than define an absolute heading to take (switch this boolean in BHV_Input.cpp
as well)... in practice I have found that absolute headings work better
        self.rel_headings -> possible headings for relative actions (only relevant if
        relative is defined)
self.theta_size_act -> the block size for how spaced out the possible thetas are
in the action space
self.discount_factor -> the rate at which future rewards are discounted
self.max_reward -> maximum positive reward for reaching the goal state
self.neg_reward -> negative reward for going out of bounds
        self.reward_dropoff -> the rate at which rewards diminish as you move away from
        the goal
self.max_reward_radius -> the radius around the goal that has max reward.
self.smooth_reward -> which type of reward function we use
self.save_iteration -> boolean will have the given model save itself to a new
folder for every self.save_iter_num iterations
self.save_iter_num -> number of iterations before saving to a new folder
self.players -> lists other players that are in the simulation (for use by BHV_Input)
self.mem_type -> whether the memory is a "set", a "deque", or "memory per action". A set makes an unbounded memory with no
repeating experiences, while a deque makes a memory of fixed length (self.mem_length) that may have multiple
        of the same experiences, putting weight on the experiences that are seen more often. Memory per Action keeps separate deque
memories per action and samples are taken evenly from each memory pool.
self.mem_length -> the length of deque memory
self.mem_thresh -> the threshold at which we go from sampling one batch per iteration to
sampling self.batches_per_training batches per iteration
self.end_at_tagged -> Flag that decides whether tagged states count as terminal states (i.e.
there are no transitions recorded that go from a tagged state to another state)
self.num_test_iters -> the number of times to run the simulation per model when testing
self.num_eval_iters -> the number of times to run the simulation per model when evaluating
-----------------------------------------------------------------------"""
self.speeds = [2]
self.relative = False
self.rel_headings = [-30,0,30]
self.theta_size_act = 60
self.discount_factor = .999
self.max_reward = 100
self.neg_reward = -50
self.reward_dropoff = .96
self.max_reward_radius = 10
self.smooth_reward = True
self.save_iteration=True
self.save_iter_num = 4
self.players = "evan"
self.mem_type = "memory per action"
self.mem_length = 40000
self.mem_thresh = 4000
self.end_at_tagged = True
self.num_test_iters = 1
self.num_eval_iters = 100
#define locations for storing/reading and scripts
"""-----------------------------------------------------------------------
self.sim_cmd -> path to script that will run the simulation
self.process_path -> path to folder to get data to be processed
self.process_cmd -> path to script to process the raw data at self.process_path
self.read_path -> path to processed data
self.out_address -> path to where the table.csv file should be output so that
it can be read by BHV_Input
self.load_model_dir -> path to the folder where we should load a model from,
only important if we want to start training or testing from a model we have
already partially trained
self.save_model_dir -> path to the foler where we should save the model to
(model gets saved on every iteration)
self.mem_address -> path to memory to load in
self.eval_address -> path to the folder holding the models that need to be evaluated
self.test_address -> path to the folder holding the models that need to be tested
-----------------------------------------------------------------------"""
user_path = os.getenv("HOME") + '/'
learning_path = user_path + 'moos-ivp-pLearn/pLearn/learning_code/'
simulation_path = user_path + 'moos-ivp-pLearn/pLearn/simulation_engine/'
self.sim_cmd = learning_path+'train.sh'
self.eval_sim_cmd = learning_path+'evaluator.sh'
self.test_sim_cmd = learning_path+'tester.sh'
self.process_path = learning_path+'results'
self.process_cmd = learning_path+'log_converter.py'
self.read_path = learning_path+'processed'
self.out_address = simulation_path+'m200/table.csv'
#self.load_model_dir = learning_path+'examples/Simple_Opponent_BHV/topModel/'
self.load_model_dir = learning_path+'models/new_model/'
self.save_model_dir = learning_path+'models/new_model/'
self.mem_address = learning_path+'models/new_model/'
self.eval_address = learning_path+'examples/Simple_Opponent_BHV/topModel/'
self.test_address = learning_path+'models/the-attack-of-jaylan/iteration_50/'
```
#### File: model/util/graphing.py
```python
import os
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.animation as animation
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
from tqdm import tqdm
from util.constants import PLEARN_ACTIONS, plearn_action_to_text
from mivp_agent.util.data_structures import LimitedHistory
def plot_rewards(reward_function, defender=False):
# Init lists to graph
x_pos = []
y_pos = []
v_pos = []
for x in tqdm(range(-85, 85, 4)):
for y in range(-115, 20, 4):
# Find the max value across all headings
max_value = None
for action in PLEARN_ACTIONS:
fake_state = {
'NAV_X': x,
'NAV_Y': y,
'NAV_HEADING': PLEARN_ACTIONS[action]['course'],
'TAGGED': False,
'NODE_REPORTS': {
'evan': {
'NAV_X': 200,
'NAV_Y': 200,
'NAV_HEADING': 200,
}
}
}
value = reward_function(fake_state)
if max_value is None or value > max_value:
max_value = value
v_pos.append(max_value.item(0))
x_pos.append(x)
y_pos.append(y)
fig = plt.figure()
ax = plt.axes(projection='3d')
# Do the plotting
ax.plot([56,-83,-53,82,56], [16,-49,-114,-56,16], 'red', linewidth=4)
ax.plot_trisurf(x_pos, y_pos, v_pos)
plt.show()
class TestGrapher:
def __init__(self, save_dir=None):
# Parse args
self.save_dir = save_dir
# Setup matplotlib
matplotlib.use('TkAgg')
plt.ion()
# Configure axes
self.fig, self.axs = plt.subplots(2,2)
self.axs[0, 0].set_title("Success Precent")
self.success, = self.axs[0,0].plot([], [], '-go')
self.axs[0,0].set_ylim(-5,100)
self.axs[0, 1].set_title("Min Dist to Flag")
self.min_dist, = self.axs[0,1].plot([], [], '-bo')
self.axs[1, 0].set_title("Avg Durration")
self.avg_duration, = self.axs[1,0].plot([], [], '-mo')
self.axs[1,0].set_ylim(0,100)
self.other, = self.axs[1,1].plot([], [], '-ro')
        # Stylistic details
self.fig.tight_layout(pad=2.0)
self.fig.set_size_inches(8, 7)
self.fig.canvas.manager.set_window_title('pLearn Tester')
# Create data structures
self.iters = []
self.success_data = []
self.min_dist_data = []
self.avg_duration_data = []
self.other_data = []
self.max_iter = -1
# Show graph just for the nice-ness factor :)
self._plot()
def add_iteration(self, iter, success_pcnt, min_dist, avg_duration, other, plot=True):
self.iters.append(iter)
self.success_data.append(success_pcnt)
self.min_dist_data.append(min_dist)
self.avg_duration_data.append(avg_duration)
self.other_data.append(other)
if iter > self.max_iter:
self.max_iter = iter
if plot:
self._plot()
def _plot(self):
        right_bound = max(self.max_iter, 1) # So matplotlib doesn't yell about set_xlim(0,0)
self.success.set_data(self.iters, self.success_data)
self.axs[0,0].set_xlim(0, right_bound)
self.min_dist.set_data(self.iters, self.min_dist_data)
self.axs[0,1].relim()
self.axs[0,1].set_xlim(0, right_bound)
self.axs[0,1].autoscale()
self.avg_duration.set_data(self.iters, self.avg_duration_data)
self.axs[1,0].set_xlim(0, right_bound)
self.other.set_data(self.iters, self.other_data)
self.axs[1,1].relim()
self.axs[1,1].set_xlim(0, right_bound)
self.axs[1,1].autoscale()
self.fig.canvas.draw()
self.fig.canvas.flush_events()
        if self.save_dir is not None:
plt.savefig(os.path.join(self.save_dir, 'test_graph.png'))
class DebugGrapher:
FRAME_SIZE = 25
def __init__(self, save_dir=None):
# Parse args
self.save_dir = save_dir
# Setup matplotlib
matplotlib.use('TkAgg')
plt.ion()
# Create data structures
self.data_entries = len(PLEARN_ACTIONS) + 2 # 2 for iters and td_data
self.history = LimitedHistory(self.FRAME_SIZE, self.data_entries)
self.episode_iters = []
self.expected_reward = []
# Configure figure
# Gridspec reference: https://matplotlib.org/stable/tutorials/intermediate/gridspec.html
self.fig = plt.figure(constrained_layout=True)
gs = self.fig.add_gridspec(2, 2)
self.ax1 = self.fig.add_subplot(gs[0,:]) # First row all columns
self.ax2 = self.fig.add_subplot(gs[1,0])
self.ax3 = self.fig.add_subplot(gs[1,1])
        # Stylistic details
self.fig.tight_layout(pad=2.0)
self.fig.set_size_inches(8, 7)
self.fig.canvas.manager.set_window_title('pLearn Debugging Charts')
# Setup lines
self.ax1.set_title("~Relative~ Action Value")
self.action_lines = {}
self.action_labels = {}
for a in PLEARN_ACTIONS:
self.action_lines[a], = self.ax1.plot([], [])
self.action_labels[a] = self.ax1.text(0, 0, "")
self.ax2.set_title("Expected Reward")
self.reward, = self.ax2.plot(self.episode_iters, self.expected_reward)
self.ax3.set_title("Loop Time (in seconds)")
self.td, = self.ax3.plot([], [],)
# Show graph just for the nice-ness factor :)
self._plot()
def add_iteration(self, iter, action_values, episode_iters, expected_reward, td, plot=True):
# Store reward info
self.episode_iters = episode_iters
self.expected_reward = expected_reward
# Construct data frame
frame_data = [iter, td]
for a in PLEARN_ACTIONS:
frame_data.append(action_values[a])
# Push to history
self.history.push_frame(np.array(frame_data))
# Plot
if plot:
self._plot()
def _plot(self):
# Get data from history
iters = self.history.entry_history(0)
td = self.history.entry_history(1)
a_values = self.history.select_history([2,3,4,5,6,7], scale=1.0)
for i, a in enumerate(PLEARN_ACTIONS):
# Set line data
if a_values is not None:
self.action_lines[a].set_data(iters, a_values[:,i])
# Reset labels
self.action_labels[a].set_visible(False)
self.action_labels[a] = self.ax1.text(
iters[0]+3, # X position
a_values[:,i][0], # Y position
f'{plearn_action_to_text(a)} {a}')
self.reward.set_data(self.episode_iters, self.expected_reward)
self.td.set_data(iters, td)
# Rescale
x_min = 0
x_max = 1
try:
x_min = iters.min()
x_max = iters.max()+35
except ValueError:
pass
self.ax1.relim()
self.ax1.autoscale()
self.ax1.set_xlim(x_min, x_max)
self.ax2.relim()
self.ax2.autoscale()
self.ax3.relim()
self.ax3.autoscale()
self.fig.canvas.draw()
self.fig.canvas.flush_events()
        if self.save_dir is not None:
plt.savefig(os.path.join(self.save_dir, 'debug_graph.png'))
'''
# Update the value for each action
for a in PLEARN_ACTIONS:
# Normalize the data between 0 and 1
self.action_lines[a].set_data(self.iters, self.action_data[a])
# Update the action plot window
self.axs[0].relim()
self.axs[0].set_yscale('log')
self.axs[0].autoscale()
self.td.set_data(self.iters, self.td_data)
self.axs[1].relim()
self.axs[1].autoscale()'''
```
#### File: QTable/model/runner.py
```python
import argparse
import time
from mivp_agent.manager import MissionManager
from mivp_agent.util.math import dist
from mivp_agent.util.display import ModelConsole
from mivp_agent.aquaticus.const import FIELD_BLUE_FLAG
from constants import DEFAULT_RUN_MODEL
from model import load_model
def run(args):
q, attack_actions, retreat_actions = load_model(args.model)
with MissionManager('runner', log=False) as mgr:
print('Waiting for sim vehicle connections...')
while mgr.get_vehicle_count() < 1:
time.sleep(0.1)
# ---------------------------------------
# Part 1: Asserting simulation state
last_state = None
current_action = None
current_action_set = None
console = ModelConsole()
while True:
# Listen for state
msg = mgr.get_message()
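            # Disabled debug block: change False to True to print raw vehicle state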
while False:
print('-------------------------------------------')
print(f"({msg.vname}) {msg.state['HAS_FLAG']}")
print('-------------------------------------------')
msg.request_new()
msg = mgr.get_message()
console.tick(msg)
# Detect state transitions
model_state = q.get_state(
msg.state['NAV_X'],
msg.state['NAV_Y'],
msg.state['NODE_REPORTS'][args.enemy]['NAV_X'],
msg.state['NODE_REPORTS'][args.enemy]['NAV_Y'],
msg.state['HAS_FLAG']
)
# Detect state transition
if model_state != last_state:
current_action = q.get_action(model_state)
last_state = model_state
# Determine action set
if msg.state['HAS_FLAG']:
current_action_set = retreat_actions
else:
current_action_set = attack_actions
# Construct instruction for BHV_Agent
action = {
'speed': current_action_set[current_action]['speed'],
'course': current_action_set[current_action]['course']
}
flag_dist = abs(dist((msg.state['NAV_X'], msg.state['NAV_Y']), FIELD_BLUE_FLAG))
if flag_dist < 10:
action['posts']= {
'FLAG_GRAB_REQUEST': f'vname={msg.vname}'
}
msg.act(action)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--model', default=DEFAULT_RUN_MODEL)
parser.add_argument('--enemy', default='drone_21')
args = parser.parse_args()
run(args)
```
#### File: src/mivp_agent/bridge_old.py
```python
import socket
import pickle
import struct
import sys
from typing import Iterable
import traceback
'''
ModelBridgeServer (MODEL SIDE):
- Receives:
- MOOSDB Subscribed to (mainly for state construction)
- Sends:
- Heartbeat?
- Actions:
- IvP function actions (speed, course)
- MOOSDB actions (var, value) pairs to post
ModelBridgeClient (MOOSDB SIDE):
- Receives:
- Actions:
- See above
- Sends:
- Heartbeat?
- MOOSDB variables
'''
# Socket Helpers ===========================
TYPE_CTRL=0
TYPE_ACTION=1
TYPE_MUST_POST=2
TYPE_STATE=3
TYPES = (TYPE_CTRL, TYPE_ACTION, TYPE_MUST_POST, TYPE_STATE)
HEADER_SIZE=4
MAX_BUFFER_SIZE=8192
def recv_full(connection, timeout=None, return_read=False):
messages = []
current_len = None
last_read = None
total_read = None
# Create byte string for storing the header in
tmp_data = b''
# Attempt first receive with timeout
try:
connection.settimeout(timeout)
tmp_data = connection.recv(MAX_BUFFER_SIZE)
finally:
# Cleanup regardless
connection.settimeout(None)
last_read = len(tmp_data)
total_read = last_read
# If buffer read was full, call until not full
# Attempt to empty the recv queue
while last_read == MAX_BUFFER_SIZE:
print('WARNING: Got max buffer attempting to clear queue...')
try:
# Non blocking, just checking if there is more in queue
connection.settimeout(0.001)
tmp_data += connection.recv(MAX_BUFFER_SIZE)
last_read = len(tmp_data)
total_read += last_read
except socket.timeout:
last_read = 0
finally:
connection.settimeout(None)
# While we have data to process into messages
while len(tmp_data) != 0:
# Get more data if message is incomplete
if (current_len is None and len(tmp_data) < HEADER_SIZE) or (current_len is not None and len(tmp_data) < current_len):
tmp_data += connection.recv(MAX_BUFFER_SIZE)
last_read = len(tmp_data)
total_read += last_read
if current_len is None:
# We should be looking for a header
if len(tmp_data) >= HEADER_SIZE:
# We can construct a header (current_len)
current_len = struct.unpack('>i', tmp_data[:HEADER_SIZE])[0]
# Remove header data from our data store
tmp_data = tmp_data[HEADER_SIZE:]
# Not else b/c previous clause might have constructed it
if current_len is not None:
# We should be looking for a message
if len(tmp_data) >= current_len:
                # We can construct a packet
                messages.append(tmp_data[:current_len])
                # Remove the packet just constructed from our data store
tmp_data = tmp_data[current_len:]
current_len = None # Signal we are looking for another header
if return_read:
return messages, total_read
return messages
def send_full(connection, data, type):
    # NOTE: the message type is validated here but not encoded into the header
    assert type in TYPES
    # Create C struct (in python bytes)
    # '>i' specifies a big-endian encoded integer (a standard size of 4 bytes)
    packed_size = struct.pack('>i', len(data))
    # Concat the size (our 4-byte header) and data then send
    result = connection.sendall(packed_size+data)
    assert result is None
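
# Hedged sketch (not part of the original API): a round trip through the
# length-prefixed framing above, using an in-memory socket pair. The function
# name and payload are illustrative assumptions.
def _framing_demo():
    left, right = socket.socketpair()
    try:
        payload = pickle.dumps({'NAV_X': 0.0, 'NAV_Y': 0.0})
        send_full(left, payload, TYPE_STATE)
        messages = recv_full(right, timeout=1)
        assert len(messages) == 1
        assert pickle.loads(messages[0]) == {'NAV_X': 0.0, 'NAV_Y': 0.0}
    finally:
        left.close()
        right.close()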
# Assertion Helpers ===========================
def checkFloat(var, error_string):
try:
return float(var)
except ValueError:
raise ValueError(error_string)
def checkInstruction(instr):
assert isinstance(instr, dict), "Instruction must be a dict"
assert "speed" in instr, "Instruction must have key 'speed'"
instr['speed'] = checkFloat(instr['speed'], "Instruction['speed'] must be a float")
assert "course" in instr, "Action must have key 'course'"
instr['course'] = checkFloat(instr['course'], "Instruction['course'] must be a float")
assert "posts" in instr, "Instruction must have key 'posts'"
assert isinstance(instr["posts"], dict), "posts must be a dict"
assert "ctrl_msg" in instr, "Instruction must have key 'ctrl_str'"
assert isinstance(instr['ctrl_msg'], str), 'ctrl_msg must be string'
def checkState(state):
assert isinstance(state, dict), "State must be dict"
assert "NAV_X" in state, "State must have 'NAV_X' key"
assert "NAV_Y" in state, "State must have 'NAV_Y' key"
def checkMustPost(must_post):
    assert isinstance(must_post, dict), "must_post must be a dict"
class ModelBridgeServer:
def __init__(self, hostname="localhost", port=57722):
self.host = hostname
self.port = port
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Line below reuses the socket address if previous socket closed but improperly
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._socket.bind((self.host, self.port))
self._client = None
self._address = None
self.last_read = 0
def __enter__(self):
return self
def accept(self):
# Close current connection if we have one
if self._client is not None:
self._client.close()
self._client = None
self._socket.listen(0) # Only accept one connection
# Wait for client
self._client, self._address = self._socket.accept()
print(f"Client connected from {self._address}")
def send_instr(self, instr):
# Test submitted instruction
checkInstruction(instr)
# Fail if no client connected
if self._client is None:
return False
try:
send_full(self._client, pickle.dumps(instr), TYPE_ACTION)
except ConnectionResetError:
# Client has left
self.close_client()
return False
return True
def listen_state(self, timeout=None):
if self._client is None:
return False
try:
msgs, self.last_read = recv_full(self._client, timeout=timeout, return_read=True)
except socket.timeout:
return False
assert len(msgs) == 1, 'State should only come one at a time'
state = pickle.loads(msgs[0])
checkState(state)
return state
def close_client(self):
if self._client is not None:
self._client.close()
def close(self):
self.close_client()
if self._socket is not None:
self._socket.close()
def __exit__(self, exc_type, exc_value, traceback):
self.close()
class ModelBridgeClient:
def __init__(self, hostname="localhost", port=57722):
self.host = hostname
self.port = port
self._socket = None
def __enter__(self):
return self
def connect(self, timeout=1):
if self._socket is not None:
raise RuntimeError("Clients should not be connect more than once")
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Attempt connection with timeout
try:
self._socket.settimeout(timeout)
self._socket.connect((self.host, self.port))
            # Don't hold onto the timeout if we succeed
self._socket.settimeout(None)
except (socket.timeout, ConnectionRefusedError) as e:
# Clean up socket
self._socket.close()
self._socket = None
            # Signal failure in the event of a timeout or refused connection
return False
# Return status
return True
def send_state(self, state):
if self._socket is None:
return False
        # Test submitted state
checkState(state)
try:
send_full(self._socket, pickle.dumps(state), TYPE_STATE)
except BrokenPipeError:
# Server has disconnected, reset
self.close()
return False
return True
def listen(self, timeout=0.0005):
if self._socket is None:
return False
try:
msgs = recv_full(self._socket, timeout=timeout)
except socket.timeout:
return False
assert len(msgs) == 1, 'Instructions should only come one at a time'
instr = pickle.loads(msgs[0])
checkInstruction(instr)
return instr
def close(self):
if self._socket is not None:
self._socket.close()
def __exit__(self, exc_type, exc_value, traceback):
self.close()
```
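For reference, the wire format above is a plain length-prefix framing: `send_full` writes a 4-byte big-endian length header followed by the pickled payload, and `recv_full` peels messages back off. A minimal standalone sketch of that round trip (no sockets involved):

```python
import struct

# Hypothetical round trip of the framing used by send_full/recv_full:
# a 4-byte big-endian length header followed by the raw payload bytes.
payload = b"hello"
frame = struct.pack('>i', len(payload)) + payload

# Receiver side: peel off the header, then read exactly that many bytes.
(length,) = struct.unpack('>i', frame[:4])
assert frame[4:4 + length] == payload
```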
#### File: mivp_agent/cli/info.py
```python
import os, sys
import argparse
from mivp_agent.cli.util import load_data_dir, size_of, human_bytes
from mivp_agent.const import DATA_DIRECTORY
from mivp_agent.log.directory import LogDirectory
class Info:
def __init__(self, parser):
self.parser = parser
# Setup parser
self.parser.add_argument('-a', '--all', action='store_true', help="Used to print extensive information.")
self.parser.set_defaults(func=self.do_it)
def do_it(self, args):
data = load_data_dir()
print(f'Path: {data.path()}\n')
print(f'Sessions: {data.meta.registry.session_count()}')
if args.all:
for session in data.meta.registry.list_sessions():
print(f' - {session}')
size, label = human_bytes(size_of(data.path()))
print(f'Folder Size: {size} {label}')
```
#### File: inspect/graphers/tdist.py
```python
import plotly.graph_objects as go
from mivp_agent.cli.inspect.consumers import PlotlyScalars
from mivp_agent.util.math import dist
class TransitionDist(PlotlyScalars):
def layout(self, name):
title = go.layout.Title(
text=f"<b>Transition Distance</b><br><sup>{name}</sup>",
xref="paper",
x=0
)
return go.Layout(
title=title,
xaxis_title='Episode',
yaxis_title='Transition Distance'
)
def pb2_data(self, data):
x1 = data.s1.vinfo.NAV_X
y1 = data.s1.vinfo.NAV_Y
x2 = data.s2.vinfo.NAV_X
y2 = data.s2.vinfo.NAV_Y
num = 0
if data.s2.HasField('episode_report'):
num = data.s2.episode_report.NUM
d = dist((x1, y1), (x2, y2))
#self.plot('Transition Distance', data.s1.vinfo.MOOS_TIME, d)
self.plot('Transition Distance', num, d)
```
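`TransitionDist` relies on `mivp_agent.util.math.dist` for the distance between consecutive states. That helper is not shown in this dump; a minimal sketch of what it presumably computes (the implementation below is an assumption):

```python
import math

def dist(p1, p2):
    # Assumed behavior of mivp_agent.util.math.dist: Euclidean distance
    # between two (x, y) tuples.
    return math.hypot(p2[0] - p1[0], p2[1] - p1[1])

assert dist((0, 0), (3, 4)) == 5.0
```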
#### File: mivp_agent/log/consumer.py
```python
from mivp_agent.proto import translate
from google.protobuf.message import Message
class Consumer:
'''
This is an abstract class meant to help with providing a general interface to consume transition data.
'''
def __init__(self, *args, **kwargs):
if self._has_setup():
self.setup(*args, **kwargs)
    def _has_setup(self):
        return hasattr(self, 'setup')
    def _inject(self, data):
        assert isinstance(data, Message)
        # Prefer the protobuf interface if the subclass provides one. Checking
        # hasattr (rather than try/except AttributeError) avoids silently
        # swallowing AttributeErrors raised *inside* pb2_data.
        if hasattr(self, 'pb2_data'):
            self.pb2_data(data)
            return True
        # Otherwise fall back to the dictionary interface
        if hasattr(self, 'dict_data'):
            s1 = translate.state_to_dict(data.s1)
            a = translate.action_to_dict(data.a)
            s2 = translate.state_to_dict(data.s2)
            self.dict_data(s1, a, s2)
            return False
        raise AttributeError('No consumer method found to parse data')
```
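Because `_inject` dispatches on whichever of `pb2_data` / `dict_data` a subclass defines, a consumer only needs to implement one of them. A hypothetical dictionary-based consumer might look like this:

```python
from mivp_agent.log.consumer import Consumer

class DistancePrinter(Consumer):
    # Defines dict_data() only, so _inject() translates each protobuf
    # Transition into (s1, a, s2) dictionaries before calling it.
    def dict_data(self, s1, a, s2):
        dx = s2['NAV_X'] - s1['NAV_X']
        dy = s2['NAV_Y'] - s1['NAV_Y']
        print(f"moved {(dx * dx + dy * dy) ** 0.5:.2f} units")
```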
#### File: mivp_agent/log/metadata.py
```python
import os
import sys
from pathlib import Path
from mivp_agent.util.file_system import find_unique
from mivp_agent.log.const import CORE_DIRS
from mivp_agent.proto.mivp_agent_pb2 import Transition
from mivp_agent.proto.proto_logger import ProtoLogger
class RegistryDatum:
'''
This class is for managing a "registry" of MissionManager session ids to assure that they are unique with respect to a certain logging directory.
    **NOTE:** The following is **NOT** thread safe. It is unlikely to fail silently, but you still need to be cautious.
'''
def __init__(self, path):
'''
Args:
path (str): The registry directory.
'''
self.path = path
if not os.path.isdir(self.path):
try:
os.makedirs(self.path)
            except FileExistsError:
                raise FileExistsError('There is a file in the place of the specified registry directory')
def validate(self):
for p in os.listdir(self.path):
            if not os.path.isfile(os.path.join(self.path, p)):
print('WARNING: There is a directory in the metadata registry. This indicates a corrupted registry.', file=sys.stderr)
def has_session(self, id):
return os.path.isfile(os.path.join(self.path, f'{id}.session'))
def list_sessions(self):
for p in os.listdir(self.path):
if os.path.isfile(os.path.join(self.path, p)):
yield p
def session_count(self):
return len(list(self.list_sessions()))
def register(self, name):
# Find a unique name
id = find_unique(self.path, name, ext='.session')
# Register it
entry = os.path.join(self.path, f'{id}.session')
Path(entry).touch(exist_ok=False)
return id
class LogMetadata:
'''
    The following is for managing metadata associated with a particular logging directory.
'''
def __init__(self, path):
'''
Args:
path (str): The logging directory.
'''
self._data_dir = path
self._path = os.path.join(path, '.meta')
# We don't init here because .meta is a signal that the directory is a valid logging directory
assert os.path.isdir(self._path), "Metadata directory not found, is this a valid log directory?"
self.registry = RegistryDatum(os.path.join(self._path, 'registry'))
def get_logs(self, id):
'''
This function is used to get the logs associated with a specific session id
'''
# Check if the session id is valid in this context
if not self.registry.has_session(id):
return None
logs = []
for subdir in os.listdir(self._data_dir):
if subdir not in CORE_DIRS:
# We have a task folder
session_path = os.path.join(
self._data_dir,
subdir,
id
)
if os.path.isdir(session_path):
for log_dir in os.listdir(session_path):
path = os.path.join(session_path, log_dir)
logs.append(ProtoLogger(path, Transition, mode='r'))
return logs
```
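Typical use of the registry, assuming a logging directory that already contains the `.meta` marker directory (the exact de-duplicated id returned for a name collision depends on `find_unique`, which is not shown here):

```python
from mivp_agent.log.metadata import LogMetadata

meta = LogMetadata('logs')              # 'logs' must contain a .meta directory
sid = meta.registry.register('train')   # -> 'train', assuming no prior sessions
sid2 = meta.registry.register('train')  # -> a unique variant of 'train'
assert meta.registry.has_session(sid)
print(meta.registry.session_count())    # 2
```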
#### File: mivp_agent/proto/proto_logger.py
```python
import os
import sys
import time
import gzip
from google.protobuf import message
from mivp_agent.util import packit
from google.protobuf.message import Message
from google.protobuf.reflection import GeneratedProtocolMessageType
MODE_READ = 'r'
MODE_WRITE = 'w'
MODES_SUPPORTED = (
MODE_WRITE,
MODE_READ
)
class ProtoLogger:
'''
max_msgs will not be used in MODE_READ
'''
def __init__(self, path, type, mode='r', max_msgs=1000):
assert mode in MODES_SUPPORTED, f"Unsupported mode '{mode}'"
if mode == MODE_WRITE:
assert not os.path.isdir(path), "Provided path is existing directory"
assert not os.path.isfile(path), "Provided path is existing file"
if mode == MODE_READ:
assert os.path.isdir(path), "Provided path is not existing directory"
assert len(os.listdir(path)) != 0, "Provided directory is empty"
for f in os.listdir(path):
                if not f.endswith('.gz'):
raise RuntimeError(f"ProtoLogger dir contains non gzip file '{f}'")
assert isinstance(type, GeneratedProtocolMessageType), "Type must be a generated MessageType class"
assert isinstance(max_msgs, int), "Buffer size must be integer"
assert max_msgs > 0, "Buffer size must be positive integer"
self._path = path
self._type = type
self._mode = mode
self._max_msgs = max_msgs
self._msg_count = 0
self._buffer = bytearray()
# Open the directory
if self._mode == MODE_WRITE:
os.makedirs(self._path, exist_ok=False)
self._time_stamp = str(round(time.time()))
self._current_idx = 0
if self._mode == MODE_READ:
# Read save directory
self._gzip_files = os.listdir(self._path)
# Sort by index
index = lambda x: int(x.split('.')[0].split('-')[1])
self._gzip_files = sorted(self._gzip_files, key=index)
# Current file index in self._gzip_files
self._gzip_idx = 0
# Messages from the most recently read file
self._current_messages = []
def path(self):
return self._path
def write(self, message):
'''
        Method used to write protobuf messages of the type specified in __init__ to a buffer. The buffer will be written to a .gz file when its length is greater than or equal to `max_msgs`, or on close() / context manager exit.
Args:
message (Message): A protobuf message of type specified in __init__
'''
        assert self._mode == MODE_WRITE, "Method write() only supported in write mode"
assert isinstance(message, Message), "Message must be protobuf message"
assert isinstance(message, self._type), "Message not of type specified by constructor"
self._buffer.extend(packit.pack(message.SerializeToString()))
self._msg_count += 1
if self._msg_count >= self._max_msgs:
self._write_buffer()
def _write_buffer(self):
# The below might happen due to close()
if self._msg_count == 0:
return
        # In case something goes wrong, don't crash
try:
save_path = os.path.join(self._path, f'{self._time_stamp}-{self._current_idx}.gz')
# Use gzip in write bytes mode
with gzip.open(save_path, 'wb') as gz:
gz.write(self._buffer)
# Clean up
self._current_idx += 1
self._msg_count = 0
self._buffer.clear()
except Exception as e:
print(e, file=sys.stderr)
print("Warning: unable to write to gzip file, deffering write", file=sys.stderr)
def has_more(self):
assert self._mode == MODE_READ, "Method has_more() only supported in write mode"
return len(self._current_messages) != 0 or self._gzip_idx != len(self._gzip_files)
def total_files(self):
assert self._mode == MODE_READ, "Method total_files() only supported in write mode"
return len(self._gzip_files)
def current_file(self):
assert self._mode == MODE_READ, "Method current_file() only supported in write mode"
return self._gzip_idx
def read(self, n: int):
'''
Method is used to read a specified number of messages from disk.
Args:
n (int): Number of messages to read
Returns:
A python list of protobuf messages. The length of this list will be less than or equal to `n`
'''
assert self._mode == MODE_READ, "Method read() only supported in write mode"
messages_out = []
while len(messages_out) < n:
# See if we have messages to parse from previously read gz
if len(self._current_messages) == 0:
# Check if we have exhausted all files
if self._gzip_idx == len(self._gzip_files):
break # Out of messages
# If there are more gzip files, read next
filepath = os.path.join(self._path, self._gzip_files[self._gzip_idx])
with gzip.open(filepath, mode='rb') as gz:
buffer = gz.read()
# Get binary messages and parse
bmsgs = packit.unpack_buffer(buffer)
for bmsg in bmsgs:
msg = self._type()
msg.ParseFromString(bmsg)
self._current_messages.append(msg)
# Indicate this file has been read
self._gzip_idx += 1
            # Here we should have more messages; find out how many to add to messages_out
amt = min(n - len(messages_out), len(self._current_messages))
# Add that amount to the returned list
messages_out.extend(self._current_messages[:amt])
            # Remove that amount from the queue
self._current_messages = self._current_messages[amt:]
return messages_out
    # Context manager support; guarantees the buffer is flushed on exit
def __enter__(self):
return self
    def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
if self._mode == MODE_WRITE:
self._write_buffer()
elif self._mode == MODE_READ:
pass # No file pointers to close
else:
raise RuntimeError(f'Unexpected mode "{self._mode}" on close')
```
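A minimal write/read round trip (mirroring the unit tests later in this dump; `demo_log` is a hypothetical directory that must not exist yet when opening in write mode):

```python
from mivp_agent.proto import moos_pb2
from mivp_agent.proto.proto_logger import ProtoLogger

report = moos_pb2.NodeReport()
report.vname = 'agent'
report.NAV_X, report.NAV_Y = 40.1, -50.4
report.NAV_HEADING, report.MOOS_TIME = 143.2, 4.0

# Write mode buffers messages and gzips them in chunks of max_msgs
with ProtoLogger('demo_log', moos_pb2.NodeReport, mode='w', max_msgs=10) as log:
    log.write(report)   # flushed to a .gz file on exit

# Read mode walks the .gz files in index order
with ProtoLogger('demo_log', moos_pb2.NodeReport, mode='r') as log:
    msgs = log.read(5)  # returns at most 5 messages
assert msgs[0] == report
```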
#### File: mivp_agent/proto/translate.py
```python
from mivp_agent.util import validate
from mivp_agent.const import KEY_ID, KEY_EPISODE_MGR_STATE, KEY_EPISODE_MGR_REPORT
from mivp_agent.proto.moos_pb2 import MOOSVar, NodeReport
from mivp_agent.proto.mivp_agent_pb2 import State, Action, EpisodeReport
core_keys = (
KEY_ID,
KEY_EPISODE_MGR_REPORT,
"NAV_X",
"NAV_Y",
"NAV_HEADING",
"MOOS_TIME",
"NODE_REPORTS"
)
'''
=================================
Begin "From Dictionary" functions
=================================
'''
def moos_var_from_kp(key, val):
var = MOOSVar()
var.key = key
if isinstance(val, float):
var.dval = val
elif isinstance(val, str):
var.sval = val
elif isinstance(val, bool):
var.bval = val
else:
raise TypeError(f"Unexpected type when parsing moos var {key}:{val}")
return var
def node_report_from_dict(report, vname):
assert isinstance(report, dict), "Report must be dict"
proto_report = NodeReport()
proto_report.vname = vname
proto_report.NAV_X = report['NAV_X']
proto_report.NAV_Y = report['NAV_Y']
proto_report.NAV_HEADING = report['NAV_HEADING']
proto_report.MOOS_TIME = report['MOOS_TIME']
return proto_report
def episode_report_from_dict(report):
assert isinstance(report, dict), "Report must be dict"
proto_report = EpisodeReport()
proto_report.NUM = report['NUM']
proto_report.SUCCESS = report['SUCCESS']
proto_report.DURATION = report['DURATION']
proto_report.WILL_PAUSE = report['WILL_PAUSE']
return proto_report
def state_from_dict(state):
validate.validateState(state)
# Create new protobuf to store information
proto_state = State()
# Parse own report / info
vinfo = node_report_from_dict(state, state[KEY_ID])
proto_state.vinfo.CopyFrom(vinfo)
# Parse other reports
if 'NODE_REPORTS' in state:
for vname in state['NODE_REPORTS']:
report = node_report_from_dict(state['NODE_REPORTS'][vname], vname)
proto_state.node_reports.add().CopyFrom(report)
# Find other vars
for key in state:
if key not in core_keys:
proto_state.vars.add().CopyFrom(moos_var_from_kp(key, state[key]))
if state[KEY_EPISODE_MGR_REPORT] is not None:
proto_state.episode_report.CopyFrom(episode_report_from_dict(state[KEY_EPISODE_MGR_REPORT]))
return proto_state
def action_from_dict(action):
validate.validateInstruction(action)
proto_action = Action()
proto_action.course = action['course']
proto_action.speed = action['speed']
for post in action['posts']:
proto_action.posts.add().CopyFrom(moos_var_from_kp(post, action['posts'][post]))
proto_action.ctrl_msg = action['ctrl_msg']
return proto_action
'''
===============================
Begin "To Dictionary" functions
===============================
'''
def moos_var_to_kp(var):
assert isinstance(var, MOOSVar), "Input var is not of known MOOSVar prototype"
if var.HasField('sval'):
return var.key, var.sval
elif var.HasField('dval'):
return var.key, var.dval
elif var.HasField('bval'):
return var.key, var.bval
else:
raise TypeError("Could not find valid type in MOOSVar message")
def node_report_to_dict(report):
assert isinstance(report, NodeReport), "Input report is not of known NodeReport prototype"
dict_report = {}
dict_report['vname'] = report.vname
dict_report['NAV_X'] = report.NAV_X
dict_report['NAV_Y'] = report.NAV_Y
dict_report['NAV_HEADING'] = report.NAV_HEADING
dict_report['MOOS_TIME'] = report.MOOS_TIME
return dict_report
def episode_report_to_dict(report):
assert isinstance(report, EpisodeReport), "Input report is not of known EpisodeReport prototype"
dict_report = {}
dict_report['NUM'] = report.NUM
dict_report['SUCCESS'] = report.SUCCESS
dict_report['DURATION'] = report.DURATION
dict_report['WILL_PAUSE'] = report.WILL_PAUSE
return dict_report
def state_to_dict(state):
assert isinstance(state, State), "Input state is not of known State prototype"
dict_state = {}
# Parse own info
dict_vinfo = node_report_to_dict(state.vinfo)
for key in dict_vinfo:
if key == 'vname':
dict_state[KEY_ID] = dict_vinfo[key]
else:
dict_state[key] = dict_vinfo[key]
# Parse other reports
for report in state.node_reports:
if 'NODE_REPORTS' not in dict_state:
dict_state['NODE_REPORTS'] = {}
dict_state['NODE_REPORTS'][report.vname] = node_report_to_dict(report)
# Parse other vars
for var in state.vars:
dict_state[var.key] = moos_var_to_kp(var)[1]
# Parse episode report
if state.HasField('episode_report'):
dict_state[KEY_EPISODE_MGR_REPORT] = episode_report_to_dict(state.episode_report)
else:
dict_state[KEY_EPISODE_MGR_REPORT] = None
return dict_state
def action_to_dict(action):
assert isinstance(action, Action), "Input action is not of known Action prototype"
dict_action = {}
dict_action['course'] = action.course
dict_action['speed'] = action.speed
dict_action['ctrl_msg'] = action.ctrl_msg
dict_action['posts'] = {}
for post in action.posts:
dict_action['posts'][post.key] = moos_var_to_kp(post)[1]
return dict_action
```
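The `from_dict`/`to_dict` pairs are designed to be inverses of each other, which the test suite below exercises. For example (mirroring `test_translate_state`):

```python
from mivp_agent.proto import translate
from mivp_agent.const import KEY_ID, KEY_EPISODE_MGR_REPORT

state = {
    KEY_ID: 'felix',
    'MOOS_TIME': 16923.012,
    'NAV_X': 98.0,
    'NAV_Y': 40.0,
    'NAV_HEADING': 180,
    KEY_EPISODE_MGR_REPORT: None,
}
proto = translate.state_from_dict(state)  # serializable State message
assert translate.state_to_dict(proto) == state
```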
#### File: mivp_agent/util/parse.py
```python
def csp_to_dict(csp_str):
d = {}
pairs = csp_str.split(',')
for p in pairs:
        name, value = p.split('=', 1)
d[name] = value
return d
def parse_boolstr(boolstr):
if boolstr.lower() == 'true':
return True
elif boolstr.lower() == 'false':
return False
else:
raise RuntimeError(f'Unexpected non boolean value: {boolstr}')
# For parsing pEpisodeManager reports
def parse_report(report):
if report is None:
return None
report = csp_to_dict(report)
report['NUM'] = int(report['NUM'])
report['DURATION'] = float(report['DURATION'])
report['SUCCESS'] = parse_boolstr(report['SUCCESS'])
report['WILL_PAUSE'] = parse_boolstr(report['WILL_PAUSE'])
return report
```
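These helpers turn pEpisodeManager's comma-separated report strings into typed dictionaries, e.g.:

```python
from mivp_agent.util.parse import parse_report

report = parse_report('NUM=3,DURATION=12.5,SUCCESS=true,WILL_PAUSE=false')
assert report == {'NUM': 3, 'DURATION': 12.5, 'SUCCESS': True, 'WILL_PAUSE': False}
```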
#### File: python_module/test/test_log.py
```python
import os
from typing import Pattern
import unittest
from pathlib import Path
current_dir = os.path.dirname(os.path.realpath(__file__))
generated_dir = os.path.join(current_dir, '.generated')
from mivp_agent.log.metadata import LogMetadata, RegistryDatum
from mivp_agent.util.file_system import safe_clean
class TestMetadata(unittest.TestCase):
def test_interface(self):
meta_dir = os.path.join(generated_dir, '.meta')
os.makedirs(meta_dir)
# Test we do not fail on a new directory
m = LogMetadata(generated_dir)
self.assertEqual(m.registry.register('my_name'), 'my_name')
self.assertNotEqual(m.registry.register('my_name'), 'my_name')
safe_clean(generated_dir, patterns=['*.session'])
if __name__ == '__main__':
unittest.main()
```
#### File: python_module/test/test_proto.py
```python
import os
import glob
import unittest
from pathlib import Path
current_dir = os.path.dirname(os.path.realpath(__file__))
generated_dir = os.path.join(current_dir, '.generated')
import unittest
from google.protobuf.message import EncodeError
from google.protobuf.message import Message
from mivp_agent.proto import moos_pb2, mivp_agent_pb2
from mivp_agent.proto import translate
from mivp_agent.const import KEY_ID, KEY_EPISODE_MGR_REPORT
class TestProto(unittest.TestCase):
def test_moos_var(self):
var = moos_pb2.MOOSVar()
self.assertRaises(EncodeError, var.SerializeToString)
var.key = "MY_VAR"
with self.assertRaises(TypeError):
var.dval = "Hi"
with self.assertRaises(TypeError):
var.sval = 1203.2
def do_translate_test(self, input, from_dict, to_dict, debug=False):
proto = from_dict(input)
# Make sure we can serialize
proto.SerializeToString()
# Untranslate and compare with original
out = to_dict(proto)
if debug:
print("IN --------------------")
print(input)
print("OUT -------------------")
print(out)
self.assertEqual(input, out)
def test_translate_state(self):
state = {
KEY_ID: 'felix',
'MOOS_TIME': 16923.012,
'NAV_X': 98.0,
'NAV_Y': 40.0,
'NAV_HEADING': 180,
KEY_EPISODE_MGR_REPORT: None
}
self.do_translate_test(
state,
translate.state_from_dict,
translate.state_to_dict
)
state['TAGGED'] = True
self.do_translate_test(
state,
translate.state_from_dict,
translate.state_to_dict
)
# Test with bad episode report
state[KEY_EPISODE_MGR_REPORT] = {
'NUM': 54,
'SUCCESS': True,
'WILL_PAUSE': False,
}
with self.assertRaises(KeyError):
self.do_translate_test(
state,
translate.state_from_dict,
translate.state_to_dict
)
state[KEY_EPISODE_MGR_REPORT]['DURATION'] = 2.0
self.do_translate_test(
state,
translate.state_from_dict,
translate.state_to_dict
)
# Test with bad other vehicle first
state['NODE_REPORTS'] = {}
state['NODE_REPORTS']['henry'] = {
'vname': 'henry',
'NAV_X': 2.354,
'NAV_Y': 23.1,
'NAV_HEADING': 140,
}
with self.assertRaises(KeyError):
self.do_translate_test(
state,
translate.state_from_dict,
translate.state_to_dict
)
state['NODE_REPORTS']['henry']['MOOS_TIME'] = 0.123
self.do_translate_test(
state,
translate.state_from_dict,
translate.state_to_dict
)
def test_translate_action(self):
action = {
'speed': 2.0,
'course': 120.0,
'posts': {},
'ctrl_msg': 'SEND_STATE'
}
self.do_translate_test(
action,
translate.action_from_dict,
translate.action_to_dict
)
action['posts']['myVar'] = True
self.do_translate_test(
action,
translate.action_from_dict,
translate.action_to_dict
)
action['posts']['myVar'] = "String"
self.do_translate_test(
action,
translate.action_from_dict,
translate.action_to_dict
)
action['posts']['myVar'] = 3.1415
self.do_translate_test(
action,
translate.action_from_dict,
translate.action_to_dict
)
del action['speed']
with self.assertRaises(AssertionError):
self.do_translate_test(
action,
translate.action_from_dict,
translate.action_to_dict
)
from mivp_agent.proto.proto_logger import ProtoLogger
def clean_dir(dir, file_pattern="*"):
files = glob.glob(f'{dir}/{file_pattern}')
for f in files:
os.remove(f)
assert len(os.listdir(dir)) == 0, f"File written to {dir} that doesn't match pattern {file_pattern}"
os.rmdir(dir)
class TestLogger(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
# Add some helpful initializations
cls.report = moos_pb2.NodeReport()
cls.report.vname = "agent"
cls.report.NAV_X = 40.1
cls.report.NAV_Y = -50.4
cls.report.NAV_HEADING = 143.2
cls.report.MOOS_TIME = 4.0
cls.report.SerializeToString()
cls.reports = []
for i in range(100):
report = moos_pb2.NodeReport()
report.vname = f"vname{i}"
report.NAV_X = 40.1
report.NAV_Y = -50.4
report.NAV_HEADING = 143.2
report.MOOS_TIME = 4.0
# Test is valid
report.SerializeToString()
cls.reports.append(report)
cls.other = moos_pb2.MOOSVar()
cls.other.key = "Hello"
cls.other.sval = "Yo"
cls.other.SerializeToString()
return super().setUpClass()
def test_basic(self):
with self.assertRaises(AssertionError):
ProtoLogger(generated_dir, moos_pb2.NodeReport, 'w')
save_dir = os.path.join(generated_dir, 'test')
log = ProtoLogger(save_dir, moos_pb2.NodeReport, 'w', max_msgs=2)
with self.assertRaises(AssertionError):
log.write('WrongType')
with self.assertRaises(AssertionError):
log.write(self.other)
# First add should not write to file
self.assertEqual(len(log._buffer), 0, "Initialization error")
log.write(self.report)
self.assertGreater(len(log._buffer), 0, "Message was not written to buffer")
self.assertEqual(len(os.listdir(save_dir)), 0, "Unexpected write to save directory")
# Second add should write to file
log.write(self.report)
self.assertEqual(len(os.listdir(save_dir)), 1, "Messages were not written to file")
self.assertEqual(len(log._buffer), 0, "Messages were not clear from buffer")
# Make sure the logs are written to file on close
log.write(self.report)
log.close()
self.assertEqual(len(os.listdir(save_dir)), 2, "Logs not written on close")
# Test reading fails with non-existing directory
with self.assertRaises(AssertionError):
ProtoLogger(os.path.join(generated_dir, 'non-exist'), moos_pb2.NodeReport, 'r')
# Test fail on empty directory
test2_dir = os.path.join(generated_dir, 'test2')
os.makedirs(test2_dir)
with self.assertRaises(AssertionError):
ProtoLogger(test2_dir, moos_pb2.NodeReport, 'r')
# Test fail on non gz file
Path(os.path.join(test2_dir, 'file.txt')).touch()
with self.assertRaises(RuntimeError):
ProtoLogger(test2_dir, moos_pb2.NodeReport, 'r')
clean_dir(test2_dir, file_pattern="*.txt")
log = ProtoLogger(save_dir, moos_pb2.NodeReport, 'r')
# Test that we can get the amount request
msgs = log.read(1)
self.assertEqual(len(msgs), 1)
self.assertTrue(isinstance(msgs[0], Message))
self.assertTrue(isinstance(msgs[0], moos_pb2.NodeReport))
self.assertEqual(msgs[0], self.report)
        # Test that reading separate files will happen seamlessly (remember 2 messages per file in the above)
# Test that a too high n won't break
msgs = log.read(3)
self.assertEqual(len(msgs), 2, "Expected only 2 messages to remain on disk")
for msg in msgs:
self.assertTrue(isinstance(msg, Message))
self.assertTrue(isinstance(msg, moos_pb2.NodeReport))
self.assertEqual(msg, self.report)
# Sanity check
self.assertNotEqual(msg, self.other)
        # Test subsequent reads just return nothing
self.assertEqual(len(log.read(1)), 0)
# Clean up
clean_dir(save_dir, file_pattern="*.gz")
def test_matrix(self):
matrix_dir = os.path.join(generated_dir, 'matrix')
for store_amt in range(1, 50, 5):
for read_amt in range(1, 50, 5):
# Make sure the directory not created yet
                self.assertFalse(os.path.isdir(matrix_dir), "Expected matrix directory to be empty at beginning of test")
# Write the messages with max_msgs set to store_amount
with ProtoLogger(
matrix_dir,
moos_pb2.NodeReport,
'w',
max_msgs=store_amt) as log:
for msg in self.reports:
log.write(msg)
# Check that the proper number of files have been generated
# NOTE: Reference https://stackoverflow.com/questions/14822184/is-there-a-ceiling-equivalent-of-operator-in-python
file_amt = -(len(self.reports) // -store_amt)
self.assertEqual(len(os.listdir(matrix_dir)), file_amt)
# Open previously written dir in read mode
all_messages = []
with ProtoLogger(
matrix_dir,
moos_pb2.NodeReport,
'r',) as log:
while len(all_messages) != len(self.reports):
msgs = log.read(read_amt)
self.assertNotEqual(len(msgs), 0, "Read returned empty list before all messages were decoded")
if len(msgs) != read_amt:
self.assertEqual(len(log.read(read_amt)), 0, "Read did not fulfill `n` requested when more messages were remaining")
all_messages.extend(msgs)
for i, msg in enumerate(all_messages):
self.assertTrue(isinstance(msg, Message))
self.assertTrue(isinstance(msg, moos_pb2.NodeReport))
self.assertEqual(msg, self.reports[i], "Message either corrupted or out of order")
# Clean up
clean_dir(matrix_dir, file_pattern="*.gz")
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "joedrago/jsastc",
"score": 2
} |
#### File: jsastc/Test/astc_size_binary.py
```python
import argparse
import shutil
import subprocess as sp
import sys
def run_size(binary):
"""
Run size on a single binary.
Args:
binary (str): The path of the binary file to process.
Returns:
tuple(int, int, int): A triplet of code size, read-only data size, and
zero-init data size, all in bytes.
Raises:
CalledProcessException: The ``size`` subprocess failed for any reason.
"""
args = ["size", "--format=sysv", binary]
result = sp.run(args, stdout=sp.PIPE, stderr=sp.PIPE,
check=True, universal_newlines=True)
data = {}
patterns = {"Code": ".text", "RO": ".rodata", "ZI": ".bss"}
lines = result.stdout.splitlines()
for line in lines:
for key, value in patterns.items():
if line.startswith(value):
                size = int(line.split()[1])
data[key] = size
return (data["Code"], data["RO"], data["ZI"])
def parse_command_line():
"""
Parse the command line.
Returns:
Namespace: The parsed command line container.
"""
parser = argparse.ArgumentParser()
parser.add_argument("bin", type=argparse.FileType("r"),
help="The new binary to size")
parser.add_argument("ref", nargs="?", type=argparse.FileType("r"),
help="The reference binary to compare against")
return parser.parse_args()
def main():
"""
The main function.
Returns:
int: The process return code.
"""
args = parse_command_line()
# Preflight - check that size exists. Note that size might still fail at
# runtime later, e.g. if the binary is not of the correct format
path = shutil.which("size")
if not path:
print("ERROR: The 'size' utility is not installed on the PATH")
return 1
# Collect the data
try:
newSize = run_size(args.bin.name)
if args.ref:
refSize = run_size(args.ref.name)
except sp.CalledProcessError as ex:
print("ERROR: The 'size' utility failed")
print(" %s" % ex.stderr.strip())
return 1
# Print the basic table of absolute values
print("%8s % 8s % 8s % 8s" % ("", "Code", "RO Data", "ZI Data"))
if args.ref:
print("%8s % 8u % 8u % 8u" % ("Ref", *refSize))
print("%8s % 8u % 8u % 8u" % ("New", *newSize))
# Print the difference if we have a reference
if args.ref:
diffAbs = []
diffRel = []
for refVal, newVal in zip(refSize, newSize):
diff = newVal - refVal
diffAbs.append(diff)
diffRel.append((diff / refVal) * 100.0)
dat = ("Abs D", diffAbs[0], diffAbs[1], diffAbs[2])
print("%8s % 8u % 8u % 8u" % dat)
dat = ("Rel D", diffRel[0], diffRel[1], diffRel[2])
print("%8s % 7.2f%% % 7.2f%% % 7.2f%%" % dat)
return 0
if __name__ == "__main__":
sys.exit(main())
```
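For reference, `run_size` only looks at the `.text`, `.rodata`, and `.bss` rows of the sysv-format output. A standalone sketch of that parsing against made-up `size` output:

```python
# Hypothetical `size --format=sysv` output for illustration
sample = """section      size   addr
.text        1024      0
.rodata       256      0
.bss          128      0"""

patterns = {"Code": ".text", "RO": ".rodata", "ZI": ".bss"}
data = {}
for line in sample.splitlines():
    for key, value in patterns.items():
        if line.startswith(value):
            data[key] = int(line.split()[1])

assert data == {"Code": 1024, "RO": 256, "ZI": 128}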
#### File: Test/testlib/testset.py
```python
import os
from testlib.image import TestImage
class TSetException(Exception):
"""
Exception thrown for bad test set specification.
"""
class TestSet():
"""
Generate a list of images that are test candidates.
This reflection is built automatically based on a directory of images on
disk, provided that the images follow a standard structure.
Attributes:
name: The name of the test set.
tests: The list of TestImages forming the set.
"""
def __init__(self, name, rootDir, profiles, formats):
"""
Create a new TestSet through reflection.
Args:
name (str): The name of the test set.
rootDir (str): The root directory of the test set.
profiles (list(str)): The ASTC profiles to allow.
formats (list(str)): The image formats to allow.
Raises:
TSetException: The specified TestSet could not be loaded.
"""
self.name = name
        if not os.path.isdir(rootDir):
raise TSetException("Bad test set root directory (%s)" % rootDir)
self.tests = []
for (dirPath, dirNames, fileNames) in os.walk(rootDir):
for fileName in fileNames:
# Only select image files
fileExt = os.path.splitext(fileName)[1]
if fileExt not in TestImage.TEST_EXTS:
continue
# Create the TestImage for each file on disk
filePath = os.path.join(dirPath, fileName)
image = TestImage(filePath)
# Filter out the ones we don't want to allow
if image.colorProfile not in profiles:
continue
if image.colorFormat not in formats:
continue
self.tests.append((filePath, image))
# Sort the TestImages so they are in a stable order
self.tests.sort()
self.tests = [x[1] for x in self.tests]
``` |
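Hypothetical usage, assuming a test-image tree whose file names encode an LDR color profile and RGB format (the exact profile/format strings come from `TestImage`, which is not shown in this dump):

```python
from testlib.testset import TestSet

# Collect all matching images under the given root into a stable, sorted list
ts = TestSet("Kodak", "Test/Images/Kodak", profiles=["ldr"], formats=["rgb"])
print(f"{len(ts.tests)} candidate images")
```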
{
"source": "JoeDReynolds/HW_13",
"score": 3
} |
#### File: JoeDReynolds/HW_13/scrape_mars.py
```python
import time
from splinter import Browser
from bs4 import BeautifulSoup as bs
import requests
import json
import numpy as np
import pandas as pd
import pymongo
conn = 'mongodb://localhost:27017'
client = pymongo.MongoClient(conn)
db = client.mars_db
collection = db.mars_db
#URLs of Sites to be scraped
def init_browser():
executable_path = {'executable_path':'chromedriver.exe'}
return Browser('chrome', **executable_path, headless=False)
def scrape():
browser = init_browser()
# Create Mars Data Dictionary to insert to MongoDB
mars_data = {}
news_url = 'https://mars.nasa.gov/news/'
jpl_url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
weather_url = 'https://twitter.com/marswxreport?lang=en'
facts_url ='https://space-facts.com/mars/'
usgs_url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
# Retrieve pages with the requests module
news_response = requests.get(news_url)
jpl_response = requests.get(jpl_url)
weather_response = requests.get(weather_url)
# Create BeautifulSoup objects; parse with 'html.parser'
news_soup = bs(news_response.text, 'html.parser')
weather_soup = bs(weather_response.text, 'html.parser')
#Set Up Splinter Browser
browser.visit(news_url)
news_html = browser.html
news_soup = bs(news_html, 'html.parser')
news_title = news_soup.find('div', class_='content_title').text
#Scrape Teaser Body
news_p = news_soup.find('div', class_='rollover_description_inner').text
    #Click on Featured Image
    browser.visit(jpl_url)
time.sleep(2)
browser.click_link_by_partial_text('FULL IMAGE')
time.sleep(2)
browser.click_link_by_partial_text('more info')
time.sleep(2)
browser.click_link_by_partial_text('.jpg')
#Save HTML of the page to variable
html = browser.html
#Use bs to save image url
jpl_soup = bs(html, 'html.parser')
featured_image_url = jpl_soup.find("img")["src"]
import re
mars_weather= weather_soup.find(string=re.compile("Sol"))
tables = pd.read_html(facts_url)
facts_df = tables[0]
facts_df.to_html('facts.html')
    #Create list of Mars hemispheres to iterate over and a list to save the results
    mars_hemis = ["Valles Marineris Hemisphere", "Cerberus Hemisphere", "Schiaparelli Hemisphere", "Syrtis Major Hemisphere"]
    hemisphere_image_urls = []
    browser.visit(usgs_url)
for i in range (4):
time.sleep(2)
images = browser.find_by_tag('h3')
images[i].click()
html = browser.html
soup = bs(html, 'html.parser')
partial = soup.find("img", class_="wide-image")["src"]
img_title = soup.find("h2",class_="title").text
img_url = 'https://astrogeology.usgs.gov'+ partial
hemi_data = {"title":img_title,"img_url":img_url}
hemisphere_image_urls.append(hemi_data)
browser.back()
mars_data["news_title"] = news_title
mars_data["summary"] = news_p
mars_data["featured_image_url"] = featured_image_url
mars_data["mars_weather"] = mars_weather
mars_data["mars_table"] = facts_df
mars_data['mars_hemis'] = hemisphere_image_urls
return mars_data
``` |
{
"source": "joeduffy/pulumi-kubernetes-ingress-nginx",
"score": 2
} |
#### File: python/pulumi_kubernetes_ingress_nginx/ingress_controller.py
```python
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
import pulumi_kubernetes
__all__ = ['IngressControllerArgs', 'IngressController']
@pulumi.input_type
class IngressControllerArgs:
def __init__(__self__, *,
controller: Optional[pulumi.Input['ControllerArgs']] = None,
default_backend: Optional[pulumi.Input['ControllerDefaultBackendArgs']] = None,
dh_param: Optional[pulumi.Input[str]] = None,
fullname_override: Optional[pulumi.Input[str]] = None,
helm_options: Optional[pulumi.Input['ReleaseArgs']] = None,
image_pull_secrets: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.LocalObjectReferenceArgs']]]] = None,
name_override: Optional[pulumi.Input[str]] = None,
pod_security_policy: Optional[pulumi.Input['ControllerPodSecurityPolicyArgs']] = None,
rbac: Optional[pulumi.Input['ControllerRBACArgs']] = None,
revision_history_limit: Optional[pulumi.Input[int]] = None,
service_account: Optional[pulumi.Input['ControllerServiceAccountArgs']] = None,
tcp: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
udp: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None):
"""
The set of arguments for constructing a IngressController resource.
:param pulumi.Input['ControllerDefaultBackendArgs'] default_backend: Default 404 backend.
:param pulumi.Input[str] dh_param: A base64ed Diffie-Hellman parameter. This can be generated with: openssl dhparam 4096 2> /dev/null | base64 Ref: https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/ssl-dh-param.
:param pulumi.Input[str] fullname_override: Overrides for generated resource names.
:param pulumi.Input['ReleaseArgs'] helm_options: HelmOptions is an escape hatch that lets the end user control any aspect of the Helm deployment. This exposes the entirety of the underlying Helm Release component args.
:param pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.LocalObjectReferenceArgs']]] image_pull_secrets: Optional array of imagePullSecrets containing private registry credentials Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/.
:param pulumi.Input[str] name_override: Overrides for generated resource names.
:param pulumi.Input['ControllerPodSecurityPolicyArgs'] pod_security_policy: If true, create & use Pod Security Policy resources https://kubernetes.io/docs/concepts/policy/pod-security-policy/
:param pulumi.Input['ControllerRBACArgs'] rbac: Enable RBAC as per https://github.com/kubernetes/ingress-nginx/blob/main/docs/deploy/rbac.md and https://github.com/kubernetes/ingress-nginx/issues/266
:param pulumi.Input[int] revision_history_limit: Rollback limit.
:param pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]] tcp: TCP service key:value pairs Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md.
:param pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]] udp: UDP service key:value pairs Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md.
"""
if controller is not None:
pulumi.set(__self__, "controller", controller)
if default_backend is not None:
pulumi.set(__self__, "default_backend", default_backend)
if dh_param is not None:
pulumi.set(__self__, "dh_param", dh_param)
if fullname_override is not None:
pulumi.set(__self__, "fullname_override", fullname_override)
if helm_options is not None:
pulumi.set(__self__, "helm_options", helm_options)
if image_pull_secrets is not None:
pulumi.set(__self__, "image_pull_secrets", image_pull_secrets)
if name_override is not None:
pulumi.set(__self__, "name_override", name_override)
if pod_security_policy is not None:
pulumi.set(__self__, "pod_security_policy", pod_security_policy)
if rbac is not None:
pulumi.set(__self__, "rbac", rbac)
if revision_history_limit is not None:
pulumi.set(__self__, "revision_history_limit", revision_history_limit)
if service_account is not None:
pulumi.set(__self__, "service_account", service_account)
if tcp is not None:
pulumi.set(__self__, "tcp", tcp)
if udp is not None:
pulumi.set(__self__, "udp", udp)
@property
@pulumi.getter
def controller(self) -> Optional[pulumi.Input['ControllerArgs']]:
return pulumi.get(self, "controller")
@controller.setter
def controller(self, value: Optional[pulumi.Input['ControllerArgs']]):
pulumi.set(self, "controller", value)
@property
@pulumi.getter(name="defaultBackend")
def default_backend(self) -> Optional[pulumi.Input['ControllerDefaultBackendArgs']]:
"""
Default 404 backend.
"""
return pulumi.get(self, "default_backend")
@default_backend.setter
def default_backend(self, value: Optional[pulumi.Input['ControllerDefaultBackendArgs']]):
pulumi.set(self, "default_backend", value)
@property
@pulumi.getter(name="dhParam")
def dh_param(self) -> Optional[pulumi.Input[str]]:
"""
A base64ed Diffie-Hellman parameter. This can be generated with: openssl dhparam 4096 2> /dev/null | base64 Ref: https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/ssl-dh-param.
"""
return pulumi.get(self, "dh_param")
@dh_param.setter
def dh_param(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "dh_param", value)
@property
@pulumi.getter(name="fullnameOverride")
def fullname_override(self) -> Optional[pulumi.Input[str]]:
"""
Overrides for generated resource names.
"""
return pulumi.get(self, "fullname_override")
@fullname_override.setter
def fullname_override(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "fullname_override", value)
@property
@pulumi.getter(name="helmOptions")
def helm_options(self) -> Optional[pulumi.Input['ReleaseArgs']]:
"""
HelmOptions is an escape hatch that lets the end user control any aspect of the Helm deployment. This exposes the entirety of the underlying Helm Release component args.
"""
return pulumi.get(self, "helm_options")
@helm_options.setter
def helm_options(self, value: Optional[pulumi.Input['ReleaseArgs']]):
pulumi.set(self, "helm_options", value)
@property
@pulumi.getter(name="imagePullSecrets")
def image_pull_secrets(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.LocalObjectReferenceArgs']]]]:
"""
Optional array of imagePullSecrets containing private registry credentials Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/.
"""
return pulumi.get(self, "image_pull_secrets")
@image_pull_secrets.setter
def image_pull_secrets(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.LocalObjectReferenceArgs']]]]):
pulumi.set(self, "image_pull_secrets", value)
@property
@pulumi.getter(name="nameOverride")
def name_override(self) -> Optional[pulumi.Input[str]]:
"""
Overrides for generated resource names.
"""
return pulumi.get(self, "name_override")
@name_override.setter
def name_override(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name_override", value)
@property
@pulumi.getter(name="podSecurityPolicy")
def pod_security_policy(self) -> Optional[pulumi.Input['ControllerPodSecurityPolicyArgs']]:
"""
If true, create & use Pod Security Policy resources https://kubernetes.io/docs/concepts/policy/pod-security-policy/
"""
return pulumi.get(self, "pod_security_policy")
@pod_security_policy.setter
def pod_security_policy(self, value: Optional[pulumi.Input['ControllerPodSecurityPolicyArgs']]):
pulumi.set(self, "pod_security_policy", value)
@property
@pulumi.getter
def rbac(self) -> Optional[pulumi.Input['ControllerRBACArgs']]:
"""
Enable RBAC as per https://github.com/kubernetes/ingress-nginx/blob/main/docs/deploy/rbac.md and https://github.com/kubernetes/ingress-nginx/issues/266
"""
return pulumi.get(self, "rbac")
@rbac.setter
def rbac(self, value: Optional[pulumi.Input['ControllerRBACArgs']]):
pulumi.set(self, "rbac", value)
@property
@pulumi.getter(name="revisionHistoryLimit")
def revision_history_limit(self) -> Optional[pulumi.Input[int]]:
"""
Rollback limit.
"""
return pulumi.get(self, "revision_history_limit")
@revision_history_limit.setter
def revision_history_limit(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "revision_history_limit", value)
@property
@pulumi.getter(name="serviceAccount")
def service_account(self) -> Optional[pulumi.Input['ControllerServiceAccountArgs']]:
return pulumi.get(self, "service_account")
@service_account.setter
def service_account(self, value: Optional[pulumi.Input['ControllerServiceAccountArgs']]):
pulumi.set(self, "service_account", value)
@property
@pulumi.getter
def tcp(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
"""
TCP service key:value pairs Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md.
"""
return pulumi.get(self, "tcp")
@tcp.setter
def tcp(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
pulumi.set(self, "tcp", value)
@property
@pulumi.getter
def udp(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
"""
UDP service key:value pairs Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md.
"""
return pulumi.get(self, "udp")
@udp.setter
def udp(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
pulumi.set(self, "udp", value)
class IngressController(pulumi.ComponentResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
controller: Optional[pulumi.Input[pulumi.InputType['ControllerArgs']]] = None,
default_backend: Optional[pulumi.Input[pulumi.InputType['ControllerDefaultBackendArgs']]] = None,
dh_param: Optional[pulumi.Input[str]] = None,
fullname_override: Optional[pulumi.Input[str]] = None,
helm_options: Optional[pulumi.Input[pulumi.InputType['ReleaseArgs']]] = None,
image_pull_secrets: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['pulumi_kubernetes.core.v1.LocalObjectReferenceArgs']]]]] = None,
name_override: Optional[pulumi.Input[str]] = None,
pod_security_policy: Optional[pulumi.Input[pulumi.InputType['ControllerPodSecurityPolicyArgs']]] = None,
rbac: Optional[pulumi.Input[pulumi.InputType['ControllerRBACArgs']]] = None,
revision_history_limit: Optional[pulumi.Input[int]] = None,
service_account: Optional[pulumi.Input[pulumi.InputType['ControllerServiceAccountArgs']]] = None,
tcp: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
udp: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
__props__=None):
"""
Ingress controller for Kubernetes using NGINX as a reverse proxy and load balancer
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['ControllerDefaultBackendArgs']] default_backend: Default 404 backend.
:param pulumi.Input[str] dh_param: A base64ed Diffie-Hellman parameter. This can be generated with: openssl dhparam 4096 2> /dev/null | base64 Ref: https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/ssl-dh-param.
:param pulumi.Input[str] fullname_override: Overrides for generated resource names.
:param pulumi.Input[pulumi.InputType['ReleaseArgs']] helm_options: HelmOptions is an escape hatch that lets the end user control any aspect of the Helm deployment. This exposes the entirety of the underlying Helm Release component args.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['pulumi_kubernetes.core.v1.LocalObjectReferenceArgs']]]] image_pull_secrets: Optional array of imagePullSecrets containing private registry credentials Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/.
:param pulumi.Input[str] name_override: Overrides for generated resource names.
:param pulumi.Input[pulumi.InputType['ControllerPodSecurityPolicyArgs']] pod_security_policy: If true, create & use Pod Security Policy resources https://kubernetes.io/docs/concepts/policy/pod-security-policy/
:param pulumi.Input[pulumi.InputType['ControllerRBACArgs']] rbac: Enable RBAC as per https://github.com/kubernetes/ingress-nginx/blob/main/docs/deploy/rbac.md and https://github.com/kubernetes/ingress-nginx/issues/266
:param pulumi.Input[int] revision_history_limit: Rollback limit.
:param pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]] tcp: TCP service key:value pairs Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md.
:param pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]] udp: UDP service key:value pairs Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[IngressControllerArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Ingress controller for Kubernetes using NGINX as a reverse proxy and load balancer
:param str resource_name: The name of the resource.
:param IngressControllerArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(IngressControllerArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
controller: Optional[pulumi.Input[pulumi.InputType['ControllerArgs']]] = None,
default_backend: Optional[pulumi.Input[pulumi.InputType['ControllerDefaultBackendArgs']]] = None,
dh_param: Optional[pulumi.Input[str]] = None,
fullname_override: Optional[pulumi.Input[str]] = None,
helm_options: Optional[pulumi.Input[pulumi.InputType['ReleaseArgs']]] = None,
image_pull_secrets: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['pulumi_kubernetes.core.v1.LocalObjectReferenceArgs']]]]] = None,
name_override: Optional[pulumi.Input[str]] = None,
pod_security_policy: Optional[pulumi.Input[pulumi.InputType['ControllerPodSecurityPolicyArgs']]] = None,
rbac: Optional[pulumi.Input[pulumi.InputType['ControllerRBACArgs']]] = None,
revision_history_limit: Optional[pulumi.Input[int]] = None,
service_account: Optional[pulumi.Input[pulumi.InputType['ControllerServiceAccountArgs']]] = None,
tcp: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
udp: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is not None:
raise ValueError('ComponentResource classes do not support opts.id')
else:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = IngressControllerArgs.__new__(IngressControllerArgs)
__props__.__dict__["controller"] = controller
__props__.__dict__["default_backend"] = default_backend
__props__.__dict__["dh_param"] = dh_param
__props__.__dict__["fullname_override"] = fullname_override
__props__.__dict__["helm_options"] = helm_options
__props__.__dict__["image_pull_secrets"] = image_pull_secrets
__props__.__dict__["name_override"] = name_override
__props__.__dict__["pod_security_policy"] = pod_security_policy
__props__.__dict__["rbac"] = rbac
__props__.__dict__["revision_history_limit"] = revision_history_limit
__props__.__dict__["service_account"] = service_account
__props__.__dict__["tcp"] = tcp
__props__.__dict__["udp"] = udp
__props__.__dict__["status"] = None
super(IngressController, __self__).__init__(
'kubernetes-ingress-nginx:index:IngressController',
resource_name,
__props__,
opts,
remote=True)
@property
@pulumi.getter
def status(self) -> pulumi.Output['outputs.ReleaseStatus']:
"""
Detailed information about the status of the underlying Helm deployment.
"""
return pulumi.get(self, "status")
``` |
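A minimal Pulumi program using the component (the resource name is illustrative, and this assumes the package root re-exports `IngressController`; all arguments are optional, so the chart defaults apply):

```python
import pulumi
from pulumi_kubernetes_ingress_nginx import IngressController

# Deploys the ingress-nginx Helm chart with default values
ctrl = IngressController("ingress-nginx")
pulumi.export("status", ctrl.status)
```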
{
"source": "joe-duop/accounts-Abstract_Base_user",
"score": 2
} |
#### File: accounts-Abstract_Base_user/accounts/views.py
```python
from django.shortcuts import render, redirect
from django.contrib.auth.forms import UserCreationForm
from .models import UserProfile
from .forms import CustomUserCreationForm
from django.contrib.auth import authenticate, login
# Create your views here.
def home(request):
return render(request, 'home.html')
def signupuser(request):
if request.method == "GET":
return render(request, 'signupuser.html', {'form':CustomUserCreationForm()})
else:
if request.method == "POST":
form = CustomUserCreationForm(request.POST)
if form.is_valid():
                user = form.save()
                user = authenticate(email=form.cleaned_data['email'], password=form.cleaned_data['password1'])
                login(request, user)
return redirect('current')
return render(request, 'signupuser.html', {'form':form})
def current(request):
return render(request, 'current.html')
``` |
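The imported `CustomUserCreationForm` is not shown in this repo snippet; a plausible definition consistent with the email-based signup above (a sketch, assuming `UserProfile` is the custom user model with `email` as its username field):

```python
# forms.py (hypothetical)
from django.contrib.auth.forms import UserCreationForm
from .models import UserProfile

class CustomUserCreationForm(UserCreationForm):
    class Meta(UserCreationForm.Meta):
        model = UserProfile
        fields = ('email',)
```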
{
"source": "joedurbak/echelle_simulator_model_creation",
"score": 2
} |
#### File: joedurbak/echelle_simulator_model_creation/PyEchelle.py
```python
import tables
from matplotlib import patches
import matplotlib.mlab as ml
from mpl_toolkits.axes_grid1 import make_axes_locatable
import numpy as np
import pickle as pickle
import os
from scipy import interpolate
import matplotlib.pyplot as plt
from PIL import Image
import astropy.io.fits as pyfits
from scipy.interpolate import griddata
import pyzdde.arraytrace as at
from collections import Counter
px = []
py = []
for i in range(-50, 51, 1):
for j in range(-50, 51, 1):
px.append(i / 50.)
py.append(j / 50.)
px = np.array(px)
py = np.array(py)
idx = (px ** 2 + py ** 2) < 1
class Transformation(tables.IsDescription):
wavelength = tables.Float32Col()
shear = tables.Float32Col()
rotation = tables.Float32Col()
scale_x = tables.Float32Col()
scale_y = tables.Float32Col()
translation_x = tables.Float32Col()
translation_y = tables.Float32Col()
def save_CCD_info_to_hdf(path, ccd):
h5file = tables.open_file(path, "a")
ccd_group = h5file.create_group(h5file.root, 'CCD', 'CCD information')
ccd_group._v_attrs.Nx = ccd.Nx
ccd_group._v_attrs.Ny = ccd.Ny
ccd_group._v_attrs.pixelsize = ccd.pixelSize
h5file.close()
def save_spectrograph_info_to_hdf(path, spec):
h5file = tables.open_file(path, "w")
spec_group = h5file.create_group(h5file.root, 'Spectrograph', "Spectrograph Information")
spec_group._v_attrs.blaze = spec.blaze
spec_group._v_attrs.gpmm = spec.grmm
spec_group._v_attrs.name = spec.name
h5file.close()
def save_transformation_to_hdf(path, res, fiber_number=1):
h5file = tables.open_file(path, "a")
gr = h5file.create_group(h5file.root, "fiber_" + str(fiber_number))
gr._v_attrs.MatricesPerOrder = res['MatricesPerOrder']
gr._v_attrs.norm_field = res['norm_field']
gr._v_attrs.sampling_input_x = res['sampling_input_x']
gr._v_attrs.field_with = res['field_width']
gr._v_attrs.field_height = res['field_height']
    for order, r in res['matrices'].items():
tab = h5file.create_table("/fiber_" + str(fiber_number), 'order' + str(abs(order)), Transformation,
"Affine Transformation", expectedrows=len(r), chunkshape=True)
transf = tab.row
        for wl, pars in r.items():
transf['wavelength'] = wl
transf['rotation'] = pars[0]
transf['scale_x'] = pars[1]
transf['scale_y'] = pars[2]
transf['shear'] = pars[3]
transf['translation_x'] = pars[4]
transf['translation_y'] = pars[5]
transf.append()
tab.flush()
h5file.close()
def save_psfs_to_hdf(path, res, fiber_number=1):
h5file = tables.open_file(path, "a")
if not (h5file.__contains__("/fiber_" + str(fiber_number))):
gr = h5file.create_group(h5file.root, "fiber_" + str(fiber_number))
else:
gr = h5file.get_node(h5file.root, "fiber_" + str(fiber_number))
    for order, psfs in res.items():
if not (h5file.__contains__("/fiber_" + str(fiber_number) + "/psf_order_" + str(abs(order)))):
gr = h5file.create_group("/fiber_" + str(fiber_number), "psf_order_" + str(abs(order)))
else:
gr = h5file.get_node("/fiber_" + str(fiber_number), "psf_order_" + str(abs(order)))
        for wl, data in psfs.items():
if not (
h5file.__contains__(
"/fiber_" + str(fiber_number) + "/psf_order_" + str(order) + "/wavelength_" + str(wl))):
ar = h5file.create_array(gr, "wavelength_" + str(wl), np.array(data[1]))
ar.attrs.wavelength = float(wl)
ar.attrs.order = int(abs(order))
for i, a in enumerate(data[0]._fields):
ar.set_attr(a, data[0][i])
def efficiency(scalingfactor, order, alpha, blaze, wl, n):
bb = np.arcsin(-np.sin(alpha) + order * wl * n * 1E-6)
return scalingfactor * np.sinc(order * (np.cos(alpha) / np.cos(alpha - blaze)) *
(np.cos(blaze) - np.sin(blaze) / np.tan((alpha + bb) / 2.))) ** 2
class Spot(object):
""" Class that describes a spot in a optical design
It basically consists of a DDEArray
"""
def __init__(self, wavelength, order, i, rd_in, rd_out, valid_only=True, circular_pupil=True):
"""
Constructor
:param wavelength: wavelength in microns
:param order: order of diffraction of the echelle grating
:param i: index of spot per order - makes it easier to create the spot_map but is probably redundant
:param rd_in: DdeArray of input rays before raytracing
:param rd_out: DdeArray of traced rays
:param valid_only: if True, only rays within a circular aperture are traced (needed for spot diagrams)
which are not vignetted
:return:
"""
self.wavelength = wavelength
self.order = order
self.i = i
# number of rays
self.Nrays = len(rd_in['z'][1:])
# restrict rays to circular pupil or not
if circular_pupil:
px = rd_in['z'][1:]
py = rd_in['l'][1:]
idx = (px ** 2 + py ** 2) <= 1.
else:
idx = np.ones(self.Nrays)
# restrict rays to non vignetted ones
if valid_only:
vig = rd_out['vigcode'][1:]
err = rd_out['error'][1:]
vig = np.logical_or(vig, err)
index = np.logical_and(vig < 1, idx)
else:
index = idx
self.hx = rd_in['x'][1:][index]
self.hy = rd_in['y'][1:][index]
self.x = rd_out['x'][1:][index]
self.y = rd_out['y'][1:][index]
self.px = rd_in['z'][1:][index]
self.py = rd_in['l'][1:][index]
self.barycenter = None
self.xy_c = None
self.rms = None
self.rms_x = None
self.rms_y = None
self._calc_barycenter()
self._calc_rms_radius()
def _calc_barycenter(self):
"""
calculate the barycenter of the spot
"""
self.barycenter = {'x': np.average(self.x),
'y': np.average(self.y)}
self.xy_c = {'x': self.x - self.barycenter['x'],
'y': self.y - self.barycenter['y']}
def _calc_rms_radius(self):
"""
calculate rms radius of the spot, radially, in x and y direction
"""
self.rms = np.std(np.sqrt(self.xy_c['x'] ** 2 + self.xy_c['y'] ** 2))
self.rms_x = np.std(np.sqrt(self.xy_c['x'] ** 2))
self.rms_y = np.std(np.sqrt(self.xy_c['y'] ** 2))
def EE_radius(self, EE=80., direction='r'):
"""
Calculate encircled energy (EE) radius of the spot
:param EE: encircled energy level in percent
:param direction: direction in which EE is calculated (radial, x and y)
:return:
"""
n = len(self.xy_c['x'])
if direction == 'r':
return np.sort(np.sqrt(self.xy_c['x'] ** 2 + self.xy_c['y'] ** 2))[int(EE / 100. * n)] * 1000.
if direction == 'x':
return np.sort(np.sqrt(self.xy_c['x'] ** 2))[int(EE / 100. * n)] * 1000.
if direction == 'y':
return np.sort(np.sqrt(self.xy_c['y'] ** 2))[int(EE / 100. * n)] * 1000.
def calc_weighted_barycenter(self, path_image=None, xy_c=None, radius=None, f=None, plot=False):
"""
Calculates the barycenter of the spot weighted with an image.
This can be used to calculate the spot barycenter weighted with a fiber far field (FF) illumination pattern.
:param path_image: path to image that contains the weights
:param xy_c: x and y coordinate of the center of the FF for interpolation, default is geometric image center
:param radius: radius on the FF image that corresponds to p=radius, default is half image width
:return: weighted barycenter
"""
if isinstance(path_image, str):
if path_image.lower().endswith('.fit') or path_image.lower().endswith('.fits'):
                weight_image = pyfits.open(path_image)[0].data[int(xy_c['y'] - np.ceil(radius)):int(xy_c['y'] + np.ceil(radius)),
                                                               int(xy_c['x'] - np.ceil(radius)):int(xy_c['x'] + np.ceil(radius))]
else:
                if xy_c is None:
xy_c = {}
dims = np.shape(np.array(Image.open(path_image).convert('L')))
xy_c['y'] = dims[0] / 2.
xy_c['x'] = dims[1] / 2.
                if radius is None:
radius = np.shape(np.array(Image.open(path_image).convert('L')))[0] / 2.
# open image but only select relevant parts
                weight_image = np.array(Image.open(path_image).convert('L'))[
                    int(xy_c['y'] - np.ceil(radius)):int(xy_c['y'] + np.ceil(radius)),
                    int(xy_c['x'] - np.ceil(radius)):int(xy_c['x'] + np.ceil(radius))]
# normalized x and y coordinates (correspond to Px and Py in ZEMAX)
xr = yr = np.arange(-1., 1., 1. / radius)
# interpolation function
f = interpolate.RectBivariateSpline(xr, yr, weight_image)
w = f(self.px, self.py, grid=False)
weighted_barycenter = {'x': np.average(self.x, weights=w),
'y': np.average(self.y, weights=w)}
if plot:
plt.figure()
plt.scatter(self.px, self.py, c=w, linewidth=0., marker='o')
plt.show()
return weighted_barycenter
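# --- Editor's hedged sketch (standalone, mirrors Spot.EE_radius) ---
# The EE radius is the EE-th percentile of the barycenter-centred radial
# distances, scaled by 1000 (mm -> micron). Synthetic illustration with an
# assumed Gaussian spot, independent of any ZEMAX trace:
def _demo_EE_radius(EE=80.):
    rng = np.random.default_rng(0)
    xc = rng.normal(0., 0.002, 500)             # centred x offsets [mm]
    yc = rng.normal(0., 0.002, 500)             # centred y offsets [mm]
    r = np.sort(np.sqrt(xc ** 2 + yc ** 2))
    return r[int(EE / 100. * len(r))] * 1000.   # EE80 radius [micron]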
class Order(object):
""" Class that describes an echelle order
"""
def __init__(self, m, blazeWL, minWL, maxWL, minFSRwl, maxFSRwl):
"""
Constructor
:param m: order number
:param blazeWL: blaze wavelength [micron]
:param minWL: minimum wavelength that fits on chip [micron]
:param maxWL: maximum wavelength that fits on chip [micron]
:param minFSRwl: minimum FSR wavelength [micron]
:param maxFSRwl: maximum FSR wavelength [micron]
:return: None
"""
self.m = m
self.blazeWL = blazeWL
self.minWL = minWL
self.maxWL = maxWL
self.minFSRwl = minFSRwl
self.maxFSRwl = maxFSRwl
def inFSR(self, wl):
"""
checks if wavelength lies within FSR or not
:param wl: wavelength [micron]
:return: True/False
"""
return self.maxFSRwl > wl > self.minFSRwl
def inOrder(self, wl):
"""
checks if wavelength lies in order (all chip) or not
:param wl: wavelength [micron]
:return: True/False
"""
return self.maxWL > wl > self.minWL
def info(self):
print('Order ', self.m)
print('FSR wavelength boundaries [microns]', self.minFSRwl, self.maxFSRwl)
print('Chip wavelength boundaries [microns]', self.minWL, self.maxWL)
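# --- Editor's hedged usage sketch (values are illustrative assumptions) ---
# An Order bundles the wavelength limits of one echelle order. For a blaze
# wavelength lb in order m, the free spectral range is roughly lb +/- lb/(2m),
# which is exactly how Echelle.calc_wl() below fills minFSRwl/maxFSRwl.
def _demo_order():
    m, lb = 50, 1.2                             # order number, blaze wl [micron]
    o = Order(m, lb, 1.17, 1.23, lb - lb / m / 2., lb + lb / m / 2.)
    return o.inFSR(1.205), o.inOrder(1.225)     # -> (True, True)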
class CCD(object):
""" CCD class, contains information about CCD detector """
def __init__(self, Nx, Ny, pixelSize, dispersionDirection='x', name=''):
"""
:param Nx: number of pixels in x - direction
:param Ny: number of pixels in y - direction
:param pixelSize: size of one pixel [micron]
:param dispersionDirection: echelle dispersion direction
:param name: name/identifier of the CCD detector
:return:
"""
self.Wx = Nx * pixelSize / 1000.
self.Wy = Ny * pixelSize / 1000.
self.Nx = Nx
self.Ny = Ny
self.pixelSize = pixelSize
self.name = name
self.xi = np.linspace(-Nx * pixelSize / 2000., Nx * pixelSize / 2000., Nx)
self.yi = np.linspace(-Ny * pixelSize / 2000., Ny * pixelSize / 2000., Ny)
self.extent = [-Nx * pixelSize / 2000.,
+Nx * pixelSize / 2000.,
-Ny * pixelSize / 2000.,
+Ny * pixelSize / 2000.]
self.shape = [[-Nx * pixelSize / 2000., -Ny * pixelSize / 2000.],
[Nx * pixelSize / 2000., -Ny * pixelSize / 2000.],
[Nx * pixelSize / 2000., Ny * pixelSize / 2000.],
[-Nx * pixelSize / 2000., Ny * pixelSize / 2000.]
]
self.dispersionDirection = dispersionDirection
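# --- Editor's hedged usage sketch (detector values are assumed) ---
# CCD only stores geometry: pixel counts, pixel size in microns, and derived
# detector extents in mm (the /2000 terms convert micron half-widths to mm).
def _demo_ccd():
    ccd = CCD(4096, 4096, 15, dispersionDirection='x', name='demo')
    return ccd.Wx, ccd.extent                   # 61.44 mm width, symmetric extent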
class Echelle():
"""
class describing an echelle spectrograph
"""
def __init__(self, ln=None, name=''):
self.name = name
self.savePath = 'PyEchelle_' + self.name
if not os.path.exists(self.savePath):
os.makedirs(self.savePath)
# zemax surface number
# self.ln= pyz.createLink()
if ln is not None:
import pyzdde.zdde as pyz
import pyzdde.arraytrace as at # Module for array ray tracing
self.ln = ln
self.zmx_nsurf = None
# minimal/maximal order
self.minord = None
self.maxord = None
# Blaze angle in degree
self.blaze = None
# gamma angle in degree
self.gamma = None
# groves per mm
self.grmm = None
# current order
self.order = None
self.theta = 0
self.grp = None
self.tracing = []
self.x = []
self.y = []
self.orders = []
self.file = None
self.rays = []
self.wls = []
self.CCD = None
self.Orders = {}
self.spots = []
self.order_configs = {}
self.order_config_wave = {}
def setCCD(self, CCD):
self.CCD = CCD
def saveOrders(self, filename='orders.pkl'):
"""
Save Orders to file
:param filename: filename
:return: None
"""
print('save orders')
pickle.dump(self.Orders, open(self.savePath + '/' + filename, "wb"))
def saveSpectrograph(self, filename=None):
        if filename is None:
filename = self.name
spec = {'blaze': self.blaze,
'gamma': self.gamma,
'theta': self.theta,
'order': self.order,
'name': self.name,
'savePath': self.savePath,
'minOrder': self.minord,
'maxOrder': self.maxord,
'grmm': self.grmm,
'grp': self.grp,
}
pickle.dump(spec, open(self.savePath + '/' + filename + '.pkl', "wb"))
def loadSpectrograph(self, filename=None):
        if filename is None:
filename = self.name
        spec = pickle.load(open(self.savePath + '/' + filename + '.pkl', 'rb'))
self.blaze = spec['blaze']
self.gamma = spec['gamma']
self.theta = spec['theta']
self.order = spec['order']
self.minord = spec['minOrder']
self.maxord = spec['maxOrder']
self.grmm = spec['grmm']
self.grp = spec['grp']
def loadOrders(self, filename='orders.pkl'):
"""
Load Orders from file
:param filename: filename
:return:
"""
        self.Orders = pickle.load(open(self.savePath + '/' + filename, 'rb'))
def analyseZemaxFile(self, echellename='Echelle', thetaname='theta', blazename='blaze', gammaname='gamma'):
"""
Analyses ZEMAX files and extract important parameters to specify Echelle Spectrograph.
Looks for names in comment column of ZEMAX to detect specific surfaces.
:param echellename: ZEMAX surface name of Echelle grating
:param thetaname: ZEMAX surface name of theta angle
:param blazename: ZEMAX surface name of blaze angle
:param gammaname: ZEMAX surface name of gamma angle
:return:
"""
for i in range(self.ln.zGetNumSurf()):
comm = self.ln.zGetComment(i)
if comm == echellename:
print('Echelle found ----------------------------')
self.zmx_nsurf = i
self.echelle_surface = i
# grooves per mm
self.grmm = self.ln.zGetSurfaceParameter(i, 1) * 1000.
# current order
self.order = int(self.ln.zGetSurfaceParameter(i, 2))
print('Grooves per mm', self.grmm)
print('Current order', self.order)
print('Surface number', self.zmx_nsurf)
elif comm == thetaname:
print('Theta found ------------------------------')
self.theta = float(self.ln.zGetSurfaceParameter(i, 4))
print('theta', self.theta)
elif comm == blazename:
print('blaze found ------------------------------')
b1 = abs(float(self.ln.zGetSurfaceParameter(i, 3)))
b2 = abs(float(self.ln.zGetSurfaceParameter(i, 4)))
b3 = abs(float(self.ln.zGetSurfaceParameter(i, 5)))
self.blaze = max((b1, b2, b3))
print('blaze', self.blaze)
elif comm == gammaname:
print('gamma found ------------------------------')
b1 = abs(float(self.ln.zGetSurfaceParameter(i, 3)))
b2 = abs(float(self.ln.zGetSurfaceParameter(i, 4)))
self.gamma = max((b1, b2))
print('gamma', self.gamma)
def trace(self, wave=1, hx=0, hy=0, N=101, intensity=1.):
self.ln.zGetUpdate()
self.ln.zPushLens()
Nx = Ny = int(np.sqrt(N))
rd_in = at.getRayDataArray(Nx * Ny, tType=0, mode=0)
rd_out = at.getRayDataArray(Nx * Ny, tType=0, mode=0)
k = 0
for i in np.linspace(-1., 1., Nx):
for j in np.linspace(-1., 1., Ny):
k += 1
rd_out[k].x = hx
rd_out[k].y = hy
rd_out[k].z = i # px
rd_out[k].l = j # py
rd_out[k].intensity = intensity
rd_out[k].wave = wave
rd_in[k].x = hx
rd_in[k].y = hy
rd_in[k].z = i # px
rd_in[k].l = j # py
rd_in[k].intensity = intensity
rd_in[k].wave = wave
ret = at.zArrayTrace(rd_out, timeout=5000)
return np.array(rd_in, dtype=at.DdeArrayData), np.array(rd_out, dtype=at.DdeArrayData)
def trace_rays(self, wave, FIELD):
self.ln.zGetUpdate()
self.ln.zPushLens()
numRays = 10201
rd = at.getRayDataArray(numRays, tType=0, mode=0)
# Fill the rest of the ray data array
k = 0
for i in range(-50, 51, 1):
for j in range(-50, 51, 1):
k += 1
rd[k].y = FIELD
rd[k].z = i / 50. # px
rd[k].l = j / 50. # py
rd[k].intensity = 1.0
rd[k].wave = wave
# Trace the rays
ret = at.zArrayTrace(rd, timeout=5000)
return rd
def order_to_config(self, order):
return self.order_configs[order]
def clear_configs(self):
c, cc, rc = self.ln.zGetConfig()
for i in range(cc):
self.ln.zDeleteConfig(1)
self.ln.zPushLens()
for i in range(rc):
self.ln.zDeleteMCO(1)
def clear_merit_function(self):
while (self.ln.zDeleteMFO(1) > 1):
self.ln.zDeleteMFO(1)
def set_config_and_wavelength(self, wavelength_per_order=7):
self.clear_configs()
self.ln.zSetMulticon(0, 1, 'PAR2', self.echelle_surface, 0, 0)
self.ln.zInsertMCO(2)
self.ln.zSetMulticon(0, 2, 'WAVE', 0, 0, 0)
self.order_configs = {}
for i, o in enumerate(self.Orders):
self.ln.zInsertConfig(i + 1)
self.ln.zSetMulticon(i + 1, 1, self.Orders[o].m, 0, 0, 0, 1, 0)
self.ln.zSetMulticon(i + 1, 2, self.Orders[o].blazeWL, 0, 0, 0, 1, 0)
self.order_configs[o] = i + 1
# self.ln.zInsertMFO(i + 1)
# self.ln.zSetOperandRow(i + 1, 'CONF', i+1)
c, cc, rc = self.ln.zGetConfig()
self.ln.zDeleteConfig(cc)
self.ln.zPushLens()
def clear_wavelength(self):
n = self.ln.zGetNumWave()
def set_config_and_wavelength_from_list(self, orders, wavelength, posx, posy):
self.clear_configs()
self.clear_merit_function()
self.ln.zSetMulticon(0, 1, 'PAR2', self.echelle_surface, 0, 0)
# add unique orders to multi config file
unique_orders = np.unique(np.array(orders))
self.order_configs = dict(zip(unique_orders, range(len(unique_orders))))
for i, o in enumerate(unique_orders):
self.ln.zInsertConfig(i + 1)
self.ln.zSetMulticon(i + 1, 1, o, 0, 0, 0, 1, 0)
self.order_configs[o] = i + 1
self.ln.zPushLens()
c, cc, rc = self.ln.zGetConfig()
self.ln.zDeleteConfig(cc)
self.ln.zPushLens()
# # add as many rows needed for the order with the most wavelength
n_wavelength = Counter(orders).most_common(1)[0][1]
self.ln.zSetWave(0, 1, n_wavelength)
self.ln.zGetUpdate()
#
for n in range(n_wavelength):
self.ln.zInsertMCO(n + 2)
self.ln.zSetPrimaryWave(n + 1)
self.ln.zSetMulticon(0, n + 2, 'WAVE', n + 1, n + 1, n + 1)
for i in unique_orders:
self.ln.zSetMulticon(self.order_to_config(i), n + 2, self.Orders[i].blazeWL, 0, 0, 0, 1, 0)
row_count = {}
for uo in unique_orders:
row_count[uo] = 2
for o, wl, px, py in zip(orders, wavelength, posx, posy):
config = self.order_to_config(o)
rc = row_count[o]
self.ln.zSetMulticon(config, rc, wl, 0, 0, 0, 1, 0)
self.set_merit_function(o, rc - 1, px, py)
row_count[o] += 1
self.ln.zPushLens()
def set_merit_function(self, order, wave, posx, posy, clear=False):
if clear:
self.clear_merit_function()
n = self.ln.zGetNumSurf()
cf = self.order_to_config(order)
self.ln.zInsertMFO(1)
self.ln.zSetOperandRow(1, 'REAY', n, wave, 0, 0, 0, 0, tgt=posy)
self.ln.zInsertMFO(1)
self.ln.zSetOperandRow(1, 'REAX', n, wave, 0, 0, 0, 0, tgt=posx)
self.ln.zInsertMFO(1)
self.ln.zSetOperandRow(1, 'CONF', cf)
self.ln.zPushLens()
def read_merit_function_position_difference(self, n):
REAX = []
REAY = []
dx = []
dy = []
for i in range(n):
data = self.ln.zGetOperandRow(i)
if data[0] == 'REAX':
dx.append((data[9] - data[11]) * data[10])
REAX.append(data[11])
if data[0] == 'REAY':
dy.append((data[9] - data[11]) * data[10])
REAY.append(data[11])
print("Median deviation XY: ", np.median(np.array(dx)) * 1000., np.median(np.array(dy)) * 1000.)
plt.figure()
plt.plot()
plt.axis('equal')
for x, y, dxx, dyy in zip(REAX, REAY, dx, dy):
plt.scatter(x, y)
plt.arrow(x, y, dxx * 100, dyy * 100)
plt.show()
def do_spectral_format(self, nPerOrder=7, FSRonly=True, hx=0, hy=0):
s = []
for o in list(self.Orders.values()):
print('Trace order', o.m)
self.ln.zSetSurfaceParameter(self.zmx_nsurf, 2, o.m)
# self.ln.zPushLens()
if FSRonly:
wl = np.linspace(o.minFSRwl, o.maxFSRwl, nPerOrder)
else:
wl = np.linspace(o.minWL, o.maxWL, nPerOrder)
for i, w in enumerate(wl):
self.ln.zSetWave(1, w, 1.)
self.ln.zGetUpdate()
# self.ln.zPushLens()
rayTraceData = self.ln.zGetTrace(1, 0, -1, hx, hy, 0, 0)
error, vig, x, y, z, l, m, n, l2, m2, n2, intensity = rayTraceData
s.append([o.m, w, x, y])
return s
def get_psfs(self, nPerOrder=1, fieldnumber=3, fieldposition=[0., 0.]):
psfs = {}
old_field = self.ln.zGetField(fieldnumber)
# self.ln.zSetField(fieldnumber, fieldposition[0], fieldposition[1])
for o in list(self.Orders.values()):
print('Trace order', o.m)
self.ln.zSetSurfaceParameter(self.zmx_nsurf, 2, o.m)
wl = np.linspace(o.minWL, o.maxWL, nPerOrder)
psfs[o.m] = {}
for w in wl:
self.ln.zSetWave(1, w, 1.)
psf = self.ln.zGetPSF(which='huygens')
print(psf)
psfs[o.m][w] = psf
# restore field
self.ln.zSetField(fieldnumber, old_field.xf, old_field.yf)
return psfs
def do_affine_transformation_calculation(self, nPerOrder=10,
norm_field=[[-1, 1], [-1, -1], [1, -1], [1, 1], [0, 0]], fw=None, fh=None):
"""
Calculates Affine Matrices that describe spectrograph
The spectrograph can be described by affine transformations from the input slit to the focal plane.
        An affine transformation can be described by a 3x3 matrix.
        This function calculates the 3x3 matrix per wavelength and order that maps the input slit to the focal plane.
:param nPerOrder: number of wavelength steps across one order
:param norm_field: corner points and center point in normalized coordinates
:param fw: fiber/slit width [microns]
:param fh: fiber/slit height [microns]
:return:
"""
from skimage import transform as tf
sampling_input_x = int(fw)
res = {'MatricesPerOrder': nPerOrder,
'norm_field': norm_field,
'sampling_input_x': sampling_input_x}
# find field dimensions in ZEMAX
field_info = self.ln.zGetField(0)
# TODO: raise error
        if field_info[0] != 1:
print('Field coordinates have the wrong format')
zmx_fields = []
for ii in range(1, field_info[1] + 1):
field = self.ln.zGetField(ii)
zmx_fields.append([field[0], field[1]])
zmx_fields = np.array(zmx_fields)
norm_field = np.array(norm_field)
if fw is None:
fw = (np.max(zmx_fields[:, 0]) - np.min(zmx_fields[:, 0])) * 1000. # slit width in microns
fw *= (np.max(norm_field[:, 0]) - np.min(norm_field[:, 0])) / 2.
if fh is None:
fh = (np.max(zmx_fields[:, 1]) - np.min(zmx_fields[:, 1])) * 1000. # slit height in microns
fh *= (np.max(norm_field[:, 1]) - np.min(norm_field[:, 1]))
print('Field width: ' + str(fw))
print('Field height: ' + str(fh))
res['field_width'] = fw
res['field_height'] = fh
sampling_x = sampling_input_x
sampling_y = sampling_input_x * fh / fw
src = np.array(norm_field, dtype=float)
src[:, 0] -= np.min(src[:, 0])
src[:, 1] -= np.min(src[:, 1])
src[:, 0] /= np.max(src[:, 0])
src[:, 1] /= np.max(src[:, 1])
# src[:, 0] *= sampling_x
# src[:, 1] *= sampling_y
ppp = []
dst_x = []
dst_y = []
orders = []
wavelength = []
for o in list(self.Orders.values()):
print('trace order ' + str(o.m))
wl = np.linspace(o.minWL, o.maxWL, nPerOrder)
self.ln.zSetSurfaceParameter(self.zmx_nsurf, 2, o.m) # TODO: replace with ln.zSetConfig(config_num)
# print(wl, o.m)
for w in wl:
self.ln.zSetWave(1, w, 1.)
self.ln.zGetUpdate()
for f in norm_field:
rayTraceData = self.ln.zGetTrace(1, 0, -1, f[0], f[1], 0, 0)
error, vig, x, y, z, l, m, n, l2, m2, n2, intensity = rayTraceData
dst_x.append(x)
dst_y.append(y)
orders.append(o.m)
wavelength.append(w)
# plt.figure()
# plt.scatter(dst_x, dst_y)
# plt.show()
# ppp.append(np.array(self.do_spectral_format(nPerOrder=nPerOrder, FSRonly=False, hx=f[0], hy=f[1])))
# ppp = np.array(ppp)
dst_x = np.array(dst_x)
dst_y = np.array(dst_y)
dst = np.vstack((dst_x, dst_y))
dst /= ((self.CCD.pixelSize) / 1000.)
dst += self.CCD.Nx / 2
        dst = dst.reshape(2, len(dst[0]) // len(norm_field), len(norm_field)).transpose((1, 2, 0))
orders = np.array(orders)
wavelength = np.array(wavelength)
        orders = orders.reshape((len(orders) // len(norm_field), len(norm_field)))
        wavelength = wavelength.reshape((len(wavelength) // len(norm_field), len(norm_field)))
affine_matrices = {}
transformations = {}
for order, wavel, p in zip(orders, wavelength, dst):
params = tf.estimate_transform('affine', src, p)
            if order[0] in affine_matrices:
affine_matrices[order[0]].update({wavel[0]: np.array(
[params.rotation, params.scale[0], params.scale[1], params.shear, params.translation[0],
params.translation[1]])})
else:
affine_matrices[order[0]] = {wavel[0]: np.array(
[params.rotation, params.scale[0], params.scale[1], params.shear, params.translation[0],
params.translation[1]])}
res['matrices'] = affine_matrices
return res
def walk_trough_configs(self, nWl=7, nPerSpot=5001, hx=0., hy=0.):
actC, nC, operandC = self.ln.zGetConfig()
for i in range(1, nC + 1):
for j in range(1, nWl + 1):
self.ln.zSetConfig(i)
wl = self.ln.zGetWave(j).wavelength
print(wl)
rd_in, rd_out = self.trace(j, hx=hx, hy=hy, N=nPerSpot)
o = self.ln.zGetSurfaceParameter(self.zmx_nsurf, 2)
self.spots.append(Spot(wl, o, i - 1, rd_in, rd_out))
def do_spots(self, nPerOrder=5, nOrders=5, FSRonly=True, nPerSpot=5001, hx=0, hy=0, everyNthOrder=5):
n = everyNthOrder
for o in list(self.Orders.values()):
if n < everyNthOrder:
n += 1
else:
print('Trace order', o.m)
self.ln.zSetSurfaceParameter(self.zmx_nsurf, 2, o.m)
if FSRonly:
wl = np.linspace(o.minFSRwl, o.maxFSRwl, nPerOrder)
else:
wl = np.linspace(o.minWL, o.maxWL, nPerOrder)
for i, w in enumerate(wl):
self.ln.zSetWave(1, w, 1.)
rd_in, rd_out = self.trace(hx=hx, hy=hy, N=nPerSpot)
self.spots.append(Spot(w, o.m, i, rd_in, rd_out))
n -= everyNthOrder
def do_spot_diagrams(self, order='all', nPerOrder=5, field=0):
if order == 'all':
for o in self.tracing:
if o[0] <= self.maxord and o[0] >= self.minord:
print(("Trace order...", o[0]))
self.ln.zSetSurfaceParameter(self.zmx_nsurf, 2, o[0])
wl = np.linspace(o[1], o[2], nPerOrder)
for i, w in enumerate(wl):
self.ln.zSetWave(1, w, 1.)
asdf = self.trace_rays(1, field)
a = np.array(asdf, dtype=at.DdeArrayData)
wl = self.ln.zGetWave(self.ln.zGetPrimaryWave()).wavelength
vig = a['vigcode'][1:]
err = a['error'][1:]
                        vig = np.logical_or(vig, err)
                        index = vig < 1
if np.max(index) > 0:
self.rays.append([a['x'][index], a['y'][index]])
self.wls.append(wl)
def saveSpots(self, filename='spots.pkl'):
print('save spots')
pickle.dump(self.spots, open(self.savePath + filename, "wb"))
def loadSpots(self, filename='spots.pkl'):
        self.spots = pickle.load(open(self.savePath + filename, 'rb'))
def do_tracing(self, order='all', n=1000):
if order == 'all':
for o in self.tracing:
print(("Trace order...", o[0]))
self.ln.zSetSurfaceParameter(self.zmx_nsurf, 2, o[0])
array = self.file.create_array(self.file.root, 'order' + str(o[0]), atom=np.array([3.]),
shape=(2 * 4 * n,))
wlarray = self.file.create_array(self.file.root, 'wl_order' + str(o[0]), atom=np.array([3.]),
shape=(n,))
wl = np.linspace(o[1], o[2], n)
for i, w in enumerate(wl):
self.ln.zSetWave(1, w, 1.)
xy = self.ln.zGetTrace(1, 0, -1, -1, -1, 0, 0)
array[i * 4 * 2] = xy[2]
array[i * 4 * 2 + 1] = xy[3]
xy = self.ln.zGetTrace(1, 0, -1, 1, -1, 0, 0)
array[i * 4 * 2 + 2] = xy[2]
array[i * 4 * 2 + 3] = xy[3]
xy = self.ln.zGetTrace(1, 0, -1, 1, 1, 0, 0)
array[i * 4 * 2 + 4] = xy[2]
array[i * 4 * 2 + 5] = xy[3]
xy = self.ln.zGetTrace(1, 0, -1, -1, 1, 0, 0)
array[i * 4 * 2 + 6] = xy[2]
array[i * 4 * 2 + 7] = xy[3]
wlarray[i] = w
self.file.flush()
self.file.close()
else:
self.ln.zSetSurfaceParameter(self.zmx_nsurf, 2, self.tracing[0][0])
array = self.file.create_array(self.file.root, 'order' + str(self.tracing[0][0]), atom=np.array([3.]),
shape=(2 * 4 * n,))
wl = np.linspace(self.tracing[0][1], self.tracing[0][2], n)
for i, w in enumerate(wl):
self.ln.zSetWave(1, w, 1.)
xy = self.ln.zGetTrace(1, 0, -1, -1, -1, 0, 0)
array[i * 4 * 2] = xy[2]
array[i * 4 * 2 + 1] = xy[3]
xy = self.ln.zGetTrace(1, 0, -1, 1, -1, 0, 0)
array[i * 4 * 2 + 2] = xy[2]
array[i * 4 * 2 + 3] = xy[3]
xy = self.ln.zGetTrace(1, 0, -1, 1, 1, 0, 0)
array[i * 4 * 2 + 4] = xy[2]
array[i * 4 * 2 + 5] = xy[3]
xy = self.ln.zGetTrace(1, 0, -1, -1, 1, 0, 0)
array[i * 4 * 2 + 6] = xy[2]
array[i * 4 * 2 + 7] = xy[3]
self.file.close()
def setFile(self, name='MaroonXblue.h5', mode='w'):
self.file = tables.open_file(name, mode=mode)
def wavelength_to_order(self, wl):
"""
Returns the order in which the wavelength appears.
Returns empty list if wavelength is outside the spectral range.
Returns a list of tuples, with the order number and a string indicating whether it is within FSR or not.
:param wl: wavelength [micron]
:return: list of tuples (order number, 'FSR'/'CCD')
"""
res = []
for o in list(self.Orders.values()):
if o.inFSR(wl):
res.append((o.m, 'FSR'))
elif o.inOrder(wl):
res.append((o.m, 'CCD'))
return res
def calc_wl(self):
print('Calc wavelength')
def find_lmin(order, dwl=0.0001):
wl = self.ln.zGetWave(1)[0]
vig = False
wlmin = wl
while vig < 1:
wl = wl - dwl
self.ln.zSetWave(1, wl, 1.)
xy = self.ln.zGetTrace(1, 0, -1, 0, 0, 0, 0)
vig = np.logical_or(xy[1], xy[0])
else:
print('vignetting at surface ', xy[1], self.order, wl)
wlmin = wl
xmin = xy[2]
ymin = xy[3]
self.x.append(xmin)
self.y.append(ymin)
return wlmin, xmin, ymin
def find_lmax(order, dwl=0.0001):
wl = self.ln.zGetWave(1)[0]
vig = False
wlmin = wl
while vig < 1:
wl = wl + dwl
self.ln.zSetWave(1, wl, 1.)
xy = self.ln.zGetTrace(1, 0, -1, 0, 0, 0, 0)
vig = np.logical_or(xy[1], xy[0])
else:
print('vignetting at surface ', xy[1], self.order, wl)
wlmin = wl
xmin = xy[2]
ymin = xy[3]
self.x.append(xmin)
self.y.append(ymin)
return wlmin, xmin, ymin
gamma_rad = np.deg2rad(self.gamma)
blaze_rad = np.deg2rad(self.blaze)
theta_rad = np.deg2rad(self.theta)
self.grp = 1000. / self.grmm
alpha = blaze_rad + theta_rad
beta = blaze_rad - theta_rad
c0 = self.grp * np.cos(gamma_rad)
c1 = c0 * (np.sin(alpha) + np.sin(beta))
c2 = c0 * np.cos(beta)
        c3 = self.grp * np.cos(blaze_rad) * (1. - np.tan(theta_rad) * np.tan(blaze_rad))
print(self.order + 1, c1 / (self.order + 1))
self.ln.zSetWave(0, 1, 1)
self.ln.zPushLens()
vig = False
# find max order
o_working = self.order
print('find max order --------------------')
while vig < 1 and abs(self.order) < abs(self.maxord):
if self.order > 0:
self.order += 1
else:
self.order -= 1
blazeWL = abs(c1 / self.order)
print('Order: ', self.order, 'Blaze wl: ', blazeWL)
self.ln.zSetSurfaceParameter(self.zmx_nsurf, 2, self.order)
self.ln.zSetWave(1, blazeWL, 1.)
self.ln.zGetUpdate()
self.ln.zPushLens()
xy = self.ln.zGetTrace(1, 0, -1, 0, 0, 0, 0)
vig = np.logical_or(xy[1], xy[0])
if vig < 1:
self.x.append(xy[2])
self.y.append(xy[3])
self.orders.append(self.order)
self.ln.zSetWave(1, blazeWL, 1.)
self.ln.zPushLens()
wmax = find_lmax(self.order)[0]
self.ln.zSetWave(1, blazeWL, 1.)
self.ln.zPushLens()
wmin = find_lmin(self.order)[0]
print("Order added ", self.order, wmin, wmax, blazeWL)
self.Orders[self.order] = Order(self.order, blazeWL, wmin, wmax,
blazeWL - blazeWL / self.order / 2.,
blazeWL + blazeWL / self.order / 2.)
# find min order
vig = False
self.order = o_working + 1
print('find min order')
while vig < 1 and abs(self.order) > abs(self.minord):
print('test order', self.order, self.minord)
if self.order > 0:
self.order -= 1
else:
self.order += 1
blazeWL = abs(c1 / self.order)
self.ln.zSetSurfaceParameter(self.zmx_nsurf, 2, self.order)
self.ln.zSetWave(1, blazeWL, 1.)
self.ln.zPushLens()
xy = self.ln.zGetTrace(1, 0, -1, 0, 0, 0, 0)
vig = np.logical_or(xy[1], xy[0])
if vig < 1:
print('ok')
self.orders.append(self.order)
self.x.append(xy[2])
self.y.append(xy[3])
self.ln.zSetWave(1, blazeWL, 1.)
self.ln.zPushLens()
wmax = find_lmax(self.order)[0]
self.ln.zSetWave(1, blazeWL, 1.)
self.ln.zPushLens()
wmin = find_lmin(self.order)[0]
self.Orders[self.order] = Order(self.order, blazeWL, wmin, wmax,
blazeWL - blazeWL / self.order / 2.,
blazeWL + blazeWL / self.order / 2.)
def spots_on_CCD(self):
plt.figure()
for s in self.spots:
plt.scatter(s.x, s.y)
plt.show()
def EE_map(self, direction='r', plotSpots=True, zoom=150, save='', vmax=15., vmin=0., hx=0, hy=0, showplot=False,
EE_ratio=80., additional_spots=[]):
"""
generates encircled energy map from traced spots.
:param direction: 'r', 'x' or 'y'
:param plotSpots: plots spot diagramms as an overlay
:param zoom: zoom of the individual spot diagrams
:return:
"""
print('EE map')
fig, ax = plt.subplots()
X = []
Y = []
R = []
for s in self.spots:
if np.mean(s.hx) == hx:
if np.mean(s.hy) == hy:
X.append(s.barycenter['x'])
Y.append(s.barycenter['y'])
R.append(s.EE_radius(direction=direction, EE=EE_ratio))
if plotSpots:
if np.mean(s.hx) == hx:
if np.mean(s.hy) == hy:
ax.scatter(s.barycenter['x'] + zoom * s.xy_c['x'], -s.barycenter['y'] + zoom * s.xy_c['y'],
s=.2, facecolor='black', lw=0)
X = np.array(X)
Y = np.array(Y)
R = np.array(R)
xi = np.linspace(-self.CCD.Wx / 2., self.CCD.Wx / 2., 101)
yi = np.linspace(-self.CCD.Wy / 2., self.CCD.Wy / 2., 101)
zi = griddata((X, Y), R, (xi[None, :], yi[:, None]), method='linear')
ax.set_xlim((np.min(xi), np.max(xi)))
ax.set_ylim((np.min(yi), np.max(yi)))
ax.set_xlabel('Detector x [mm]')
ax.set_ylabel('Detector y [mm]')
im = ax.imshow(zi, interpolation='nearest', extent=[np.min(xi), np.max(xi), np.min(yi), np.max(yi)], vmin=vmin,
vmax=vmax)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
cb = plt.colorbar(im, cax=cax)
cb.set_label('EE' + str(EE_ratio) + ' radius [micron]')
# for s in additional_spots:
# ax.plot(additional_spots[:,0], additional_spots[:,1], 'w+')
plt.tight_layout()
if not save == '':
plt.savefig(save, dpi=300)
if showplot:
plt.show()
def spot_map(self):
n_spots = len(self.spots)
orders = []
for s in self.spots:
orders.append(s.order)
unique_orders = np.unique(np.array(orders))
n_orders = len(unique_orders)
print('number of spots:', n_spots)
print('number of orders:', n_orders)
print('spot per order', n_spots / n_orders)
        fig, axarr = plt.subplots(n_orders, n_spots // n_orders, figsize=(n_spots // n_orders, n_orders + 1))
for s in self.spots:
j = np.where(unique_orders == s.order)[0][0]
axarr[j, s.i].scatter(s.x, s.y, s=1, alpha=0.05, facecolor='blue', lw=0)
axarr[j, s.i].set_xticklabels([])
axarr[j, s.i].set_yticklabels([])
axarr[j, s.i].axis('equal')
axarr[j, s.i].xaxis.set_visible(False)
axarr[j, s.i].yaxis.set_visible(False)
axarr[j, s.i].axis('off')
axarr[j, s.i].annotate('{:.4f}'.format(s.wavelength), xy=(
s.barycenter['x'] - self.CCD.pixelSize / 2000., s.barycenter['y'] + self.CCD.pixelSize / 2000.),
fontsize=8)
axarr[j, s.i].add_patch(patches.Polygon(
[[s.barycenter['x'] - self.CCD.pixelSize / 1000., s.barycenter['y'] + self.CCD.pixelSize / 1000.],
[s.barycenter['x'] + self.CCD.pixelSize / 1000., s.barycenter['y'] + self.CCD.pixelSize / 1000.],
[s.barycenter['x'] + self.CCD.pixelSize / 1000., s.barycenter['y'] - self.CCD.pixelSize / 1000.],
[s.barycenter['x'] - self.CCD.pixelSize / 1000., s.barycenter['y'] - self.CCD.pixelSize / 1000.]],
fill=False))
axarr[j, s.i].set_xlim((s.barycenter['x'] - self.CCD.pixelSize / 2000. * 1.15,
s.barycenter['x'] + self.CCD.pixelSize / 2000. * 1.15))
axarr[j, s.i].set_ylim((s.barycenter['y'] - self.CCD.pixelSize / 2000. * 1.15,
s.barycenter['y'] + self.CCD.pixelSize / 2000. * 1.15))
plt.show()
def differential_FF_effects(self, path_image1, path_image2, xy1=None, xy2=None, r1=None, r2=None, k=1, plot=False,
saveplot=False):
if path_image1.lower().endswith('.fit') or path_image1.lower().endswith('.fits'):
weight_image1 = pyfits.open(path_image1)[0].data
else:
weight_image1 = np.array(Image.open(path_image1).convert('L'))
if xy1 is None:
xy1 = {}
dims = np.shape(weight_image1)
xy1['y'] = dims[0] / 2.
xy1['x'] = dims[1] / 2.
if r1 is None:
r1 = np.shape(weight_image1)[0] / 2.
        weight_image1 = weight_image1[int(xy1['y'] - np.ceil(r1)):int(xy1['y'] + np.ceil(r1)),
                                      int(xy1['x'] - np.ceil(r1)):int(xy1['x'] + np.ceil(r1))]
# normalized x and y coordinates (correspond to Px and Py in ZEMAX)
xr = yr = np.arange(-1., 1., 1. / r1)
# interpolation function
f1 = interpolate.RectBivariateSpline(xr, yr, weight_image1)
if path_image2.lower().endswith('.fit') or path_image2.lower().endswith('.fits'):
weight_image2 = pyfits.open(path_image2)[0].data
else:
weight_image2 = np.array(Image.open(path_image2).convert('L'))
if xy2 is None:
xy2 = {}
dims = np.shape(weight_image2)
xy2['y'] = dims[0] / 2.
xy2['x'] = dims[1] / 2.
if r2 is None:
r2 = np.shape(weight_image2)[0] / 2.
        weight_image2 = weight_image2[int(xy2['y'] - np.ceil(r2)):int(xy2['y'] + np.ceil(r2)),
                                      int(xy2['x'] - np.ceil(r2)):int(xy2['x'] + np.ceil(r2))]
# normalized x and y coordinates (correspond to Px and Py in ZEMAX)
xr2 = yr2 = np.arange(-1., 1., 1. / r2)
# interpolation function
f2 = interpolate.RectBivariateSpline(xr2, yr2, weight_image2)
if plot or saveplot:
fig = plt.figure()
ax = fig.add_subplot(111)
xMean = []
yMean = []
diffxMean = []
diffyMean = []
for s in self.spots:
b1 = s.calc_weighted_barycenter(f=f1)
xMean.append(b1['x'])
yMean.append(b1['y'])
b2 = s.calc_weighted_barycenter(f=f2)
diff = {'x': (b1['x'] - b2['x']) * 1000000. * 0.106,
'y': (b1['y'] - b2['y']) * 1000000. * 0.106}
diffxMean.append(diff['x'])
diffyMean.append(diff['y'])
if plot or saveplot:
# plt.scatter(b1['x'], b1['y'])
arr = patches.FancyArrow(b1['x'], b1['y'], k * diff['x'], k * diff['y'], head_width=0.25, width=0.05,
fc='k', aa=True)
ax.add_patch(arr)
# plt.scatter(b2['x'], b2['y'])
xMean = np.array(xMean)
yMean = np.array(yMean)
diffxMean = np.array(diffxMean)
diffyMean = np.array(diffyMean)
if plot or saveplot:
if self.CCD.dispersionDirection == 'x':
zi = ml.griddata(xMean, yMean, diffyMean, self.CCD.xi, self.CCD.yi, interp='linear')
else:
zi = ml.griddata(xMean, yMean, diffxMean, self.CCD.xi, self.CCD.yi, interp='linear')
img = ax.imshow(zi, interpolation='none', extent=self.CCD.extent, aspect='equal', origin='lower')
cbar = fig.colorbar(img)
cbar.ax.set_ylabel('shift in dispersion direction [m/s]')
if plot:
plt.show()
if saveplot:
plt.savefig(path_image2[:-3] + '_FF_rv.png')
return diffxMean, diffyMean
def calc_echellogram(self, nPerOrder=15, FSRonly=False):
s00 = np.array(self.do_spectral_format(nPerOrder=nPerOrder, FSRonly=FSRonly))
plt.figure()
plt.plot(s00[:, 2], s00[:, 3], 'g+')
plt.show()
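# --- Editor's hedged sketches (not part of the original module) ---
# 1) The core of do_affine_transformation_calculation() above is skimage's
#    estimate_transform('affine', src, dst): slit corner points in normalized
#    coordinates vs. their traced detector positions. Minimal standalone fit
#    with an assumed pure scale-plus-shift mapping:
def _demo_affine_fit():
    from skimage import transform as tf
    src = np.array([[0., 1.], [0., 0.], [1., 0.], [1., 1.], [.5, .5]])
    dst = 2.0 * src + np.array([10., 20.])      # assumed scale 2, shift (10, 20)
    params = tf.estimate_transform('affine', src, dst)
    return params.scale, params.translation     # ~ (2, 2) and (10, 20)
# 2) wavelength_to_order() only needs self.Orders populated; the single order
#    below (m=50, blaze 1.2 micron) is an illustrative assumption:
def _demo_wavelength_to_order():
    spec = Echelle(name='demo')                 # no ZEMAX link; creates savePath dir
    spec.Orders = {50: Order(50, 1.2, 1.17, 1.23, 1.188, 1.212)}
    return spec.wavelength_to_order(1.205)      # -> [(50, 'FSR')]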
if __name__ == "__main__":
pass
```
#### File: joedurbak/echelle_simulator_model_creation/rimas.py
```python
import os
import io
import re
import collections as co
import tables
import numpy as np
import pandas as pd
import pyzdde.zdde as pyz
import PyEchelle
class Spectrograph:
def __init__(self, blaze, grmm, name):
self.blaze = blaze
self.grmm = grmm
self.name = name
def wavelength_to_xy_config(lad, ind, band='yj'):
"""
convert wavelength to X-Y position
Input: wavelength in microns, index (1 less than
desired configuration number)
Returns X and Y position on the detector in mm for + and - sides of slit
"""
    if band.lower() == 'yj':
        band_string = ""
    elif band.lower() == 'hk':
        band_string = "_HK"
    else:
        raise ValueError("band must be 'yj' or 'hk'")
    Xp = np.loadtxt('XY_eq{}/X_plus.dat'.format(band_string))  # second order fit parameters for x+
    Xm = np.loadtxt('XY_eq{}/X_minus.dat'.format(band_string))
    Yp = np.loadtxt('XY_eq{}/Y_plus.dat'.format(band_string))
    Ym = np.loadtxt('XY_eq{}/Y_minus.dat'.format(band_string))
# ind = config - 1 #for 0-based indexing
# Apply fit parameters for that order
pxp = np.poly1d(Xp[ind])
pxm = np.poly1d(Xm[ind])
pyp = np.poly1d(Yp[ind])
pym = np.poly1d(Ym[ind])
Xla = [pxp(lad), pxm(lad)] # X coordinates of + and- slit
Yla = [pyp(lad), pym(lad)] # Y coordinates of + and- slit
return Xla, Yla
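# --- Editor's hedged sketch (standalone; coefficients are made up) ---
# Each row of the X_plus/X_minus/Y_plus/Y_minus tables holds polynomial fit
# coefficients for one configuration; wavelength_to_xy_config just evaluates
# them with np.poly1d at the requested wavelength:
def _demo_poly_eval(lad=1.05):
    row = [0.5, -2.0, 1.0]                      # assumed quadratic fit, highest power first
    return np.poly1d(row)(lad)                  # detector position [mm]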
def get_psf_txt_lines(config, wave, band='YJ', use_centroid=False, base_path='.'):
if use_centroid:
centroid_prefix = ""
else:
centroid_prefix = "no_"
psf_path = os.path.join(
base_path, "PSF_{}".format(band), "{}use_centroid".format(centroid_prefix),
"config_{}_wave_{}.txt".format(config, wave)
)
with io.open(psf_path, mode='r', encoding='utf-16') as f:
psf_txt = f.readlines()
f.close()
return psf_txt
def parse_psf_txt_lines(psf_txt_lines):
def parse_wavelength():
# 0.9194 µm at 0.0000, 0.0000 (deg).
return np.double(psf_txt_lines[8].split(" ")[0].strip())
def parse_data_spacing():
# Data spacing is 0.422 µm.
data_line = psf_txt_lines[9]
split_txt = data_line.split(" is ")[1]
spacing = split_txt.split(" ")[0]
return np.double(spacing)
def parse_pupil_grid():
# Pupil grid size: 32 by 32
data_line = psf_txt_lines[12]
split_txt = data_line.split(": ")[1]
x, y = split_txt.split(" by ")
        return int(x), int(y)
def parse_image_grid():
# Image grid size: 32 by 32
data_line = psf_txt_lines[13]
split_txt = data_line.split(": ")[1]
x, y = split_txt.split(" by ")
        return int(x), int(y)
def parse_center_point():
# Center point is: 17, 17
data_line = psf_txt_lines[14]
split_txt = data_line.split(": ")[1]
x, y = split_txt.split(", ")
        return int(x), int(y)
def parse_center_coords():
# Center coordinates : 6.20652974E+00, -4.18352207E-01 Millimeters
data_line = psf_txt_lines[15]
split_txt = data_line.split(": ")[1]
x, y = split_txt.split(", ")
x = x.strip()
y = y.strip()
y = y.split(" ")[0]
return np.double(x), np.double(y)
def parse_data_area():
# Data area is 13.517 by 13.517 µm.
data_line = psf_txt_lines[10]
split_txt = data_line.split("is ")[1]
x, y = split_txt.split(" by ")
return np.double(x.strip())
def parse_headers():
header = {
"wavelength": parse_wavelength(),
"dataSpacing": parse_data_spacing(),
"pupilGridX": parse_pupil_grid()[0],
"pupilGridY": parse_pupil_grid()[1],
"imgGridX": parse_image_grid()[0],
"imgGridY": parse_image_grid()[1],
"centerPtX": parse_center_point()[0],
"centerPtY": parse_center_point()[1],
"centerCoordX": parse_center_coords()[0],
"centerCoordY": parse_center_coords()[1],
"dataArea": parse_data_area(),
}
psfi = co.namedtuple(
'PSFinfo',
['dataSpacing', 'dataArea', 'pupilGridX', 'pupilGridY', 'imgGridX', 'imgGridY', 'centerPtX',
'centerPtY', 'centerCoordX', 'centerCoordY'])
psfInfo = psfi(
header['dataSpacing'], header['dataArea'], header['pupilGridX'], header['pupilGridY'], header['imgGridX'],
header['imgGridY'], header['centerPtX'], header['centerPtY'], header['centerCoordX'], header['centerCoordY']
)
return psfInfo
def parse_data():
start_line = 22
end_line = len(psf_txt_lines)
data_lines = psf_txt_lines[start_line:end_line]
data_txt = [(txt.strip()).split("\t ") for txt in data_lines]
data = [[np.double(intensity) for intensity in line] for line in data_txt]
# return data
return np.swapaxes(data, 0, 1)
return parse_wavelength(), parse_headers(), parse_data()
def get_psfs(conf_orders, wave_range=8, band='YJ'):
psfs = {}
for conf_order in range(len(conf_orders)):
order = conf_orders[conf_order]
psfs[order] = {}
for wave in range(wave_range):
psf_txt = get_psf_txt_lines(conf_order+1, wave+1, band)
psf = parse_psf_txt_lines(psf_txt)
psfs[order][psf[0]] = (psf[1], psf[2])
return psfs
default_config_to_order_array = list(range(30, 45))
default_config_to_order_array.reverse()
default_config_to_order_array = np.asarray(default_config_to_order_array)
default_ccd = PyEchelle.CCD(2048, 2048, 19, 'y', name='H2RG')
def affine_tsv_filename():
filenames = os.listdir(".")
    affine_filenames = [re.findall(r'affine_\d+\.tsv', f) for f in filenames]
    found_ints = [re.findall(r'\d+', f[0]) for f in affine_filenames if f]
found_ints = [int(i[0]) for i in found_ints if i]
if found_ints:
found_int = max(found_ints) + 1
else:
found_int = 0
return "affine_{0:03d}.tsv".format(found_int)
def do_affine_transformation_calculation(
ccd=default_ccd, config_to_order_array=default_config_to_order_array, band='YJ', sw=80, sh=800
):
"""
Calculates Affine Matrices that describe spectrograph
The spectrograph can be described by affine transformations from the input slit to the focal plane.
an affine transofmration can be described by a 3x3 matrix.
this function calculates the 3x3 matrix per wavelength and order that matches the input slit to the focal plane
:param band:
:type band:
:param config_to_order_array:
:type config_to_order_array:
:param ccd:
:type ccd: PyEchelle.CCD
:param fw: fiber/slit width [microns]
:param fh: fiber/slit height [microns]
:return:
"""
from skimage import transform as tf
ray_trace_csv = 'RIMAS_{}_affine_dependencies.csv'.format(band.upper())
df = pd.read_csv(ray_trace_csv, encoding='utf-16')
    df['config'] = df['config'].astype(int)
df['order'] = config_to_order_array[df['config']-1]
unique_orders = df['order'].unique()
fields = df[['fieldy', 'fieldx']]
unique_fields = fields.drop_duplicates()
unique_fields_array = unique_fields.to_numpy()
nfields = len((unique_fields_array.tolist()))
# norm_field = np.zeros(fields.shape)
# norm_field[fields > 0] = 1
# norm_field[fields < 0] = -1
norm_field = fields.loc[0:(nfields-1), :]
norm_field = norm_field.to_numpy()
    norm_field = norm_field.astype(int)
norm_field_list = norm_field.tolist()
# nw = fields[0].max()
# nh = fields[1].max()
fw = sw
fh = sh
sampling_input_x = fw
sampling_input_y = fh
res = {
        'MatricesPerOrder': int(unique_orders.shape[0]),
'norm_field': norm_field_list,
        'sampling_input_x': int(sampling_input_x)
}
print('Field width: ' + str(fw))
print('Field height: ' + str(fh))
res['field_width'] = np.double(fw)
res['field_height'] = np.double(fh)
# sampling_x = sampling_input_x
# sampling_y = sampling_input_x * fh / fw
src = np.array(norm_field, dtype=float)
src[:, 0] -= np.min(src[:, 0])
src[:, 1] -= np.min(src[:, 1])
src[:, 0] /= np.max(src[:, 0])
src[:, 1] /= np.max(src[:, 1])
# src[:, 0] *= sampling_x
# src[:, 1] *= sampling_y
# ppp = []
dst_x = df['y'].to_numpy()
dst_y = df['x'].to_numpy()
orders = df['order'].to_numpy()
wavelength = df['wavelength'].to_numpy()
# ppp.append(np.array(self.do_spectral_format(nPerOrder=nPerOrder, FSRonly=False, hx=f[0], hy=f[1])))
# ppp = np.array(ppp)
dst_x = np.array(dst_x)
dst_y = np.array(dst_y)
dst = np.vstack((dst_x, dst_y))
dst /= (ccd.pixelSize / 1000.)
dst += ccd.Nx / 2
    dst = dst.reshape(2, len(dst[0]) // nfields, nfields).transpose((1, 2, 0))
    orders = orders.reshape((len(orders) // nfields, nfields))
    wavelength = wavelength.reshape((len(wavelength) // nfields, nfields))
affine_matrices = {}
transformations = {}
p_headers = ["p{:d}".format(i) for i in range(nfields)]
src_headers = ["src{:d}".format(i) for i in range(nfields)]
affine_tsv_headers = ["order", "wavelength"] + p_headers + src_headers + [
"rotation", "scale0", "scale1", "shear", "translation0", "translation1"
]
affine_save_lines = ["\t".join(affine_tsv_headers)]
for order, wavel, p in zip(orders, wavelength, dst):
print("affine transformation inputs {} {}".format(src,p))
p_list = [i for i in p]
src_list = [i for i in src]
inputs_list = [order[0], wavel[0]] + p_list + src_list
params = tf.estimate_transform('affine', src, p)
params_list = [
params.rotation, params.scale[0], params.scale[1],
params.shear, params.translation[0], params.translation[1]
]
affine_save_line = inputs_list + params_list
affine_save_lines.append("\t".join([str(i) for i in affine_save_line]))
        if order[0] in affine_matrices:
affine_matrices[order[0]].update({wavel[0]: np.array(params_list)})
else:
affine_matrices[order[0]] = {wavel[0]: np.array(params_list)}
with open(affine_tsv_filename(), 'w') as f:
f.write("\n".join(affine_save_lines))
res['matrices'] = affine_matrices
return res
def calculate_blaze_wavelength(blaze_angle_deg, gpmm, order):
blaze_angle = np.deg2rad(blaze_angle_deg)
d = 1000 / gpmm
blaze_wl = 2 * d * np.sin(blaze_angle) / order
print("order: {} blaze_wl: {}".format(order, blaze_wl))
return blaze_wl
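# --- Editor's hedged check of the grating equation used above ---
# In Littrow, m * lambda_blaze = 2 * d * sin(theta_blaze) with d = 1000/gpmm
# in microns. Using the RIMAS YJ values assumed elsewhere in this file
# (49.9 deg blaze, 1000/40 grooves/mm):
def _demo_blaze_wavelength():
    return calculate_blaze_wavelength(49.9, 1000 / 40, 40)  # ~1.53 micron in order 40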
def generate_orders(min_order, max_order, blaze_angle_deg, gpmm):
order_range = range(min_order, max_order+1)
orders = {}
for order in order_range:
fsr_min = 0
fsr_max = 10
wavelength_min = 0
wavelength_max = 10
blaze_wavelength = calculate_blaze_wavelength(blaze_angle_deg, gpmm, order)
orders[order] = PyEchelle.Order(order, blaze_wavelength, wavelength_min, wavelength_max, fsr_min, fsr_max)
return orders
class RIMAS(PyEchelle.Echelle):
def __init__(self, ln=None, name=''):
PyEchelle.Echelle.__init__(self, ln, name)
    def get_psfs(self, nPerOrder=1, fieldnumber=3, fieldposition=[0., 0.]):
        # the original stub called get_psfs() with no arguments, which raises a
        # TypeError; delegating with the default config->order map is an assumption
        return get_psfs(default_config_to_order_array)
if __name__ == '__main__':
# filename = 'RIMAS_YJ.hdf'
# spec = Spectrograph(49.9, 1000/40, 'RIMAS_YJ')
# PyEchelle.save_spectrograph_info_to_hdf(filename, spec)
# ccd = PyEchelle.CCD(4096, 4096, 10, dispersionDirection='y')
# PyEchelle.save_CCD_info_to_hdf(filename, ccd)
# print(parse_psf_txt_lines(get_psf_txt_lines(1, 1)))
ln = pyz.createLink()
filename = os.path.join(
r'ZEMAX',
'RIMAS_band1_echelle_capone_130711a_jc-analysis-190316a.zmx')
ln.zLoadFile(filename)
spectrograph = PyEchelle.Echelle(ln, 'RIMAS_YJ')
spectrograph.analyseZemaxFile(echellename='ech s2', blazename='Blaze', gammaname='Gamma')
spectrograph.minord = 30
spectrograph.maxord = 44
spectrograph.blaze = 49.9
spectrograph.gamma = 0
spectrograph.grmm = 1000 / 40
spectrograph.setCCD(PyEchelle.CCD(2048, 2048, 19, name='H2RG'))
    config_array = list(range(30, 45))
config_array.reverse()
config_array = np.asarray(config_array)
att = do_affine_transformation_calculation(spectrograph.CCD, config_array, 'YJ')
psfs = get_psfs(config_array, 8, 'YJ')
directory = r'..\..\Documents\echelle\spectrographs'
files = os.listdir(directory)
    iterations = [re.findall(r'\d+', f) for f in files]
iterations = [int(i[0]) for i in iterations if i != []]
iteration = max(iterations) + 1
filename = os.path.join(directory, 'RIMAS_YJ_v{:d}.hdf'.format(iteration))
PyEchelle.save_spectrograph_info_to_hdf(filename, spectrograph)
PyEchelle.save_CCD_info_to_hdf(filename, spectrograph.CCD)
PyEchelle.save_transformation_to_hdf(filename, att, 1)
PyEchelle.save_psfs_to_hdf(filename, psfs)
``` |
{
"source": "joee9/spotifystats",
"score": 3
} |
#### File: joee9/spotifystats/count.py
```python
import spotipy.util as util
import spotipy
#system related
from os.path import exists
import json
# misc
import pandas as pd
# user specific details
from secrets import username, client_id, client_secret, home_path
def get_auth():
redirect_uri = 'http://localhost:7777/callback'
# scope = 'user-read-recently-played'
scope = 'user-top-read'
token = util.prompt_for_user_token(username=username, scope=scope, client_id=client_id, client_secret=client_secret, redirect_uri=redirect_uri)
return spotipy.Spotify(auth=token)
def get_track_count(sp, id, tracks, db):
if not id in tracks:
if id in db:
data = db[id]
name = data['name']
else:
data = sp.track(id)
name = data['name']
sp_url = data['external_urls']['spotify']
artist_names = []
artist_ids = []
for entry in data['album']['artists']:
artist_names.append(entry['name'])
artist_ids.append(entry['id'])
album_name = data['album']['name']
album_id = data['album']['id']
album_artwork_url = data['album']['images'][2]['url']
db[id] = {
'name': name,
'url': sp_url,
'artist_names': artist_names,
'artist_ids': artist_ids,
'album_name': album_name,
'album_id': album_id,
'artwork_url': album_artwork_url,
}
tracks[id] = {
'name': name,
            'timestamps': [], # initialize to empty list
'count': 0
}
return tracks[id]
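# --- Editor's hedged sketch (standalone; ids/names are made up) ---
# On a cache hit the Spotify client is never called, so the local-db path of
# get_track_count() can be exercised without any credentials:
def _demo_track_cache():
    db = {'abc': {'name': 'Cached Song', 'album_id': 'alb', 'artist_ids': []}}
    tracks = {}
    info = get_track_count(None, 'abc', tracks, db)
    return info['name'], info['count']          # -> ('Cached Song', 0)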
def get_album_count(sp, id, albums, db):
if not id in albums:
if id in db:
data = db[id]
name = data['name']
else:
data = sp.album(id)
name = data['name']
sp_url = data['external_urls']['spotify']
artist_names = []
artist_ids = []
for entry in data['artists']:
artist_names.append(entry['name'])
artist_ids.append(entry['id'])
artwork_url = data['images'][2]['url']
db[id] = {
'name': name,
'url': sp_url,
'artist_names': artist_names,
'artist_ids': artist_ids,
'artwork_url': artwork_url,
}
albums[id] = {
'name': name,
            'timestamps': [], # initialize to empty list
'count': 0
}
return albums[id]
def get_artist_count(sp, id, artists, db):
if not id in artists:
if id in db:
data = db[id]
name = data['name']
else:
data = sp.artist(id)
name = data['name']
sp_url = data['external_urls']['spotify']
artwork_url = data['images'][2]['url']
genres = data['genres']
db[id] = {
'name': name,
'url': sp_url,
'artwork_url': artwork_url,
'genres': genres,
}
artists[id] = {
'name': name,
            'timestamps': [], # initialize to empty list
'count': 0
}
return artists[id]
def analyze_track(sp, row, analyses, dbs):
track_id, timestamp = row
track_db, artist_db, album_db = dbs
tracks, artists, albums = analyses
def add_curr_song(info):
info['timestamps'].append(timestamp)
info['count'] += 1
track_info = get_track_count(sp, track_id, tracks, track_db)
add_curr_song(track_info)
album_info = get_album_count(sp, track_db[track_id]['album_id'], albums, album_db)
add_curr_song(album_info)
for artist_id in track_db[track_id]['artist_ids']:
artist_info = get_artist_count(sp, artist_id, artists, artist_db)
add_curr_song(artist_info)
def analyze_dataframe(sp, df, dbs):
tracks = {}
artists = {}
albums = {}
analyses = tracks, artists, albums
for i in range(len(df)):
analyze_track(sp, df.iloc[i,:], analyses, dbs)
return tracks, artists, albums
def sort_items(counts):
    # parameter renamed from 'dict' to avoid shadowing the built-in
    def sort_scheme(d): return -d['count'], d['name']
    total = 0
    result = []
    for id in counts.keys():
        d = counts[id]
        count = d['count']
        result.append({'id': id, 'name': d['name'], 'count': count, 'timestamps': d['timestamps']})
        total += count
    result.sort(key=sort_scheme)
    return (result, total)
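# --- Editor's hedged usage sketch (names/counts are made up) ---
# sort_items flattens the per-id dicts into a list sorted by descending play
# count (ties broken alphabetically by name) and also returns the grand total:
def _demo_sort_items():
    counts = {
        'id1': {'name': 'Song A', 'count': 3, 'timestamps': []},
        'id2': {'name': 'Song B', 'count': 5, 'timestamps': []},
    }
    ranked, total = sort_items(counts)
    return ranked[0]['name'], total             # -> ('Song B', 8)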
def get_counts(sp, df, dbs):
    track_cts, artist_cts, album_cts = analyze_dataframe(sp, df, dbs)
    # keep each (sorted list, total) pair packed; main() unpacks them later
    track_cts = sort_items(track_cts)
    artist_cts = sort_items(artist_cts)
    album_cts = sort_items(album_cts)
    return track_cts, artist_cts, album_cts
def print_top(list, num=10):
list = list[0:num]
for i, item in enumerate(list):
name = item['name']
count = item['count']
print(f'{i+1:2d}. ({count}) {name:35s}')
def main():
# test main
sp = get_auth()
yyyy_mm = '2022-01'
path = f'{home_path}/data/{yyyy_mm}'
db_path = f'{path}-database.txt'
if exists(db_path):
with open(db_path) as f:
dbs = json.loads(f.read())
else:
dbs = {},{},{}
month_file = f'{path}-songlist.txt'
df = pd.read_csv(month_file)
result = get_counts(sp, df, dbs)
with open(f'{path}-counts.txt','w') as f:
f.write(json.dumps(result))
with open(db_path, 'w') as f:
f.write(json.dumps(dbs))
track_cts, artist_cts, album_cts = result
track_cts, track_total = track_cts
artist_cts, artist_total = artist_cts
album_cts, album_total = album_cts
print_top(track_cts)
print(track_total)
print_top(artist_cts)
print_top(album_cts)
if __name__ == '__main__':
main()
```
#### File: joee9/spotifystats/get_rp.py
```python
import spotipy.util as util # for getting authorization
import spotipy # for getting tracks, etc.
# time related packages
from datetime import datetime, timedelta
import pytz
est = pytz.timezone("America/New_York")
utc = pytz.timezone("UTC")
from dateutil import parser
# os related
from os.path import exists
import os
# for analysis
import pandas as pd
# client information
from secrets import username, client_id, client_secret, home_path, gd_path
def get_auth():
redirect_uri = 'http://localhost:7777/callback'
# scope = 'user-read-recently-played'
scope = "user-top-read"
token = util.prompt_for_user_token(username=username, scope=scope, client_id=client_id, client_secret=client_secret, redirect_uri=redirect_uri)
return spotipy.Spotify(auth=token)
def get_rp_songs(sp, day):
my = datetime.strftime(day, "%Y-%m") # month year; for file paths
path = f"{home_path}/data/{my}-songlist.txt"
    # beginning of very next day; latest timestamp that could be added
latest_time = (day + timedelta(days = 1)).replace(hour=0,second=0,minute=0,microsecond=0)
if exists(path): # if monthly file already exists
new_month = False
df = pd.read_csv(path)
if len(df) == 0: # file created, but no songs yet; a new month
new_month = True
most_recent_time = day.astimezone(est).replace(day=1,second=0,minute=0,hour=0,microsecond=1)
else:
most_recent_timestamp = df.tail(1).to_numpy()[0][1]
most_recent_time = parser.parse(most_recent_timestamp)
else:
new_month = True
df = pd.DataFrame(columns = ["ID", "Timestamp"])
most_recent_time = day.astimezone(est).replace(day=1,second=0,minute=0,hour=0,microsecond=1)
# get recently played songs
lim = 40
recently_played = sp.current_user_recently_played(limit=lim)
# find the time that the oldest track in recently_played was played
oldest_rp_ts = recently_played["items"][lim-1]["played_at"]
oldest_rp_time = parser.parse(oldest_rp_ts)
# earliest time that can be in this month; beginning of first day
earliest_time = day.replace(day=1,second=0,minute=0,hour=0,microsecond=1)
if oldest_rp_time > most_recent_time: # all rp tracks are more recent than the df
idx = 1 # add all songs to the tail of df
    # determine all times in df that are also included in rp and remove them
elif not new_month:
n = -1
for i in range(len(df)):
curr_ts = parser.parse(df.iloc[i,1])
if curr_ts >= oldest_rp_time:
n = (len(df)-1) - i + 1
break
if n != -1:
# delete all tracks that are newer than last rp track
df.drop(df.tail(n).index,inplace=True)
idx = 1 # add all rp songs to the tail of df
else:
# determine which songs from rp are from this month and add to df
# only for new month
idx = lim + 1
for i in range(1,lim+1):
track_ts = recently_played["items"][lim - i]["played_at"]
parsed_track_ts = parser.parse(track_ts)
if parsed_track_ts > most_recent_time:
idx = i
break
# add appropriate songs to df
for i in range(idx, lim+1):
track_id = recently_played["items"][lim - i]["track"]["id"]
track_ts = recently_played["items"][lim - i]["played_at"]
# only add if in this month
track_time = parser.parse(track_ts)
if track_time >= earliest_time and track_time < latest_time:
            df = pd.concat([df, pd.DataFrame([{
                "ID": track_id,
                "Timestamp": track_ts
            }])], ignore_index=True)  # DataFrame.append was removed in pandas 2.x
# write back to df
df.to_csv(path, index=False)
# copy to backup location
os.system(f"cp {home_path}/data/{my}-songlist.txt {gd_path}/backups/{my}-songlist.txt")
def main():
sp = get_auth()
# ========== GET DAYS TO RUN (usually just today)
today = datetime.today().astimezone(est)
yesterday = today - timedelta(days=1)
t_str = datetime.strftime(today,"%Y-%m")
y_str = datetime.strftime(yesterday,"%Y-%m")
days_to_run = [today]
if t_str != y_str: # today and yesterday were in different months
days_to_run.append(yesterday)
# run necessary days
for day in days_to_run:
get_rp_songs(sp, day)
if __name__ == "__main__":
main()
``` |
{
"source": "JoeEarlam/KiBoM",
"score": 3
} |
#### File: JoeEarlam/KiBoM/KiBOM_CLI.py
```python
from __future__ import print_function
import re
import csv
import sys
import os
import shutil
import argparse
here = os.path.abspath(os.path.dirname(sys.argv[0]))
sys.path.append(here)
sys.path.append(os.path.join(here,"KiBOM"))
from bomlib.columns import ColumnList
from bomlib.netlist_reader import *
from bomlib.bom_writer import *
from bomlib.preferences import BomPref
verbose = False
def close(*arg):
print(*arg)
sys.exit(0)
# Simple debug message handler
def say(*arg):
if verbose:
print(*arg)
def isExtensionSupported(filename):
result = False
extensions = [".xml",".csv",".txt",".tsv",".html"]
for e in extensions:
if filename.endswith(e):
result = True
break
return result
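# --- Editor's hedged usage sketch ---
# The output format is inferred purely from the file extension, so:
def _demo_extension_check():
    return isExtensionSupported("bom.html"), isExtensionSupported("bom.pdf")  # (True, False)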
parser = argparse.ArgumentParser(description="KiBOM Bill of Materials generator script")
parser.add_argument("netlist", help='xml netlist file. Use "%%I" when running from within KiCad')
parser.add_argument("output", default="", help='BoM output file name.\nUse "%%O" when running from within KiCad to use the default output name (csv file).\nFor e.g. HTML output, use "%%O.html"')
parser.add_argument("-n", "--number", help="Number of boards to build (default = 1)", type=int, default=None)
parser.add_argument("-v", "--verbose", help="Enable verbose output", action='count')
parser.add_argument("-r", "--variant", help="Board variant, used to determine which components are output to the BoM", type=str, default=None)
parser.add_argument("--cfg", help="BoM config file (script will try to use 'bom.ini' if not specified here)")
parser.add_argument("-s","--separator",help="CSV Separator (default ',')",type=str, default=None)
args = parser.parse_args()
input_file = args.netlist
if not input_file.endswith(".xml"):
close("{i} is not a valid xml file".format(i=input_file))
verbose = args.verbose is not None
input_file = os.path.abspath(input_file)
say("Input:",input_file)
#look for a config file!
#bom.ini by default
ini = os.path.abspath(os.path.join(os.path.dirname(input_file), "bom.ini"))
config_file = ini #default value
#user can overwrite with a specific config file
if args.cfg:
config_file = args.cfg
#read preferences from file. If file does not exists, default preferences will be used
pref = BomPref()
have_cfile = os.path.exists(config_file)
if have_cfile:
pref.Read(config_file)
say("Config:",config_file)
#pass various command-line options through
pref.verbose = verbose
if args.number is not None:
pref.boards = args.number
pref.separatorCSV = args.separator
if args.variant is not None:
pref.pcbConfig = args.variant
print("PCB variant:", pref.pcbConfig)
#write preference file back out (first run will generate a file with default preferences)
if not have_cfile:
pref.Write(config_file)
say("Writing preferences file %s"%(config_file,))
#individual components
components = []
#component groups
groups = []
#read out the netlist
net = netlist(input_file, prefs = pref)
#extract the components
components = net.getInterestingComponents()
#group the components
groups = net.groupComponents(components)
columns = ColumnList(pref.corder)
#read out all available fields
for g in groups:
for f in g.fields:
columns.AddColumn(f)
#don't add 'boards' column if only one board is specified
if pref.boards <= 1:
columns.RemoveColumn(ColumnList.COL_GRP_BUILD_QUANTITY)
say("Removing:",ColumnList.COL_GRP_BUILD_QUANTITY)
#todo
write_to_bom = True
result = True
#Finally, write the BoM out to file
if write_to_bom:
output_file = args.output
if output_file is None:
output_file = input_file.replace(".xml","_bom.csv")
# KiCad BOM dialog by default passes "%O" without an extension. Append our default
if not isExtensionSupported(output_file):
output_file += "_bom.csv"
# If required, append the schematic version number to the filename
if pref.includeVersionNumber:
fsplit = output_file.split(".")
fname = ".".join(fsplit[:-1])
fext = fsplit[-1]
output_file = str(fname) + str(net.getVersion()) + "." + fext
output_file = os.path.abspath(output_file)
say("Output:",output_file)
result = WriteBoM(output_file, groups, net, columns.columns, pref)
os.remove(input_file)
os.remove(output_file + ".tmp")
if result:
sys.exit(0)
else:
sys.exit(-1)
``` |
{
"source": "joe-easley/AzureStorageWrapper",
"score": 3
} |
#### File: main/storagewrapper/_exceptions.py
```python
class AuthenticationError(Exception):
"""
Exceptions raised in Authentication
"""
def __init__(self, message):
self.message = message
super().__init__(self.message)
def __str__(self):
return self.message
class InvalidArguments(Exception):
def __init__(self, message):
self.message = message
super().__init__(self.message)
def __str__(self):
return self.message
class FileShareFunctionsError(Exception):
def __init__(self, message):
self.message = message
super().__init__(self.message)
def __str__(self):
return self.message
class BlobFunctionsError(Exception):
def __init__(self, message):
self.message = message
super().__init__(self.message)
def __str__(self):
return self.message
class QueueFunctionsError(Exception):
def __init__(self, message):
self.message = message
super().__init__(self.message)
def __str__(self):
return self.message
class InitialisationError(Exception):
def __init__(self, message):
self.message = message
super().__init__(self.message)
def __str__(self):
return self.message
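# --- Editor's hedged usage sketch (message text is made up) ---
# All wrapper exceptions share the same shape: construct with a message and
# str() returns it, so call sites can catch per-module failures uniformly:
def _demo_raise():
    try:
        raise AuthenticationError("token acquisition failed")
    except AuthenticationError as e:
        return str(e)                           # -> "token acquisition failed"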
```
#### File: main/storagewrapper/_queue.py
```python
from azure.storage.queue import QueueServiceClient
from storagewrapper._exceptions import QueueFunctionsError
import sys
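# --- Editor's hedged usage sketch (account/queue names are placeholders) ---
# One QueueFunctions instance wraps a storage account; per-queue clients are
# derived on demand from the account-level QueueServiceClient (see the class
# below). No network call is made until a queue operation actually runs.
def _demo_queue_usage(token):
    qf = QueueFunctions(token, storage_account_name="mystorageacct")
    client = qf._gen_queue_client("my-queue")   # assumed queue name
    return client.queue_name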
class QueueFunctions:
"""
Using a token generated in AuthenticateFunctions gives access to the following queue functions.
clear messages
receive message
delete message
send message
update message
create queue
delete queue
Required params:
param token: Authentication.token
Optional params:
param storage_account: str
param queue_name: str
param queue_client: QueueClient obj
If a queue client exists (eg after using create queue) then this can be client can be used rather than a fresh client being generated
"""
def __init__(self, token, storage_account_name, queue_name=None, queue_client=None, handle_exceptions=False):
self.token = token
self.handle_exceptions = handle_exceptions
self.queue_client = queue_client
self.storage_account_name = storage_account_name
self.queue_name = queue_name
def __str__(self):
return f"Functions for operating queue storage within storage account:'{self.storage_account_name}'"
def __handle_errors(self, func_name, error, exception_type=None):
error_message = f"{error} in {func_name}"
if self.handle_exceptions:
return False
elif not self.handle_exceptions:
self.__raise_exceptions(message=error_message, exception_type=exception_type)
def __raise_exceptions(self, message, exception_type):
if exception_type is None:
raise QueueFunctionsError(message)
else:
raise exception_type(message)
def _generate_queue_service_client(self):
"""
Creates a queue service client using a token created by authentication module
param storage_account_name: str
param token: Authentication obj
return QueueServiceClient obj
"""
url = f"https://{self.storage_account_name}.blob.core.windows.net/"
queue_service_client = QueueServiceClient(account_url=url, credential=self.token)
return queue_service_client
def _gen_queue_client(self, queue_name):
"""
Generates a queue client using a queue service client
param storage_account: str
param queue_name: str
return QueueClient obj
"""
queue_service_client = self._generate_queue_service_client()
queue_client = queue_service_client.get_queue_client(queue_name)
return queue_client
def clear_messages(self, queue_name, timeout=10):
"""
Deletes all messages from a queue. Timeout defaults to 10 seconds.
param timeout: int
"""
try:
queue_client = self._gen_queue_client(queue_name=queue_name)
queue_client.clear_messages(timeout=timeout)
except Exception as e:
status = self.__handle_errors(sys._getframe().f_code.co_name, e)
return status
def receive_message(self, timeout=10, visibility_timeout=300):
"""
Removes a message from the front of the queue.
Returns QueueMessage Class.
Server timeout defaults to 10 seconds
Visibility timeout defaults to 300 seconds
param timeout: int
param visibility_timeout: int
return message: QueueMessage class
"""
try:
message = self.queue_client.receive_message(visibility_timeout=visibility_timeout, timeout=timeout)
return message
except Exception as e:
status = self.__handle_errors(sys._getframe().f_code.co_name, e)
return status
def delete_message(self, message, pop_receipt, timeout=10):
"""
Deletes a message from the queue.
Timeout defaults to 10 seconds
Message can either be a message object or id as a str
param message: str or QueueMessage
param pop_receipt: str
param timeout: int
return None
"""
try:
self.queue_client.delete_message(message=message, pop_receipt=pop_receipt, timeout=timeout)
return None
except Exception as e:
status = self.__handle_errors(sys._getframe().f_code.co_name, e)
return status
def send_message(self, content, visibility_timeout=604800, time_to_live=604800, timeout=10):
"""
Sends a message to queue.
Default time to live is 7 days, however this can be specified in seconds. Set to infinity with -1.
visibility timeout specifies the time that the message will be invisible. After the timeout expires, the message will become visible. Defaults to 7 days
param content: str
param visibility_timeout: int
return sent_message: QueueMessage object
"""
try:
sent_message = self.queue_client.send_message(content=content, visibility_timeout=visibility_timeout, time_to_live=time_to_live, timeout=timeout)
return sent_message
except Exception as e:
status = self.__handle_errors(sys._getframe().f_code.co_name, e)
return status
def update_message(self, message, pop_receipt, content, visibility_timeout=604800, timeout=10):
"""
Updates the visibility timeout of a message, or updates the content of a message
Server timeout defaults to 10 seconds
param message: str or QueueMessage
param pop_receipt: str
param content: str
param visibility_timeout: int
param timeout: int
return updated_message: QueueMessage object
"""
try:
updated_message = self.queue_client.update_message(message, pop_receipt=pop_receipt, content=content, visibility_timeout=visibility_timeout, timeout=timeout)
return updated_message
except Exception as e:
status = self.__handle_errors(sys._getframe().f_code.co_name, e)
return status
def create_queue(self, name, metadata, timeout=10):
"""
Creates a new queue in the storage account. Timeout defaults to 10 seconds.
Returns a queue client object for created queue
param name: name
param metadata: dict
param timeout: int
return QueueClient obj
"""
try:
queue_service_client = self._generate_queue_service_client()
queue_client = queue_service_client.create_queue(name=name, metadata=metadata, timeout=timeout)
return queue_client
except Exception as e:
status = self.__handle_errors(sys._getframe().f_code.co_name, e)
return status
def delete_queue(self, queue_name, timeout=120):
"""
Deletes the queue and all contained messages
Operation likely to take at least 40 seconds. Configure timeout accordingly. Default 120 seconds
param queue_name: str
param timeout: int (secs)
return None
"""
try:
queue_service_client = self._generate_queue_service_client()
queue_service_client.delete_queue(queue=queue_name, timeout=timeout)
return None
except Exception as e:
status = self.__handle_errors(sys._getframe().f_code.co_name, e)
return status
def list_queues(self, name_starts_with="", include_metadata=True, results_per_page=100, timeout=60):
"""
Returns a generator to list the queues under the specified account.
The generator will lazily follow the continuation tokens returned by the service and stop when all queues have been returned.
All params are optional, default behaviour is to list all queues in specified account.
param name_starts_with: str
param include_metadata: bool default=True,
results_per_page: int
param timeout: int
return iterable (auto-paging) of QueueProperties
"""
try:
queue_service_client = self._generate_queue_service_client()
list_queues = queue_service_client.list_queues(
name_starts_with=name_starts_with,
include_metadata=include_metadata,
results_per_page=results_per_page,
timeout=timeout
)
return list_queues
except Exception as e:
status = self.__handle_errors(sys._getframe().f_code.co_name, e)
return status
def create_queue_service_client(self):
queue_service_client = self._generate_queue_service_client()
return queue_service_client
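# --- minimal usage sketch (editor's illustration, not from the original repo;
# the account and queue names below are hypothetical) ---
#
# queues = QueueFunctions(token, storage_account_name="mystorageacct")
# client = queues.create_queue(name="jobs", metadata={"env": "dev"})
# queues.queue_client = client
# queues.send_message("hello")  # returns a QueueMessage on success
# queues.delete_queue("jobs")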
``` |
{
"source": "joeedh/cserver",
"score": 3
} |
#### File: joeedh/cserver/cs_parse.py
```python
import os.path
from ply import yacc
from cs_lex import *
from cs_ast import *
"""
grammar format:
<$include file.ct>
<#
c code
#>
<html code>
<#include "something.ccs">
<a href=<#=str "some_c_func()">>
so it's a templating system.
"""
def p_statementlist(p):
''' statementlist : statement
| statementlist statement
|
'''
if len(p) == 1:
p[0] = StatementList()
elif len(p) == 2:
p[0] = StatementList()
p[0].add(p[1])
elif len(p) == 3:
p[0] = p[1]
p[0].add(p[2])
def p_statement(p):
''' statement : code
| html
| binding
| include
'''
p[0] = p[1]
def p_code(p):
''' code : CODE
'''
p[0] = CodeNode(p[1])
def p_include(p):
'''include : INCLUDE
'''
p[0] = IncludeNode(p[1])
def p_binding(p):
''' binding : BINDING
'''
start = p[1].find("|")
type = p[1][:start]
val = p[1][start+1:]
p[0] = BindingNode(val.strip(), type)
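# illustrative note (added for clarity): judging from the split above, the lexer
# appears to emit BINDING tokens as 'type|expr', e.g. <#=str "some_c_func()">
# would arrive as 'str|some_c_func()' and produce BindingNode('some_c_func()', 'str')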
def p_html(p):
''' html : HTML
'''
p[0] = HtmlNode(p[1])
class JSCCError (RuntimeError):
pass
def get_lineno(p):
if p is None: # EOF: yacc passes None to p_error
return 0
line = p.lineno
if type(p.lineno) != int:
line = line(0)
return line
def get_linestr(p):
if p == None: return "(EOF)"
i = p.lexpos
ld = p.lexer.lexdata
col = 0
while i >= 0 and ld[i] != "\n":
i -= 1
col += 1
if ld[i] == "\n":
i += 1
col -= 1
start = i
linestr = ""
colstr = ""
i = p.lexpos
while i < len(ld) and ld[i] != "\n":
i += 1
end = i
for i in range(col):
colstr += " "
colstr += "^"
linestr = ld[start:end]
return linestr, colstr
def p_error(p):
line = get_lineno(p)+1
if not glob.g_msvc_errors:
errstr = "\n%s(%i): Syntax Error" % (glob.g_file, line)
sys.stderr.write(errstr+"\n");
linestr, colstr = get_linestr(p)
sys.stderr.write(" %s\n %s" % (linestr, colstr))
else:
linestr, colstr = get_linestr(p)
errstr = "%s(%s,%s): error: Syntax Error\n" % (os.path.abspath(glob.g_file), line, len(colstr))
sys.stderr.write(errstr)
raise JSCCError("Parse error")
parser = yacc.yacc()
if __name__ == "__main__":
tst = """
<!DOCTYPE html>
<html>
<head><title><#=PAGE_TITLE#></title>
</head>
<body>
<#
int i;
char arr[32];
for (i=0; i<32; i++) {
#>
<p><#=i></p><br/>
<#
}
#>
"""
from cs_process import *
ret = parser.parse(tst)
compact_strnodes(ret, StrNode)
compact_strnodes(ret, HtmlNode)
print(ret)
``` |
{
"source": "joeedh/FacialAutoRigger",
"score": 2
} |
#### File: FacialAutoRigger/FacialAutoRigger/__init__.py
```python
import imp
from . import ops, ui, rigger, props, utils, shapekeys, data, cage
from . import spline, shapedrivers, serializer, rig_utils
imp.reload(data)
imp.reload(utils)
imp.reload(serializer)
imp.reload(spline)
imp.reload(props)
imp.reload(rig_utils)
imp.reload(cage)
imp.reload(shapedrivers)
imp.reload(ui)
imp.reload(rigger)
imp.reload(shapekeys)
imp.reload(ops)
from .utils import Registrar
bpy_exports = Registrar([
props.bpy_exports,
ops.bpy_exports,
ui.bpy_exports
]);
def register():
global bpy_exports
bpy_exports.register()
def unregister():
global bpy_exports
#bpy_exports.unregister()
```
#### File: FacialAutoRigger/FacialAutoRigger/props.py
```python
import bpy
from bpy.props import *
from .utils import Registrar, RegItem
class ModelSwapListItem (bpy.types.PropertyGroup):
object : PointerProperty(type=bpy.types.Object)
bpy.utils.register_class(ModelSwapListItem)
#motivated by need for separate lower/upper teeth meshes,
#instead of swapping meshes one at a time we swap them in groups
#
class ModelSwapGroup (bpy.types.PropertyGroup):
name : StringProperty(default="unnamed")
objects : CollectionProperty(type=ModelSwapListItem)
#XXX unfortunately seems like we can't defer
#registering this to bpy_exports
bpy.utils.register_class(ModelSwapGroup)
class ModelSwapper(bpy.types.PropertyGroup):
groups : CollectionProperty(type=ModelSwapGroup)
active : IntProperty()
show_all : BoolProperty(default=False)
bpy.utils.register_class(ModelSwapper)
class FaceRigProps (bpy.types.PropertyGroup):
teeth_models : PointerProperty(type=ModelSwapper)
meshob : PointerProperty(type=bpy.types.Object)
rigname : StringProperty(default="FaceRig")
metaob : PointerProperty(type=bpy.types.Object)
devmode : BoolProperty(default=False)
ismeta : BoolProperty(default=True)
#XXX unfortunately seems like we can't defer
#registering this to bpy_exports
bpy.utils.register_class(FaceRigProps)
class OnPostRegister (RegItem):
def reg(self):
bpy.types.Armature.facerig = PointerProperty(type=FaceRigProps)
bpy_exports = Registrar([
# ModelSwapList,
# FaceRigProps,
OnPostRegister()
])
```
#### File: FacialAutoRigger/FacialAutoRigger/serializer.py
```python
import bpy
from mathutils import *
from math import *
import json, random, time, os, sys
import bmesh
from .utils import saveBpyContext, loadBpyContext, ensureObjectMode
def bpyGenericSave(source, target):
for k in dir(source):
if k.startswith("_"):
continue
v = getattr(source, k)
if type(v) in [bool, int, float, str, bytes]:
target[k] = v
elif type(v) in [Vector, Quaternion, Euler]:
target[k] = list(v)
elif type(v) == Matrix:
target[k] = [list(v2) for v2 in v]
def bpyGenericLoad(source, target):
for k in source:
v = source[k]
if type(v) == list:
if len(v) == 0:
continue
if type(v[0]) == list and len(v) in [3, 4] and len(v[0]) == len(v): #matrix?
v = Matrix(v)
elif len(v) in [2, 3, 4] and type(v[0]) in [int, float]: #vector?
v = Vector(v)
else:
continue
elif type(v) not in [bool, int, float, str, bytes]:
continue
try:
setattr(target, k, v)
except:
print("Failed to set property", k);
def getTempObject(data, id="1"):
name = "__tempob_" + id
if name not in bpy.data.objects:
bpy.data.objects.new(name, data)
scene = bpy.context.scene
ob = bpy.data.objects[name]
if ob.name not in scene.objects:
scene.collection.objects.link(ob)
ob.data = data
return ob
def loadArmature(rarm, objs, name_suffix=""):
rctx = saveBpyContext()
ensureObjectMode()
arm = bpy.data.armatures.new(rarm["name"] + name_suffix)
name = arm.name
del rarm["name"]
ob = getTempObject(arm)
print("ARM", arm, name, name_suffix)
bpy.ops.object.select_all(action="DESELECT")
ob.select_set(True)
bpy.context.view_layer.objects.active = ob
print(list(rarm.keys()))
bpy.ops.object.mode_set(mode="EDIT")
bpyGenericLoad(rarm, arm)
print("ARM2", arm)
arm = bpy.data.armatures[name]
for rb in rarm["bones"]:
print(arm)
b = arm.edit_bones.new(rb["name"])
for rb in rarm["bones"]:
parent = rb["parent"]
del rb["parent"]
b = arm.edit_bones[rb["name"]]
if parent is not None:
b.parent = arm.edit_bones[parent]
bpyGenericLoad(rb, b)
b.tail = Vector(rb["tail"])
b.head = Vector(rb["head"])
loadBpyContext(rctx)
return arm
def loadObject(rob, objs, name_suffix=""):
print("name_suffix", name_suffix)
data = rob["data"]
if data["TYPE"] == "ARMATURE":
data = loadArmature(data, objs, name_suffix)
else:
print("Failed to load object", rob["name"])
return
name = rob["name"] + name_suffix
del rob["name"]
if name not in bpy.data.objects:
ob = bpy.data.objects.new(name, data)
bpy.context.scene.collection.objects.link(ob)
else:
ob = bpy.data.objects[name]
if name not in bpy.context.scene.objects:
bpy.context.scene.collection.objects.link(ob)
ob.data = data
ob = bpy.data.objects[name]
bpyGenericLoad(rob, ob)
return ob
def saveArmature(arm, refs):
ret = {"TYPE" : "ARMATURE"}
ret["bones"] = []
bpyGenericSave(arm, ret)
ret["name"] = arm.name
for b in arm.bones:
rb = {}
if b.parent:
rb["parent"] = b.parent.name
else:
rb["parent"] = None
ret["bones"].append(rb)
bpyGenericSave(b, rb)
rb["head"] = list(b.head_local)
rb["tail"] = list(b.tail_local)
return ret
def saveTextCurve(tc, refs):
ret = {"TYPE" : "TEXTCURVE"}
ret["text_boxes"] = []
bpyGenericSave(tc, ret)
for tb in tc.text_boxes:
rb = {}
bpyGenericSave(tb, rb)
ret["text_boxes"].append(rb)
return ret
def savePose(pose, refs):
ret = {"TYPE" : "POSE"}
ret["bones"] = []
for b in pose.bones:
rb = {}
if b.custom_shape is not None:
refs.add(b.custom_shape)
if b.parent:
rb["parent"] = b.parent.name
else:
rb["parent"] = None
ret["bones"].append(rb)
bpyGenericSave(b, rb)
return ret
def saveMesh(mesh, refs):
bm = bmesh.new()
bm.from_mesh(mesh)
ret = {"TYPE" : "MESH", "VERTS" : [], "FACES" : [], "EDGES" : []}
vs, es, fs = (ret["VERTS"], ret["FACES"], ret["EDGES"])
bm.verts.index_update()
bm.edges.index_update()
bm.faces.index_update()
for v in bm.verts:
rv = {}
rv["co"] = list(v.co)
rv["normal"] = list(v.normal)
rv["select"] = v.select
rv["hide"] = v.hide
rv["index"] = v.index
vs.append(rv)
for e in bm.edges:
re = {}
re["v1"] = e.verts[0].index
re["v2"] = e.verts[1].index
re["hide"] = e.hide
re["select"] = e.select
re["index"] = e.index
es.append(re)
for f in bm.faces:
rf = {}
rf["select"] = f.select
rf["hide"] = f.hide
rf["index"] = f.index
rf["verts"] = [v.index for v in f.verts]
fs.append(rf)
return ret
def saveData(ob, refs):
if isinstance(ob.data, bpy.types.Armature):
return saveArmature(ob.data, refs)
if isinstance(ob.data, bpy.types.Mesh):
return saveMesh(ob.data, refs)
if isinstance(ob.data, bpy.types.TextCurve):
return saveTextCurve(ob.data, refs)
print("WARNING: can't save object data for " + ob.name)
def saveObject(ob, refs=None):
refs = set() if refs is None else refs
ret = {}
ret["TYPE"] = "OBJECT"
ret["name"] = ob.name
ret["data"] = saveData(ob, refs)
if (ob.pose):
ret["pose"] = savePose(ob.pose, refs)
ret["location"] = list(ob.location)
ret["rotation_euler"] = list(ob.rotation_euler)
ret["scale"] = list(ob.scale)
if ob.parent is not None:
ret["parent"] = ob.parent.name
refs.add(ob.parent)
else:
ret["parent"] = None
return ret
def saveObjects(obs):
obs = set(obs)
ret = []
done = set()
stop = False
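# fixpoint sweep: saveObject() adds any newly referenced objects (parents,
# pose custom shapes) into `obs`, so keep iterating until a pass finds nothing new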
while not stop:
stop = True
for ob in list(obs):
if ob in done: continue
stop = False
done.add(ob)
ret.append(saveObject(ob, obs))
return ret
def saveInternalData():
obs = [
bpy.data.objects["_autorig_FaceRigBase"],
bpy.data.objects["_autorig_MetaFaceRig"],
bpy.data.objects["_autorig_InternalFaceRig"]
]
path = os.path.split(bpy.data.filepath)[0]
path = os.path.join(path, "FacialAutoRigger")
path = os.path.join(path, "data.json")
print(path)
ret = json.dumps(saveObjects(obs))
file = open(path, "w")
file.write(ret)
file.close()
def loadInternalData():
path = os.path.split(bpy.data.filepath)[0]
path = os.path.join(path, "FacialAutoRigger")
path = os.path.join(path, "data.json")
print(path)
file = open(path, "r")
ret = file.read()
file.close()
return json.loads(ret)
```
#### File: FacialAutoRigger/FacialAutoRigger/ui.py
```python
import bpy
from bpy.props import *
class ArmaturePanel (bpy.types.Panel):
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = "data"
@classmethod
def poll(cls, context):
return context.armature
def drawSwappableList(layout, context, prop):
ob = context.object
arm = context.armature
space = context.space_data
facerig = arm.facerig
col = layout.column()
row = col.row()
row.alignment = "LEFT"
op = row.operator("object.facerig_add_swappable_mesh_group", text="+")
op.path = "facerig." + prop
for i, item in enumerate(getattr(facerig, prop).groups):
#print("ITEM", item.objects, prop)
row = col.row(align=True)
row.alignment = "LEFT"
row.prop(item, "name", text="")
box = col.box()
op = row.operator("object.facerig_add_swappable_mesh", text="+")
op.path = "facerig." + prop + ".groups[%i].objects[0]" % (i)
op = row.operator("object.facerig_rem_swappable_mesh_group", text="x")
op.path = "facerig." + prop + ".groups[%i]" % (i)
col2 = box.column()
for j, item2 in enumerate(item.objects):
row2 = col2.row()
row2.alignment = "LEFT"
row2.prop(item2, "object", text="")
#row2.label(text="-")
#row2.label(text="^")
#row2.label(text="v")
op = row2.operator("object.facerig_rem_swappable_mesh", text="-")
op.path = "facerig." + prop + ".groups[%i].objects[%i]" % (i, j)
#layout.prop(item, "object"
class DATA_PT_FaceRigPanel(ArmaturePanel):
"""Creates a Panel in the scene context of the properties editor"""
bl_label = "Face Auto Rigger"
def draw(self, context):
layout = self.layout
ob = context.object
arm = context.armature
space = context.space_data
ob = context.object
arm = context.armature
space = context.space_data
facerig = arm.facerig
col = layout.column()
col.prop(arm.facerig, "meshob", text="Model");
col.prop(arm.facerig, "rigname", text="Rig Name");
col.prop(arm.facerig, "devmode", text="DevMode");
op = col.operator("object.facerig_gen_shapekey_rig");
op = col.operator("object.facerig_gen_shapekeys");
op = col.operator("object.facerig_gen_vgroups");
op = col.operator("object.facerig_update_final");
op = col.operator("object.facerig_make_shape_drivers")
op = col.operator("object.facerig_make_swap_drivers", text="Make Swap Drivers")
op.path = "facerig.teeth_models"
box = layout.box()
box.label(text="Teeth Swap Meshes")
drawSwappableList(box, context, "teeth_models")
class DATA_PT_FaceRigCtrl(ArmaturePanel):
"""Creates a Panel in the scene context of the properties editor"""
bl_label = "Face Auto Rigger"
def draw(self, context):
layout = self.layout
ob = context.object
arm = context.armature
space = context.space_data
layout.prop(arm.facerig.teeth_models, "active")
layout.prop(arm.facerig.teeth_models, "show_all")
from .utils import Registrar
bpy_exports = Registrar([
DATA_PT_FaceRigPanel,
DATA_PT_FaceRigCtrl
])
``` |
{
"source": "joeedh/Fairing.blend",
"score": 2
} |
#### File: joeedh/Fairing.blend/ops.py
```python
from . import util, shadow
from . import simplemesh
import bpy, bmesh, time, random
from bpy_extras import view3d_utils
from mathutils import *
from math import *
from bpy.props import *
import bgl
import blf
class AXES:
X = 1
Y = 2
Z = 4
colormap = [
Vector([0, 0, 1, 1]),
Vector([1, 0, 1, 1]),
Vector([0, 1, 0, 1]),
Vector([0, 1, 1, 1]),
Vector([1, 1, 0, 1]),
Vector([1, 0, 0, 1])
]
def fract(f):
return f - floor(f)
def colormap_get(f):
f = min(max(f, 0.0), 1.0)
t = fract(f*len(colormap)*0.9999999)
f = int(f*len(colormap)*0.9999999)
if f >= len(colormap)-1:
return colormap[f]
else:
a = colormap[f]
b = colormap[f+1]
return a + (b - a) * t
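# worked example (illustrative): colormap_get(0.25) computes 0.25*6*0.9999999 ~= 1.5,
# so f = 1 and t ~= 0.5, returning the midpoint blend of colormap[1] and colormap[2]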
def handle_mirror_clipping(self, ob, bm, vcos):
axes = 0
limit = 0.0
for mod in ob.modifiers:
#print(mod.type)
if mod.type != "MIRROR" or not mod.use_clip: continue
if mod.use_axis[0]:
axes |= AXES.X
if mod.use_axis[1]:
axes |= AXES.Y
if mod.use_axis[2]:
axes |= AXES.Z
limit = max(limit, mod.merge_threshold)
for i, v in enumerate(bm.verts):
if not v.select or v.hide: continue
for j in range(3):
if not (axes & (1<<j)):
continue
d = abs(vcos[i][j])
if d <= limit:
v.co[j] = 0
def draw_callback_3dpx(self, context):
if not hasattr(self, "sm"):
print("no 3d draw data")
return
matrix = bpy.context.region_data.perspective_matrix
sm = self.sm
bgl.glDisable(bgl.GL_DEPTH_TEST)
bgl.glEnable(bgl.GL_DEPTH_TEST)
#bgl.glPolygonOffset(100000, 100000);
#bgl.glDisable(bgl.GL_BLEND)
bgl.glEnable(bgl.GL_BLEND)
sm.draw({
"uColor" : [0.7, 0.8, 1, 0.3],
"viewProjectionMatrix" : matrix,
"uPolyOffset" : 0.5
})
#bgl.glEnable(bgl.GL_BLEND)
if self.sm2 is not None:
sm2 = self.sm2
sm2.draw({
"uColor" : [1, 1, 1, 0.7],
"viewProjectionMatrix" : matrix,
"uPolyOffset" : 0.5
})
if self.sm3 is not None:
self.sm3.draw({
"uColor" : [1, 1, 1, 0.5],
"viewProjectionMatrix" : matrix,
"uPolyOffset" : 0.5
})
bgl.glDisable(bgl.GL_DEPTH_TEST)
bgl.glEnable(bgl.GL_DEPTH_TEST)
def draw_callback_px(self, context):
#print("mouse points", len(self.mouse_path))
font_id = 0 # XXX, need to find out how best to get this.
area = context.area
w = area.width
h = area.height
self.text = "yay"
# draw some text
if self.text != "":
blf.position(font_id, 15, 30, 0)
blf.size(font_id, 20, 72)
blf.draw(font_id, self.text)
sm = simplemesh.SimpleMesh()
d = 100
#sm.tri([0,0,0], [0, d, 0], [d, d, 0])
for l in self._lines:
v1 = [(l[0][0]-w*0.5)/w*2.0, (l[0][1]-h*0.5)/h*2.0, 0]
v2 = [(l[1][0]-w*0.5)/w*2.0, (l[1][1]-h*0.5)/h*2.0, 0]
v1[0] = v1[1] = 0
#print(v1, v2)
sm.line(v1, v2)
#sm.line([0, 0, 0], [d, d, 0])
sm.draw({
"uColor" : [1, 1, 1, 0.75]
})
#print(dir(bgl))
return
# 50% alpha, 2 pixel width line
bgl.glEnable(bgl.GL_BLEND)
bgl.glColor4f(0.0, 0.0, 0.0, 0.5)
bgl.glLineWidth(2)
bgl.glBegin(bgl.GL_LINE_STRIP)
for x, y in self.mouse_path:
bgl.glVertex2i(x, y)
bgl.glEnd()
# restore opengl defaults
bgl.glLineWidth(1)
bgl.glDisable(bgl.GL_BLEND)
bgl.glColor4f(0.0, 0.0, 0.0, 1.0)
class ProjectToUnSubD(bpy.types.Operator):
"""Modal object selection with a ray cast"""
bl_idname = "mesh.project_to_unsubd"
bl_label = "UnSubD Project"
bl_options = {'REGISTER', 'UNDO'}
factor: bpy.props.FloatProperty(name="factor")
sm2 = None
sm3 = None
def main(self, context):
# get the context arguments
scene = context.scene
region = context.region
rv3d = context.region_data
LT = simplemesh.LayerTypes
lf = LT.VERTEX | LT.NORMALS | LT.UVS | LT.COLORS
self.sm3 = simplemesh.SimpleMesh(shader=simplemesh.SimpleShader3D, layerflag=lf)
sm3 = self.sm3
dl = self.factor
def obj_ray_cast(obj, matrix_inv, ray_origin, ray_target):
"""Wrapper for ray casting that moves the ray into object space"""
#ray_origin_obj = matrix_inv @ ray_origin
#ray_target_obj = matrix_inv @ ray_target
ray_direction = ray_target - ray_origin
# cast the ray
success, location, normal, face_index = obj.ray_cast(ray_origin, ray_direction)
dist = (ray_origin - location).length
if success:
return location, dist, normal, face_index
else:
return None
ob = context.active_object
if ob is None or ob.type != "MESH" or ob.name.startswith("_") or ob.mode != "EDIT":
print("invalid object", ob)
return
ob2 = self._ob2
self.sm2 = simplemesh.SimpleMesh(shader=simplemesh.SimpleShader3D)
sm2 = self.sm2
bm = bmesh.from_edit_mesh(ob.data)
cos = self.cos
nos = self.nos
for i, v in enumerate(bm.verts):
v.co = cos[i]
# get the ray relative to the object
matrix_inv = ob.matrix_world.inverted()
dav = 0
dtot = 0
matrix = ob.matrix_world
for i, v in enumerate(bm.verts):
if not v.select or v.hide: continue
no = v.normal
target = v.co + no*1000
ret = obj_ray_cast(ob2, matrix_inv, v.co, target)
target = v.co - no*1000
ret2 = obj_ray_cast(ob2, matrix_inv, v.co, target)
if ret is None and ret2 is None: continue
elif ret is not None and ret2 is not None:
if ret2[1] < ret[1]:
ret = ret2
elif ret is None and ret2 is not None:
ret = ret2
no = Vector(v.normal)
no.normalize()
v.co = cos[i] + (ret[0] - cos[i]) * dl
dist = (v.co - cos[i]).length
dav += dist
dtot += 1
sm2.line(matrix @ v.co, matrix @ Vector(ret[0]))
sm2.line(matrix @ v.co, matrix @ Vector(ret[0]))
for e in bm.edges:
ok = not e.verts[0].hide and e.verts[0].select
ok = ok or (not e.verts[1].hide and e.verts[1].select)
if not ok: continue
v1, v2 = e.verts
#sm3.line(matrix @ v1.co, matrix @ v2.co)
if dtot > 1:
dav /= dtot
for i, v in enumerate(bm.verts):
if not v.select or v.hide: continue
no = Vector(nos[i])
no.normalize()
sl = -1 if dl < 0 else 1
v.co += no*sl*dav
handle_mirror_clipping(self, ob, bm, self.cos)
bmesh.update_edit_mesh(ob.data, destructive=False)
@classmethod
def poll(cls, context):
return (context.mode == 'EDIT_MESH')
def execute(self, context):
self._ob2 = shadow.getUnSubShadow(context.active_object, ctx=context)
self._lines = []
self.main(context)
self.stop()
return {'FINISHED'}
def makeDrawData(self, ob2):
me = shadow.ob_get_final_mesh(ob2)
LT = simplemesh.LayerTypes
lf = LT.VERTEX | LT.NORMALS | LT.UVS | LT.COLORS
drawbm = bmesh.new()
self.sm = simplemesh.SimpleMesh(shader=simplemesh.SimpleShader3D, layerflag=lf)
fset = set()
sm = self.sm
layer = me.polygon_layers_int["origindex"].data
for i, p in enumerate(me.polygons):
i2 = layer[i].value
if i2 == -1: #i2 in fset:
li = p.loop_start
vs = []
for j in range(p.loop_total):
vi = me.loops[li].vertex_index
v = me.vertices[vi]
vs.append(drawbm.verts.new(Vector(v.co)))
li += 1
drawbm.faces.new(vs)
matrix = ob2.matrix_world
for v in drawbm.verts:
v.co = matrix @ v.co
drawbm.normal_update()
c = [1, 1, 1, 1.0];
for f in drawbm.faces:
#c = colormap_get(0.9) #random.random()*0.15 + 0.15)
if len(f.verts) == 3:
v1, v2, v3 = f.verts
t = sm.tri(v1.co, v2.co, v3.co)
t.normals(v1.normal, v2.normal, v3.normal)
t.colors(c, c, c);
#t.uvs([0, 0], [0, 1], [1, 1])
elif len(f.verts) == 4:
v1, v2, v3, v4 = f.verts
q = sm.quad(v1.co, v2.co, v3.co, v4.co)
q.normals(v1.normal, v2.normal, v3.normal, v4.normal)
q.colors(c, c, c, c);
#q.uvs([0, 0], [0, 1], [1, 1], [1, 0])
else:
print("error; should have gotten subd surface with all quads");
ob2.to_mesh_clear()
def modal(self, context, event):
coord = event.mouse_region_x, event.mouse_region_y
dx = coord[0] - self.start_mpos[0]
dy = coord[1] - self.start_mpos[1]
#dl = sqrt(dx*dx + dy*dy) / 250
#print(dl)
self.factor = -dy / 250
self._lines = [
[self.start_mpos, coord]
]
#print(event.type, dir(event), event.value, event.oskey, event.tilt)
if event.type in {'MIDDLEMOUSE', 'WHEELUPMOUSE', 'WHEELDOWNMOUSE'}:
context.area.tag_redraw()
# allow navigation
return {'PASS_THROUGH'}
elif event.type == 'MOUSEMOVE':
context.area.tag_redraw()
self.main(context)
return {'RUNNING_MODAL'}
elif event.type == "LEFTMOUSE" or (event.type == "RET" and event.value != "RELEASE"):
self.stop()
return {'FINISHED'}
elif event.type in {'RIGHTMOUSE', 'ESC'}:
self.stop()
context.area.tag_redraw()
ob = context.active_object
#can't rely on aborting earlier if this is false (in invoke) cause of dumb crash
if ob is not None and ob.type == "MESH" and not ob.name.startswith("_"):
bm = bmesh.from_edit_mesh(ob.data)
for i, v in enumerate(bm.verts):
v.co = self.cos[i]
bmesh.update_edit_mesh(ob.data)
return {'CANCELLED'}
return {'RUNNING_MODAL'}
def stop(self):
if hasattr(self, "_handle") and self._handle is not None:
bpy.types.SpaceView3D.draw_handler_remove(self._handle, 'WINDOW')
self._handle = None
if hasattr(self, "_handle3d") and self._handle3d is not None:
bpy.types.SpaceView3D.draw_handler_remove(self._handle3d, 'WINDOW')
self._handle3d = None
self._ob2 = None
self._me = None
def invoke(self, context, event):
self._lines = []
if context.space_data.type == 'VIEW_3D':
self._ob2 = shadow.getUnSubShadow(context.active_object, ctx=context)
self.makeDrawData(self._ob2)
#print(event, dir(event))
self.start_mpos = event.mouse_region_x, event.mouse_region_y
self.mouse_path = []
args = (self, context)
self._handle = bpy.types.SpaceView3D.draw_handler_add(draw_callback_px, args, 'WINDOW', 'POST_PIXEL')
self._handle3d = bpy.types.SpaceView3D.draw_handler_add(draw_callback_3dpx, args, 'WINDOW', 'POST_VIEW')
self.text = ""
context.window_manager.modal_handler_add(self)
ob = context.active_object
if ob.mode == "EDIT":
ob.update_from_editmode()
if ob is not None and ob.type == "MESH" and not ob.name.startswith("_"):
#bpy.ops.ed.undo_push()
self.cos = [Vector(v.co) for v in ob.data.vertices]
self.nos = [Vector(v.normal) for v in ob.data.vertices]
self.wos = [0.0 for v in ob.data.vertices]
return {'RUNNING_MODAL'}
else:
return {'RUNNING_MODAL'} #XXX below line is crashing!
#return {'CANCELLED'}
else:
self.report({'WARNING'}, "Active space must be a View3d")
return {'CANCELLED'}
from . import simple_loop_optimize, solver
import imp
class FairLoopOperator(bpy.types.Operator):
"""UV Operator description"""
bl_idname = "mesh.fair_loop"
bl_label = "Fair Loop"
bl_options = {'UNDO', 'REGISTER', 'PRESET'}
factor: FloatProperty(name="factor", default=1.0)
@classmethod
def poll(cls, context):
obj = context.active_object
return obj and obj.type == 'MESH' and obj.mode == 'EDIT'
def execute(self, context):
obj = context.active_object
me = obj.data
bm = bmesh.from_edit_mesh(me)
imp.reload(solver)
imp.reload(simple_loop_optimize)
simple_loop_optimize.fairLoops(bm, obj.matrix_world, self.factor)
bmesh.update_edit_mesh(me)
return {'FINISHED'}
registrar = util.Registrar([
ProjectToUnSubD,
FairLoopOperator
]);
```
#### File: joeedh/Fairing.blend/optimize.py
```python
import bpy, bmesh, random, time, struct
from math import *
from mathutils import *
import bpy, bmesh, time, random, os, os.path
from ctypes import *
from mathutils import *
from math import *
from .sym import sym
from .bezpatch import BezPatch
from .subsurf import subdivide2, subdivide, subdivide3, catmull_clark
from . import globals
from .smatrix import getSMat, getVertDisk, getBoundFlags
from .subsurf_evaluate import *
import numpy as np
import numpy.linalg
#from .smatrix import *
def edgeloop_next(l):
l = l.link_loop_next.link_loop_radial_next.link_loop_next
return l
def edgeloop_prev(l):
l = l.link_loop_prev.link_loop_radial_prev.link_loop_prev
#l = l.link_loop_radial_prev
return l
class OptState:
def __init__(self, bm):
self.bm = bm
self.sm = None
def init(self):
bm = self.bm
sm = self.sm = SSMesh()
sm.load(bm)
def test(inob_name, outob_name, steps=3):
ob = bpy.data.objects[inob_name]
bm = bmesh.new()
bm.from_mesh(ob.data)
outbm = bmesh.new()
optstate = OptState(bm)
optstate.init()
optstate.sm.renderVal4Cages(outbm, False)
optstate.sm.renderDerivatives(outbm)
outob = bpy.data.objects[outob_name]
outbm.to_mesh(outob.data)
outob.data.update()
``` |
{
"source": "joeedh/fairmotion",
"score": 2
} |
#### File: fairmotion/pyserver/config.py
```python
import os, sys, os.path, math, random, time
import shelve, imp, struct
import mimetypes
#don't store the file tree in the database; serve the file system directly
#serv_simple.py sets this to true
serv_local = False
#if serv_local is true, will allow access to full filesystem
#serv_simple also sets this to true
serv_all_local = False
#turn filenames into gibberish
mangle_file_paths = False
#serv_simple.py sets this to true
use_sqlite = False
try:
WITH_PY2 = True if sys.version_info.major <= 2 else False
except:
WITH_PY2 = True
_orig_str = str
def bytes_py2(s, encoding=None):
return _orig_str(s)
def safestr(s, encoding=None):
return _orig_str(s)
if WITH_PY2:
g = globals()
g["bytes"] = bytes_py2
g["str"] = safestr
unit_path = "/unit_test.html"
serv_unit_tests = False
content_path = "/content"
json_mimetype = "application/x-javascript"
#example config_local.py file parameters
#serverhost = "127.0.0.1:8081"
#serverport = 8081
#base_path = "/" #base URL path
#server_root = "/home/joeedh/dev/fairmotion/pyserver"
#doc_root = "/home/joeedh/dev/fairmotion"
#files_root = os.path.abspath(doc_root+".."+os.path.sep+"formacad_user_files"+os.path.sep)
#ipaddr = "127.0.0.1"
#db_host = "localhost"
#db_user = "root"
#db_passwd = ""
#db_db = "fairmotion"
#import local config file
import config_local
mself = sys.modules["config"].__dict__
mlocal = sys.modules["config_local"].__dict__
_is_set = set()
def is_set(k):
global _is_set
return k in _is_set
for k in mlocal:
mself[k] = mlocal[k]
_is_set.add(k)
#private globals
client_ip = ""
```
#### File: fairmotion/pyserver/fileapi_local.py
```python
from logger import elog, mlog, alog
from db_engine import mysql_connect, mysql_reconnect, get_qs, \
estr, valid_pass, SQLParamError, sql_selectall, \
sql_insertinto, do_param_error, sq, sql_update
import random, time, json, os, os.path, sys, math, types
from utils import *
from math import *
from auth import do_auth, gen_token, toktypes, rot_userid, unrot_userid
import datetime
from config import *
from db_engine import *
import db_engine
import base64
import os, os.path, sys, stat
from fileapi import file_restricted_fields, FOLDER_MIME, EMPTY_TAG, ROOT_PARENT_ID
import urllib, urllib.parse
#stupid unicode!
def jsondumps(obj):
if type(obj) in [int, float]:
return str(obj);
elif type(obj) in [list, tuple, set]:
s = "["
for i, item in enumerate(obj):
if i > 0: s += ", "
s += jsondumps(item)
s += "]"
return s
elif type(obj) == dict:
s = "{"
for i, k in enumerate(obj):
if i > 0: s += ", "
s += '"' + k + '" : '
s += jsondumps(obj[k])
s += "}"
return s;
else: #XXX type(obj) == str:
return '"' + str(obj) + '"'
#else:
# raise RuntimeError("unknown object " + str(type(obj)));
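# illustrative example (added for clarity): jsondumps({"ok": True, "n": 3})
# recurses into the dict and yields '{"ok" : "True", "n" : 3}' -- note that a
# bool falls through to the string branch, unlike the standard json module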
WIN32 = sys.platform.startswith("win")
if not WIN32: #unix functions; need to test these!
def unixnorm(path):
#strip out '.', so ./path works
while path[0] == ".":
path = path[1:]
return path
def listdir(path):
path = unixnorm(path)
return os.listdir(path)
def exists(path):
path = unixnorm(path)
return os.path.exists(path)
def dostat(path):
path = unixnorm(path)
return os.stat(path)
def local_to_real(path):
path = unixnorm(path)
if path == "/.settings.bin":
print("APPDATA", get_appdata()) #os.environ["APPDATA"])
dir = get_appdata() + os.path.sep + ".fairmotion" #os.path.join(get_appdata(), "/.fairmotion")
if not os.path.exists(dir):
print("make dirs", dir)
os.makedirs(dir)
path = os.path.join(dir, ".settings.bin")
print("DIRPATH", dir)
print("PATH", path)
if not os.path.exists(path):
templ = config.server_root + "/default_settings_bin"
f = open(templ, "rb")
buf = f.read()
f.close()
f = open(path, "wb")
f.write(buf)
f.close()
return os.path.abspath(os.path.normpath(path))
if not serv_all_local:
path = files_root + os.path.sep + path
return os.path.abspath(os.path.normpath(path))
def real_to_local(path):
path = unixnorm(path)
if os.path.abspath(os.path.normpath(path)) == unixnorm(local_to_real("/.settings.bin")):
return "/.settings.bin"
path = os.path.abspath(os.path.normpath(path))
froot = os.path.abspath(os.path.normpath(files_root))
path = path[len(froot):].replace(os.path.sep, "/")
return path
if WIN32:
import win_util
local_to_real = win_util.local_to_real
real_to_local = win_util.real_to_local
listdir = win_util.listdir
dostat = win_util.dostat
exists = win_util.exists
get_appdata = win_util.get_appdata
else:
def get_appdata():
return os.environ["HOME"]
FOLDER_MIME = "application/vnd.google-apps.folder"
import fileapi_db
ROOT_PARENT_ID = fileapi_db.ROOT_PARENT_ID
def is_folder(file):
return file.mimeType == FOLDER_MIME or file.id == ROOT_PARENT_ID
def is_valid_file(file):
return file["realpath"] != EMPTY_TAG
try:
a = FileNotFoundError
except:
FileNotFoundError = OSError
class FileClass (dict):
def __init__(self, path, userid):
print(" FCLS PATH", path, userid)
path = os.path.normpath(path).replace(os.path.sep, "/")
diskpath = local_to_real(path)
froot = local_to_real("/")
if not os.path.exists(diskpath):
self.bad = True
return
else:
try:
nstat = dostat(diskpath)
except:
self.bad = True
return
rootid = fileapi_db.fileid_to_publicid(userid, ROOT_PARENT_ID)
if stat.S_ISDIR(nstat.st_mode):
mime = FOLDER_MIME
self.is_dir = True
else:
mime = "application/x-javascript"
self.is_dir = False
self.name = ""
self.bad = False
if not serv_all_local and not diskpath.startswith(froot):
elog("Error! " + diskpath)
print("Error!", diskpath, froot)
self.bad = True
return
self.diskpath = diskpath
self.mimeType = mime
self.id = fileid_to_publicid(path, userid)
#print("Final relative path:", path, len(froot));
oname = path
while len(oname) > 0 and oname[0] in ["\\", "/"]:
oname = oname[1:]
name = oname[oname.rfind("/")+1:].strip()
name = name.replace("/", "")
if name == "":
name = oname
self.name = name
#print("Final name:", self.name)
parentpath = path[:path.rfind("/")].strip()
if "/" not in path:
parentpath = "/"
#print("PARENT PATH", "'"+parentpath+"'", fileid_to_publicid(parentpath, userid))
if name == "/" or parentpath == "/" or parentpath == "":
self.parentid = rootid
else:
self.parentid = fileid_to_publicid(parentpath, userid)
def File(path, userid):
f = FileClass(path, userid)
if f.bad: return None
return f
#for local serving, encode file path as the id
def fileid_to_publicid(path, userid):
if ".." in path: return "-1"
path = bytes(path, "latin-1")
path = str(base64.b64encode(path), "latin-1")
return path
def publicid_to_fileid(publicid):
if len(publicid) == 17:
userid, fileid = fileapi_db.publicid_to_fileid(publicid)
if fileid == ROOT_PARENT_ID:
return "/"
if publicid == "/":
return publicid
#print(":::", publicid)
path = base64.b64decode(bytes(publicid, "latin-1"));
path = str(path, "latin-1")
if ".." in path: return "-1"
return path
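# illustrative round trip (added for clarity): in local mode the "id" is just the
# base64 of the path, e.g. fileid_to_publicid("/docs/a.txt", uid) -> "L2RvY3MvYS50eHQ=",
# and publicid_to_fileid() decodes it back; ".." is rejected both ways to block traversal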
class FileAPI_DirList:
basepath = "/api/files/dir/list"
def __init__(self):
pass
def do_GET(self, serv):
qs = get_qs(serv.path)
if "accessToken" not in qs or ("path" not in qs and "id" not in qs):
serv.send_error(400)
return
tok = qs["accessToken"][0]
userid = do_auth(tok)
if userid == None:
elog("Invalid access in file api")
serv.send_error(401)
return
if "id" in qs:
path = publicid_to_fileid(qs["id"][0])
else:
path = qs["path"][0]
path = urllib.parse.unquote(path).strip();
print("PATHPATH", path);
dir = File(path, userid)
if ".." in path:
serv.send_error(401)
return
if not serv_all_local:
prefix = files_root#+rot_userid(userid)
try:
os.makedirs(prefix)
except FileExistsError:
pass
dirpath = local_to_real(path)
files = []
for f in listdir(dirpath):
path2 = path + os.path.sep + f
file = File(path2, userid)
f = {}
if file == None:
print("error!", dirpath)
continue
#if file == None: continue
f["name"] = file.name
f["id"] = file.id
f["mimeType"] = file.mimeType
f["is_dir"] = 1 if file.is_dir else 0
f["parentid"] = file.parentid
files.append(f)
body = jsondumps({"items": files})
body = bstr(body)
serv.gen_headers("GET", len(body), json_mimetype)
serv.wfile.write(body)
class FileAPI_MakeFolder:
basepath = "/api/files/dir/new"
def __init__(self):
pass
def do_GET(self, serv):
qs = get_qs(serv.path)
if "name" not in qs or "accessToken" not in qs or ("path" not in qs and "id" not in qs):
serv.send_error(400)
return
if ".." in qs["name"][0]:
serv.send_error(403)
return
tok = qs["accessToken"][0]
userid = do_auth(tok)
if userid == None:
serv.send_error(401)
return
if "id" in qs:
folderid = publicid_to_fileid(qs["id"][0])
else:
folderid = qs["path"][0]
if folderid == None:
serv.send_error(400)
return
path = local_to_real(folderid + "/" + qs["name"][0])
print("PATH", path, exists(path))
#see if folder (or a file) already exists
if exists(path):
serv.send_error(400)
return
os.makedirs(path)
body = json.dumps({"success": True})
body = bstr(body)
serv.gen_headers("GET", len(body), json_mimetype)
serv.wfile.write(body)
class FileAPI_GetMeta:
basepath = "/api/files/get/meta"
def __init__(self):
pass
def do_POST(self, serv):
buf = serv.rfile.read()
try:
obj = json.loads(buf)
except:
serv.send_error(401)
return
def do_GET(self, serv):
qs = get_qs(serv.path)
if "accessToken" not in qs or ("path" not in qs and "id" not in qs):
serv.send_error(400)
return
tok = qs["accessToken"][0]
userid = do_auth(tok)
if userid == None:
serv.send_error(401)
return
if "path" in qs:
fileid = qs["path"][0]
fileid = urllib.parse.unquote(fileid);
else:
fileid = qs["id"][0]
path = local_to_real(fileid);
if not os.path.exists(path):
serv.send_error(404);
return
st = os.stat(path)
fname = fileid.replace("\\", "/").strip()
dir = ""
if "/" in fname and fname[-1] != "/":
dir = fname[:fname.rfind("/")].strip()
fname = fname[len(dir):]
while fname[0] == "/":
fname = fname[1:]
#ROOT_PARENT_ID
mime = "unknown"
if stat.S_ISDIR(st.st_mode):
mime = FOLDER_MIME
else:
pass #deal with later
#stupid quoting
#id = urllib.quote(fileid, "").strip()
id = fileid_to_publicid(fileid, userid).strip()
#if id[0] == "'" or id[0] == "\"" and id[0] == id[-1]:
f = {
'name' : fname,
'id' : id,
'parentid' : dir,
'mimeType' : mime,
'modified' : st.st_mtime,
'is_dir' : stat.S_ISDIR(st.st_mode)
};
f2 = {}
for k in f:
if k in file_restricted_fields: continue
f2[k] = f[k]
body = json.dumps(f2)
body = bstr(body)
serv.gen_headers("GET", len(body), json_mimetype)
serv.wfile.write(body)
class UploadStatus:
def __init__(self, uploadToken=None):
self.invalid = False
if uploadToken != None:
self.from_sql(uploadToken)
def from_sql(self, utoken):
cur, con = mysql_connect()
try:
qstr = sql_selectall("uploadtokens", ["tokenid"], [utoken], [sq.token])
except SQLParamError:
do_param_error("UploadToken.from_sql")
raise SQLParamError()
#qstr = "SELECT * FROM uploadtokens WHERE tokenid="+estr(utoken)
cur.execute(qstr)
ret = cur.fetchone()
if ret == None:
self.invalid = True
return
self.token = ret["tokenid"]
self.path = ret["path"]
self.time = ret["time"]
self.name = ret["name"]
self.fileid = ret["fileid"]
self.realpath = ret["realpath"]
self.userid = ret["userid"]
self.permissions = ret["permissions"]
self.expiration = ret["expiration"]
self.size = ret["size"]
self.cur = ret["cur"]
def toJSON(self):
obj = {}
for k in self.__dict__:
val = getattr(self, k)
if type(val) in [types.MethodType, types.FunctionType]: continue
obj[k] = getattr(self, k)
return obj
def commit(self):
cur, con = mysql_connect()
dnow = datetime.datetime.now()
dend = datetime.datetime.now()+datetime.timedelta(days=1)
types = [sq.token, sq.path, sq.datetime, sq.int ]
cols = ["tokenid", "path", "time", "fileid" ]
values = [self.token, self.path, dnow, 32423423] #we don't use database fileids in local mode
types += [sq.str(100), sq.path, sq.int, sq.int ]
cols += ["name", "realpath", "userid", "permissions"]
values += [self.name, self.realpath, self.userid, 0 ]
types += [sq.datetime, sq.int, sq.int ]
cols += ["expiration", "size", "cur" ]
values += [dend, self.size, self.cur]
try:
qstr = sql_insertinto("uploadtokens", cols, values, types)
except SQLParamError:
#do_param_error(json.dumps(self));
raise SQLParamError("upload token error; see error.log for details")
print("QSTR", qstr)
cur.execute(qstr)
con.commit()
def create(self, token, path, userid, fileid, parentid=ROOT_PARENT_ID):
self.token = token
self.path = path
cs = os.path.split(path)
self.dir = cs[0];
self.time = time.time();
self.size = -1
self.cur = 0
self.file = None
self.file_init = False
self.fileid = fileid
self.userid = userid;
self.parentid = parentid; #note: not cached in database
if len(cs) == 1 or cs[1] == "" or cs[1] == None:
self.name = cs[0]
else:
self.name = cs[1]
self.gen_realpath()
def gen_realpath(self):
f = File(self.fileid, self.userid)
fpath = os.path.split(f.diskpath)[0]
if not os.path.exists(fpath):
os.makedirs(fpath)
self.realpath = f.diskpath
return f.diskpath
class FileAPI_UploadStart:
basepath = "/api/files/upload/start"
def __init__(self):
pass
def do_GET(self, serv):
elog("fileapi access" + serv.path)
qs = get_qs(serv.path)
if "accessToken" not in qs or ("path" not in qs and "id" not in qs):
serv.send_error(400)
return
tok = qs["accessToken"][0]
userid = do_auth(tok)
if userid == None:
elog("Need user id")
print("Bad auth")
serv.send_error(401)
return
path = qs["path"][0]
if "id" in qs:
fileid = publicid_to_fileid(qs["id"][0])
else:
fileid = urllib.parse.unquote(path)
meta = File(fileid, userid)
if meta != None:
print("DISKPATH", meta.diskpath)
if meta == None or not os.path.exists(meta.diskpath):
elog("creating new file")
cs = os.path.split(path)
folderid = cs[0]
f = File(folderid, userid)
if not os.path.exists(f.diskpath):
elog("invalid folder " + f.diskpath)
print("invalid folder " + f.diskpath)
serv.send_error(401);
return
if len(cs) == 1 or cs[1] == "":
fname = cs[0]
else:
fname = cs[1]
mime = "application/octet-stream"
#create empty file
f = open(f.diskpath+"/"+fname, "w")
f.close()
meta = File(fileid, userid)
if meta == None:
elog("Invalid file id")
serv.send_error(400)
return
print("\n\nFILE", meta, "\n\n")
if is_folder(meta):
elog("target file is a folder" + meta["name"])
serv.send_error(401)
return
utoken = gen_token("U", userid);
ustatus = UploadStatus()
#ignore fileid/parentid in upload status token
ustatus.create(utoken, path, userid, fileid, -1)
try:
ustatus.commit()
except:
import traceback
elog("USTATUS.COMMIT failed!")
traceback.print_exc()
f = open(ustatus.realpath, "w");
f.close();
realpath = ustatus.realpath
body = json.dumps({"uploadToken" : utoken});
body = bstr(body)
print("\nupload start result:", body, "\n\n\n")
serv.gen_headers("GET", len(body), json_mimetype)
serv.wfile.write(body)
cur_uploads = {}
class FileAPI_UploadChunk:
basepath = "/api/files/upload"
def __init__(self):
pass
def do_PUT(self, serv):
alog("fileapi access" + serv.path)
qs = get_qs(serv.path)
if "accessToken" not in qs or "uploadToken" not in qs:
elog("fileapi: invalid tokens")
serv.send_error(400)
return
tok = qs["accessToken"][0]
utoken = qs["uploadToken"][0]
userid = do_auth(tok)
if userid == None:
elog("invalid authorization")
serv.send_error(401)
return
status = UploadStatus(utoken)
if status.invalid:
elog("invalid upload token ", utoken)
serv.send_error(401)
return
if "Content-Range" not in serv.headers:
elog("missing header " + json.dumps(serv.headers))
serv.send_error(400)
return
r = serv.headers["Content-Range"].strip()
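# expected header shape (per RFC 7233), e.g. "Content-Range: bytes 0-1023/4096";
# the code below peels off the total size first, then the start/end offsets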
if not r.startswith("bytes"):
elog("malformed request 1")
serv.send_error(400)
return
r = r[len("bytes"):].strip()
r = r.split("/")
if r == None or len(r) != 2:
elog("malformed request 2")
serv.send_error(400)
return
try:
max_size = int(r[1])
except ValueError:
elog("malformed request 3")
serv.send_error(400)
return
r = r[0].split("-")
if r == None or len(r) != 2:
elog("malformed request 4")
serv.send_error(400)
return
try:
r = [int(r[0]), int(r[1])]
except ValueError:
elog("malformed request 4")
serv.send_error(400)
return
if r[0] < 0 or r[1] < 0 or r[0] >= max_size or r[1] >= max_size \
or r[0] > r[1]:
elog("malformed request 5")
serv.send_error(400)
return
if status.size == -1:
status.size = max_size
buflen = r[1]-r[0]+1
if serv.rfile == None:
elog("serv.rfile was None! eek! " + str(buflen));
serv.send_error(500)
return;
buf = serv.rfile.read(buflen)
if len(buf) != buflen:
elog("malformed request 6")
serv.send_error(400)
return
if r[0] == 0:
mode = "wb"
else:
mode = "ab"
status.file = open(status.realpath, mode);
status.file.seek(r[0]);
status.file.write(buf);
status.file.flush()
status.file.close()
status.commit()
body = json.dumps({"success" : True});
body = bstr(body)
serv.gen_headers("PUT", len(body), json_mimetype)
serv.wfile.write(body)
class FileAPI_GetFile:
basepath = "/api/files/get"
def __init__(self):
pass
def do_GET(self, serv):
qs = get_qs(serv.path)
if "accessToken" not in qs or ("path" not in qs and "id" not in qs):
serv.send_error(400)
return
tok = qs["accessToken"][0]
userid = do_auth(tok)
if userid == None:
serv.send_error(401)
return
if "path" in qs:
path = qs["path"][0]
else:
path = publicid_to_fileid(qs["id"][0])
if path == None:
serv.send_error(404)
return
alog("fetching file %s" % path);
f = File(path, userid)
if f == None:
serv.send_error(400)
return
if is_folder(f):
serv.send_error(401)
return
print("diskpath:", f.diskpath)
try:
file = open(f.diskpath, "rb")
except OSError:
serv.send_error(404)
return
body = file.read()
file.close()
serv.gen_headers("GET", len(body), "application/octet-stream")
serv.send_header("Content-Disposition", "attachment; filename=\"%s\"" % f.name)
#Content-Disposition: attachment; filename=FILENAME
serv.wfile.write(body)
```
#### File: fairmotion/pyserver/logger.py
```python
from datetime import *
from time import time
import config
import os.path
_c = time()
def log(file, msg):
global _c
file.write("%s: %s\n" % (str(datetime.now()), str(msg)))
if 1: #time() - _c > 0.3:
file.flush()
_c = time()
prefix = config.doc_root+os.path.sep+"pyserver"+os.path.sep
def getfile(path):
try:
return open(path, "a")
except:
return open(path, "w")
messages = getfile(prefix+"messages.log")
errors = getfile(prefix+"errors.log")
access = getfile(prefix+"access.log")
def mlog(msg):
print(msg)
log(messages, msg)
def elog(msg):
print(msg)
log(errors, msg)
def alog(msg):
print(msg)
log(access, "%s: %s" % (config.client_ip, msg))
```
#### File: pymysql3/MySQLdb/converters.py
```python
from _mysql import string_literal, escape_sequence, escape_dict, escape, NULL
from .constants import FIELD_TYPE, FLAG
from .times import *
import datetime
import array
try:
set
except NameError:
from sets import Set as set
def Bool2Str(s, d): return str(int(s))
def Str2Set(s):
return set([ i for i in s.split(',') if i ])
def Set2Str(s, d):
return string_literal(','.join(s), d)
def Thing2Str(s, d):
"""Convert something into a string via str()."""
return str(s)
def Bytes2Str(s, d):
"""Convert a bytes object to a string using the default encoding.
This is only used as a placeholder for the real function, which
is connection-dependent."""
return s.decode()
Long2Int = Thing2Str
def Float2Str(o, d):
return '{:f}'.format(o)
def None2NULL(o, d):
"""Convert None to NULL."""
return NULL # duh
def Thing2Literal(o, d):
"""Convert something into a SQL string literal. If using
MySQL-3.23 or newer, string_literal() is a method of the
_mysql.MYSQL object, and this function will be overridden with
that method when the connection is created."""
return string_literal(o, d)
def Instance2Str(o, d):
"""
Convert an Instance to a string representation. If the __str__()
method produces acceptable output, then you don't need to add the
class to conversions; it will be handled by the default
converter. If the exact class is not found in d, it will use the
first class it can find for which o is an instance.
"""
if o.__class__ in d:
return d[o.__class__](o, d)
# in Python 3 every class is an instance of type, so one scan suffices
cl = [x for x in d.keys() if isinstance(x, type) and isinstance(o, x) and d[x] is not Instance2Str]
if not cl:
return d[str](o, d)
d[o.__class__] = d[cl[0]]
return d[cl[0]](o, d)
def char_array(s):
return array.array('b', s) # 'c' typecode was removed in Python 3
def array2Str(o, d):
return Thing2Literal(o.tobytes(), d)
conversions = {
int: Long2Int,
float: Float2Str,
type(None): None2NULL,
tuple: escape_sequence,
list: escape_sequence,
dict: escape_dict,
object: Instance2Str,
array.ArrayType: array2Str,
str: Thing2Literal, # default
bytes: Bytes2Str,
bool: Bool2Str,
datetime.date: DateTime2literal,
datetime.time: DateTime2literal,
datetime.datetime: DateTime2literal,
datetime.timedelta: DateTimeDelta2literal,
set: Set2Str,
FIELD_TYPE.TINY: int,
FIELD_TYPE.SHORT: int,
FIELD_TYPE.LONG: int,
FIELD_TYPE.FLOAT: float,
FIELD_TYPE.DOUBLE: float,
FIELD_TYPE.DECIMAL: float,
FIELD_TYPE.NEWDECIMAL: float,
FIELD_TYPE.LONGLONG: int,
FIELD_TYPE.INT24: int,
FIELD_TYPE.YEAR: int,
FIELD_TYPE.SET: Str2Set,
FIELD_TYPE.TIMESTAMP: mysql_timestamp_converter,
FIELD_TYPE.DATETIME: DateTime_or_None,
FIELD_TYPE.TIME: TimeDelta_or_None,
FIELD_TYPE.DATE: Date_or_None,
FIELD_TYPE.BLOB: [
(FLAG.BINARY, str),
],
FIELD_TYPE.STRING: [
(FLAG.BINARY, str),
],
FIELD_TYPE.VAR_STRING: [
(FLAG.BINARY, str),
],
FIELD_TYPE.VARCHAR: [
(FLAG.BINARY, str),
],
}
try:
from decimal import Decimal
conversions[FIELD_TYPE.DECIMAL] = Decimal
conversions[FIELD_TYPE.NEWDECIMAL] = Decimal
except ImportError:
pass
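# illustrative note (added for clarity): keys that are Python types map to encoders
# turning values into SQL literals, e.g. conversions[int](5, conversions) == '5';
# FIELD_TYPE keys map to decoders for result columns (lists are filtered by FLAG)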
```
#### File: pyserver/pymysql3/setup_windows.py
```python
def get_config():
import os, sys
from setup_common import get_metadata_and_options, enabled, create_release_file
metadata, options = get_metadata_and_options()
mysql_root = options['mysql_location']
extra_objects = []
static = enabled(options, 'static')
# XXX static doesn't actually do anything on Windows
if enabled(options, 'embedded'):
client = "mysqld"
else:
client = "mysqlclient"
library_dirs = [ os.path.join(mysql_root, r'lib') ]
libraries = [ 'kernel32', 'advapi32', 'wsock32', client ]
include_dirs = [ os.path.join(mysql_root, r'include') ]
extra_compile_args = [ '/Zl' ]
name = "MySQL-python"
if enabled(options, 'embedded'):
name = name + "-embedded"
metadata['name'] = name
define_macros = [
('version_info', metadata['version_info']),
('__version__', metadata['version']),
]
create_release_file(metadata)
del metadata['version_info']
ext_options = dict(
name = "_mysql",
library_dirs = library_dirs,
libraries = libraries,
extra_compile_args = extra_compile_args,
include_dirs = include_dirs,
extra_objects = extra_objects,
define_macros = define_macros,
)
return metadata, ext_options
if __name__ == "__main__":
print ("""You shouldn't be running this directly; it is used by setup.py.""")
```
#### File: fairmotion/pyserver/serv.py
```python
import sys
if sys.version_info.major > 2:
from http import *
from http.server import *
else:
from httplib import *
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
#from SimpleHTTPServer import SimpleHTTPRequestHandler as BaseHTTPRequestHandler
from logger import elog, mlog, alog
import os, sys, os.path, math, random, time, io, gc
import shelve, imp, struct, ctypes
import mimetypes
from auth import AuthAPI_RefreshToken_WPHack, AuthAPI_OAuthStart, AuthAPI_GetUserInfo, AuthAPI_RefreshToken, AuthAPI_SessionToken
from fileapi import FileAPI_DirList, FileAPI_GetMeta, FileAPI_UploadStart, FileAPI_UploadChunk, FileAPI_GetFile
import config, json
import pymysql.err
from api import api_handlers
debug_files = [] #"triangulate.js"]
from config import doc_root, serverhost, ipaddr, serverport
def bstr(s):
if type(s) == bytes: return s
else: return bytes(str(s), "ascii")
def mime(path):
return mimetypes.guess_type(path)
log_file = open("log.txt", "w")
py_bin = sys.executable
if py_bin == "":
sys.stderr.write("Warning: could not find python binary, reverting to default\n")
py_bin = "python3.2"
def debug_file(path):
for d in debug_files:
if d in path: return True
return False
def run_build(path, do_all=False, always_build_file=False):
import subprocess
base = doc_root+os.path.sep+"js_build"+os.path.sep
db = shelve.open(os.path.abspath(base+"../../../jbuild.db".replace("/", os.path.sep)))
f = os.path.split(path)[1]
realpath = f
if not always_build_file and not do_all and f in db and os.path.exists(realpath):
stat = os.stat(realpath).st_mtime
if stat == db[f]:
db.close()
return
db.close()
cmd = [py_bin, base+"js_build.py"]
if always_build_file and not do_all:
cmd.append(os.path.split(path)[1])
elif not do_all:
cmd.append("filter")
cmd.append(os.path.split(path)[1])
cwd = doc_root+os.path.sep+"js_build"+os.path.sep
ret = subprocess.Popen(cmd, cwd=cwd, stdout=sys.stdout, stderr=subprocess.PIPE)
ret.wait()
if ret.returncode != 0:
errbuf = ""
try:
errbuf += str(ret.communicate(timeout = 0.1)[1], "latin-1");
except subprocess.TimeoutExpired:
pass
return errbuf
class ReqHandler (BaseHTTPRequestHandler):
def format_err(self, buf):
if type(buf) == bytes: buf = str(buf, "latin-1")
header = """
<!DOCTYPE html><html><head><title>Build Error</title></head>
<body><h1>Build Failure</h1><h3>
"""
footer = """
</h3>
</body>
"""
ret = ""
for b in buf:
if b == "\n": ret += "<br />"
if b == " ": ret += " "
if b == "\t": ret += "  "
ret += b
return (header + ret + footer).encode()
def set_ipaddr(self):
adr = self.client_address
if type(adr) != str and len(adr) == 2:
adr = str(adr[0]) + ":" + str(adr[1])
else:
adr = str(adr)
config.client_ip = adr
def do_GET(self):
self.set_ipaddr()
alog("GET " + self.path)
if "Connection" in self.headers:
keep_alive = self.headers["Connection"].strip().lower() == "keep-alive"
else:
keep_alive = False
wf = self.wfile
body = [b"yay, tst"]
print(self.path)
path = os.path.normpath(doc_root + self.path)
if not os.path.exists(path):
print("ble")
if self.has_handler(self.path):
self.exec_handler(self.path, "GET")
return
self.send_error(404)
return
if not self.path.startswith("/js_build/"):
print(self.path)
self.send_error(401)
return
if debug_file(path):
always = True
errbuf = run_build(path, always_build_file=always)
if "js_build" in path and path.strip().endswith(".html"):
errbuf = run_build(path, do_all=True)
else:
errbuf = None
if errbuf != None:
body = [self.format_err(errbuf)]
else:
f = open(path, "rb")
csize = 1024*1024
ret = f.read(csize)
body = [ret];
while ret not in ["", b'', None]:
ret = f.read(csize);
body.append(ret);
f.close()
if type(body) == str:
body = [bytes.decode(body, "latin-1")]
elif type(body) == bytes:
body = [body]
bodysize = 0
for chunk in body:
bodysize += len(chunk)
if path.strip().endswith(".js"):
mm = "application/javascript"
else:
mm = mime(path)[0]
self.gen_headers("GET", bodysize, mm);
b = b""
for chunk in body:
b += chunk
wf.write(b);
print(mm)
#for chunk in body:
# wf.write(chunk);
def _handle_mesh_post(self):
buf = self.rfile.read()
print(len(buf))
body = "ok"
self.gen_headers("POST", len(body), "text/text")
self.wfile.write(body)
def _handle_logger_post(self):
body = b"ok"
length = None
for k in self.headers:
if k.lower() == "content-length":
length = int(self.headers[k])
break
        if length is None:
            self.send_error(411) #411: Length Required
            return
buf = self.rfile.read(length)
buf = str(buf, "ascii")
log_file.write(buf + "\n")
log_file.flush()
#self.gen_headers("POST", len(body), "text/text")
#self.wfile.write(body)
#self.wfile.flush()
def has_handler(self, path):
print("bleh!!!")
for k in api_handlers:
print(k, path, "--")
if path.startswith(k): return True
return False
def exec_handler(self, path, op):
print(path, op)
handler = None
#find matching handler with largest prefix
for k in api_handlers:
if path.startswith(k):
if handler == None or len(k) > len(handler):
handler = k
if handler != None:
getattr(api_handlers[handler](), "do_"+op)(self)
def restart(self):
global restart_server
#restart_server = True
print("\nRestarting Server...\n")
self.server.shutdown()
def do_POST(self):
self.set_ipaddr()
path = self.path
alog("POST " + self.path)
if path == "/webgl_helper.webpy":
self._handle_mesh_post()
elif path == "/logger":
self._handle_logger_post()
elif self.has_handler(path):
self.exec_handler(path, "POST")
else:
self.send_error(404)
def do_PUT(self):
alog("PUT " + self.path)
self.set_ipaddr()
path = self.path
if self.has_handler(path):
self.exec_handler(path, "PUT")
else:
self.send_error(404)
def gen_headers(self, method, length, type, extra_headers={}):
#if type == "text/html":
# type = "application/xhtml"
        #write the status line ourselves; send_header() assumes it was already sent
        self.wfile.write(b"HTTP/1.1 200 OK\r\n")
self.send_header("Content-Type", type)
self.send_header("Content-Length", length)
if "Via" in self.headers:
uri = "http://"+serverhost+self.path
print(uri)
self.send_header("Content-Location", uri)
for k in extra_headers:
self.send_header(k, extra_headers[k])
if "Via" in self.headers:
pass
#self.send_header("Via", self.headers["Via"])
#self.send_header("Connection", "close")
#self.send_header("Host", serverhost)
self.send_header("Server-Host", serverhost)
self.end_headers()
    def handle_mesh_post(self):
        #legacy variant of _handle_mesh_post, fixed to write bytes
        body = b"ok"
        self.gen_headers("POST", len(body), "text/plain")
        self.wfile.write(body)
def send_error(self, code, obj=None):
if obj == None: obj = {}
obj["result"] = 0
obj["error"] = code
body = json.dumps(obj)
self.gen_headers("GET", len(body), "application/x-javascript")
self.wfile.write(bstr(body))
restart_server = True
while restart_server:
restart_server = False
server = HTTPServer((ipaddr, serverport), ReqHandler);
#server.socket = ssl.wrap_socket(server.socket, certfile=certpath, keyfile="privateKey.key")
server.serve_forever()
```
#### File: fairmotion/pyserver/serv_wsgi.py
```python
from logger import elog, mlog, alog
import os, sys, os.path, math, random, time, io, gc
import shelve, imp, struct, ctypes
import mimetypes
from auth import AuthAPI_RefreshToken_WPHack, AuthAPI_OAuthStart, AuthAPI_GetUserInfo, AuthAPI_RefreshToken, AuthAPI_SessionToken
from fileapi import FileAPI_DirList, FileAPI_GetMeta, FileAPI_UploadStart, FileAPI_UploadChunk, FileAPI_GetFile
import config, json
from config import *
from db_engine import mysql_close_connections
import pymysql.err
from api import api_handlers
def bstr_py3(s):
if type(s) == bytes: return s
else: return bytes(str(s), "ascii")
def bstr_py2(s):
return str(s)
if not WITH_PY2:
    from io import BytesIO as StringIO #response bodies are bytes under python 3
    bstr = bstr_py3
else:
    from StringIO import StringIO
    bstr = bstr_py2
def Header_WSGI2HTTP(h):
    #e.g. "CONTENT_TYPE" -> "Content-Type"
    h = h.replace("-", "_")
    h = h.lower().split("_")
    h2 = ""
    for i, s in enumerate(h):
        if i != 0: h2 += "-"
        h2 += s[0].upper() + s[1:]
    return h2
class WSGIServerBridge:
def __init__(self, environ):
self.headers = {}
self.res_headers = {}
self.code = 200
self.codemsg = "OK"
for k in environ:
if k.startswith("HTTP_"):
k2 = Header_WSGI2HTTP(k[5:])
self.headers[k2] = environ[k]
self.method = environ["REQUEST_METHOD"]
self.path = environ["REQUEST_URI"]
self.query = environ["QUERY_STRING"]
#if self.path.startswith(base_path) and base_path != "/":
# self.path = self.path[len(base_path):]
try:
self.clen = int(environ.get('CONTENT_LENGTH', 0))
        except ValueError:
self.clen = 0
self.rfile = environ["wsgi.input"]
self.wfile = StringIO()
def send_error(self, code, msg=None):
self.wfile = StringIO()
if msg == None:
msg = "Error occurred"
else:
msg = "Error occurred: " + msg
body = bstr("{error : %d, result : 0, message : \"%s\"}" % (code, msg))
self.wfile.write(body)
self.res_headers = {"Content-Length" : len(body), "Content-Type" : "application/x-javascript"}
self.code = code
self.codemsg = "ERR"
    def _finish(self):
        headers = []
        for k in self.res_headers:
            #WSGI requires native-string header names and values
            headers.append((k, str(self.res_headers[k])))
        self.wfile.seek(0)
        body = self.wfile.read()
        return [str(self.code) + " " + self.codemsg, body, headers]
def send_header(self, header, value):
self.res_headers[header] = value
def gen_headers(self, method, length, type, extra_headers={}):
self.send_header("Content-Type", type)
self.send_header("Content-Length", length)
if "Via" in self.res_headers:
uri = "http://"+serverhost+self.path
print(uri)
self.send_header("Content-Location", uri)
for k in extra_headers:
self.send_header(k, extra_headers[k])
if "Via" in self.res_headers:
pass
#self.send_header("Via", self.res_headers["Via"])
self.send_header("Server-Host", serverhost)
def has_handler(self, path):
for k in api_handlers:
if path.startswith(k): return True
return False
def exec_handler(self, path, op):
handler = None
#find matching handler with largest prefix
for k in api_handlers:
if path.startswith(k):
if handler == None or len(k) > len(handler):
handler = k
if handler != None:
getattr(api_handlers[handler](), "do_"+op)(self)
def do_request(self):
if self.has_handler(self.path):
self.exec_handler(self.path, self.method)
elif self.path == "/video.mp4":
file = open((doc_root+"/build/video.mp4").replace("/", os.path.sep), "rb");
body = file.read()
file.close()
self.wfile.write(body)
self.gen_headers(self.method, len(body), "video/mp4")
elif self.path == unit_path:
file = open((doc_root+"/build/unit_test.html").replace("/", os.path.sep), "rb")
body = file.read()
file.close()
self.wfile.write(body)
self.gen_headers(self.method, len(body), "text/html")
elif self.path == base_path:
file = open((doc_root+"/src/html/main.html").replace("/", os.path.sep), "rb")
body = file.read()
file.close()
self.wfile.write(body)
self.gen_headers(self.method, len(body), "text/html")
else:
self.send_error(404, self.path)
def application(environ, start_response):
bridge = WSGIServerBridge(environ)
bridge.do_request()
status, output, headers = bridge._finish()
"""
output = ""
for k in environ:
output += "%s : \"%s\"\n" % (k, environ[k])
response_headers = [('Content-type', 'text/plain'),
('Content-Length', str(len(output)))]
"""
start_response(status, headers)
mysql_close_connections()
return [output]
```
#### File: fairmotion/pyserver/sqlite_db.py
```python
import sqlite3, datetime
class DateTimeParseError (RuntimeError):
pass
#stupid sqlite
datetime_fields = {
"last_login", "expiration", "expiration", "time"
}
def sql_close_connections():
return;
def parse_dt(str):
i = [0]
def expect(s):
if not str[i[0]:].startswith(s):
raise DateTimeParseError("bad1 " + str[i[0]:i[0]+4])
i[0] += len(s)
return s
def get(n):
if i[0]+n > len(str):
raise DateTimeParseError("bad2 " + str[i[0]:i[0]+n])
ret = str[i[0]:i[0]+n]
i[0] += n
try:
ret = int(ret)
except:
raise DateTimeParseError("bad3 " + str[i[0]:i[0]+n])
return ret
year = get(4)
expect("-")
month = get(2)
expect("-")
day = get(2)
if str[i[0]] == " " or str[i[0]] == "\t":
i[0] += 1
hour = get(2)
expect(":")
minute = get(2)
expect(":")
second = str[i[0]:]
try:
second = float(second)
except:
raise DateTimeParseError("bad4 " + str[i[0]:i[0]+n])
else:
hour = 0
minute = 0
second = 0
second = int(second+0.5)
return datetime.datetime(year, month, day, hour, minute, second)
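#example: parse_dt("2024-01-02 03:04:05.7") -> datetime(2024, 1, 2, 3, 4, 6)
#(fractional seconds are rounded to the nearest whole second), while a bare
#"2024-01-02" yields midnight.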
def parse_datetime(s):
    #sqlite3 hands converters bytes under python 3
    if type(s) == bytes:
        s = str(s, "ascii")
    try:
        return parse_dt(s)
    except DateTimeParseError:
        print("Parse error!", s)
        return None
sqlite3.register_converter("datetime", parse_datetime)
def dict_factory(cursor, row):
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
if col[0] in datetime_fields and type(d[col[0]]) == str:
d[col[0]] = parse_datetime(d[col[0]])
return d
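#with dict_factory installed as the row factory, rows come back as dicts
#keyed by column name, and columns whose names appear in datetime_fields
#are parsed from their string form into datetime objects.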
DBError = sqlite3.OperationalError
gcon = None
gcur = None
class CurProxy:
def __init__(self, cur):
self.cur = cur
def execute(self, str):
return self.cur.execute(str)
def fetchone(self):
ret = self.cur.fetchone()
if ret == None: return None
return ret
def sql_connect():
global gcon, gcur
if gcon == None:
gcon = sqlite3.connect("database.db")
gcon.row_factory = dict_factory
gcur = gcon.cursor()
return gcur, gcon
def sql_reconnect():
return sql_connect()
def init_sql():
pass
def default_db():
cur, con = sql_connect()
f = open("fairmotion.sql", "r")
buf = f.read()
f.close()
statements = [""]
s = ""
for l in buf.split("\n"):
if l.strip().startswith("--") or l.strip().startswith("/*") \
or l.strip().startswith("//"):
continue;
if "ENGINE" in l:
l = ");"
if l.strip() == "": continue
if l.startswith("SET"): continue
s += l + "\n"
for l in s.split("\n"):
if l.strip() == "": continue
if len(l) > 2 and l[:3] == l[:3].upper() and l[0] not in ["\t", " ", "\n", "\r", "("]:
statements.append("")
if l.strip().startswith("PRIMARY KEY"): continue
if l.strip().startswith("KEY"): continue
#l = l.replace("AUTO_INCREMENT", "")
statements[-1] += l + "\n"
    for s in statements:
        if s.strip() == "": continue #skip empty fragments
        # buf = s.replace("IF NOT EXISTS ", "")
        print("===executing====")
        print(s)
        con.execute(s)
    con.commit()
def get_last_rowid(cur):
return cur.lastrowid
```
#### File: fairmotion/scripts/note_win32_manual.py
```python
import sys
import traceback
def safeFilter_intern(msg):
#get rid of terminal codes
s = ""
i = 0
def consume(c, i):
if i < len(msg) and msg[i] == c:
return i+1
return i
    def consume_num(i):
        #consume a run of decimal digits
        while i < len(msg) and msg[i].isdigit():
            i += 1
        return i
while i < len(msg):
c = msg[i]
if ord(c) == 27:
i += 1
i = consume("[", i)
i = consume_num(i)
if i < len(msg) and msg[i] == ";":
i = consume_num(i+1)
i = consume(";", i)
i = consume_num(i)
i = consume("m", i)
if i < len(msg):
s += msg[i]
i += 1
return s
def safeFilter(s):
try:
return safeFilter_intern(s)
except:
try:
traceback.print_exc()
except:
print("failed to fetch backtrace")
sys.exit(-1)
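#safeFilter() strips ANSI/VT100 color sequences of the form ESC [ n ; n m,
#e.g. safeFilter("\x1b[1;31mred\x1b[0m") -> "red", so log text can go into
#the win32 edit control without raw escape codes showing up.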
import ctypes
import ctypes.wintypes
import traceback
import sys
if sys.version[0] != '2':
import queue #for thread-safe queue
else:
import Queue as queue
import threading
import time
from win32gui import *
from win32con import *
from win32api import *
from win32gui_struct import *
from win32ui import *
from .note_base import NoteBase
import threading
def tst(a, b, c, d):
return 0;
DEBUG = 0
UPDATE_TEXT = 0
SETHIDE = 1
KILLNOTE = 2
_BREAK_THREAD = 3
#InitCommonControlsEx(param)
InitRichEdit()
width = 200
height = 150
cls_idgen = 0
class SimpleNotifier (NoteBase):
def __init__(self, note_id, totlines=3):
super().__init__(note_id)
self._block_commands = False
self._has_spawned = False
self.hwnd = None
self.twnd = None
self.lines = []
self.totlines = totlines
self.hidden = False
self.buf = ""
self._threadref = None
self.queue = queue.Queue()
def push_line(self, s):
self.lines.append(s)
if len(self.lines) > self.totlines:
self.lines.pop(0)
self.buf = "\n".join(self.lines)
self._pushcmd(UPDATE_TEXT, self.buf[:])
def showNote(self, title, msg):
msg = safeFilter(msg)
title = safeFilter(title)
self.title = title
if self.hwnd is None and not self._has_spawned:
self._has_spawned = True
self.spawnWindow(title, msg)
self._pushcmd(SETHIDE, 0)
self.push_line(msg)
return
self._pushcmd(SETHIDE, 0)
self.push_line(msg)
def makeTextbox(self):
#self.tbox = CreateRichEditCtrl()
#rect = (10, 10, 380, 180)
#self.tbox.CreateWindow(ES_MULTILINE | WS_VISIBLE | WS_CHILD | WS_BORDER | WS_TABSTOP, rect, self.hwnd, 0)
self.twnd = CreateWindowEx(0, "Edit", "", ES_MULTILINE | WS_VISIBLE | WS_CHILD | WS_BORDER | WS_TABSTOP,
10, 10, width-30, height-60, self.hwnd, None, self.hinst, None)
def RegisterClassCtypes(self, cls):
c_uint = ctypes.c_uint;
c_int = ctypes.c_int
c_char_p = ctypes.c_char_p;
c_void_p = ctypes.c_void_p
POINTER = ctypes.POINTER;
pointer = ctypes.pointer
byref = ctypes.byref
c_int_p = POINTER(c_int)
c_wchar_p = ctypes.c_wchar_p
wnd = cls.lpfnWndProc
LRESULT = c_void_p #ctypes.wintypes.LPHANDLE
LPARAM = c_void_p
HANDLE = ctypes.wintypes.HANDLE
DefWindowProcW = ctypes.windll.user32.DefWindowProcW
DefWindowProcW.restype = LRESULT
def DefWindowProc(hwnd, msg, wparam, lparam):
return DefWindowProcW(hwnd, c_uint(msg), LPARAM(wparam), LPARAM(lparam))
self.DefWindowProc = DefWindowProc
self._wndproc = wndproc = cls.lpfnWndProc
def callback(hwnd, msg, wparam, lparam):
ret = wndproc(hwnd, msg, wparam, lparam)
return 0 if ret is None else ret
self._callback_ref = callback
MYFUNCTYPE = ctypes.WINFUNCTYPE(LRESULT, HANDLE, c_uint, LPARAM, LPARAM)
class WNDCLASS (ctypes.Structure):
_fields_ = [
("style", c_uint),
("lpfnWndProc", MYFUNCTYPE),
("cbClsExtra", c_int),
("cbWndExtra", c_int),
("hInstance", HANDLE),
("hIcon", HANDLE),
("hCursor", HANDLE),
("hbrBackground", HANDLE),
("lpszMenuName", c_wchar_p),
("lpszClassName", c_wchar_p)
]
wnd = WNDCLASS()
self.wnd = wnd
wnd.style = cls.style;
wnd.lpfnWndProc = MYFUNCTYPE(callback)
wnd.hInstance = cls.hInstance;
#wnd.lpszMenuName = ctypes.create_unicode_buffer("")
wnd.lpszClassName = cls.lpszClassName#ctypes.create_unicode_buffer(cls.lpszClassName)
#str(cls.lpszClassName, "latin-1"))
#wnd.hIcon = cls.hIcon
#print(cls.cbWndExtra)
wnd.hbrBackground = 7 #COLOR_WINDOW+1
#wnd.hbrBackground = cls.hbrBackground; #COLOR_BACKGROUND+1
ret = ctypes.windll.user32.RegisterClassW(pointer(wnd))
        if not ret:
            raise RuntimeError("failed to register window class: " + str(GetLastError()))
pass
return ret
#self.wc.style |= CS_GLOBALCLASS;
#self.wc.lpszClassName = str("PythonTaskbar" + str(id)) # must be a string
#self.wc.lpfnWndProc = wndproc # could also specify a wndproc.
def _pushcmd(self, cmd, args):
self.queue.put([cmd, args])
def spawnWindow_intern(self, title, msg):
message_map = {WM_DESTROY: self.on_destroy}
map = {}
import win32con
for k in dir(win32con):
if k.startswith("WM_"):
map[getattr(win32con, k)] = k
def wndproc(hwnd, msg, wparam, lparam):
DefWindowProc = self.DefWindowProc
if msg == WM_PAINT:
ps = BeginPaint(hwnd)
#UpdateWindow(self.twnd)
EndPaint(hwnd, ps[1])
return 0
elif msg == WM_DESTROY:
print(" Got WM_DESTROY!")
PostQuitMessage(0)
return 0
elif msg == WM_QUIT:
self._pushcmd(_BREAK_THREAD, 0)
return 0
# PostQuitMessage(0)
# self.hwnd = None
# return 0
#if msg == WM_QUIT:
# print("got quit message")
# self.hwnd = None
#elif msg == WM_GETICON:
# return self.icon
ret = DefWindowProc(hwnd, msg, wparam, lparam)
if msg in map:
smsg = map[msg]
else:
smsg = "UNKNOWN" + ":" + str(msg)
if DEBUG: print(ret, smsg, wparam, lparam, GetLastError())
return ret
ctypes.windll.user32.DisableProcessWindowsGhosting()
# Register the window class.
self.wc = WNDCLASS()
global cls_idgen
id = cls_idgen
cls_idgen += 1
self.icon = self.wc.hIcon = LoadIcon(None, IDI_QUESTION);
self.hinst = self.wc.hInstance = GetModuleHandle(None)
self.wc.style |= CS_GLOBALCLASS;
self.wc.lpszClassName = str("PythonTaskbar" + str(id)) # must be a string
self.wc.lpfnWndProc = wndproc # could also specify a wndproc.
self.className = self.wc.lpszClassName
self.classAtom = self.RegisterClassCtypes(self.wc)
if DEBUG: print("Error:", GetLastError())
#print(self.classAtom)
style = WS_VISIBLE | WS_OVERLAPPED | WS_POPUP | WS_VISIBLE | WS_CAPTION
#exstyle = WS_EX_NOACTIVATE #move to CreateWindowEx and put this in extended style
self.hwnd = CreateWindow(self.classAtom, self.title, style,
1335, 0, width, height, None, None, self.hinst, None)
SetWindowPos(self.hwnd, HWND_TOPMOST, 0,0,0,0,
SWP_NOMOVE | SWP_NOSIZE)
self.makeTextbox()
ShowWindow(self.hwnd, SW_SHOW)
UpdateWindow(self.hwnd);
#add timer to nudge message queue every once in a while
ctypes.windll.user32.SetTimer(self.hwnd, 1, 250, None)
#buf = "\n".join(self.lines)
#self.set_lines(buf)
def spawnWindow(self, title, msg):
if self.hwnd is not None:
            sys.stderr.write("ERROR ERROR! spawnWindow called twice!!\n")
self.title = title
self.msg = msg
def threadloop():
print("Starting notification thread")
self.spawnWindow_intern(self.title, self.msg)
t = time.time()
while 1:
#if time.time() - t > 3: break #XXX
try:
cmd = self.queue.get(0) #don't wait
except queue.Empty:
cmd = None
if cmd is not None and cmd[0] == _BREAK_THREAD:
print("Got break signal in thread")
break
elif cmd is not None:
self._docommand(cmd)
if self.hwnd is None:
break;
msg = ctypes.wintypes.MSG()
byref = ctypes.byref
POINTER = ctypes.POINTER
#QS_ALLPOSTMESSAGE
#ctypes.windll.user32.MsgWaitForMultipleObjects();
handles = ctypes.wintypes.HANDLE
#retw = ctypes.windll.user32.MsgWaitForMultipleObjectsEx(
# 0, #// no handles
# 0, #// no handles
# 5, # 55,
# QS_ALLINPUT,
# MWMO_ALERTABLE)
#SleepEx(1, 1);
#if (self.hidden):
# continue;
#retw = ctypes.windll.user32.WaitMessage()
#SleepEx(1, True)
Sleep(1);
                ret = ctypes.windll.user32.PeekMessageW(byref(msg), None, 0, 0, 0) #PM_NOREMOVE: just probe the queue
if ret == 0: #no messages available
continue
retm = ctypes.windll.user32.GetMessageW(byref(msg), None, 0, 0);
if retm:
if DEBUG: print("got message!!!! -----------------------------");
retd = ctypes.windll.user32.DispatchMessageW(byref(msg));
if DEBUG: print(retd)
else: #if GetLastError() == 0:
if DEBUG: print("quit time", GetLastError())
#PostQuitMessage(0)
break
#TranslateMessage(ret);
#print("RET", ret)
#PumpWaitingMessages(self.hwnd);
if DEBUG: print("Exit notification thread")
self._threadref = None
thread = threading.Thread(target=threadloop)
thread.start()
self._threadref = thread
def set_lines(self, txt):
txt = safeFilter(txt)
if not "\r" in txt:
txt = txt.replace("\n", "\r\n")
#if txt.strip() == "":
# return
#print("TEXT", repr(txt), txt == "Test")
if self.hwnd is None:
print("hwnd was none in set_lines")
return
if DEBUG: print(self.twnd)
SetWindowText(self.twnd, txt);
#UpdateWindow(self.twnd)
UpdateWindow(self.hwnd)
def handleUpdates(self):
pass #using threads now
def appendNote(self, msg):
msg = safeFilter(msg)
self.push_line(msg)
def clearNote(self):
self.lines = [];
def hideNote(self):
self._pushcmd(SETHIDE, 1)
def _docommand(self, cmd):
if DEBUG: print(" Got command!")
if self.hwnd is None:
print("got command after hwnd died")
return
cmd, args = cmd
if cmd == SETHIDE:
self.hidden = args
ShowWindow(self.hwnd, SW_HIDE if args else SW_SHOW);
UpdateWindow(self.hwnd)
elif cmd == UPDATE_TEXT:
self.buf = args
self.set_lines(self.buf)
elif cmd == KILLNOTE and self.hwnd is not None and self._threadref is not None:
hwnd = self.hwnd
self.hwnd = None
if DEBUG: print("DESTROYING WINDOW================", hwnd)
if not DestroyWindow(hwnd):
sys.stderr.write("Error closing window " + str(hwnd) + "\n");
sys.stderr.write(" lasterrer: " + str(GetLastError()) + "\n");
self.hwnd = None
else:
self.hwnd = None
if DEBUG: print("Closing window class")
UnregisterClass(self.className, self.hinst)
def killNote(self):
self._pushcmd(_BREAK_THREAD, 0)
self._pushcmd(KILLNOTE, 0)
print("waiting for note thread to disappear. . .")
while self._threadref is not None:
time.sleep(0.001)
pass
print("done waiting")
def on_destroy(self):
self._pushcmd(KILLNOTE, 0)
if __name__ == "__main__":
pass
#print("Start");
#n = SimpleNotifier(1)
#print(n.note_id)
"""
for si in range(1):
n = SimpleNotifier();
n.showNote("Build System", "Test")
time.sleep(1)
for i in range(3):
n.appendNote("Bleh!" + str(i))
time.sleep(0.5)
time.sleep(0.5)
n.hideNote()
time.sleep(0.5)
print("yay");
#"""
```
#### File: src/util/bicubic_patch.py
```python
import bpy
from mathutils import *
from math import *
import bmesh
import time, random, sys, os, io, imp
def facto(n):
prod = 1
for i in range(1, n+1):
prod *= i
return prod
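#facto() is used below to form binomial coefficients:
#C(n, i) = facto(n)/(facto(i)*facto(n-i)), the Bernstein weights in Patch.eval().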
class Vector1 (object):
def __init__(self, vec=None):
self.vec = [0.0, 0.0, 0.0]
if vec != None:
for i in range(min(3, len(vec))):
self[i] = vec[i]
def __getitem__(self, i):
return self.vec[i]
def __setitem__(self, i, v):
self.vec[i] = v
def __len__(self):
return len(self.vec)
def copy(self):
return Vector(self)
def __mul__(self, b):
r = Vector(self)
if type(b) in [int, float]:
r[0] *= b; r[1] *= b; r[2] *= b
else:
r[0] *= b[0]; r[1] *= b[1]; r[2] *= b[2]
return r
    def __truediv__(self, b): #python 3 division operator (was __div__)
        r = Vector(self)
        if type(b) in [int, float]:
            r[0] /= b; r[1] /= b; r[2] /= b
        else:
            r[0] /= b[0]; r[1] /= b[1]; r[2] /= b[2]
        return r
def __sub__(self, b):
r = Vector(self)
if type(b) in [int, float]:
r[0] -= b; r[1] -= b; r[2] -= b
else:
r[0] -= b[0]; r[1] -= b[1]; r[2] -= b[2]
return r
    def __str__(self):
        return str(list(self))
    def __repr__(self):
        return repr(list(self))
def __add__(self, b):
r = Vector(self)
if type(b) in [int, float]:
r[0] += b; r[1] += b; r[2] += b
else:
r[0] += b[0]; r[1] += b[1]; r[2] += b[2]
return r
    def dot(self, b=None):
        if b is None: b = self #dot with itself gives the squared length
        return self[0]*b[0] + self[1]*b[1] + self[2]*b[2]
    def length(self):
        return sqrt(self.dot())
class Patch:
def __init__(self, degx, degy):
self.size = size = [degx+1, degy+1]
self.points = [[Vector() for i1 in range(size[1])] for i2 in range(size[0])]
self.degree = [degx, degy]
#self.coeff = [[1.0 for i1 in range(size[1])] for i2 in range(size[0])]
def eval(self, u, v):
dx, dy = self.size
n, m = self.degree
"""
max_c = 0.0
for i in range(n+1):
for j in range(m+1):
c = self.coeff[i][j]
max_c = max(max_c, c)
for i in range(n+1):
for j in range(m+1):
self.coeff[i][j] /= max_c
#"""
u2 = u; v2 = v
k = self.points
p = Vector()
for i in range(n+1):
for j in range(m+1):
bi = facto(n)/(facto(i)*facto(n-i))
bi *= u**i*(1-u)**(n-i)
bj = facto(m)/(facto(j)*facto(m-j))
bj *= v**j*(1-v)**(m-j)
p += k[i][j]*bi*bj
return p
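#Patch.eval() above evaluates the tensor-product Bernstein (Bezier) form
#  P(u,v) = sum_ij C(n,i) u^i (1-u)^(n-i) * C(m,j) v^j (1-v)^(m-j) * k[i][j]
#so eval(0,0) returns the corner control point points[0][0] and eval(1,1)
#the opposite corner; hypothetical sanity check:
#  p = Patch(3, 3); assert p.eval(0, 0) == p.points[0][0]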
def tess_patch(bm, patch, steps):
df = 1.0 / (steps-1)
verts = [[0 for x in range(steps)] for y in range(steps)]
for i in range(steps):
for j in range(steps):
p = patch.eval(df*i, df*j)
v = bm.verts.new(p)
verts[i][j] = v
for i in range(steps-1):
for j in range(steps-1):
vs = [verts[i][j], verts[i+1][j], verts[i+1][j+1], verts[i][j+1]]
f = bm.faces.new(vs)
def ensure_edge(bm, v1, v2):
e = bm.edges.get([v1, v2])
if e == None:
e = bm.edges.new([v1, v2])
return e
def out_patch(bm, patch):
verts = [[0 for x in range(4)] for y in range(4)]
for i in range(4):
for j in range(4):
p = patch.points[i][j]
v = bm.verts.new(p)
verts[i][j] = v
for i in range(3):
for j in range(3):
vs = [verts[i][j], verts[i+1][j], verts[i+1][j+1], verts[i][j+1]]
#f = bm.faces.new(vs)
ensure_edge(bm, vs[0], vs[1])
ensure_edge(bm, vs[1], vs[2])
ensure_edge(bm, vs[2], vs[3])
ensure_edge(bm, vs[3], vs[0])
def norm(m):
sum = 0
for i in range(len(m)):
sum += m[i]
for i in range(len(m)):
m[i] /= sum
def range2(a, b):
if a <= b:
return range(a, b+1)
else:
return range(b, a+1)
#we're assuming consistent face windings
def get_ring(v, f):
lst = []
l = None
for l2 in v.link_loops:
if l2.face == f:
l = l2
break
l = l.link_loop_prev.link_loop_radial_next
startl = l
lset = set()
while 1:
lst.append(l.link_loop_next.vert)
lst.append(l.link_loop_next.link_loop_next.vert)
l = l.link_loop_prev.link_loop_radial_next
if l == startl:
break
if l in lset:
break
lset.add(l)
return lst
def lerp(a, b, t):
return a + (b-a)*t
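#match_quad() below converts a quad face plus its one-ring of neighboring
#vertices into a bicubic Bezier patch, in the spirit of Loop/Schaefer-style
#approximate Catmull-Clark: corner(), edge() and interior() average the ring
#with fixed masks (the 1/4/16 weights), adjusting the center weight by the
#vertex valence before normalizing with norm().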
def match_quad(f):
ma = [[1, 1],
[1, 1]]
mb = [[2, 1],
[8, 4],
[2, 1]]
mc = [[1, 4, 1],
[4, 16, 4],
[1, 4, 1]]
ptch = Patch(3, 3)
ls = list(f.loops)
v1, v2, v3, v4 = [l.vert for l in ls]
ps = ptch.points
mc = [4, 1, 4, 1, 4, 1, 4, 1, 16]
print(mc)
norm(mc)
mc11 = [4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 5**2]
mc5 = [4, 1, 4, 1, 2**2]
norm(mc11)
norm(mc5)
def corner(x, y, i):
ring = get_ring(ls[i].vert, f) + [ls[i].vert]
ps[x][y] = Vector()
print("lr", len(ring), len(list(ls[i].vert.link_edges)))
mc = [4 if x%2==0 else 1 for x in range(len(ring)-1)]
mc.append(len(list(ls[i].vert.link_edges))**2)
norm(mc)
for i, v in enumerate(ring):
if i >= len(mc): break
ps[x][y] += v.co*mc[i]
corner(0, 0, 0)
corner(0, 3, 1)
corner(3, 3, 2)
corner(3, 0, 3)
def get_e_ring(v1, v2, f):
l1 = l2 = None
r = []
for l in v1.link_loops:
if l.face == f:
l1 = l
break
for l in v2.link_loops:
if l.face == f:
l2 = l
break
#corner1 adj1 adj2 corner2
if (l1.link_loop_next.vert == v2):
r.append(l1.link_loop_radial_next.link_loop_next.link_loop_next.vert)
r.append(l1.link_loop_prev.vert)
r.append(l1.link_loop_radial_next.link_loop_prev.vert)
r.append(l1.link_loop_next.link_loop_next.vert) #link_loop_radial_next.link_loop_prev.link_loop_radial_next.link_loop_next.link_loop_next.vert)
else:
r.append(l2.link_loop_radial_next.link_loop_prev.vert)
r.append(l2.link_loop_prev.link_loop_prev.vert)
r.append(l2.link_loop_radial_next.link_loop_next.link_loop_next.vert)
r.append(l2.link_loop_prev.vert)
return r + [v1, v2]
def edge(x1, y1, x2, y2, v1, v2):
r = get_e_ring(v1, v2, f)
print(len(r))
if len(r) != 6: return
#r1[5] = v1
#r2[5] = v2
v11 = Vector()
v22 = Vector()
me1 = [2, 2, 1, 1, 8, 4]
me2 = [1, 1, 2, 2, 4, 8]
me1[-2] = 2*len(list(v1.link_edges))
me2[-1] = 2*len(list(v2.link_edges))
norm(me1)
norm(me2)
for i in range(len(me1)):
v11 += r[i].co*me1[i]
for i in range(len(me2)):
v22 += r[i].co*me2[i]
ps[x1][y1] = v11
ps[x2][y2] = v22
def rot(m, end=0):
m2 = []
for i1 in range(len(m)-end):
m2.append(m[(i1+1)%(len(m)-end)])
for i1 in range(len(m)-end, len(m)):
m2.append(m[i1])
m[:] = m2
def me_rot(m):
m1 = m[:8]
m2 = m[8:16]
rot(m1)
rot(m2)
m2 = m1 + m2 + [m[-2] + m[-1]]
m[:] = m2
#"""
edge(0, 1, 0, 2, v1, v2)
edge(1, 3, 2, 3, v2, v3)
edge(3, 1, 3, 2, v4, v3)
edge(1, 0, 2, 0, v1, v4)
#"""
def interior(x, y, v):
r = get_ring(v, f)
r[3] = v
if v == ls[0].vert:
r = [ls[0].vert, ls[1].vert, ls[2].vert, ls[3].vert]
elif v == ls[1].vert:
r = [ls[1].vert, ls[2].vert, ls[3].vert, ls[0].vert]
elif v == ls[2].vert:
r = [ls[2].vert, ls[3].vert, ls[0].vert, ls[1].vert]
elif v == ls[3].vert:
r = [ls[3].vert, ls[0].vert, ls[1].vert, ls[2].vert]
r.remove(v)
r.append(v)
mi = [2, 1, 2, len(list(v.link_edges))]
norm(mi)
ps[x][y] = Vector()
for i in range(4):
ps[x][y] += r[i].co*mi[i]
interior(1, 1, v1)
interior(1, 2, v2)
interior(2, 2, v3)
interior(2, 1, v4)
"""
for i, c in enumerate(cs):
ring = get_ring(ls[i].vert, f)
ring.append(ls[i].vert)
x, y = c
ps[x][y] = Vector()
for j, vn in enumerate(ring):
ps[x][y] += vn.co*m[j]
#"""
return ptch
def v_in_e(e, v):
return v == e.verts[0] or v == e.verts[1]
def main():
ob = bpy.data.objects["Cube"]
m = ob.data
bm = bmesh.new()
inbm = bmesh.new()
inob = bpy.data.objects["Cube.001"]
if (inob.mode == "EDIT"):
inbm = bmesh.from_edit_mesh(inob.data).copy()
else:
inbm.from_mesh(inob.data)
inbm.faces.index_update()
inbm.verts.index_update()
def do_boundary_edges():
edata = []
vmap = {}
def new_vert(v):
if len(list(v.link_edges)) == 2:
return inbm.verts.new(v.co)
if v.index in vmap:
return vmap[v.index]
else:
vmap[v.index] = inbm.verts.new(v.co)
return vmap[v.index]
for e in list(inbm.edges):
if len(list(e.link_faces)) != 1: continue
v1 = new_vert(e.verts[0])
v2 = new_vert(e.verts[1])
do_crn1 = do_crn2 = False
do_crn1 = len(list(e.verts[0].link_edges)) == 2
do_crn2 = len(list(e.verts[1].link_edges)) == 2
edata.append([e, v1, v2, do_crn1, do_crn2])
for ed in edata:
            e, v1, v2, do_crn1, do_crn2 = ed
l = list(e.link_loops)[0]
if l.vert == e.verts[1]:
f = inbm.faces.new([e.verts[0], e.verts[1], v2, v1])
else:
f = inbm.faces.new([e.verts[1], e.verts[0], v1, v2])
f.index = -1
if do_crn1:
v3 = None
for e2 in e.verts[0].link_edges:
if e.index != e2.index and e2.index != -1 and e2.index < len(edata):
if e.verts[0] == e2.verts[0]:
v3 = edata[e2.index][1]
else:
v3 = edata[e2.index][2]
if v3 != None:
vs = [e.verts[1], v1, inbm.verts.new(v1.co), v3]
print(vs)
try:
f = inbm.faces.new(vs)
f.index = -1
except:
pass
if do_crn2:
v3 = None
for e2 in e.verts[1].link_edges:
if e.index != e2.index and e2.index != -1 and e2.index < len(edata):
print(e2.index, len(edata))
if e.verts[1] == e2.verts[0]:
v3 = edata[e2.index][1]
else:
v3 = edata[e2.index][2]
if v3 != None:
vs = [e.verts[1], v2, inbm.verts.new(v2.co), v3]
print(vs)
f = inbm.faces.new(vs)
f.index = -1
do_boundary_edges()
print("\n\n")
steps = 3
for f in list(inbm.faces):
if f.index >= 0: #== 9:
qpatch = match_quad(f)
tess_patch(bm, qpatch, steps)
#out_patch(bm, qpatch)
bm.to_mesh(m)
m.update()
def scene_update_post(scene):
main()
scene_update_post.bicubic_tag = 1
#"""
for h in list(bpy.app.handlers.scene_update_pre):
if hasattr(h, "bicubic_tag"):
bpy.app.handlers.scene_update_pre.remove(h)
#"""
main()
#bpy.app.handlers.scene_update_pre.append(scene_update_post)
```
#### File: tools/extjs_cc/js_let.py
```python
from js_cc import js_parse
from js_ast import *
from js_process_ast import *
from js_global import glob
from js_typespace import *
_s_rec_idgen = 1
class ScopeRecord:
def __init__(self, type, name):
global _s_rec_idgen
self.type = type
self.name = name
self.func_nest = 0; #how deep into nested functions are we
self.nest = 0;
self.uuid = "$_"+type+"_" + name + str(_s_rec_idgen);
_s_rec_idgen += 1
def __repr__(self):
return str(self)
def __str__(self):
s = "ScopeRecord(name="+str(self.name)
s += ", type=" + str(self.type) + ", nest=" + str(self.nest)
s += ", uuid=" + str(self.uuid) + ", func_nest="
s += str(self.func_nest)+")"
return s
def copy(self):
ret = ScopeRecord(self.type, self.name)
ret.uuid = self.uuid;
ret.nest = self.nest;
ret.func_nest = self.func_nest;
return ret
class Scope(dict):
pass
def copy_scope(scope):
scope2 = Scope(scope)
scope2.parent = scope
if hasattr(scope, "func_parent"):
scope2.func_parent = scope.func_parent
scope = scope2
for k in scope:
if type(scope[k]) == ScopeRecord:
scope[k] = scope[k].copy()
scope[k].nest += 1
return scope
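#copy_scope() gives each nested block its own view of the bindings: copied
#ScopeRecords get nest += 1, so VarDeclNode below can tell a legal shadowing
#(nest > 0) from an illegal redeclaration in the same scope (nest == 0).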
class LetVisitor (NodeVisit):
def __init__(self, typespace):
NodeVisit.__init__(self)
self.typespace = typespace
self.required_nodes = set()
def FunctionNode(self, node, scope, traverse, tlevel):
scope2 = copy_scope(scope)
scope2.func_parent = scope
scope = scope2
for k in scope:
if scope[k].type == "let":
scope[k].func_nest += 1
for arg in node[0]:
val = arg.val
scope[val] = ScopeRecord("argument", val)
for c in node[1:]:
traverse(c, scope, tlevel)
def IdentNode(self, node, scope, traverse, tlevel):
if type(node.parent) == BinOpNode and node.parent.op == "." and node == node.parent[1]:
return
#if node.val in scope and scope[node.val].type == "let":
# node.val = scope[node.val].uuid
def ForLoopNode(self, node, scope, traverse, tlevel):
scope = copy_scope(scope)
for c in node:
traverse(c, scope, tlevel)
def VarDeclNode(self, node, scope, traverse, tlevel):
if node.val in scope:
if scope[node.val].nest == 0 and ("let" in node.modifiers or "const" in node.modifiers):
self.typespace.error(node.val + " is already let-declared", node);
else:
del scope[node.val]
if "let" in node.modifiers:
if hasattr(scope, "func_parent") and node.val in scope.func_parent:
pass
#p = scope.func_parent
#if p[node.val].type == "let" and p[node.val].func_nest == 0:
# self.typespace.error("Tried to let variable that was \n\t\tlet'd in parent function scope", node);
if node.val in scope and scope[node.val].type in ["let", "const"]:
self.typespace.error(node.val + " is already let-declared", node);
rec = ScopeRecord("let", node.val)
scope[node.val] = rec
#node.val = rec.uuid
for c in node:
traverse(c, scope, tlevel)
def StatementList(self, node, scope, traverse, tlevel):
scope = copy_scope(scope)
for c in node:
traverse(c, scope, tlevel)
def process_let(node, typespace):
flatten_statementlists(node, typespace);
visit = LetVisitor(typespace)
visit.traverse(node);
```
#### File: tools/extjs_cc/js_preprocessor.py
```python
import os, sys, traceback, struct, random, math, time, io, imp, os.path
import ply_preprocessor_parse as ppp
def preprocess_text_intern(data, filename, working_dir=None):
#kill \r's
data = data.replace("\r", "")
lexer = ppp.lexer
p = ppp.Preprocessor(lexer)
p.parse(data, filename);
s = ""
s2 = ""
while True:
tok = p.token()
#print(tok, tok.lineno)
if not tok: break
if tok.type == "CPP_WS" and "\n" in tok.value:
s2 += str(tok.lineno)
pass
#print(tok.type)
if 1: #tok.type != "CPP_COMMENT":
s += tok.value
s2 += tok.value
#ensure trailing newline
if not s.endswith("\n"):
s += "\n"
#smap = p.sourcemap
#print(smap.map)
#smap.invert(s)
#print(s)
#sys.exit()
"""
out = ""
for i, c in enumerate(s):
if c == "\n":
line = smap.lookup(i)
out += " -> "+str(line[0])+":"+line[1]
out += c
print(out)
print("\n====\n\n", s2)
#"""
return s
def preprocess_text(data, filename, working_dir=None):
    oldcwd = None
    if working_dir != None:
        oldcwd = os.getcwd()
        try:
            os.chdir(working_dir)
        except OSError:
            sys.stderr.write("Warning: could not change working directory\n")
    ret = preprocess_text_intern(data, filename, working_dir)
    if oldcwd != None:
        try:
            os.chdir(oldcwd)
        except OSError:
            sys.stderr.write("Warning: could not restore working directory\n")
    return ret
```
#### File: tools/extjs_cc/js_process_ast.py
```python
from random import random, seed
import time
def find_node(node, ntype, strict=False, depth=0):
if type(node) == ntype:
if not (depth == 0 and strict):
return node
for n in node.children:
ret = find_node(n, ntype, strict, depth+1)
if ret != None: return ret
return None
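#hypothetical usage sketch: grab the first function definition in a tree
#  fn = find_node(ast, FunctionNode)
#with strict=True the root itself is never matched, only descendants.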
def traverse_i(n, ntype, func, i, cur=None, use_depth=False,
exclude=[], copy_children=False, depth=0):
if cur == None:
cur = [0]
if type(n) in exclude and depth != 0:
return
if copy_children:
cs = n[:]
if type(n) == ntype:
if cur[0] == i:
cur[0] += 1
if use_depth:
func(n, depth)
else:
func(n)
else:
cur[0] += 1
if not copy_children:
cs = n.children
for c in cs:
traverse_i(c, ntype, func, i, cur, use_depth, exclude, copy_children, depth+1)
def null_node(n):
return n in [0, None]
def traverse(n, ntype, func, use_depth=False,
exclude=[], copy_children=False,
use_scope=False, scope=None, depth=0):
if scope == None: scope = {}
scope = handle_scope(n, scope)
if type(exclude) != list and type(exclude) != tuple and issubclass(exclude, Node):
exclude = [exclude]
if type(n) in exclude and depth != 0:
return
if copy_children:
cs = n[:]
if type(n) == ntype:
if use_depth and use_scope:
func(n, scope, depth)
elif use_scope:
func(n, scope)
elif use_depth:
func(n, depth)
else:
func(n)
if not copy_children:
cs = n.children
for c in cs:
traverse(c, ntype, func, use_depth, exclude, copy_children, use_scope, scope, depth+1)
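#hypothetical usage sketch: visit every identifier in the tree
#  traverse(ast, IdentNode, lambda n: print(n.val))
#passing exclude=FunctionNode stops the walk from descending into functions.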
def validate_class_this_refs(typespace, cls, scope):
def visit(n):
#this isn't bound in nested functions
if type(n) == FunctionNode:
return;
if type(n) == IdentNode and n.val == "this":
if type(n.parent) == BinOpNode and n.parent.op == "." and n == n.parent[0]:
if n.parent[1].gen_js(0).strip() not in scope:
typespace.error("Unknown property " + n.parent[1].gen_js(0).strip(), n.parent)
for c in n:
visit(c)
for c2 in cls:
if type(c2) != VarDeclNode:
visit(cls)
"""
a.b.?c().?d.f.?d
var _t = undefined;
function qexpr(a) {
var _t = a.b;
if (_t == undefined) {
return undefined;
}
_t = _t.c();
if (_t == undefined) {
return undefined;
}
_t = _t.d.f;
if (_t == undefined) {
return undefined;
}
_t = _t.d;
return _t;
}
#"""
import hashlib, base64
def transform_exisential_operators(node, typespace):
vset = set()
def get_ud(n, hash=None):
if hash == None:
hash = hashlib.sha1()
bstr = bytes(str(n.get_line_str())+str(n.lexpos), "latin-1")
hash.update(bstr);
if n.parent != None:
get_ud(n.parent)
ret = base64.b64encode(hash.digest())
ret = str(ret, "latin-1")
        ret = ret.replace("=", "").replace("/", "").replace("+", "").replace("-", "")
return "q" + ret[:8] + "_" + str(n.lexpos)
def visit(n):
#find distinct chain
if n.op != ".?": return
if n in vset: return
vset.add(n);
id = get_ud(n);
validops = [".", ".?"]
#find head of chain
p = n
#print(p.get_line_str())
while p != None and type(p) == BinOpNode and type(p.parent) == BinOpNode: # and p.op in validops:
#vset.add(p[0])
#vset.add(p[1])
p = p.parent
vset.add(p)
#print(p.get_line_str())
#print(p.parent.get_line_str())
slist = StatementList()
en = js_parse("_t = _t.$n;", [n[1]], start_node=AssignNode);
slist.add(en)
en = en[1]
n2 = n.parent
while n2 != p.parent and type(n2) == BinOpNode: # and n2.parent != p:
if n2.op != ".?":
#print(type(p))
enp = en.parent
en2 = BinOpNode(en, n2[1], n2.op)
enp.replace(en, en2)
en = en2
else:
slist.add(js_parse("if (_t == undefined) return undefined;"))
en = js_parse("_t = _t.$n;", [n2[1]], start_node=AssignNode);
slist.add(en)
n2 = n2.parent
slist.add(js_parse("return _t;"));
fn = js_parse("""
function $s(obj) {
var _t = obj;
if (_t == undefined) return undefined;
$n
}""", [id, slist], start_node=FunctionNode);
print(fn.gen_js(1))
#find body to put function in
p2 = p
lastp2 = p
while p2.parent != None and not isinstance(p2, FunctionNode):
lastp2 = p2
p2 = p2.parent
if isinstance(p2, FunctionNode):
p2.insert(1, fn)
else:
p2.insert(p2.index(lastp2), fn)
#"""
cn = FuncCallNode(id)
cn.add(ExprListNode([n[0]]))
p.parent.replace(p, cn)
#"""
#traverse(node, BinOpNode, visit)
def dovisit(n):
for c in n:
dovisit(c)
if type(n) == BinOpNode:
visit(n)
dovisit(node)
flatten_statementlists(node, typespace)
def transform_exisential_operators_old(node, typespace):
tfuncmap = {}
idfuncmap = {}
def ensure_tempvar(n, prefix="$_eop"):
startn = n
while n.parent != None and not isinstance(n, FunctionNode):
n = n.parent
if n not in tfuncmap:
tfuncmap[n] = {}
idfuncmap[n] = 0
tmap = tfuncmap[n]
if startn not in tmap:
tmap[startn] = idfuncmap[n];
idfuncmap[n] += 1
else:
idx = tmap[startn]
tname = "%s%i" % (prefix, idx)
return tname
idx = tmap[startn]
tname = "%s%i" % (prefix, idx)
for c in n:
if type(c) == VarDeclNode and c.val == tname:
#return tname
pass
pass
n2 = VarDeclNode(ExprNode([]), name=tname)
n2.add(UnknownTypeNode())
n2.modifiers.add("local")
n.prepend(n2)
return tname
def has_leaf(n):
for c in n:
if type(c) == BinOpNode and c.op == ".?": return True
if has_leaf(c): return True
return False
doneset = set()
def tag(n):
doneset.add(n)
for c in n:
tag(c);
def has_cond(n):
if type(n) == BinOpNode and n.op == ".?": return True
n2 = n
while type(n2.parent) == BinOpNode and type(n2) == BinOpNode and n2.op in [".", ".?"]:
n2 = n2.parent
if n2.op == ".?": return True
for c in n:
if has_cond(c):
return True
return False
condset = set()
def start_tag(n):
if has_cond(n):
condset.add(n)
for c in n:
start_tag(c)
def visit(n):
if n in doneset: return
doneset.add(n)
if type(n) == BinOpNode and n.op in [".", ".?"]:
start_tag(n)
for c in n:
visit(c)
if type(n) != BinOpNode: return
if n.op not in [".?", "."]: return
if n not in condset: return
#find head of chain
n2 = n
while n2.parent != None and type(n2.parent) == BinOpNode and n2.parent.op in [".?", "."]:
n2 = n2.parent
t = ensure_tempvar(n2, "$_eop_t")
name = t
idx = -1
if type(n[0]) == ExprListNode and type(n[1]) == ExprListNode:
accu = n[0]
for c in n[1]:
accu.add(c)
elif type(n[0]) == ExprListNode:
accu = n[0]
idx = 1
elif type(n[1]) == ExprListNode:
accu = n[1]
idx = 0
else:
accu = ExprListNode([])
accu.add_parens = True
idx = 2
if idx in [0, 1]:
n2 = js_parse("""
$s1 = $s1 ? $s1.$n2 : undefined
""", [name, n[1]], start_node=AssignNode)
accu.add(n2)
elif idx == 2:
n2 = js_parse("""
$s1 = $n2 ? $n2.$n3 : undefined
""", [name, n[0], n[1]], start_node=AssignNode)
accu.add(n2)
n.parent.replace(n, accu)
#print(n)
def visit1(n):
if n in doneset: return
doneset.add(n)
stop = has_leaf(n) #and (type(n) == BinOpNode and n.op == ".?")
for c in n:
visit(c)
if stop: return
if type(n) != BinOpNode: return
if type(n.parent) != BinOpNode: return
if n.op != ".?": return
#print(stop, n.get_line_str(), n[0].get_line_str(), n[1].get_line_str(), n.op if type(n) == BinOpNode else "")
tname = ensure_tempvar(n, "t")
startn = n
lst = [tname, n[0], n[0], n[1]]
n2 = js_parse("$s = $n != undefined ? $n.$n : undefined;", lst, start_node=AssignNode)
accu = ExprListNode([])
accu.add(n2)
return
n = n.parent
lastn = n
while n != None and type(n) == BinOpNode and n.op in [".", ".?"]:
doneset.add(n)
tag(n)
print(type(n.children[0]), type(n.children[1]))
lst = [tname, n[1]]
n3 = js_parse("$s1 = $s1 != undefined ? $s1.$n2 : undefined;", lst, start_node=AssignNode)
accu.prepend(n3)
lastn = n
n = n.parent
#print(lastn)
#startn.parent.remove(startn)
tag(lastn)
if lastn in lastn.parent.children:
print("removing")
lastn.parent.replace(lastn, accu)
print("--starting")
visit(node)
def gen_manifest_file(result, typespace):
"""this bit of code clears all type info.
helpful for figuring out how much of the program,
percentage-wise, is already typed.
NOTE: this corrupts the AST tree.
def rec1(n):
n.type = UnknownTypeNode()
for c in n:
rec1(c)
rec1(result)
#"""
def build_func_name(n):
if type(n) == FunctionNode:
s1 = "function"
elif type(n) == MethodGetter:
s1 = "getter"
elif type(n) == MethodSetter:
s1 = "setter"
elif type(n) == MethodNode:
s1 = "method"
s1 = n.name
if type(n) in [MethodGetter, MethodSetter, MethodNode]:
s1 = n.parent.name + "." + s1
return s1
def function_sig(n):
s = n.name + "("
needs_typeinfo = False
for i, c in enumerate(n[0]):
if i > 0: s += ","
if c.type != None and type(c.type) != (UnknownTypeNode):
s += c.type.get_type_str() + " "
else:
s += "Object "
needs_typeinfo = True
s += c.gen_js(0)
s += ")"
if n.type != None and type(n.type) != UnknownTypeNode:
s += " : " + n.type.get_type_str();
elif not (n.name == "constructor" and type(n) == MethodNode):
def rec(n2):
if type(n2) == ReturnNode and len(n2[0].gen_js(0).strip()) > 0:
return True
ret = False
for c in n2:
if not isinstance(c, FunctionNode):
ret |= rec(c)
return ret
needs_typeinfo = needs_typeinfo or rec(n)
if needs_typeinfo and glob.g_warn_types:
typespace.warning(build_func_name(n) + " needs typing", n)
return s
def visit_cls(n):
name = n.name
parents = [c.gen_js(0) for c in n.parents]
s = "class " + name + " "
if len(parents) > 0:
s += "extends "
for i, p in enumerate(parents):
if i > 0: s += ","
s += p
s += " {\n"
for c in n:
if type(c) == VarDeclNode:
s += c.gen_js(0) + ";\n"
else:
s += " " + function_sig(c) + "\n"
s += "}"
return s
#only visit top-level classes, for now
s = "EXPORT \"%s\"\n" % glob.g_file
for c in result:
if type(c) == ClassNode:
s += visit_cls(c) + "\n"
elif type(c) == FunctionNode:
s += "function " + function_sig(c) + "\n"
elif type(c) == AssignNode and "." not in c[0].gen_js(0) and "[" not in c[0].gen_js(0):
if c.type != None and type(c.type) != UnknownTypeNode:
s += c.type.get_type_str() + " "
else:
s += "Object "
if glob.g_warn_types:
typespace.warning("type missing for global " + c[0].gen_js(0), c)
s += c[0].gen_js(0)+"\n";
elif type(c) == VarDeclNode:
s += "global "
if c.type != None and type(c.type) != UnknownTypeNode:
s += c.type.get_type_str() + " "
else:
s += "Object "
if glob.g_warn_types:
typespace.warning("type missing for global " + c.val, c)
s += c.val+"\n";
s += "\n"
#traverse(result, ClassNode, visit_cls)
return s
_the_typespace = None
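#expand_harmony_class() lowers an ES6-style class into a call to the runtime
#helper _ESClass; roughly (hedged sketch of the emitted shape):
#  class Foo extends Bar { f() {} }
#becomes
#  var Foo = _ESClass("Foo", Bar, [function Foo() {...}, function f() {}]);
#with getters/setters/statics wrapped via _ESClass.get/.set/.static.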
def expand_harmony_class(result, cls, parent):
global _the_typespace
arglist = ExprListNode([])
methodlist = ArrayLitNode(ExprListNode([]))
arglist.add(StrLitNode('"'+cls.name+'"'))
if len(cls.parents) > 0:
arglist.add(cls.parents[0])
arglist.add(methodlist)
#find constructor method
found_con = False
methods = []
con = None
for m in cls:
if type(m) not in [MethodNode, MethodGetter, MethodSetter]:
continue
if m.name == "constructor":
found_con = True
con = m
methods.append(m)
if not found_con:
#create a default constructor
if len(cls.parents) > 0:
slist = js_parse("""
$s.apply(this, arguments);
""", [cls.parents[0].gen_js(0)])
else:
slist = StatementList()
con = MethodNode("constructor", False);
con.add(ExprListNode([]))
con.add(slist)
methods.append(con)
cls_scope = {}
for m in methods:
if type(m) in [MethodNode, MethodGetter, MethodSetter]:
if m.name != "constructor":
cls_scope[m.name] = m
if m.name == "eval":
_the_typespace.error("Class methods can't be named eval", m);
callnode = None
if type(m.name) != str:
if type(m.name) == BinOpNode:
name = m.name[1]
if type(name) == IdentNode:
name = name.val
if type(name) == str:
fnode = FunctionNode(name)
else:
fnode = FunctionNode("(anonymous)")
fnode.is_anonymous = True
else:
fnode = FunctionNode("(anonymous)")
fnode.is_anonymous = True
callnode = FuncCallNode(BinOpNode("_ESClass", "symbol", "."))
name = m.name
if type(name) in (int, float):
name = NumLitNode(name)
callnode.add(ExprListNode([name, fnode]))
else:
fnode = FunctionNode(m.name if m.name != "constructor" else cls.name)
fnode[:] = []
for c in m:
fnode.add(c.copy())
if callnode != None:
fnode = callnode
if type(m) == MethodGetter:
callnode = FuncCallNode(BinOpNode("_ESClass", "get", "."))
callnode.add(fnode)
fnode = callnode
if type(m) == MethodSetter:
callnode = FuncCallNode(BinOpNode("_ESClass", "set", "."))
callnode.add(fnode)
fnode = callnode
if m.is_static:
callnode = FuncCallNode(BinOpNode("_ESClass", "static", "."))
callnode.add(fnode)
fnode = callnode
methodlist[0].add(fnode)
con = None
found_con = False
for m in methods:
if m.name == "constructor":
if found_con: raise SyntaxError("Cannot have multiple constructor methods")
if type(m) != MethodNode: raise SyntaxError("Constructors cannot be get/setters")
found_con = True
con = m
#parent = cls.parents[0] if len(cls.parents) != 0 else None
n = FuncCallNode("_ESClass")
n.add(arglist)
if not isinstance(parent, AssignNode) and not isinstance(parent, VarDeclNode):
n = VarDeclNode(n, local=True, name=cls.name);
return n
def expand_harmony_classes(result, typespace):
global _the_typespace
_the_typespace = typespace
check_constructor_return(result, typespace)
expand_harmony_super(result, typespace)
def visit(n):
n2 = expand_harmony_class(typespace, n, n.parent)
n.parent.replace(n, n2)
traverse(result, ClassNode, visit)
flatten_statementlists(result, typespace)
def check_constructor_return(result, typespace):
def check_constructor(n):
flatten_statementlists(n, typespace);
def visit4(n2):
if isinstance(n2, ReturnNode): #type(n2) == ReturnNode:
typespace.warning("Detected return in a constructor", n2);
else:
for n3 in n2:
if not isinstance(n3, FunctionNode):
visit4(n3)
visit4(n)
def visit(n):
for n2 in n:
if type(n2) == MethodNode and n2.name == "constructor":
check_constructor(n2)
traverse(result, ClassNode, visit)
def check_constructor_super(result, typespace):
def check_constructor(n):
flatten_statementlists(n, typespace);
#I hate how python does closures
has_super = [False]
def visit2(n2):
if not has_super[0] and type(n2[0]) == BinOpNode and type(n2[0][0]) == IdentNode and n2[0][0].val == "this":
typespace.error("Can't assign to this before calling super() in constructor", n2)
#note that we allow super inside of control blocks,
#we don't check if all branches leads to its invocation
def visit3(n2):
if n2[0].gen_js(0).strip() == "super":
has_super[0] = True
if n2.parent != n[1]:
#sys.stderr.write(repr(n2.parent) + "\n" + repr(n2.parent.get_line_str()) + "\n")
typespace.warning("Super inside a control block", n2)
for n2 in n[1]:
traverse(n2, FuncCallNode, visit3)
traverse(n2, AssignNode, visit2)
def visit(n):
if n.parents is None or len(n.parents) == 0:
return
for n2 in n:
if type(n2) == MethodNode and n2.name == "constructor":
check_constructor(n2)
traverse(result, ClassNode, visit)
def add_class_list(typespace, n):
name = n.name
#n2 = n.parent
#while n2 is not None and not (type(n2) == StatementList or isinstance(n2, FunctionNode)):
# n2 = n2.parent
n2 = n.parent
ok = type(n2) == StatementList or isinstance(n2, FunctionNode)
if not ok:
if type(n2) == VarDeclNode:
name = n2.val
elif type(n2) == AssignNode:
name = n2[0].gen_js(0).strip()
n2 = js_parse("_ESClass.register($s)", name)[0]
return n2
def create_class_list(result, typespace):
#check_constructor_super(result, typespace)
#check_constructor_return(result, typespace)
if not glob.g_register_classes:
return
global _the_typespace
_the_typespace = typespace
def visit(n):
insert_after(n, add_class_list(typespace, n))
#n.parent.insert(n.parent.index(n)+1, )
flatten_statementlists(result, typespace)
traverse(result, ClassNode, visit)
flatten_statementlists(result, typespace)
def get_parent_statementlist(node):
ln = node
pn = node.parent
while pn is not None and type(pn) not in [StatementList, FunctionNode]:
ln = pn
pn = pn.parent
return [pn, ln]
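#get_parent_statementlist() walks upward to the enclosing StatementList (or
#function) and returns [parent, child-on-the-path], which is exactly what
#insert_after() below needs to splice a new statement in after `node`.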
def insert_after(n, n2):
pn, ln = get_parent_statementlist(n)
pn.insert(pn.index(ln)+1, n2)
def expand_harmony_super(result, typespace):
check_constructor_super(result, typespace)
global _the_typespace
_the_typespace = typespace
flatten_statementlists(result, typespace)
def repl_super(cls, method, base, gets, sets, methods):
def static_visit(n):
if n.val != "super":
return
#print("found static super!", base.val)
if isinstance(n.parent, FuncCallNode):
n.parent[1].prepend("this")
n.parent.replace(n, BinOpNode(base.copy(), "call", "."))
elif isinstance(n.parent, BinOpNode) and n.parent.op == "." and isinstance(n.parent[1], FuncCallNode):
n3 = n.parent[1].copy()
n3[1].prepend("this")
n4 = BinOpNode(n3[0], "call", ".")
n3.replace(n3[0], n4)
#if type(base) == BinOpNode:
# n2 = js_parse("$n", [base, n3], start_node=BinOpNode)
#else:
# n2 = js_parse("$s", [base.val, n3], start_node=BinOpNode)
n2 = base.copy()
n.parent.replace(n, n2)
n.parent.replace(n.parent[1], n3)
else:
n.parent.replace(n, base.copy())
def visit(n):
if n.val != "super":
return
#print("found super!", base.val)
if isinstance(n.parent, FuncCallNode):
n.parent[1].prepend("this")
n.parent.replace(n, BinOpNode(base.copy(), "call", "."))
elif isinstance(n.parent, BinOpNode) and n.parent.op == "." and isinstance(n.parent[1], FuncCallNode):
n3 = n.parent[1].copy()
n3[1].prepend("this")
n4 = BinOpNode(n3[0], "call", ".")
n3.replace(n3[0], n4)
if type(base) == BinOpNode:
n2 = js_parse("$n.prototype", [base, n3], start_node=BinOpNode)
else:
n2 = js_parse("$s.prototype", [base.val, n3], start_node=BinOpNode)
n.parent.replace(n, n2)
n.parent.replace(n.parent[1], n3)
elif isinstance(n.parent, BinOpNode) and n.parent.op == "." and isinstance(n.parent[1], IdentNode):
typespace.warning("Super property access!", n);
n2 = js_parse("__bind_super_prop(this, $s, $s, '$s')", [cls.name, base.val, n.parent[1].val], start_node=FuncCallNode)
n.parent.parent.replace(n.parent, n2)
if method.is_static:
traverse(method, IdentNode, static_visit);
else:
traverse(method, IdentNode, visit);
def visit(node):
def has_super(node):
if type(node) == IdentNode and node.val == "super":
return True
ret = False
for c in node:
ret = ret or has_super(c)
return ret
if not has_super(node):
return
gets = {}
sets = {}
methods = {}
for c in node:
if isinstance(c, MethodGetter):
gets[c.name] = c
elif isinstance(c, MethodSetter):
sets[c.name] = c
elif isinstance(c, MethodNode):
methods[c.name] = c
if len(node.parents) > 1:
typespace.error("Super not allowed in classes with multiple inheritance", node)
elif len(node.parents) == 0:
print("----------------->", has_super(node), node.get_line_str())
typespace.error("Class " + str(node.name) + " has no parent", node)
for c in node:
repl_super(node, c, node.parents[0], gets, sets, methods)
traverse(result, ClassNode, visit)
flatten_statementlists(result, typespace)
def expand_requirejs_class(typespace, cls):
node = FunctionNode(cls.name, 0)
params = ExprListNode([])
slist = StatementList()
vars = [];
cls_scope = {}
#properties
for c in cls:
if type(c) == VarDeclNode:
cls_scope[c.val] = c;
cs = c[2:]
c.children = c.children[:2]
vars.append(c)
for c2 in cs:
cls_scope[c2.val] = c2;
vars.append(c2)
methods = []
for c in cls:
if type(c) in [MethodNode, MethodGetter, MethodSetter]:
if c.name != "constructor":
cls_scope[c.name] = c
methods.append(c)
if glob.g_validate_classes:
validate_class_this_refs(typespace, cls, cls_scope)
#find constructor method
found_con = False
con = None
for m in methods:
if m.name == "constructor":
if found_con: raise SyntaxError("Cannot have multiple constructor methods")
if type(m) != MethodNode: raise SyntaxError("Constructors cannot be get/setters")
found_con = True
params = m[0]
slist = m[1]
con = m
parent = cls.parents[0] if len(cls.parents) != 0 else None
if found_con == False:
#build a default constructor
m = MethodNode("constructor")
print("generating default constructor...");
params = ExprListNode([])
slist = StatementList()
m.add(params)
m.add(slist)
con = m
vars.reverse();
for c in vars:
val = c[0]
if type(val) == ExprNode and len(val) == 0:
#val = IdentNode("undefined");
continue;
a = AssignNode(BinOpNode("this", c.val, "."), val)
slist.prepend(a)
#do getters/setters
gets = {}
sets = {}
props = set()
for m in methods:
if m.name == "constructor": continue
if type(m) == MethodGetter:
gets[m.name] = m
props.add(m.name)
if type(m) == MethodSetter:
sets[m.name] = m
props.add(m.name)
def to_exprfunc(method):
f = FunctionNode("(anonymous)", 0)
f.is_anonymous = True
f.children = method.children
for c in f.children:
c.parent = f
f.type = method.type
f.line = method.line
f.lexpos = method.lexpos
return f
def gen_prop_define(prop, gets, sets, flags=[]):
#since this is called from *within* the parser, we
#can't use js_parse().
name_expr = BinOpNode(IdentNode("Object"), IdentNode("defineProperty"), ".");
fcall = FuncCallNode(name_expr)
exprlist = ExprListNode([])
fcall.add(exprlist)
params = ObjLitNode()
        if prop in gets:
            an = AssignNode(IdentNode("get"), to_exprfunc(gets[prop]))
            params.add(an)
        if prop in sets:
            an = AssignNode(IdentNode("set"), to_exprfunc(sets[prop]))
            params.add(an)
exprlist.add(IdentNode("this"))
exprlist.add(StrLitNode('"%s"'%prop))
exprlist.add(params)
return fcall;
def gen_method(cls, m):
f = FunctionNode(m.name)
f.children = m.children
f.name = "(anonymous)"
f.is_anonymous = True
for c in f.children:
c.parent = f
if not m.is_static:
an = AssignNode(IdentNode(m.name), f)
f = an
else:
f2 = FuncCallNode(IdentNode("util.static_method"))
f2.add(f)
an = AssignNode(IdentNode(m.name), f2)
f = an
return f
for p in props:
n = gen_prop_define(p, gets, sets)
slist.prepend(n)
if found_con == False:
#call parents hackishly
lst = list(cls.parents)
lst.reverse()
for p in lst:
if type(p) == str: p = IdentNode(p)
bn = BinOpNode(p, "apply", ".")
args = ExprListNode([IdentNode("this"), IdentNode("arguments")])
fn = FuncCallNode(bn)
fn.add(args)
slist.prepend(fn)
node.add(params)
node.add(slist)
node.name = "(anonymous)"
node.is_anonymous = True
an = AssignNode(BinOpNode("exports", cls.name, "."), node)
vn = VarDeclNode(an, local=True)
vn.val = cls.name
node = vn
#add stuff outside of the constructor function
slist = StatementList()
slist.add(node)
node = slist
proto = RJSObjLitNode();
if len(cls.parents) != 0:
#for requirejs, just do single inheritance
if len(cls.parents) > 1:
typespace.error("The type system we use for RequireJS doesn't support multiple inheritance", cls)
p = cls.parents[0]
fn = FuncCallNode(IdentNode("util.inherit"))
fn.add(ExprListNode([IdentNode(cls.name), p, proto]))
slist.add(AssignNode(BinOpNode(cls.name, "prototype", "."), fn))
else:
fn = FuncCallNode(IdentNode("util.init_prototype"))
fn.add(ExprListNode([IdentNode(cls.name), proto]))
slist.add(AssignNode(BinOpNode(cls.name, "prototype", "."), fn))
#generate methods
for m in cls:
if type(m) != MethodNode: continue
if m.name == "constructor": continue
n = gen_method(cls, m)
proto.add(n)
return node
def expand_requirejs_classes(result, typespace):
def visit(n):
n.parent.replace(n, expand_requirejs_class(typespace, n))
traverse(result, ClassNode, visit)
flatten_statementlists(result, typespace)
class VarBinding:
    def __init__(self, node, name, type):
        self.node = node
        self.name = name
        self.type = type
        self.types = [] #list of inferred types; the container methods below use it
    def copy(self):
        ret = VarBinding(self.node, self.name, self.type)
        ret.types = list(self.types)
        return ret
    def add(self, type):
        self.types.append(type)
    def remove(self, type):
        self.types.remove(type)
    def __getitem__(self, i):
        return self.types[i]
    def __setitem__(self, i, v):
        self.types[i] = v
    def __len__(self):
        return len(self.types)
class NodeScope:
def __init__(self, parent=None):
self.scopestack = []
self.scope = {}
self.childscopes = []
if parent != None:
parent.childscopes.append(self)
#for k in self.parent:
# self[k] = self.parent[k].copy()
self.parent = parent
def __str__(self):
return str(self.scope.keys())
def __repr__(self):
return str(self)
def push(self):
self.scopestack.append(self.scope)
self.scope = dict(self.scope)
if hasattr(glob, "g_debug_typeinfer") and glob.g_debug_typeinfer:
print("===pushing...===")
#for k in self.scope:
# self.scope[k] = self.scope[k].copy()
def pop(self):
if hasattr(glob, "g_debug_typeinfer") and glob.g_debug_typeinfer:
print("===popping...===")
d = self.scope
self.scope = self.scopestack.pop(-1)
return d
def __getitem__(self, item):
return self.scope[item]
def __setitem__(self, item, val):
self.scope[item] = val
def __contains__(self, item):
return item in self.scope
def __delitem__(self, item):
del self.scope[item]
def __len__(self):
return len(self.scope)
def __iter__(self):
return iter(self.scope)
def keys(self):
return self.scope.keys()
def values(self):
return self.scope.values()
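#NodeVisit dispatches on the node's class name: a subclass implements a
#method named after the AST node type (e.g. FunctionNode) taking
#(node, scope, traverse, tlevel); node types without a handler are simply
#recursed into, unless they are listed in required_nodes.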
class NodeVisit:
def __init__(self):
pass
def traverse(self, node, scope=None, tlevel=0):
if scope == None and tlevel > 0:
raise RuntimeError("NodeVisit.traverse called without scope")
if scope == None:
scope = NodeScope()
typestr = type(node).__name__
if not hasattr(self, typestr) and typestr in self.required_nodes:
raise RuntimeError("Unimplemented node visit for node type %s", typestr)
if not hasattr(self, typestr):
for c in node.children:
self.traverse(c, scope, tlevel)
else:
getattr(self, typestr)(node, scope, self.traverse, tlevel)
def tab2(t):
s = ""
for i in range(t):
s += " "
return s
class RequireJSWriter (NodeVisit):
def __init__(self):
self.required_nodes = []
NodeVisit.__init__(self)
self.buf = ""
def o(self, s):
self.buf += str(s)
def traverse(self, node, scope=None, tlevel=-1):
return NodeVisit.traverse(self, node, scope, tlevel)
def endstatem(self, node):
sb = self.buf.strip()
ret = not sb.endswith("\n")
ret = ret and (not sb.endswith("}") or type(node) in [AssignNode, BinOpNode])
return ret
def IdentNode(self, node, scope, t, tlevel):
self.o(node.val)
def NumLitNode(self, node, scope, t, tlevel):
self.o(node.val)
def StatementList(self, node, scope, t, tlevel):
t1 = tab2(tlevel)
t2 = tab2(tlevel+1)
for c in node:
self.o(t1)
t(c, scope, tlevel+1)
if self.endstatem(c):
self.o(";");
self.o("\n");
def ForLoopNode(self, node, scope, t, tlevel):
t1 = tab2(tlevel)
self.o("for (")
t(node.children[0], scope, tlevel)
self.o(") {\n")
t(node.children[1], scope, tlevel+1)
self.o(t1+"}\n");
def ForInNode(self, node, scope, t, tlevel):
t(node[0], scope, tlevel)
self.o(" in ")
self.o(node[1])
def ForCNode(self, node, scope, t, tlevel):
t(node[0], scope, tlevel)
self.o("; ")
t(node[1], scope, tlevel)
self.o("; ")
t(node[2], scope, tlevel)
def IfNode(self, node, scope, t, tlevel):
t1 = tab2(tlevel)
self.o("if (")
t(node[0], scope, tlevel)
self.o(") {\n")
t(node[1], scope, tlevel+1)
self.o(t1+"}\n");
def ElseNode(self, node, scope, t, tlevel):
t1 = tab2(tlevel)
if self.buf.endswith("\n"):
self.buf = self.buf[:-2]
self.o(" else ")
if type(node[0]) == StatementList:
self.o("{\n")
t(node[0], scope, tlevel+1);
self.o(t1+"}\n");
else:
t(node[0], scope, tlevel)
def BinOpNode(self, node, scope, t, tlevel):
t(node[0], scope, tlevel)
if node.op in ["in", "instanceof", "&&", "||", "<", ">", "<=", ">=", "==", "!=", "===", "!=="]:
self.o(" %s "%node.op)
else:
self.o(node.op)
t(node[1], scope, tlevel)
def NegateNode(self, node, scope, t, tlevel):
self.o("-")
t(node[0], scope, tlevel)
def AssignNode(self, node, scope, t, tlevel):
t(node[0], scope, tlevel)
self.o(" %s "%node.mode)
t(node[1], scope, tlevel)
def WhileNode(self, node, scope, t, tlevel):
t1 = tab2(tlevel)
self.o("while (")
t(node[0], scope, tlevel)
self.o(") {\n")
t(node[1], scope, tlevel+1)
self.o(t1+"}\n")
def FunctionNode(self, node, scope, t, tlevel):
self.o("function ")
if not node.is_anonymous:
self.o(node.name)
self.o("(")
t(node[0], scope, tlevel)
self.o(") {\n")
t1 = tab2(tlevel+1)
for c in node[1:]:
self.o(t1)
t(c, scope, tlevel+1)
if self.endstatem(node):
self.o(";")
self.o("\n")
self.o(tab2(tlevel)+"}\n")
def ExprListNode(self, node, scope, t, tlevel):
for i, c in enumerate(node):
if i > 0:
self.o(", ")
t(c, scope, tlevel)
def VarDeclNode(self, node, scope, t, tlevel):
if "global" in node.modifiers: return
if "local" in node.modifiers:
self.o("var ")
self.o(node.val)
if len(node[0].gen_js(0).strip()) > 0: #type(node[0]) not in [ExprListNode, ExprListNode] or len(node[0]) > 0:
self.o(" = ")
t(node[0], scope, tlevel)
def BreakNode(self, node, scope, t, tlevel):
self.o("break");
def YieldNode(self, node, scope, t, tlevel):
self.o("yield");
if len(node) != 0:
self.o(" ")
t(node[0], scope, tlevel)
def ContinueNode(self, node, scope, t, tlevel):
self.o("continue");
def ReturnNode(self, node, scope, t, tlevel):
self.o("return");
if len(node) != 0:
self.o(" ")
t(node[0], scope, tlevel)
def FuncCallNode(self, node, scope, t, tlevel):
t(node[0], scope, tlevel)
self.o("(")
t(node[1], scope, tlevel)
self.o(")")
def StrLitNode(self, node, scope, t, tlevel):
self.o(node.val)
def ArrayRefNode(self, node, scope, t, tlevel):
t(node[0], scope, tlevel)
self.o("[")
t(node[1], scope, tlevel)
self.o("]")
def ClassNode(self, node, scope, t, tlevel):
#find constructor
cls = None
for m in node:
if type(m) == MethodNode and m.name == "constructor":
cls = m
break
if cls == None:
typespace.error("Need constructor for class", node)
t1 = tab2(tlevel)
self.o("var %s = exports.%s = function(" % (cls.name, cls.name));
t(cls[0], scope, tlevel)
self.o(") {\n")
t(cls[1], scope, tlevel)
self.o(t1+"}\n")
pass
def MethodNode(self, node, scope, t, tlevel):
self.o(node.name)
def MethodGetter(self, node, scope, t, tlevel):
pass
def MethodSetter(self, node, scope, t, tlevel):
pass
def ExprNode(self, node, scope, t, tlevel):
pass
def handle_nodescope_pre(n, scope):
if type(n) in [IdentNode, VarDeclNode]:
"""
p = n.parent
add = False
while p not in [None, 0]:
if type(p) in [FunctionNode, ForLoopNode, DoWhileNode, WhileNode,
WithNode, CaseNode, DefaultCaseNode, IfNode, ElseNode,
TryNode, CatchNode]:
break
if type(p) in [AssignNode, VarDeclNode]:
add = True
break
p = p.parent
#if add and n.final_type != None:
# scope[n.val] = VarBinding(n, n.final_type, n.val)
#"""
pass
elif type(n) in [FunctionNode, ForLoopNode, DoWhileNode,
TryNode, CatchNode, SwitchNode, WhileNode,
IfNode, ElseNode]:
if type(n) == FunctionNode:
if n.parent == None or type(n.parent) in [StatementList, FunctionNode]:
scope[n.name] = n #VarBinding(n, n.name, n.final_type)
scope["this"] = n
scope.push()
elif type(n) == BinOpNode and n.op == ".":
scope.push()
def handle_nodescope_post(n, scope):
if type(n) in [FunctionNode, ForLoopNode, DoWhileNode, WhileNode,
WithNode, CaseNode, DefaultCaseNode, IfNode, ElseNode,
TryNode, CatchNode]:
scope.pop()
elif type(n) == BinOpNode and n.op == ".":
scope.pop()
def templates_match(n1, n2):
if n1 == None and n2 == None: return True
if n1 == None or n2 == None: return False
return len(n1[0]) == len(n2[0])
def types_match(n1, n2, typespace):
if type(n1) == TypeRefNode and n1.template == None:
n1 = typespace.get_type(n1.type)
if type(n2) == TypeRefNode and n2.template == None:
n2 = typespace.get_type(n2.type)
if type(n1) == IdentNode:
n1 = typespace.get_type(n1.val)
if type(n2) == IdentNode:
n2 = typespace.get_type(n2.val)
if type(n1) == BuiltinTypeNode and n1.type in typespace.functions:
n1 = typespace.get_type(n1.type)
if type(n2) == BuiltinTypeNode and n2.type in typespace.functions:
n2 = typespace.get_type(n2.type)
if type(n1) != type(n2):
if type(n1) == BuiltinTypeNode and type(n2) == IdentNode:
if n1.type == "String" and n2.val == "String": return True
if type(n2) == BuiltinTypeNode and type(n1) == IdentNode:
if n2.type == "String" and n1.val == "String": return True
if type(n1) == TemplateNode and type(n2) == FunctionNode:
if type(n1.name_expr) == IdentNode and n1.name_expr.val == n2.val:
return templates_match(n1, n2.template)
if type(n2) == TemplateNode and type(n1) == FunctionNode:
if type(n2.name_expr) == IdentNode and n2.name_expr.val == n1.val:
return templates_match(n2, n1.template)
return False
if type(n1) == BuiltinTypeNode:
return n1.compatible(n2)
elif type(n1) == VoidTypeNode: return True
elif type(n1) == FunctionNode:
return n1 == n2
def handle_scope(n, scope):
if type(n) in [IdentNode, VarDeclNode]:
scope[n.val] = n
elif type(n) in [FunctionNode, ForLoopNode, DoWhileNode,
TryNode, CatchNode, SwitchNode, WhileNode,
IfNode, ElseNode]:
scope = dict(scope)
if type(n) == FunctionNode:
scope[n.name] = n
elif type(n) == BinOpNode and n.op == ".":
scope = dict(scope)
return scope
def flatten_statementlists(node, typespace):
if node == None:
print("None passed to flatten_statementlists")
return
def visit_slists(n):
#don't flatten blocks with forced {}
if n.force_block:
return
if not null_node(n.parent) and type(n.parent) in [FunctionNode, StatementList]:
p = n.parent
i = p.index(n)
p.remove(n)
for c in n:
p.insert(i, c)
i += 1
traverse(node, StatementList, visit_slists, copy_children=True)
"""
if node.gen_js(0) != c:
if typespace != None:
typespace.error("Internal parser error in flatten_statementlists", node)
return None
#"""
return node
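#Sketch of what visit_slists does: a StatementList nested directly under a
#FunctionNode or another StatementList is spliced into its parent, so e.g.
# function f() { { a(); b(); } } effectively becomes function f() { a(); b(); }
#blocks with force_block set keep their explicit braces.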
def flatten_var_decls_exprlists(node, typespace):
def visit(start):
def visit1(n):
if type(n[0]) != ExprListNode:
return
if len(n[0]) < 2 or type(n[0][1]) != AssignNode:
return
an = n[0][1]
if type(an[0]) != IdentNode:
return
vn = VarDeclNode(an[1], name=an[0].val)
vn.val = an[0].val
vn.add(UnknownTypeNode())
an.parent.remove(an)
start.add(vn)
visit1(vn)
#print(n)
visit1(start)
traverse(node, VarDeclNode, visit, False)
def kill_bad_globals(node, typespace):
treatAssignsAsLets = [0]
def recurse(n, scope, tlevel=0):
def descend(n2, start=0):
for c in n2.children[start:]:
recurse(c, scope, tlevel)
if isinstance(n, ExpandNode):
ok = "var" in n.modifiers
ok = ok or "let" in n.modifiers
ok = ok or "const" in n.modifiers
ok = ok or "local" in n.modifiers
if not ok:
return
for c in n:
if isinstance(c, IdentNode):
c = c.val
scope[c] = n
elif isinstance(n, ForCNode):
scope = dict(scope)
treatAssignsAsLets[0] = True
recurse(n[0], scope, tlevel)
treatAssignsAsLets[0] = False
#descend(n, 1)
elif isinstance(n, FunctionNode):
scope = dict(scope)
args = n.get_args()
for i, a in enumerate(args):
scope[a] = n[0][i];
descend(n, 1);
elif type(n) == BinOpNode and n.op == ".":
scope = dict(scope) #not sure what to do here
descend(n)
elif type(n) == VarDeclNode:
scope[n.val] = n;
descend(n[0])
if len(n) > 2:
descend(n, 2);
elif type(n) == AssignNode:
#deal with ambiguous grammar with expression lists
if treatAssignsAsLets[0]:
scope[n[0].gen_js(0).strip()] = n[1]
return
if type(n.parent) == ObjLitNode:
descend(n)
return
#if n[0].gen_js(0).replace(";", "").strip() == "mode":
# raise "sd"
if type(n[0]) in [IdentNode, VarDeclNode] and n[0].val not in scope:
ok = n.parent is not None and n.parent.parent is not None and n.parent.parent.parent is not None
ok = ok and (type(n.parent) == ExprListNode and type(n.parent.parent) == BinOpNode and type(n.parent.parent.parent) == VarDeclNode)
print(scope.keys())
if not ok:
typespace.error("Undeclared global %s"%n[0].val, n[0])
descend(n);
else:
descend(n);
sc = {}
recurse(node, sc, 0);
from js_cc import js_parse
from js_ast_match import ast_match
def add_func_opt_code(result, typespace):
def visit_func(node):
if len(node) < 2:
#should we still insert opt initialization code
#in empty functions? like if people want to write
#evil, hackish code like function(i=do_something()) {},
#which would turn into function(i) { if (i == undefined) do_something();}
#
#yeek.
#print("Warning: function defined without any statements")
node.add(StatementList())
#ensure we have a proper statement list
if type(node[1]) != StatementList:
sl = StatementList()
sl.add(node[1])
node.replace(node[1], sl)
was_opt = False
codelist = []
for p in node[0]:
if type(p) == IdentNode:
is_opt = False
else:
is_opt = p[0].gen_js(0).strip() != "";
#XXX okay, strange, browsers allow this, I thought spec didn't?
#unless that was removed in final draft. . .
#if not is_opt and was_opt:
# typespace.error("Cannot have required parameter after an optional one", node)
name = p.val
if is_opt:
was_opt = True
code = js_parse("""
if ($s1 === undefined) {
$s1 = $n2;
}
""", (name, p[0]));
codelist.append(code)
p.parent.replace(p, IdentNode(p.val))
codelist.reverse()
for code in codelist:
node[1].prepend(code)
traverse(result, FunctionNode, visit_func)
flatten_statementlists(result, typespace)
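#Sketch of the rewrite performed above for default parameters:
# function f(a, b=2) {...} becomes
# function f(a, b) { if (b === undefined) { b = 2; } ... }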
typespace = None
def traverse_files(ntype, func, use_depth=False, exclude=[], copy_children=False):
for n in typespace.filenodes:
traverse(n, ntype, func, use_depth, exclude, copy_children)
def get_arg_name(arg):
if type(arg) in [IdentNode, VarDeclNode]:
return arg.val
else:
for c in arg.children:
ret = get_arg_name(c)
if type(ret) == str: return ret
return None
def build_classes(nfiles):
global typespace
def func_visit(n):
if n.is_anonymous: return
if n.name in typespace.func_excludes: return
def find_this(n2):
if n2.op == "." and type(n2[0]) == IdentNode and n2[0].val == "this":
n.class_type = "class"
if n.class_type == "func":
traverse(n, BinOpNode, find_this, exclude={FunctionNode});
p = n.parent
while not null_node(p) and type(p) != FunctionNode:
p = p.parent
if type(p) == FunctionNode:
if n.name in p.functions:
msg = "Nested function %s in %s already exists" % (n.name, p.name)
typespace.error(msg, n)
p.functions[n.name] = n
else:
if n.name in typespace.functions:
msg = "Function %s already exists" % (n.name)
n2 = typespace.functions[n.name]
msg += "\n\tPrevious definition at %s:%d" % (n2.file, n2.line)
typespace.error(msg, n)
typespace.functions[n.name] = n
for i, c in enumerate(n[0].children):
n.args[c.val] = c
n.arg_is[c.val] = i
def exprfunc_visit(n):
if not n.is_anonymous: return
#figure out if we're a method function
#find parent function
p = n.parent
while not null_node(p) and type(p) != FunctionNode:
p = p.parent
p1 = p
#find parent assignment
path = []
p = n.parent
path.append(p)
while not null_node(p) and type(p) not in [AssignNode, FunctionNode]:
p = p.parent
path.append(p)
path.reverse()
if len(path) == 0:
return
if type(p) != AssignNode:
return
cs = p.children
parent = None
if type(p1) == FunctionNode:
parent = p1
is_valid = type(cs[0]) == BinOpNode\
and type(cs[0][0]) == IdentNode\
and type(cs[0][1]) == IdentNode\
and cs[0][0].val == "this"
else:
c = cs[0].gen_js(0)
i = c.find(".prototype")
if i < 0:
is_valid = False
else:
parent = c[:c.find(".")]
if parent not in typespace.functions:
typespace.error("Could not find class function %s"%parent, n)
parent = typespace.functions[parent]
c = c[i:]
is_valid = c.count(".") == 2
if is_valid:
if not func_is_class(parent):
parent.class_type = "class"
n.class_type = "method"
c = cs[0].gen_js(0)
c = c[c.rfind(".")+1:]
if type(parent) == StatementList:
typespace.error("yeek", n)
n.path = parent.name + "." + c
n.name = c
parent.members[n.name] = n
i = 0
def new_visit(n):
if type(n[0]) == IdentNode:
if n[0].val not in typespace.functions:
typespace.error("Could not find type constructor %s"%n[0].val, n)
f = typespace.functions[n[0].val]
if not func_is_class(f):
f.class_type = "class"
traverse_files(FunctionNode, func_visit)
traverse_files(KeywordNew, new_visit)
traverse_files(FunctionNode, exprfunc_visit)
def build_members(node):
if not func_is_class(node): return
def visit(n):
c = n[0].gen_js(0)
if c.startswith("this.") and c.count(".") == 1 and c.count("(") == 0 and c.count("[") == 0:
c = c[5:]
#print(node.name + "." + c)
if c in node.members and type(node.members[c]) == FunctionNode:
if node.members[c] != n[1]:
typespace.error("Overriding method functions is not allowed", n)
elif c not in node.members:
if n.type != None: n[1].type = n.type
node.members[c] = n[1]
for c in node[1:]:
traverse(c, AssignNode, visit, exclude=[FunctionNode])
def global_prototype_assignments(node):
c = node[0].gen_js(0)
if not ".prototype" in c: return
if c.strip().endswith(".prototype") and c.count(".")==1 and c.count("[")==0 and c.count(")")==0:
n = c[:c.find(".prototype")]
if n not in typespace.functions:
typespace.error("Could not find function %s"%n, node)
n = typespace.functions[n]
n.members["prototype"] = node[1]
elif c.count(".") == 2 and c.count("[") == 0 and c.count("(") == 0:
n = c[:c.find(".prototype")]
c = c[c.rfind(".prototype.")+len(".prototype."):]
if n not in typespace.functions:
typespace.error("Could not find function %s"%n, node)
n = typespace.functions[n]
n.members[c] = node[1]
traverse_files(AssignNode, global_prototype_assignments, exclude=[FunctionNode])
def add_logrecs(n):
if typespace.get_record(n) != None:
enlist = typespace.get_record(n)
for en in enlist:
#print(n.get_path(), en.arg in [a.val for a in n[0]])
en.func = n.get_path()
n.logrecs.append(en)
traverse_files(FunctionNode, add_logrecs)
print("\n")
traverse_files(FunctionNode, build_members)
def base_inherit(node):
parent = "Object"
if "__iterator__" in node.members:
parent = "CanIterate"
elif "next" in node.members and type(node.members["next"]) == FunctionNode:
parent = "Iterator"
parent = typespace.types[parent]
node.class_parent = parent
def resolve_inheritance(node):
#search for .prototype = calls at the global level, as well as calls to
#inherit()
if not func_is_class(node): return
#the easy case
if "prototype" in node.members:
n = node.members["prototype"]
if type(n) == ObjLitNode:
base_inherit(node, "Object")
else:
while type(n) == UnknownTypeNode:
n = n[0]
n1 = n
n2 = js_parse("Object.create(obj.prototype);")[0]
#normalize
n1 = js_parse(n1.gen_js(0), start_node=BinOpNode)
if node_structures_match(n1, n2): #ast_match("Object.create($class.prototype);", n1, start_node=BinOpNode): # node_structures_match(n1, n2):
parent = n1[1][1][0][0].val
#print(node.name, parent)
if parent not in typespace.functions:
typespace.error("Unknown parent type %s"%parent, n1)
parent = typespace.functions[parent]
node.class_parent = parent
else:
typespace.error("Unknown parent code line: \"%s\""%n1.gen_js(1), n1)
traverse_files(FunctionNode, resolve_inheritance)
def resolve_inheritance_inherit(node):
if node[0].gen_js(0) != "inherit": return
js = node[0].gen_js(0)
sn = js_parse("inherit(obj1, obj2)")[0]
print(node, sn)
ret = ast_match("inherit($class, $class);", node.gen_js(0));
if not ret: #node_structures_match(node, sn):
typespace.error("Could not parse inherit line", node)
n = node[1][0]
parent = node[1][1]
tname = n.get_type_name()
if tname not in typespace.functions:
typespace.error("Could not find function %s"%tname, node)
ptname = parent.get_type_name()
if ptname not in typespace.functions:
typespace.error("Could not find function %s"%ptname, node)
n = typespace.functions[tname]
parent = typespace.functions[ptname]
if n.class_parent != None:
typespace.error("Parent prototype for type %s is already set"%n.name, node)
if not func_is_class(n):
n.class_type = "class"
if not func_is_class(parent):
parent.class_type = "class"
n.class_parent = parent
traverse_files(FuncCallNode, resolve_inheritance_inherit)
def resolve_basic_class_types(node):
if not func_is_class(node): return
if node.class_parent != None: return
ntype = "Object"
base_inherit(node)
traverse_files(FunctionNode, resolve_basic_class_types)
def set_child_class_refs(node):
if not func_is_class(node): return
#if node.name in node.class_parent.child_classes:
# typespace.error("Duplicate child class names detected", node)
node.class_parent.child_classes[node.name] = node
def find_iter_iters(node):
if not func_is_class(node): return
if not ("next" in node.members and type(node.members["next"]) == FunctionNode):
return
"""
found_it = [False]
def find_stop_iter(n):
if n.val == "StopIteration":
found_it[0] = True
traverse(node, IdentNode, find_stop_iter)
"""
print(node.class_parent==None)
#print(node.name)
#traverse_files(FunctionNode, find_iter_iters)
def find_root_types(node):
if not func_is_class(node): return
root = node
while root.class_parent != None and root.class_parent.is_builtin == False:
root = root.class_parent
if not func_is_class(root):
root.class_types = "class"
root_types[root.name] = root
traverse_files(FunctionNode, set_child_class_refs)
root_types = {}
traverse_files(FunctionNode, find_root_types)
rts = list(root_types.keys())
rts.sort()
for r in rts:
if root_types[r].class_parent != None:
cname = root_types[r].class_parent.name
else:
cname = None
#print(r, root_types[r].class_type, cname)
def node_structures_match(n1, n2):
s1 = [n1]
s2 = [n2]
while len(s1) > 0 and len(s2) > 0:
n1 = s1.pop(-1)
n2 = s2.pop(-1)
if type(n1) != type(n2): return False
for c in n1.children:
s1.append(c)
for c in n2.children:
s2.append(c)
if len(s1) > 0 or len(s2) > 0: return False
return True
def common_parent(n1, n2):
p1 = n1
p2 = n2
lst1 = []
while p1 != None and p1.name != "Object" and p1 != typespace.functions["Object"]:
lst1.append(p1)
p1 = p1.class_parent
lst2 = []
while p2 != None and p2.name != "Object" and p2 != typespace.functions["Object"]:
lst2.append(p2)
p2 = p2.class_parent
for l in lst1:
if l in lst2:
return l
return None
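#Sketch: with class chains B -> A -> Object and C -> A -> Object,
#common_parent(B, C) yields A; hierarchies that only meet at Object
#(or not at all) yield None.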
def process_logrec():
global typespace
printlist = []
def return_rec_visit(n):
if len(n.logrecs) == 0: return #only do functions with records
t = odict()
argtypes = odict()
for en in n.logrecs:
if en.vtype == "r": continue
if en.arg not in t:
t[en.arg] = []
t[en.arg].append(en.type)
#print(t)
for k in t:
types = []
common = []
for a in t[k]:
if a not in typespace.functions:
f = typespace.empty_type()
else:
f = typespace.functions[a]
types.append(f)
tlen = len(types)
for t2 in types[:]:
if t2.name == "undefined":
types.remove(t2)
for j in range(tlen):
i1 = 0
i2 = 1
while i1 < len(types):
if i1 == 0: i2 = 1
else: i2 = 0
if len(types) == 1: break
c = common_parent(types[i1], types[i2])
while i2 < len(types):
if i2 != i1:
c = common_parent(types[i1], types[i2])
if c != None:
break
i2 += 1
if c != None:
nn1 = types[i1]
nn2 = types[i2]
types.remove(nn1)
types.remove(nn2)
types.insert(i1, c)
i1 += 1
if i2 >= len(types):
i2 = 0
argtypes[k] = types
s = n.get_path() + "("
for i, n2 in enumerate(n[0]):
k = n2.val
n.lrec_args[k] = []
if i > 0: s += ", "
if k in argtypes:
if k in typespace.functions:
cls = typespace.functions[k]
for j, v in enumerate(argtypes[k]):
if j > 0: s += "|"
s += v.name
n.lrec_args[k].append(v)
else:
n.lrec_args[k].append(typespace.empty_type())
s += "(unknown type)"
s += " " + k
s += ")"
if "()" not in s:
if "(unknown type)" not in s:
printlist.append(s)
else:
printlist.append("-" + n.get_path() + str([n3.val for n3 in n[0]]) + str(list(argtypes.keys())))
traverse_files(FunctionNode, return_rec_visit)
printlist.sort()
f = open("signatures.txt", "w")
f.write("\n============\n")
for l in printlist:
print(l)
f.write(l+"\n")
f.close()
#[print(l) for l in printlist]
def is_root(node):
return node.class_parent == None
def tab(t, tstr=" "):
s = ""
for i in range(t):
s += tstr
return s
def get_roots():
global typespace
roots = []
def print_tree(n, tlevel=0):
s = tab(tlevel) + n.name
print(s)
for c in n.child_classes.values():
print_tree(c, tlevel+1)
for c in typespace.functions.values():
if not func_is_class(c) or not is_root(c): continue
roots.append(c)
return roots
#we have two sources of type information: user-supplied annotations,
#and the type log. first we should validate the user type annotation,
#then we have to apply a series of rules to reduce the types.
class TypeSet:
def __init__(self, input=[]):
self.map = odict()
self.data = []
for i in input:
self.add(i)
def add(self, item):
h = item.__setval__()
if h in self.map: return
self.map[h] = len(self.data)
self.data.append(item)
def remove(self, item):
i = self.map[item.__setval__()]
data = self.data[i]
self.data.pop(i)
del self.map[item.__setval__()]
def __getitem__(self, item):
return self.data[item]
def __setitem__(self, idx, val):
if idx < 0 or idx >= len(self.data):
raise RuntimeError("Item out of bounds in TypeSet.__setitem__: len: %d, idx: %s" % (len(self.data), str(idx)))
d = self.data[idx]
del self.map[d.__setval__()]
self.map[val.__setval__()] = idx
self.data[idx] = val
def __iter__(self):
def iter():
for d in self.data:
yield d
return iter()
def __len__(self):
return len(self.data)
def join(self, b):
c = TypeSet(self)
for d in b:
c.add(d)
return c
def __add__(self, b):
return self.join(b)
def copy(self):
return TypeSet(self)
def __contains__(self, item):
return item.__setval__() in self.map
def __sub__(self, b):
c = self.copy()
for d in b:
if d in c:
c.remove(d)
return c
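#Minimal usage sketch (assumes items expose a __setval__() identity key):
# a = TypeSet([t1, t2]); b = TypeSet([t2])
# (a - b) then contains only t1; membership tests go through __setval__(),
# not object identity.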
def process_type_annotation():
global typespace
roots = get_roots()
for node in roots:
pass
def print_class_hierarchy():
global typespace
def print_tree(n, tlevel=0):
s = tab(tlevel) + n.name
print(s)
lst = list(n.child_classes.keys())
lst.sort();
for k in lst:
c = n.child_classes[k]
print_tree(c, tlevel+1)
roots = []
for k in typespace.functions:
c = typespace.functions[k]
if not func_is_class(c) or not is_root(c): continue
roots.append(k)
roots.sort()
for k in roots:
print_tree(typespace.functions[k])
def handle_dot_scope(n, scope):
return scope
def handle_scope_infer(n, scope):
if type(n) in [IdentNode, VarDeclNode]:
scope[n.val] = n
elif type(n) in [FunctionNode, ForLoopNode, DoWhileNode,
TryNode, CatchNode, SwitchNode, WhileNode,
IfNode, ElseNode]:
scope = NodeScope(scope)
if type(n) == FunctionNode:
scope[n.name] = n
elif type(n) == BinOpNode and n.op == ".":
scope = handle_dot_scope(n, scope)
return scope
def set_const_types():
global typespace
def visit_num(n):
if n.type != None: return
if type(n.val) == float: n.type = BuiltinTypeNode("float")
else: n.type = BuiltinTypeNode("int")
def visit_str(n):
if n.type != None: return
n.type = typespace.functions["String"]
def visit_reg(n):
if n.type != None: return
n.type = typespace.functions["String"]
traverse_files(NumLitNode, visit_num);
traverse_files(StrLitNode, visit_str);
traverse_files(RegExprNode, visit_reg);
def filter_binop_assigns():
global typespace
def visit_binop(n):
if n.op != "=": return
if type(n.parent) not in [StatementList, FunctionNode]: return
assign = AssignNode(n[0], n[1], "=")
assign.type = n.type
n.parent.replace(n, assign)
traverse_files(BinOpNode, visit_binop)
def infer_types(ts):
global typespace
typespace = ts
filter_binop_assigns()
build_classes(ts.filenodes)
process_logrec()
if glob.g_do_annote:
process_type_annotation()
if glob.g_print_classes:
print_class_hierarchy()
set_const_types()
def replace_instanceof(result, typespace):
def visiti(node):
name = glob.g_instanceof_func
#make sure we're not inside g_instanceof_func (__instance_of) itself.
p = node
while p != None and type(p) != FunctionNode:
p = p.parent
if p != None and type(p) == FunctionNode and p.name == name:
#sys.stderr.write("ignoring %s implementation in instaneof replacement\n"%name);
return
if node.op != "instanceof": return
fn = FuncCallNode(IdentNode(name))
params = ExprListNode([node[0], node[1]])
params.line = node.line; params.lexpos = node.lexpos
fn.add(params)
fn.line = node.line; fn.lexpos = node.lexpos;
node.parent.replace(node, fn);
traverse(result, BinOpNode, visiti)
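#Sketch, assuming glob.g_instanceof_func is "__instance_of":
# a instanceof B is rewritten to __instance_of(a, B)
#everywhere except inside the __instance_of implementation itself.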
#print("\n")
def process_docstrings(result, typespace):
dprop = glob.g_docstring_propname
vset = set()
def case1(node, dstr):
#simple case; the node's parent is a statementlist
if type(node.parent) != StatementList: return False
if node.is_anonymous: return False
node.remove(dstr)
n = BinOpNode(IdentNode(node.name), IdentNode(dprop), ".")
n = AssignNode(n, dstr)
node.parent.insert(node.parent.index(node)+1, n)
return True
#a = func() {}, where a is part of a statementlist
def case2(node, dstr):
def count_funcs(n2):
ret = 0
if type(n2) == FunctionNode:
ret += 1
if n2 == node: return ret
for c in n2:
ret += count_funcs(c)
return ret
#make sure we're part of a valid assignnode
n = node
lastn = n;
while n != None:
lastn = n
n = n.parent
if type(n) not in [BinOpNode, IdentNode, AssignNode]: break
if type(n) not in [StatementList, FunctionNode]: return False
if type(lastn) != AssignNode: return False
an = lastn;
if count_funcs(an) != 1: return False
dstr.parent.remove(dstr);
dn = dstr
node.parent.replace(node, dn);
n2 = js_parse(an.gen_js(0), start_node=AssignNode)
node.parent.replace(dn, node)
n2.replace(n2[0], BinOpNode(n2[0], IdentNode(dprop), "."))
an.parent.insert(an.parent.index(an)+1, n2)
return True
def case3(node, dstr): #static class methods
if node.parent == None: return
if type(node.parent.parent) != FuncCallNode: return
if (node.parent.parent[0].gen_js(0) != "define_static"): return
lastn = node
slist = node.parent
while slist != None and type(slist) != StatementList and not isinstance(slist, FunctionNode):
lastn = slist
slist = slist.parent
if slist == None: return
fc = node.parent
sname = fc[1]
sname = sname.gen_js(0).replace("'", "").replace('"', "")
#print(fc[0], "dsfsdf")
#sys.exit()
#return
n = js_parse("$n.$s.__doc__ = $n;", [fc[0], sname, dstr])
dstr.parent.remove(dstr)
slist.insert(slist.index(lastn)+1, n)
return True
cases = [case1, case2, case3]
def visit(node):
if node in vset:
return
vset.add(node)
if len(node) == 1: return
n = node[1]
while len(n) > 0 and type(n) == StatementList:
n = n[0]
if type(n) != StrLitNode: return
dstr = n
tfound = 0
for c in cases:
if c(node, dstr):
if len(node) == 1:
node.add(StatementList())
tfound = True
break
if not tfound:
p = node.parent
i = node.parent.index(node)
if node in node.parent:
node.parent.remove(node)
node.remove(dstr)
sys.stderr.write("%s(%i): Warning: could not statically compile docstring for function\n \"%s\"." % (node.file, node.line, node.name))
sys.stderr.write(" Docstring will be set at runtime instead\n\n");
bn = ExprListNode([node, dstr])
cn = FuncCallNode("define_docstring")
cn.add(bn)
p.insert(i, cn)
traverse(result, FunctionNode, visit);
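#Sketch, assuming glob.g_docstring_propname is "__doc__":
# function f() { "docstring"; ... } becomes
# function f() { ... }; f.__doc__ = "docstring";
#when a static rewrite isn't possible, define_docstring(f, "docstring")
#is emitted instead so the docstring is attached at runtime.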
valid_randstr = "abcdefghijklmnopqrstuvwxyz"
valid_randstr += valid_randstr.upper() + "0123456789_"
import time
from random import random, seed
_last_rs_time = time.time()
def randstr(n):
global _last_rs_time
if (time.time() - _last_rs_time > 0.25):
seed(time.time()*6)
_last_rs_time = time.time()
s = ""
for i in range(n):
s += valid_randstr[int(random()*len(valid_randstr)*0.99999)]
return s
def gen_useful_funcname(p2):
def is_ok(s):
if len(s) == 0: return False
for c in s:
if c not in valid_randstr: return False
return True
p2 = p2.parent
while p2 != None and type(p2) != AssignNode:
p2 = p2.parent
suffix = ""
if p2 != None: #okay, we belong to an anonymous function with no usable name
#find right-most usable identnode in the lvalue's ast tree
n2 = p2[0]
while len(n2) > 0:
s = n2.gen_js(0).replace("prototype.", "").replace(".", "_")
if is_ok(s):
suffix = s
break
n2 = n2[-1]
if not is_ok(suffix):
s = n2.gen_js(0)
if is_ok(s):
suffix = s
else:
suffix = randstr(4)
if len(suffix) == 0: suffix = randstr(2)
return suffix
def process_static_vars(result, typespace):
#if inside a class, returns class node
def inside_class(n):
if type(n) == ClassNode or (type(n) == FuncCallNode and n[0].gen_js(0) == "_ESClass"):
return n
if n.parent != None:
return inside_class(n.parent)
return False
def visit(node):
if "static" not in node.modifiers: return
inclass = inside_class(node)
#make sure we aren't carrying any child vardecl nodes
#(e.g var a, b, c, d) with us.
for c in list(node[2:]):
node.remove(c)
node.parent.insert(node.parent.index(node), c)
#helper function for generating (hopefully) unique suffixes
#from parent nodes
def is_ok(s):
if len(s) == 0: return False
for c in s:
if c not in valid_randstr: return False
return True
#we need to extract a useful name for the static
p = node.parent
while p != None and not isinstance(p, FunctionNode):
p = p.parent
if p == None:
return #we're already a static global variable
suffix = randstr(2)
if p.is_anonymous:
suffix = gen_useful_funcname(p)
else:
#see if we're a nested function. if so, build a chain of suffices.
suffix = p.name
p2 = p.parent
while p2 != None:
if isinstance(p2, FunctionNode):
suffix = gen_useful_funcname(p2) + "_" + suffix
p2 = p2.parent
name = "$" + node.val + "_" + suffix
scope = {}
scope[node.val] = name
def replace_var(n, scope):
if type(n) in [IdentNode, VarDeclNode] and n.val in scope:
n.val = scope[n.val]
if type(n) == BinOpNode and n.op == ".":
#don't traverse into the right side of . operators
replace_var(n[0], scope)
if type(n[1]) in [IdentNode, VarDeclNode]:
return
elif type(n[1]) == FuncCallNode: #need to fix this edge case: function calls operator precedence is messed up
replace_var(n[1][1:], scope)
return
replace_var(n[1], scope)
elif type(n) == FunctionNode:
#hrm, not sure how best to handle this one.
#avoid replacement in exprfunctions?
#well, for now, just convert them.
#don't convert arguments
scope = dict(scope)
for c in n[0]:
p = c
while len(p) > 0 and type(p) not in [IdentNode, VarDeclNode]:
p = p[0]
p = p.gen_js(0).strip();
scope[p] = p
for c in n.children[1:]:
replace_var(c, scope)
else:
for c in n:
replace_var(c, scope)
#find parent scope
p = node.parent
while p != None and not isinstance(p, FunctionNode):
p = p.parent
replace_var(p, scope)
func = p
#now find global scope, and insert
lastp = node
p = node.parent
#find parent function first, then find surrounding closure or global scope
for si in range(2):
while p.parent != None and not isinstance(p, FunctionNode):
lastp = p
p = p.parent
if si == 0 and p.parent != None:
lastp = p
p = p.parent
pindex = p.index(lastp)
node.parent.remove(node)
if inclass:
#add declaration
decl = VarDeclNode(ExprNode([]), local=True, name=node.val)
p.insert(pindex, decl)
while inclass.parent != None and inclass.parent != p:
inclass = inclass.parent
if inclass.parent == None:
pindex += 2
else:
pindex = p.index(inclass) + 1
while pindex < len(p) and hasattr(p[pindex], "_was_static") and getattr(p[pindex], "_was_static"):
pindex += 1
node._was_static = True
p.insert(pindex, node)
node.modifiers.remove("static")
node.modifiers.add("local")
traverse(result, VarDeclNode, visit);
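#Sketch of the hoisting above: a function-local static such as
# function f() { static v = 0; v++; }
#is renamed with a (hopefully) unique suffix and moved to the outer scope:
# var $v_f = 0; function f() { $v_f++; }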
from js_global import glob
from js_typespace import *
from js_ast import *
from js_util_types import *
import js_ast
node_types = set()
for k in js_ast.__dict__:
n = js_ast.__dict__[k]
try:
if not issubclass(getattr(js_ast, k), Node):
continue;
except TypeError:
continue
node_types.add(k)
def process_arrow_function_this(result, typespace):
idgen = [1];
doneset = set()
hash = glob.g_file
import hashlib
hash = hashlib.sha1(bytes(hash, "utf8")).digest()
import base64
hash = str(base64.b64encode(hash), "latin-1")
hash = hash.replace("+", "_").replace("/", "_").replace("=", "_").replace(".", "_").replace("-", "_")
hash = hash.replace("&", "_")
hash = hash[:4]
def replace_this(n, name):
if type(n) == IdentNode and n.val == 'this':
n.val = name
else:
for c in n.children:
if isinstance(c, FunctionNode):
continue
replace_this(c, name)
def visit(node):
if not node.is_arrow: return
if node.parent == None: return
if node._id in doneset: return
doneset.add(node._id)
p = node.parent
pi = p.index(node)
while p is not None and not isinstance(p, StatementList) \
and not isinstance(p, FunctionNode):
if p is not None:
pi = p.parent.index(p)
p = p.parent
if p is None:
#impossible
typespace.error("Impossible, no outer statementlist. That's can't happen", node)
name = "$_" + hash + "awthis_" + str(idgen[0])
idgen[0] += 1
namenode = VarDeclNode('this', name=name, local=True)
p.insert(pi, namenode)
replace_this(node, name)
traverse(result, FunctionNode, visit)
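#Sketch: an arrow function that references `this` gets a hoisted alias,
#e.g. (names assumed; the hash part varies per file):
# var $_ab12awthis_1 = this;
# (x) => this.f(x) becomes (x) => $_ab12awthis_1.f(x)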
def transform_class_props(node, typespace):
if glob.g_include_types:
return
pass
def visit(n):
cons = None
for m in n:
if type(m) == MethodNode and m.name == "constructor":
cons = m
if cons is None:
cons = MethodNode("constructor")
cons.add(ExprListNode())
cons.line = n.line
cons.file = n.file
cons.add(StatementList())
cons.lexpos = n.lexpos
cons.lexpos2 = n.lexpos2
n.prepend(cons)
if n.parents is not None and len(n.parents) > 0:
cons[1].add(FuncCallNode("super"))
have_super = n.parents is not None and len(n.parents) > 0
if have_super and len(cons) == 1:
typespace.error("Missing super for class " + n.name, cons)
elif type(cons[1]) != StatementList:
if have_super and "super" not in cons[1].gen_js(0):
typespace.error("Missing super for class " + n.name, cons)
sm = cons
si = 2 if have_super else 1
else:
if have_super and (len(cons[1]) == 0 or "super" not in cons[1][0].gen_js(0)):
typespace.error("Missing super for class " + n.name, cons)
sm = cons[1]
si = 1 if have_super else 0
for prop in n:
if type(prop) != ClassPropNode: continue
if len(prop) == 0 or len(prop[0].gen_js(0).replace(";", "").strip()) == 0:
continue;
if "static" in prop.modifiers:
n2 = AssignNode(BinOpNode(n.name, prop.name, "."), prop[0])
insert_after(n, n2)
else:
n2 = AssignNode(BinOpNode("this", prop.name, "."), prop[0])
sm.insert(si, n2)
traverse(node, ClassNode, visit)
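#Sketch of the class-property rewrite above (when types are stripped):
# class A extends B { x = 1; static y = 2; } becomes, roughly,
# class A extends B { constructor() { super(); this.x = 1; } }
# A.y = 2;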
def apply_inserts(node, typespace, inserts, buf):
inserts.sort(key = lambda key : key[0])
off = 0
for it in inserts:
i, s = it
i += off
buf = buf[:i] + s + buf[i:]
off += len(s)
pass
return buf
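#Worked example: apply_inserts(..., [[3, "X"], [1, "Y"]], "abcdef")
#sorts by position, then shifts later offsets by what was already added:
# insert "Y" at 1 -> "aYbcdef" (off=1), insert "X" at 3+1 -> "aYbcXdef"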
def infer_class_properties(result, typespace, buf):
inserts = []
def visit(n):
if len(n) == 0:
return
indent = n[0].lexpos
while indent < len(buf) and indent >= 0 and buf[indent] not in ["\n", "\r"]:
indent -= 1
count = n[0].lexpos - indent
indent = ""
for i in range(count-1):
indent += " "
si = n.lexpos
#print(n.name, buf[n.lexpos:n.lexpos+15].replace("\n", "\\n"))
#find bracket
if not buf[si:].strip().startswith("class"):
if buf[si+1:].strip().startswith("class"):
while si < len(buf) and buf[si] != "c":
si += 1
pass
else:
#preprocessor has messed up lines
return
while si < len(buf) and buf[si] != "{":
si += 1
si += 1
while si < len(buf) and buf[si] not in ["\n", "\r"]:
si += 1
si += 1
cons = None
for m in n:
if type(m) == MethodNode and m.name == "constructor":
cons = m
break
if not cons: return
props = {}
def binop_recurse(n2):
if type(n2) == NumLitNode:
return "number"
elif type(n2) == IdentNode and n2.val in ["true", "false"]:
return "boolean"
elif type(n2) == TrinaryCondNode:
a = binop_recurse(n2[1])
b = binop_recurse(n2[2])
if a == b:
return a
if a is not None and b is None:
return a
if b is not None and a is None:
return b
if a == "boolean" and b == "number":
return "number"
if a == "number" and b == "boolean":
return "number"
elif type(n2) != BinOpNode:
return None
a = binop_recurse(n2[0])
b = binop_recurse(n2[1])
if (a is not None) == (b is not None):
if n2.op in ["+", "-", "/", "*", "**", ">>", "<<", "^", "|", "&", "~", \
"%"]:
return "number" if a == "number" or b == "number" else "boolean"
elif n2.op in ["<", "==", "===", ">", "&&", "||", ">=", "<="]:
return "boolean"
def visit2(n2):
if type(n2) == BinOpNode and n2.op != "=": return
if len(n2[0]) == 0 or n2[0][0].gen_js(0).strip() != "this": return
if type(n2[0]) not in [BinOpNode, MemberRefNode]: return
if type(n2[0]) == BinOpNode and n2[0].op != ".": return
prop = n2[0][1].gen_js(0).strip()
for m in n:
if type(m) == ClassPropNode and m.name == prop:
return
val = n2[1]
ptype = None
if type(val) == StrLitNode:
ptype = "string"
elif type(val) == IdentNode and val.val in ["true", "false"]:
ptype = "boolean"
elif type(val) == NumLitNode:
ptype = "number"
elif type(val) == ObjLitNode:
ptype = "Object"
elif type(val) == BinOpNode:
ptype = binop_recurse(val)
elif type(val) == FunctionNode: #isinstance(val, FunctionNode):
ptype = "function"
elif type(val) == ArrayLitNode:
ok = len(val[0]) > 0
for c in val[0]:
if type(c) != NumLitNode:
ok = 0
if ok:
ptype = "Array<number>"
elif type(val) == KeywordNew and len(val) > 0 and len(val[0]) > 0:
name = val[0][0].gen_js(0).strip()
if "." not in name:
ptype = name
#print(val)
if ptype:
props[prop] = [ptype, si]
traverse(cons, AssignNode, visit2)
traverse(cons, BinOpNode, visit2)
for i, k in enumerate(props):
ptype, loc = props[k]
line = indent + k + " : " + ptype
if i == len(props)-1:
line += ";\n\n" #add semicolon to last prop due to compiler bug
else:
line += "\n"
inserts.append([loc, line])
#print(props)
traverse(result, ClassNode, visit)
buf = apply_inserts(result, typespace, inserts, buf)
return buf
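#Sketch: for class A { constructor() { this.x = 1.0; this.s = "hi"; } }
#the pass above inserts, right after the class's opening line:
# x : number;
# s : string;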
def coverage_profile(result, typespace):
typeset = set([VarDeclNode, ExportNode, TryNode, CatchNode,
ClassNode, FunctionNode, IfNode, ElseNode,
ForInNode, ForLoopNode, DoWhileNode, WhileNode,
SwitchNode, CaseNode, DefaultCaseNode, FinallyNode])
proflines = []
for c in result:
if type(c) != StrLitNode: break
if c.val[1:-1] == "not_covered_prof":
return
def visit(node):
if type(node) == StatementList:
cs = node[:]
else:
cs = node[1:]
#go through statements
for st in cs:
if type(st) in typeset:
continue;
node.insert(node.index(st), js_parse("""
$$cov_prof("$s", $s);
""", [glob.g_file, st.line], start_node=FuncCallNode))
proflines.append([glob.g_file, st.line])
traverse(result, StatementList, visit)
traverse(result, FunctionNode, visit)
traverse(result, MethodNode, visit)
traverse(result, MethodGetter, visit)
traverse(result, MethodSetter, visit)
#for t in typeset:
# traverse(result, t, visit)
for pl in proflines:
result.add(js_parse("""
$$cov_reg("$s", $s);
""", pl, start_node=FuncCallNode))
``` |
{
"source": "joeedh/game.js",
"score": 2
} |
#### File: game.js/designer/props.py
```python
import bpy
from . import utils
class ObjectSettings(bpy.types.PropertyGroup):
ignore = bpy.props.BoolProperty()
#my_float = bpy.props.FloatProperty()
#my_string = bpy.props.StringProperty()
def register():
bpy.utils.register_class(ObjectSettings)
bpy.types.Object.gamejs = \
bpy.props.PointerProperty(type=ObjectSettings)
def unregister():
bpy.utils.unregister_class(ObjectSettings)
reg = utils.Registrar([
utils.Registrar.custom(register, unregister)
])
``` |
{
"source": "joeedh/noodle.blend",
"score": 2
} |
#### File: noodle.blend/defunct/generate.py
```python
import bpy, bmesh
from math import *
from mathutils import *
from . import codegen
from . import c_code
def generate():
ob = bpy.context.object
if ob == None: return
if ob.name.startswith("__"): return
from lg.symbol import sym
from lg import codegen
#nodetree we'll be working on
ntree = ob.implicit.node_tree
if ntree not in bpy.data.node_groups.keys(): return
from lg import mesh
outname = "__" + ob.name + "_output"
if outname not in bpy.data.objects.keys():
outme = bpy.data.meshes.new(outname)
outobj = bpy.data.objects.new(outname, outme)
bpy.context.scene.objects.link(outobj)
else:
outobj = bpy.data.objects[outname]
print("Tessellating...", outname, outobj);
bm = bmesh.new()
bm.from_object(ob, bpy.context.scene)
#bm.from_mesh(ob.data)
min1 = Vector([1e17, 1e17, 1e17])
max1 = Vector([-1e17, -1e17, -1e17])
mat = ob.matrix_world.copy()
for v in bm.verts:
v.co = mat * v.co
for j in range(3):
min1[j] = min(min1[j], v.co[j]);
max1[j] = max(max1[j], v.co[j]);
if min1[0] == 1e17:
return
#add a small margin
d = 0.5
min1 -= Vector([d, d, d]);
max1 += Vector([d, d, d]);
print("AABB", ob.name, min1, max1);
#return
mesh.surfmesh(outobj, min1=min1, max1=max1)
from lg import appstate
appstate.start_events()
```
#### File: noodle.blend/noodle/node.py
```python
import bpy
from bpy.types import NodeTree, Node, NodeSocket, NodeCustomGroup, NodeGroup, NodeGroupInput
from . import utils
from . import config as cf
from .config import *
from . import globals
# Implementation of custom nodes from Python
# Derived from the NodeTree base type, similar to Menu, Operator, Panel, etc.
class NoodleTree(NodeTree):
# Optional identifier string. If not explicitly defined, the python class name is used.
bl_idname = NODETREE_TYPE
# Label for nice name display
bl_label = NODETREE_EDITOR_NAME
# Icon identifier
bl_icon = 'NODETREE'
@classmethod
def poll(cls, ntree):
return True
# Description string
NoodleTree.__doc__ = cf.NODETREE_EDITOR_NAME + " Editor"
class NoodleCustomGroup (NodeCustomGroup):
bl_idname = PRE+"NodeGroup"
bl_label = "Group"
bl_icon = 'SOUND'
bl_width_min = 250
def init(self, context):
pass
def copy(self, b):
pass
def poll_instance(self, ntree):
return ntree.bl_idname == NODETREE_TYPE
@classmethod
def poll(cls, ntree):
return ntree.bl_idname == NODETREE_TYPE
# Additional buttons displayed on the node.
def draw_buttons(self, context, layout):
if self.node_tree == None:
return
layout.label(text=self.node_tree.name)
layout.prop(self.node_tree, "name")
prop = layout.operator("node."+APIPRE+"_edit_group", text="Edit Group")
print("PATH", context.space_data.path[-1].node_tree)
node_tree = context.space_data.path[-1].node_tree
prop["node_path"] = utils.gen_node_path(self, node_tree) #context.space_data.path[-1]) #node_tree)
def draw_buttons_ext(self, context, layout):
pass
# Custom socket type
class FieldVectorSocket(NodeSocket):
# Description string
'''Vector Socket'''
# Optional identifier string. If not explicitly defined, the python class name is used.
bl_idname = PRE+'VectorSocket'
# Label for nice name display
bl_label = 'Vector'
value : bpy.props.FloatVectorProperty(default=[0.0, 0.0, 0.0], size=3)
# Optional function for drawing the socket input vector
def draw(self, context, layout, node, text):
if self.is_output or self.is_linked:
layout.label(text=text)
else:
layout.prop(self, "value", text=self.name)
# Socket color
def draw_color(self, context, node):
return (0.4, 0.8, 1.0, 1.0)
# Custom socket type
class FieldSocket(NodeSocket):
# Description string
'''Value Socket'''
# Optional identifier string. If not explicitly defined, the python class name is used.
bl_idname = PRE+'FieldSocket'
# Label for nice name display
bl_label = 'Field'
value : bpy.props.FloatProperty(default=0.0)
# Optional function for drawing the socket input value
def draw(self, context, layout, node, text):
if self.is_output or self.is_linked:
layout.label(text=text)
else:
layout.prop(self, "value", text=self.name)
# Socket color
def draw_color(self, context, node):
return (1.0, 0.4, 0.216, 1.0)
from . import symbol
sym = symbol.sym
#stype is either 'vec' (vector) or 'field' (scalar)
def coerce(a, stype):
if type(a) in [list, tuple] and stype != "vec":
a = sym.func("sqrt", a[0]*a[0] + a[1]*a[1] + a[2]*a[2])
elif type(a) not in [list, tuple] and stype != "field":
a = [a, a, a]
return a
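# Rough behavior sketch of coerce():
# coerce(x, "vec") -> [x, x, x] (scalar broadcast to a vector)
# coerce([x, y, z], "field") -> sqrt(x*x + y*y + z*z) (vector magnitude)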
# Mix-in class for all custom nodes in this tree type.
# Defines a poll function to enable instantiation.
class NoodleTreeNode:
tag : bpy.props.IntProperty(default=0) #used during sorting
@classmethod
def poll(cls, ntree):
return ntree.bl_idname == NODETREE_TYPE
class math_func_impl:
def SIN(self, a, b, dva, dvb):
return sym.func("sin", a)
def COS(self, a, b, dva, dvb):
return sym.func("cos", a)
def TAN(self, a, b, dva, dvb):
return sym.func("tan", a)#, "{dva}*(tan({a})*tan({a}) + 1.0)"
def ASIN(self, a, b, dva, dvb):
return sym.func("asin", a)#, "(-sqrt(-{a}*{a} + 1.0)*{dva})/({a}*{a} - 1.0)"
def ACOS(self, a, b, dva, dvb):
return sym.func("acos", a)#, "(sqrt(-{a}*{a} + 1.0)*{dva})/({a}*{a} - 1.0)"
def POW(self, a, b, dva, dvb):
return sym.func("pow", [a, b])#, "(pow({a}, {b})*({dva}*{b} + {dvb}*log({a})*{a}))/{a}"
def ABS(self, a, b, dva, dvb):
return sym.func("abs", a)
def FLOOR(self, a, b, dva, dvb):
return sym.func("floor", a)
def CEIL(self, a, b, dva, dvb):
return sym.func("ceil", a)
def FRACT(self, a, b, dva, dvb):
return sym.func("fract", a)
def TRUNC(self, a, b, dva, dvb):
return sym.func("trunc", a)
def ATAN(self, a, b, dva, dvb):
return sym.func("atan", a)#, "atan({a})", "({dva}/({a}*{a}+1.0)"
def TENT(self, a, b, dva, dvb):
return sym(1.0) - sym.func("abs", [sym.func("fract", a) - 0.5])*2.0
def ATAN2(self, a, b, dva, dvb):
return sym.func("atan2", a)#, "atan2({b}, {a})", "(atan2({a}+0.001) - atan2({a}-0.001)) / 500.0"
def MUL(self, a, b, dva, dvb):
return sym(a) * sym(b)
def SUB(self, a, b, dva, dvb):
return sym(a) - sym(b)
def ADD(self, a, b, dva, dvb):
return sym(a) + sym(b)
def DIV(self, a, b, dva, dvb):
return sym(a) / sym(b)
def MIN(self, a, b, dva, dvb):
return sym.func("min", [a, b])
def MAX(self, a, b, dva, dvb):
return sym.func("max", [a, b])#, "max({a}, {b})", "{a} > {b} ? {dva} : {dvb}"
def CROSS(self, a, b, dva, dvb):
return [
a[1]*b[2] - a[2]*b[1],
a[2]*b[0] - a[0]*b[2],
a[0]*b[1] - a[1]*b[0]
]
#vector functions
def DOT(self, a, b, dva, dvb):
return a[0]*b[0] + a[1]*b[1] + a[2]*b[2]
def LEN(self, a, b, dva, dvb):
return sym.func('sqrt', [a[0]*a[0] + a[1]*a[1] + a[2]*a[2]])
#"""({dva}[0]*{a}[0] + {dva}[1]*{a}[1] + {dva}[2]*{a}[2]) /
# sqrt({a}[0]*{a}[0] + {a}[1]*{a}[1] + {a}[2]*{a}[2]")"""
#example node
# Derived from the Node base type.
class MathNode(Node, NoodleTreeNode):
# === Basics ===
# Description string
'''A custom node'''
# Optional identifier string. If not explicitly defined, the python class name is used.
bl_idname = PRE+'MathNode'
# Label for nice name display
bl_label = 'Math Node'
# Icon identifier
bl_icon = 'SOUND'
bl_width_min = 200
# === Custom Properties ===
# These work just like custom properties in ID data blocks
# Extensive information can be found under
# http://wiki.blender.org/index.php/Doc:2.6/Manual/Extensions/Python/Properties
#myStringProperty : bpy.props.StringProperty()
#myFloatProperty : bpy.props.FloatProperty(default=3.1415926)
# Enum items list
math_funcs = [
("SIN", "Sin", "Sine"),
("COS", "Cos", "Cosine"),
("TENT", "Tent", "Tent wave"),
("TAN", "Tan", "Tangent"),
("ASIN", "Asin", "Arcsine"),
("ACOS", "Acos", "Arccosine"),
("POW", "Pow", "Power"),
("ABS", "Abs", "Absolute value"),
("FLOOR", "Floor", "Floor"),
("CEIL", "Ceil", "Ceiling"),
("FRACT", "Fract", "Fractional part"),
("TRUNC", "Truncate", "Truncate"),
("ATAN", "Atan", "Arctangent"),
("ATAN2", "Atan2 (xy to polar)", "Two-argument arctangent"),
("MUL", "Multiply", "Multiply"),
("SUB", "Subtract", "Subtract"),
("ADD", "Add", "Add"),
("DIV", "Divide", "Divide"),
("MIN", "Min", "Min"),
("MAX", "Max", "Max"),
]
mathFunc : bpy.props.EnumProperty(name="Function", description="Math Functions", items=math_funcs, default='ADD')
def init(self, context):
self.inputs.new(PRE+'FieldSocket', "a")
self.inputs.new(PRE+'FieldSocket', "b")
self.outputs.new(PRE+'FieldSocket', "field")
# Copy function to initialize a copied node from an existing one.
def copy(self, node):
self.mathFunc = node.mathFunc
print("Copying from node ", node)
# Free function to clean up on removal.
def free(self):
print("Removing node ", self, ", Goodbye!")
# Additional buttons displayed on the node.
def draw_buttons(self, context, layout):
layout.label(text="Node settings")
layout.prop(self, "mathFunc")
#layout.prop(self, "myFloatProperty")
# Detail buttons in the sidebar.
# If this function is not defined, the draw_buttons function is used instead
def draw_buttons_ext(self, context, layout):
pass
#layout.prop(self, "myFloatProperty")
# myStringProperty button will only be visible in the sidebar
#layout.prop(self, "myStringProperty")
# Optional: custom label
# Explicit user label overrides this, but here we can define a label dynamically
def draw_label(self):
return "Math Node"
globals.module_registrar.add(utils.Registrar([
#NoodleTree is registered in node_tree.py
FieldSocket,
FieldVectorSocket,
MathNode,
NoodleCustomGroup
]));
```
#### File: noodle.blend/noodle/panel_ui.py
```python
import bpy
from bpy.types import Menu, Panel
from .config import *
from . import globals
from math import *
class PhysicButtonsPanel:
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = "physics"
@classmethod
def poll(cls, context):
rd = context.scene.render
return (context.object) and (not rd.use_game_engine)
class PHYSICS_PT_implicit(PhysicButtonsPanel, Panel):
bl_label = "Noodle Surfaces"
def draw(self, context):
layout = self.layout
ob = context.object
layout.prop(ob.implicit, "surface_groups")
layout.prop(ob.implicit, "blend_mode")
layout.prop(ob.implicit, "node_tree")
layout.prop(ob.implicit, "global_mode")
from . import utils
globals.module_registrar.add(utils.Registrar([
PHYSICS_PT_implicit
]))
``` |
{
"source": "joeedh/ShapeView",
"score": 2
} |
#### File: ShapeView/shapeview/ops.py
```python
import bpy
from mathutils import *
from math import *
import bmesh
from .utils import Registrar, ctxHasShapeKeys
from .shapeview import setView, createDrivers
import bpy
class CreateDriversOp(bpy.types.Operator):
"""Set view vector from active viewport camera"""
bl_idname = "object.shapeview_create_drivers"
bl_label = "Create Drivers"
bl_options = {'UNDO'}
@classmethod
def poll(cls, context):
return ctxHasShapeKeys(context)
def execute(self, context):
ob = context.object
print("Making drivers")
createDrivers(ob)
return {'FINISHED'}
class SetViewVectorOp(bpy.types.Operator):
"""Set view vector from active viewport camera"""
bl_idname = "object.shapeview_set_view"
bl_label = "Set View"
bl_options = {'UNDO'}
@classmethod
def poll(cls, context):
return ctxHasShapeKeys(context)
def execute(self, context):
ob = context.object
setView(ob)
return {'FINISHED'}
class CreateEmbeddedScript(bpy.types.Operator):
"""Create embedded script to run rig without addon"""
bl_idname = "object.shapeview_create_script"
bl_label = "Create Script"
bl_options = {'UNDO'}
@classmethod
def poll(cls, context):
return ctxHasShapeKeys(context)
def execute(self, context):
ob = context.object
from . import generate
buf = generate.generate()
name = "shapeview_run.py"
if name not in bpy.data.texts:
bpy.data.texts.new(name)
text = bpy.data.texts[name];
ob.shapeview.script = text
text.clear()
text.write(buf)
text.use_module = True
text.as_module() #run
return {'FINISHED'}
bpy_exports = Registrar([
SetViewVectorOp,
CreateDriversOp,
CreateEmbeddedScript
])
```
#### File: ShapeView/shapeview/select_area_op.py
```python
import bpy
from bpy.props import *
from .utils import Registrar
import bpy
import bgl
import blf
import gpu
from gpu_extras.batch import batch_for_shader
types = {
"VIEW_3D" : bpy.types.SpaceView3D
}
def draw_callback_px(self, context):
font_id = 0 # XXX, need to find out how best to get this.
if context.area == self.active_area:
# draw some text
blf.position(font_id, 15, 30, 0)
blf.size(font_id, 20, 72)
blf.draw(font_id, "Select Area " + str(self.active_area_i))
class SelectAreaOp(bpy.types.Operator):
"""Draw a line with the mouse"""
bl_idname = "screen.select_area"
bl_label = "Select Area"
areaType : bpy.props.StringProperty(default="VIEW_3D")
targetPath : bpy.props.StringProperty()
targetProp : bpy.props.StringProperty()
_x = None
_y = None
def find_area(self, screen, x, y):
i = 0
for area in screen.areas:
if area.type != self.areaType:
continue
if x >= area.x and y >= area.y and x < area.x + area.width and y < area.y + area.height:
self.active_area = area
self.active_area_i = i
i += 1
def modal(self, context, event):
for area in context.screen.areas:
if area.type != self.areaType:
continue
area.tag_redraw()
print(dir(event), "event!!")
if event.type == 'MOUSEMOVE':
self._x = event.mouse_x
self._y = event.mouse_y
self.find_area(context.screen, self._x, self._y)
return {'RUNNING_MODAL'}
elif event.type == 'LEFTMOUSE' or event.type == "RIGHTMOUSE" or event.type == "MIDDLEMOUSE":
self._x = event.mouse_x
self._y = event.mouse_y
self.find_area(context.screen, self._x, self._y)
types[self.areaType].draw_handler_remove(self._handle, 'WINDOW')
obj = bpy.data.path_resolve(self.targetPath)
setattr(obj, self.targetProp, self.active_area_i)
#make sure ui redraws
for area in context.screen.areas:
area.tag_redraw()
return {'FINISHED'}
elif event.type in {'ESC'}:
types[self.areaType].draw_handler_remove(self._handle, 'WINDOW')
return {'CANCELLED'}
return {'RUNNING_MODAL'}
def invoke(self, context, event):
# the arguments we pass to the callback
args = (self, context)
# Add the region OpenGL drawing callback
# draw in view space with 'POST_VIEW' and 'PRE_VIEW'
self._handle = types[self.areaType].draw_handler_add(draw_callback_px, args, 'WINDOW', 'POST_PIXEL')
self._x = event.mouse_prev_x
self._y = event.mouse_prev_y
self.mouse_path = []
self.active_area = None
self.active_area_i = 0
context.window_manager.modal_handler_add(self)
return {'RUNNING_MODAL'}
bpy_exports = Registrar([
SelectAreaOp
])
```
#### File: ShapeView/shapeview/shapeview.py
```python
import bpy
from mathutils import *
from math import *
import bmesh, random, time, os, sys, os.path
from .Global import svglob
#START
last_view = Vector([0, 0, 1])
did_render = False
def getKey(shapeview, key):
for sk in shapeview.skeys:
if sk.shapekey == key:
return sk
ret = shapeview.skeys.add()
ret.shapekey = key
return ret
def getTargetMatrix(ob):
shapeview = ob.data.shape_keys.shapeview
target = shapeview.target
mat = ob.matrix_world
if target.object is not None:
ob2 = target.object
mat = ob2.matrix_world
bone = None
if type(ob2.data) == bpy.types.Armature:
bone = target.bone
if bone in ob2.pose.bones:
bone = ob2.pose.bones[bone]
else:
bone = None
if bone:
mat = mat @ bone.matrix
loc, quat, scale = mat.decompose()
return quat.to_matrix()
def getView():
global last_view, did_render
if svglob.is_rendering:
did_render = True
print("IS RENDERING", svglob.is_rendering)
camera = bpy.context.scene.camera
if not camera:
print("ERROR: no camera?!!")
else:
mat2 = camera.matrix_world
mat2 = Matrix(mat2)
mat2.invert()
last_view = Vector(mat2[2][:3])
return
view3d = None
first = None
i = 0
if bpy.context.window and bpy.context.window.screen:
for area in bpy.context.window.screen.areas:
if area.type == "VIEW_3D":
if not first:
first = area.spaces[0].region_3d
if i == bpy.context.workspace.shapeview.active_view3d:
view3d = area.spaces[0].region_3d
i += 1
if first and not view3d:
print("warning, failed to find correct view3d for index %i"%(bpy.context.workspace.shapeview.active_view3d));
view3d = first
if view3d:
mat2 = view3d.view_matrix
last_view = Vector(mat2[2][:3])
return last_view
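#Note (an interpretation, not from the original comments): last_view holds
#row 2 of the inverted camera/view matrix, i.e. the view Z axis expressed
#in world space.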
def getKeyVal(ob, key):
global last_view
ctx = bpy.context
shapeview = ob.data.shape_keys.shapeview
sv = getKey(shapeview, key)
scene = ctx.scene
view3d = None
getView()
mat1 = getTargetMatrix(ob)
z1 = mat1 @ Vector(sv.vector)
z2 = last_view
z1.normalize()
z2.normalize()
dot = z1.dot(z2)
if sv.both_sides:
dot = abs(dot)
th = abs(acos(dot*0.99999))
imat = Matrix(ob.matrix_world)
imat.invert()
th /= pi*0.5
th = min(max(th, 0.0), 1.0)
th = 1.0 - th;
th = pow(th, 1.5)
return th;
def _sv_getview(obname, key):
ob = bpy.data.objects[obname]
print("OB", ob)
shape_keys = ob.data.shape_keys
shapeview = shape_keys.shapeview
sv = getKey(shapeview, key)
kval = getKeyVal(ob, key)
sum = 0.0
tot = 0.0
for sv in shapeview.skeys:
if isBasisKey(sv.shapekey, shape_keys): continue
if sv.shapekey not in shape_keys.key_blocks: continue
kval = getKeyVal(ob, sv.shapekey)
sum += kval
tot += 1.0
if tot == 0.0:
return 0.0
kval = getKeyVal(ob, key)
print(key, "single", kval, "tot", tot, "sum", sum)
if tot == 1.0:
return kval
    return kval / sum
# Add variable defined in this script into the drivers namespace.
bpy.app.driver_namespace["_sv_getview"] = _sv_getview
def setView(ob):
shapeview = ob.data.shape_keys.shapeview
key = ob.active_shape_key_index
key = ob.data.shape_keys.key_blocks[key]
sv = getKey(shapeview, key.name)
view = getView()
print("view", view)
mat = Matrix(getTargetMatrix(ob))
mat.invert()
print(mat)
print((mat @ view))
vec = mat @ view
vec.normalize()
sv.vector = vec
print(vec)
last_update_view = Vector()
def needUpdate():
getView()
#timer might not be called during render, and thus it might think
#view hasn't changed
global did_render
global last_update_view
global last_view
if did_render:
did_render = False
last_update_view = last_view
return True
if (last_view - last_update_view).length > 0.001:
last_update_view = last_view
return True
return False
def allVisibleObjects():
obset = set()
def check(obs):
for ob in obs:
if ob.mode not in ["OBJECT", "POSE"]:
continue
if ob.type == "EMPTY" and ob.instance_type == "COLLECTION" and ob.instance_collection:
coll = ob.instance_collection
check(coll.all_objects)
if ob.type == "MESH" and ob.data.shape_keys and len(ob.data.shape_keys.shapeview.skeys) > 0:
obset.add(ob)
check(bpy.context.visible_objects)
return obset
def checkViews():
#print("need upd ate?", needUpdate())
dgraph = bpy.context.evaluated_depsgraph_get()
scene = bpy.context.scene
if needUpdate():
for ob in allVisibleObjects():
ob.data.shape_keys.update_tag()
def isBasisKey(name, skeys):
return name == skeys.key_blocks[0].name
def getKeyIndex(name, skeys):
for i in range(len(skeys.key_blocks)):
if skeys.key_blocks[i].name == name:
return i
def getDriver(ob, skeys, keyname, animdata, path):
for d in animdata.drivers:
if d.data_path == path:
return d
ret = ob.data.shape_keys.driver_add(path)
return ret
def makeDriver(ob, keyname, skeys, target):
key = skeys.key_blocks[keyname]
if skeys.animation_data is None:
skeys.animation_data_create()
path = "key_blocks[\"" + keyname + "\"].value"
print(path)
print(skeys)
animdata = skeys.animation_data
d = getDriver(ob, skeys, keyname, animdata, path)
for v in d.driver.variables[:]:
d.driver.variables.remove(v)
var1 = d.driver.variables.new()
var1.name = "obself"
var1.targets[0].id = ob
d.driver.expression = "_sv_getview(\""+ob.name+"\", \"" + keyname + "\")"
if target.object is not None:
var2 = d.driver.variables.new()
var2.name = "dgraph_link"
var2.targets[0].id = target.object
var2.type = "TRANSFORMS"
if type(target.object.data) == bpy.types.Armature:
var2.targets[0].bone_target = target.bone
def createDrivers(ob):
mesh = ob.data
shapeview = mesh.shape_keys.shapeview
skeys = mesh.shape_keys
target = shapeview.target
for sv in shapeview.skeys:
if isBasisKey(sv.shapekey, skeys):
continue
if not sv.shapekey in skeys.key_blocks:
print("Warning, missing key " + sv.shapekey)
continue
makeDriver(ob, sv.shapekey, skeys, target)
print(sv.shapekey)
#END
from . import utils
bpy_exports = utils.Registrar([
])
```
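The load-bearing line in `shapeview.py` is the assignment into `bpy.app.driver_namespace`: any callable stored there becomes reachable by name from driver expression strings, which is how `makeDriver()` binds shape-key values to `_sv_getview`. A hedged sketch of the mechanism in isolation, with an illustrative function name:
```python
import bpy

def half(x):          # illustrative helper, not part of the addon
    return x * 0.5

# once registered, driver expressions can call half() by name
bpy.app.driver_namespace["half"] = half

# a driver created the way makeDriver() does it could then use:
#   fcu = mesh.shape_keys.driver_add('key_blocks["Key 1"].value')
#   fcu.driver.expression = 'half(4.0)'   # evaluates to 2.0
```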
#### File: ShapeView/shapeview/timer.py
```python
from .Global import svglob
from .shapeview import needUpdate
import bpy
#no longer used, embedded scripts are used instead
def start():
def checkViews():
#print("need update?", needUpdate())
if needUpdate():
for ob in bpy.context.visible_objects:
if ob.mode not in ["OBJECT"] or type(ob.data) != bpy.types.Mesh:
continue
if not ob.data.shape_keys or not ob.data.shape_keys.animation_data:
continue
if len(ob.data.shape_keys.shapeview.skeys) == 0:
continue
print("updating object. . .", ob.name)
print("view update detected")
dgraph = bpy.context.evaluated_depsgraph_get()
scene = bpy.context.scene
ob.data.shape_keys.update_tag()
pass
svglob.startTimer(checkViews)
def stop():
svglob.stopTimers()
pass
```
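`svglob.startTimer()` is the addon's own wrapper; the stock facility it presumably sits on is `bpy.app.timers`, where the registered function's return value is the delay in seconds until the next call, or `None` to stop. A sketch assuming a `checkViews`-style poll:
```python
import bpy

def poll_views():      # hypothetical poll, standing in for checkViews()
    # ... call shapeview.checkViews() or similar here ...
    return 0.25        # run again in 0.25 s; return None to stop

bpy.app.timers.register(poll_views)
```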
#### File: ShapeView/shapeview/ui.py
```python
import bpy
from bpy.props import *
from . import utils
from .shapeview import isBasisKey
class DATA_PT_WorkSpaceShapeView (bpy.types.Panel):
bl_space_type = 'VIEW_3D'
bl_region_type = 'UI'
bl_category = "Tool"
bl_label = "Shape View"
@classmethod
def poll(cls, context):
return True
def draw(self, context):
layout = self.layout
row = layout.row()
row.prop(context.workspace.shapeview, "active_view3d")
op = row.operator("screen.select_area")
op.targetPath = "workspaces[\"%s\"].shapeview" % (bpy.context.workspace.name)
op.targetProp = "active_view3d"
class ShapeKeyPanel (bpy.types.Panel):
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = "data"
@classmethod
def poll(cls, context):
return utils.ctxHasShapeKeys(context)
class DATA_PT_ShapeView(ShapeKeyPanel):
"""Creates a Panel in the scene context of the properties editor"""
bl_label = "Shape View"
def draw(self, context):
layout = self.layout
ob = context.object
mesh = ob.data
skey = ob.active_shape_key
shapeview = mesh.shape_keys.shapeview
#print(dir(layout))
box = layout.box()
target = ob.data.shape_keys.shapeview.target
row = layout.row()
row.prop(context.workspace.shapeview, "active_view3d")
op = row.operator("screen.select_area")
op.targetPath = "workspaces[\"%s\"].shapeview" % (bpy.context.workspace.name)
op.targetProp = "active_view3d"
box.label(text="Front Axis Target")
box.prop(target, "object")
if target.object:
if type(target.object.data) == bpy.types.Armature:
box.prop_search(target, "bone", target.object.data, "bones")
#box.prop(target, "axis")
layout.operator("object.shapeview_create_drivers")
layout.operator("object.shapeview_create_script")
box = layout.box()
for sv in shapeview.skeys:
if sv.shapekey == skey.name and not isBasisKey(sv.shapekey, mesh.shape_keys):
box.prop(sv, "vector", text=sv.shapekey)
box.prop(sv, "both_sides")
layout.operator("object.shapeview_set_view", text="Set View Vector")
"""
layout.label(text="Shape Key: " + skey.name);
layout.operator("object.shapeview_create_drivers")
for sv in shapeview.skeys:
box = layout.box()
if sv.name == "Basis": continue
box.operator("object.shapeview_set_view", text="Set View Vector")
box.prop(sv, "vector", text=sv.shapekey)
"""
from .utils import Registrar
bpy_exports = Registrar([
DATA_PT_ShapeView,
DATA_PT_WorkSpaceShapeView
])
```
{
"source": "joeedh/small-loader.es6",
"score": 3
}
#### File: joeedh/small-loader.es6/serv.py
```python
import os, os.path, sys, time, socket, traceback, stat
from math import *
INDEXFILE = "/main.html"
PORT = 8079
mimetypes = {
".js" : "application/x-javascript",
".html" : "text/html",
".json" : "application/x-javascript",
".glsl" : "text/plain",
".png" : "image/png",
".jpg" : "image/jpeg",
".obj" : "text/plain"
};
def get_mime(path):
path = path.strip().lower()
for k in mimetypes:
if path.endswith(k):
return mimetypes[k]
return "application/x-octet-stream"
class SocketFile:
def __init__(self, con):
self.sock = con
self.writebuf = b""
self.readbuf = b""
con.setblocking(False)
def __next__(self):
bsize = 2048
wsize = 1024*8
try:
buf = self.sock.recv(2048)
self.readbuf += buf
except BlockingIOError:
pass
try:
buf = self.writebuf
if len(buf) > wsize:
buf = buf[:wsize]
self.sock.send(buf)
self.writebuf = self.writebuf[len(buf):]
except BlockingIOError:
pass
def write(self, buf):
self.writebuf += buf
def read(self, max=2048):
buf = self.readbuf
if len(buf) > max:
buf = buf[:max]
self.readbuf = self.readbuf[max:]
else:
self.readbuf = b""
return buf
def Connection(con, addr, cls):
con.setblocking(False)
file = SocketFile(con)
while 1:
sbuf = b""
yield
while 1:
file.__next__()
buf = file.read()
yield 1
if (len(buf) == 0): continue;
sbuf += buf
if b"\r\n\r\n" in sbuf:
break;
lines = [l.strip() for l in sbuf.split(b"\r\n")]
method = lines[0];
headers = {}
path = method[method.find(b" ")+1:method.find(b" HTTP")].strip()
method = method[:method.find(b" ")]
print(str(method + b" " + path, "latin-1"))
for l in lines[1:]:
key = l[:l.find(b":")].strip()
val = l[l.find(b":")+1:].strip()
#print(key, val)
headers[str(key, "latin-1")] = str(val, "latin-1")
h = cls()
h.path = str(path, "latin-1")
h.method = str(method, "latin-1")
h.headers = headers
h.rfile = file
h.wfile = file
getattr(h, "do_"+str(method, "latin-1").strip())()
#print("\n")
yield
class Server:
def __init__(self, addr, cls):
self.connections = []
self.addr = addr
self.cls = cls
self.socket = socket.socket()
def start(self):
self.socket.bind(self.addr)
self.socket.listen(10)
sock = self.socket
sock.setblocking(False)
while 1:
dellist = []
try:
ret = sock.accept()
#print(ret[1])
con = Connection(ret[0], ret[1], self.cls)
self.connections.append(con)
except BlockingIOError:
pass
for con in self.connections:
try:
for i in range(5):
con.__next__()
except StopIteration:
print(" connection closed")
dellist.append(con)
except:
traceback.print_exc()
dellist.append(con);
for con in dellist:
self.connections.remove(con)
time.sleep(1.0/420.0)
pass
def bs(s):
if type(s) == bytes:
return s
return bytes(str(s), "latin-1")
class Handler:
def __init__(self):
self.path = ""
self.headers = {}
self.wfile = None
self.rfile = None
self.send_headers = []
self.body = b""
self.code = 200
def send_response(self, code):
self.body = bs(self.body)
buf = b"HTTP/1.1 " + bs(code) + b" None\r\n"
had_content = False
headers = [
[b"Connection", b"keep-alive"]
] + self.send_headers
for h in headers:
if h[0] == b"Content-length":
had_content = True
buf += bs(h[0]) + b":" + b" " + bs(h[1]) + b"\r\n"
if not had_content:
buf += b"Content-length: " + bs(len(self.body)) + b"\r\n"
buf += b"\r\n"
buf += self.body
self.wfile.write(buf)
def add_header(self, key, val):
self.send_headers.append([bs(key), bs(val)])
def set_body(self, body):
self.body = body
def send_error(self, error):
body = b"Error: " + bs(error)
self.add_header("MimeType", "text/plain")
self.set_body(body)
self.send_response(error)
def do_GET(self):
path = self.path.strip()
dir = os.getcwd()
if path == "/" or path == "":
path = INDEXFILE
abspath = os.path.abspath(os.path.normpath(dir+os.path.sep+path))
if not abspath.startswith(dir):
self.send_error(404)
return
if not os.path.exists(abspath):
self.send_error(404)
return
st = os.stat(abspath)
if stat.S_ISDIR(st.st_mode):
self.send_error(405)
return
file = open(abspath, "rb")
buf = file.read()
file.close()
self.set_body(buf)
self.add_header("MimeType", get_mime(path))
self.send_response(200)
server = Server(("", PORT), Handler)
print("serving at port", PORT)
server.start()
```
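`serv.py` multiplexes every connection in a single thread: `Server.start()` polls a non-blocking `accept()`, then steps each `Connection` generator a few times per loop, with `SocketFile` buffering the raw socket I/O. With the server running (`python serv.py`), a quick smoke test from another process might look like:
```python
import http.client

conn = http.client.HTTPConnection("localhost", 8079)  # PORT from serv.py
conn.request("GET", "/")
resp = conn.getresponse()
print(resp.status, resp.getheader("Content-length"), len(resp.read()))
```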
{
"source": "joeedh/webblender",
"score": 3
}
#### File: joeedh/webblender/dbcache.py
```python
import shelve, struct, io, imp, ctypes, re
import os, sys, os.path, time, random, math
cache_cache = {}
class CachedDB:
def __init__(self, db, name=None):
self.db = db
if name != None:
if name not in cache_cache:
cache_cache[name] = {}
self.cache = cache_cache[name]
else:
self.cache = {}
def sync(self):
self.db.sync()
def close(self):
self.db.close()
def keys(self):
return self.db.keys()
def values(self):
return self.db.values()
def __iter__(self):
return self.db.__iter__()
def __getitem__(self, item):
if item in self.cache:
return self.cache[item]
ret = self.db[item]
self.cache[item] = ret
return ret
def __delitem__(self, item):
del self.db[item]
if item in self.cache:
del self.cache[item]
def __setitem__(self, item, val):
self.db[item] = val
self.cache[item] = val
def __contains__(self, item):
return item in self.cache or item in self.db
```
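`CachedDB` is a write-through wrapper: reads check the in-memory dict before touching the shelve database, writes land in both, and passing a `name` shares one cache across instances via the module-level `cache_cache`. A usage sketch (the import path is an assumption):
```python
import shelve
from dbcache import CachedDB  # assumed import path

db = CachedDB(shelve.open("test.db"), name="test")
db["mesh_7"] = {"verts": 1024}   # written to both shelve and the cache
print(db["mesh_7"])              # repeat reads are served from memory
db.sync()
db.close()
```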
#### File: tools/extjs_cc/js_ast.py
```python
from js_lex import HexInt, StringLit
from js_global import glob
from js_util_types import odict
import sys, traceback, os, os.path
def print2(*args):
s = ""
for a in args:
s += str(a) + " "
sys.stdout.write(s+"\n")
if 0: #glob.g_debug_print_calls:
def theprint(arg, **args):
args = [arg] + list(args)
colwid = 80
frame = sys._getframe(1)
line = frame.f_lineno
filename = frame.f_code.co_filename
if "/" in filename or "\\" in filename:
filename = os.path.split(filename)[1]
filename = " (%s:%d)" % (filename, line)
maxcol = colwid - len(filename)-1
out = ""
for i, a in enumerate(args):
s = str(a)
if i > 0: s = " " + s
out += s
out += "\n"
out2 = ""
col = 0
for s in out:
if s == "\n":
while col < maxcol:
col += 1
out2 += " "
out2 += filename
col = 0
if col >= maxcol:
out2 += filename + "\n"
col = 0
out2 += s
if s != "\n":
col += 1
sys.stdout.write(out2)
else:
theprint = print2
#def print(arg, **args):
# theprint(arg, **args)
def tab(tlvl, tstr=" "):
s = ""
for i in range(tlvl):
s += tstr
return s
class SourceMap:
def __init__(self):
self.lexpos = 0
self.segments = []
def add_segment(self, node, str1):
if len(str1) == 0: return
self.segments.append([self.lexpos, node, len(str1), str1])
self.lexpos += len(str1);
def delete_last(self):
seg = self.segments.pop(-1)
self.lexpos -= seg[2]
def out(self, node, str1):
self.add_segment(node, str1)
return str1
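# Editor's note (illustration, not in the original file): each out() call
# appends a [lexpos, node, length, text] segment and advances lexpos, so
# emitted text can later be mapped back to the AST node that produced it:
#   smap = SourceMap()
#   smap.out(node_a, "var x")    # segments -> [[0, node_a, 5, "var x"]]
#   smap.out(node_b, " = 1;")    # next segment starts at lexpos 5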
idgen = [1]
class Node (object):
def __init__(self):
global idgen
self._id = idgen[0]
idgen[0] += 1
self.children = []
self.parent = None
self.type = None #node type, don't touch this while parsing
self.line = glob.g_line
self.file = glob.g_file
self.lexpos = glob.g_lexpos
self.final_type = None
self.smap = None
if glob.g_comment != None and glob.g_comment != "":
self.comment = glob.g_comment
self.commentline = glob.g_comment_line
glob.g_comment = None
else:
self.comment = None
self.commentline = -1
def c(self):
if self.comment == None: return ""
print("------------------------->", self.comment)
if self.comment.strip().startswith("//"):
self.comment = "/*" + self.comment[self.comment.find("//"):] + "*/"
return self.comment
#inc is how much to increment self.lexpos
def s(self, str1):
if (self.smap != None):
self.smap.out(self, str1)
return str1
def smap_dellast(self):
smap = self.smap
if (smap != None):
smap.delete_last()
def __getitem__(self, item):
return self.children[item]
def __setitem__(self, item, val):
self.children[item] = val
def __len__(self):
return len(self.children)
def index(self, i):
return self.children.index(i)
def replace(self, oldnode, newnode):
i = 0
for c in self.children:
if c == oldnode:
break
i += 1
self.children[i] = newnode
newnode.parent = self
def _default_node(self, node):
if type(node) == str:
if node.startswith('"'):
node = StrLitNode(node)
else:
node = IdentNode(node)
elif type(node) == float:
node = NumLitNode(node)
return node
def pop(self, i):
self.children.pop(i);
def add(self, node):
node = self._default_node(node)
self.children.append(node)
node.parent = self
def remove(self, node):
self.children.remove(node)
def insert(self, i, node):
node = self._default_node(node)
self.children.insert(i, node);
node.parent = self
def prepend(self, node):
node = self._default_node(node)
self.children.insert(0, node)
node.parent = self
def extra_str(self):
return ""
def copy_basic(self, n2):
n2.type = self.type
n2.line = self.line
n2.file = self.file
n2.lexpos = self.lexpos
n2.final_type = self.final_type
if hasattr(self, "template"):
if self.template != None:
n2.template = n2.template.copy()
def copy(self):
raise RuntimeError("Unimplemented copy function in type %s!"%str(type(self)))
def copy_children(self, n2):
n2.children[:] = []
for c in self:
n2.add(c.copy())
def gen_js(self, tlevel):
raise RuntimeError("Unimplemented gen_js function in type %s!"%str(type(self)))
def get_line_str(self):
name = str(type(self)).replace("js_ast.", "").replace("<class", "").replace(">", "").replace(" ", "").replace("'", "")
c = self.extra_str()
if len(c.strip()) > 0: c = " " + c
return name + c
def get_ntype_name(self):
return str(type(self)).replace("js_ast.", "").replace("<class", "").replace(">", "").replace(" ", "").replace("'", "")
def __str__(self, tlevel=0):
t = tab(tlevel, "-")
name = ""
if self.type != None:
if type(self.type) == str:
name += self.type + " "
else:
if hasattr(self.type, "get_type_str"):
name += self.type.get_type_str() + ": "
else:
name += "(" + self.type.get_line_str() + "): "
name += str(type(self)).replace("js_ast.", "").replace("<class", "").replace(">", "").replace(" ", "").replace("'", "")
if len(self.children) == 0:
return t + name + " " + self.extra_str()
else:
s = t + name + " " + self.extra_str() + " {\n"
for c in self.children:
cs = c.__str__(tlevel+1)
if not (cs.endswith("\n")):
cs += "\n"
s += cs
s += t + "}\n"
return s
def __repr__(self):
return str(self)
class ValueNode (Node):
val = None
def __init__(self):
super(ValueNode, self).__init__()
def gen_js(self, tlevel):
s = self.s(str(self.val))
return s
def extra_str(self):
return str(self.val)
class StrLitNode (ValueNode):
def __init__(self, str):
super(StrLitNode, self).__init__()
self.val = str
def gen_js(self, tlevel):
s = self.s(self.val)
return s;
def copy(self):
n2 = StrLitNode(str(self.val))
self.copy_basic(n2)
self.copy_children(n2)
return n2
class RegExprNode (StrLitNode):
def copy(self):
n2 = RegExprNode(str(self.val))
self.copy_basic(n2)
self.copy_children(n2)
return n2
class NumLitNode (ValueNode):
def __init__(self, num):
super(NumLitNode, self).__init__()
self.val = num
def get_type_str(self):
return "float" if type(self.val) == float else "int"
def fmt(self):
if type(self.val) == HexInt:
return hex(self.val)
elif type(self.val) in [int, float]:
return str(self.val)
def gen_js(self, tlevel):
if type(self.val) == HexInt:
s = hex(self.val)
elif type(self.val) == int:
s = str(self.val)
elif type(self.val) == float:
s = str(self.val)
s = self.s(s)
return s
def copy(self):
n2 = NumLitNode(self.val)
self.copy_basic(n2)
self.copy_children(n2)
return n2
class CommentNode (Node):
def __init__(self, comment=""):
super(CommentNode, self).__init__()
self.comment = comment
def copy(self):
return CommentNode(self.comment)
def gen_js(self, tlevel=0):
t1 = tab(tlevel)
t2 = tab(tlevel+1)
if not "\n" in self.comment:
return "/*" + self.comment + "*/"
else:
ls = [l.replace("\r" ,"") for l in self.comment.split("\n")]
s = "/*\n"
for l in ls:
s += t2 + l + "\n"
s += t1 + "*/\n"
return s
class IdentNode (ValueNode):
def __init__(self, ident, local=False):
super(IdentNode, self).__init__()
self.val = ident
self.local = local
def gen_js(self, tlevel):
s = self.s(str(self.val))
return s
def get_type_str(self):
return self.val
def extra_str(self):
return str(self.val) + " " + str(self.local)
def __setval__(self):
return self.val
def get_type_name(self):
return self.val
def copy(self):
n2 = IdentNode(str(self.val))
self.copy_basic(n2)
self.copy_children(n2)
return n2
class VarDeclNode(IdentNode):
def __init__(self, expr, local=False, name="(unnamed)"):
#self[0] is assignment expression
#self[1] is type
#self[2..] are chained var decl child nodes
super(VarDeclNode, self).__init__(expr)
self.modifiers = set()
self.val = name
if local:
self.modifiers.add("local")
self.type = None
self.add(expr)
#make sure to add type to self, please;
#it should be self[1]
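        # Illustration (editor's note): after parsing "int i = 2, j = 3;"
        # the VarDeclNode for i would hold:
        #   self[0]  -> NumLitNode(2)         the assignment expression
        #   self[1]  -> BuiltinTypeNode(int)  the declared type
        #   self[2:] -> chained VarDeclNodes  (here, one for j)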
def get_type_str(self):
def get_type(n, visit=None):
if visit == None: visit = set()
if n in visit:
return n.get_line_str()
visit.add(n)
if type(n) == str:
return n
elif n.type == str:
if n.type in [VarDeclNode, IdentNode]:
return n.val
else:
return "(corruption)"
elif type(n) == IdentNode:
return n.val
elif type(n) == VarDeclNode:
return n.get_type_str()
elif type(n) == TemplateNode:
return n.get_type_str()
elif isinstance(n, TypeNode):
return get_type(n.type, visit)
elif type(n) == TypedClassNode:
return n.name
elif type(n) == TypeRefNode:
return get_type(n.type)
elif type(n) == TypedClassRef:
return n.type
else:
return self.val
if self.type == self:
return self.val
return get_type(self[1])
def gen_js(self, tlevel):
if type(self.modifiers) != set:
sys.stderr.write("WARNING: corrupted modifiers in VarDeclNode. Regenerating.\n")
self.modifiers = set()
print(self)
if self.local: self.modifiers.add("local")
elif "local" in self.modifiers:
self.local = True
if "global" in self.modifiers:
return ""
s = ""
if self.local and type(self.parent) != VarDeclNode: s += "var "
elif "static" in self.modifiers and type(self.parent) != VarDeclNode: s += "static "
s += str(self.val)
s = self.s(s)
if len(self.children) > 0 and not (type(self.children[0]) == ExprNode and len(self.children[0].children)==0):
s += self.s("=") + self.children[0].gen_js(tlevel)
if len(self.children) > 2:
for c in self.children[2:]:
s += self.s(", ") + c.gen_js(tlevel)
return s
def copy(self):
n2 = VarDeclNode(ExprNode([]), name=str(self.val))
n2.local = self.local
n2.modifiers = set(self.modifiers)
self.copy_basic(n2)
self.copy_children(n2)
return n2
def extra_str(self):
return self.val + " " + str(self.local)
def __setval__(self):
return self.get_type_str()
class TypeNode(Node):
def __init__(self, type):
super(TypeNode, self).__init__()
self.type = type
def gen_js(self, tlevel):
return ""
def get_type_name(self):
if type(self.type) == str: return self.type
else: return self.type.get_type_name()
def __setval__(self):
s = str(type(self))
return s[s.find("<"):s.find(">")]
class StaticArrNode(Node):
def __init__(self, name_node, size):
"""
layout: self[0] is array name node
self[1] is array type
parent should be a var decl node
"""
Node.__init__(self)
self.size = size
self.add(name_node)
def extra_str(self):
return str(self.size)
def copy(self):
n = StaticArrNode(Node(), self.size)
n.remove(n[0])
self.copy_basic(n)
self.copy_children(n)
return n
def get_type_str(self):
t = self[0]
if type(t) != str: t = t.get_type_str()
return "%s[%i]" % (t, self.size)
def get_type_name(self):
return self.get_type_str()
class FuncRefNode(TypeNode):
def __init__(self, name):
super(FuncRefNode, self).__init__(UndefinedTypeNode())
self.template = None
self.name = name
def copy(self):
n2 = FuncRefNode(str(self.name))
self.copy_basic(n2)
self.copy_children(n2)
return n2
def get_type_str(self):
s = self.type.get_type_str() + " "
s += self.name
if self.template != None:
s += self.template.extra_str()
s += "("
for i, c in enumerate(self[0]):
if i > 0: s += ", "
if isinstance(c, TypeNode):
s += c.get_type_str()
else:
s += c.gen_js(0)
s += ")"
return s
def __setval__(self):
        return self.get_type_str()
def gen_js(self, tlevel):
s = ""
return s
    def extra_str(self):
        return self.get_type_str()
class BuiltinTypeNode(TypeNode):
def __init__(self, tname):
super(BuiltinTypeNode, self).__init__(tname)
def copy(self):
n2 = BuiltinTypeNode(str(self.type) if type(self.type) == str else self.type.copy())
self.copy_basic(n2)
self.copy_children(n2)
return n2
def compatible(self, b):
infertypes = ["float", "int", "byte", "short", "char"]
return (self.type == b.type) or (self.type in infertypes and b.type in infertypes)
def get_type_str(self):
if type(self.type) == str: return self.type
elif self.type == None: return "{type-corruption}"
else: return self.type.extra_str()
def extra_str(self):
return str(self.type)
def __setval__(self):
return "BuiltinTypeNode(%s)" % self.get_type_str()
#this node encapsulates code with an unknown type
class UnknownTypeNode(TypeNode):
def __init__(self, node=None):
super(UnknownTypeNode, self).__init__(self)
if node != None:
self.add(node)
def copy(self):
n2 = UnknownTypeNode()
self.copy_basic(n2)
self.copy_children(n2)
return n2
def get_type_name(self):
return "(UnknownNode)"
def gen_js(self, tlevel):
s = ""
for c in self.children:
s += c.gen_js(tlevel)
return s
def get_type_str(self):
return "(UnknownTypeNode)"
class VoidTypeNode(TypeNode):
def __init__(self, node=None):
super(VoidTypeNode, self).__init__(self)
if node != None:
self.add(node)
def copy(self):
n2 = VoidTypeNode()
self.copy_basic(n2)
self.copy_children(n2)
return n2
def get_type_name(self):
return "void"
def gen_js(self, tlevel):
s = ""
for c in self.children:
s += c.gen_js(tlevel)
return s
def get_type_str(self):
return "void"
class TemplateStandInType(VoidTypeNode):
def get_type_name(self):
return "TemplateStandIn"
def copy(self):
n2 = TemplateStandInType()
self.copy_basic(n2)
self.copy_children(n2)
return n2
class UndefinedTypeNode(TypeNode):
def __init__(self, node=None):
super(UndefinedTypeNode, self).__init__(self)
if node != None:
self.add(node)
def copy(self):
n2 = UndefinedTypeNode()
self.copy_basic(n2)
self.copy_children(n2)
return n2
def get_type_name(self):
return "None"
def gen_js(self, tlevel):
s = ""
for c in self.children:
s += c.gen_js(tlevel)
return s
def get_type_str(self):
return "None"
class TypeRefNode (TypeNode):
def __init__(self, typeref):
TypeNode.__init__(self, typeref)
self.template = None
def copy(self):
n2 = TypeRefNode(self.type)
self.copy_basic(n2)
self.copy_children(n2)
return n2
def extra_str(self):
s = str(self.type)
if self.template != None:
s += " " + self.template.extra_str()
return s
def gen_js(self, tlevel):
s = ""
if type(self.type) == str:
s += self.s(self.type)
else:
s += self.type.gen_js(tlevel)
return s
def get_type_str(self):
return self.__setval__()
def __setval__(self):
s = ""
if type(self.type) == str:
s += self.type
else:
s += self.type.get_type_str()
if self.template != None:
s += "<%s>" % self.template.__setval__()
return s
class NullStatement(Node):
def __init__(self):
super(NullStatement, self).__init__()
def copy(self):
n2 = NullStatement()
self.copy_basic(n2)
self.copy_children(n2)
return n2
def gen_js(self, tlevel):
p = self.parent
if type(p) not in [WhileNode, ForLoopNode, IfNode, ElseNode]:
return ""
else:
return self.s(";")
class DeleteNode (Node):
def __init__(self, expr):
super(DeleteNode, self).__init__()
self.add(expr)
def copy(self):
n2 = DeleteNode(self[0])
self.copy_basic(n2)
self.copy_children(n2)
return n2
def gen_js(self, tlevel):
s = self.s("delete ")
s += self.children[0].gen_js(tlevel)
return s;
def node_eq(a, b):
if type(a) != type(b): return False
stack1 = [a]
    stack2 = [b]
while len(stack1) > 0 and len(stack2) > 0:
n1 = stack1.pop(-1)
n2 = stack2.pop(-1)
if type(n1) != type(n2):
return False
if not n1.node_eq(n2):
return False
for c in n1.children:
stack1.append(c)
for c in n2.children:
stack2.append(c)
if len(stack1) != len(stack2):
return False
return True
class TemplateNode(Node):
def __init__(self, exprlist):
super(TemplateNode, self).__init__()
self.add(exprlist)
self.type = None
self.name_expr = None #used in later stages of type processing
def copy(self):
n2 = TemplateNode(self[0])
self.copy_basic(n2)
self.copy_children(n2)
if self.name_expr != None:
n2.name_expr = str(self.name_expr) if type(self.name_expr) == str else self.name_expr.copy()
return n2
def get_type_str(self):
s = ""
if self.name_expr != None:
s += self.name_expr.gen_js(0)
s += "<"
for i, c in enumerate(self[0]):
if i > 0: s += ", "
if hasattr(c, "get_type_str"): #type(c) in [VarDeclNode, TemplateNode]:
s += c.get_type_str()
else:
s += c.gen_js(0);
s += ">"
return s
def extra_str(self, print_self=False):
s = ""
if print_self:
s += str(self)
if self.name_expr != None:
s += self.name_expr.gen_js(0)
s += "<<"
for i, c in enumerate(self[0]):
if i > 0:
s += ", "
if type(c) in [VarDeclNode, BuiltinTypeNode, TemplateNode]:
s += c.get_type_str()
else:
s2 = c.gen_js(0);
if s2 == "":
s2 = c.extra_str()
s += s2
s += ">>"
return s
    def gen_js(self, tlevel):
        if len(self) > 1:
            return self.children[1].gen_js(tlevel)
        return "" #callers concatenate the result, so never return None
def __setval__(self):
return self.get_type_str()
def get_type_name(self):
return self[1].get_type_name()
class BinOpNode (Node):
def __init__(self, a, b, op):
super(BinOpNode, self).__init__()
self.op = op
self.add(a);
self.add(b);
def copy(self):
n2 = BinOpNode(self[0], self[1], self.op)
self.copy_basic(n2)
self.copy_children(n2)
return n2
def gen_js(self, tlevel):
if self.op in ["instanceof", "in"]:
s = self.children[0].gen_js(tlevel)
s += self.s(" "+self.op+" ") + self.children[1].gen_js(tlevel)
return s
else:
s = self.children[0].gen_js(tlevel)
s += self.s(self.op) + self.children[1].gen_js(tlevel)
return s
def extra_str(self):
return str(self.op)
class ExprNode (Node):
def __init__(self, exprnodes, add_parens=False):
super(ExprNode, self).__init__()
self.add_parens = add_parens
for e in exprnodes:
self.add(e)
def copy(self):
n2 = ExprNode(self)
self.copy_basic(n2)
self.copy_children(n2)
return n2
def gen_js(self, tlevel):
s = ""
if self.add_parens:
s += self.s("(")
for i, c in enumerate(self.children):
if i != 0:
s += self.s(", ")
c1 = c.gen_js(tlevel)
if c1 == None:
print("problem in ExprNode.gen_js()", type(c))
continue
s += c1
if self.add_parens:
s += self.s(")")
return s
class ArrayRefNode (Node):
def __init__(self, var, ref):
super(ArrayRefNode, self).__init__()
self.add(var)
self.add(ref)
def copy(self):
n2 = ArrayRefNode(self[0], self[1])
self.copy_basic(n2)
self.copy_children(n2)
return n2
def gen_js(self, tlevel):
return self[0].gen_js(tlevel) + self.s("[") + self[1].gen_js(tlevel) + self.s("]")
class ArrayLitNode (Node):
def __init__(self, exprlist):
super(ArrayLitNode, self).__init__()
self.add(exprlist)
def copy(self):
n2 = ArrayLitNode(self[0])
self.copy_basic(n2)
self.copy_children(n2)
return n2
def get_type_str(self):
if type(self.type) == str:
return self.type
elif self.type != None:
return self.type.get_type_str()
else: return ""
def gen_js(self, tlevel):
s = self.s("[")
s += self.children[0].gen_js(tlevel);
s += self.s("]")
return s
class ObjLitNode (Node):
def __init__(self):
self.name = "anonymous"
self.is_prototype = False
super(ObjLitNode, self).__init__()
def copy(self):
n2 = ObjLitNode()
self.copy_basic(n2)
self.copy_children(n2)
return n2
def gen_js(self, tlevel):
s = self.s("{")
for i, c in enumerate(self):
if i > 0:
s += self.s(", ")
s += c[0].gen_js(tlevel) + self.s(": ") + c[1].gen_js(tlevel)
s += self.s("}")
return s
#objlitnode whose gen_js formats in Require.JS class prototype style
class RJSObjLitNode (ObjLitNode):
def gen_js(self, tlevel):
t1 = tab(tlevel-1)
t2 = tab(tlevel)
s = "{\n"
for i, c in enumerate(self):
s += t2
s += c[0].gen_js(tlevel)
s += " : " + c[1].gen_js(tlevel+1)
if i != len(self)-1:
s += ","
s += "\n\n"
s += t1 + "}"
return s
#duplicate of ExprNode, but with different type to (hopefully) avoid chain confusion
class ExprListNode (ExprNode):
def __init__(self, exprnodes):
super(ExprListNode, self).__init__(exprnodes)
def copy(self):
n2 = ExprListNode(self)
self.copy_basic(n2)
self.copy_children(n2)
return n2
def flatten(self):
pass
class MemberRefNode (Node):
def __init__(self, parent, member):
super(MemberRefNode, self).__init__()
self.add(parent)
self.add(member)
def copy(self):
n2 = MemberRefNode(self[0], self[1])
self.copy_basic(n2)
self.copy_children(n2)
return n2
class VarRefNode (Node):
def __init__(self, var):
super(VarRefNode, self).__init__()
self.add(var)
def copy(self):
n2 = VarRefNode(self[0])
self.copy_basic(n2)
self.copy_children(n2)
return n2
class NegateNode(Node):
def __init__(self, expr):
super(NegateNode, self).__init__()
self.add(expr)
def extra_str(self):
return ""
def gen_js(self, tlevel):
return self.s("-") + self.children[0].gen_js(tlevel);
def copy(self):
n2 = NegateNode(self[0])
self.copy_basic(n2)
self.copy_children(n2)
return n2
class TypeofNode(Node):
def __init__(self, expr):
super(TypeofNode, self).__init__()
self.add(expr)
def extra_str(self):
return ""
def gen_js(self, tlevel):
s = self.s("typeof ")
return s + self.children[0].gen_js(tlevel)
def copy(self):
n2 = TypeofNode(self[0])
self.copy_basic(n2)
self.copy_children(n2)
return n2
class LogicalNotNode(Node):
def __init__(self, expr):
super(LogicalNotNode, self).__init__()
self.add(expr)
def extra_str(self):
return ""
def gen_js(self, tlevel):
return self.s("!") + self.children[0].gen_js(tlevel)
def copy(self):
n2 = LogicalNotNode(self[0])
self.copy_basic(n2)
self.copy_children(n2)
return n2
class BitInvNode(Node):
def __init__(self, expr):
super(BitInvNode, self).__init__()
self.add(expr)
def extra_str(self):
return ""
def gen_js(self, tlevel):
return self.s("~") + self.children[0].gen_js(tlevel)
def copy(self):
n2 = BitInvNode(self[0])
self.copy_basic(n2)
self.copy_children(n2)
return n2
class AssignNode (Node):
def __init__(self, var_ref, expr, flags=set(), mode="="):
super(AssignNode, self).__init__()
self.mode = mode
self.add(var_ref)
self.add(expr)
self.type = None
self.flags = set(flags) #duplicate
def gen_js(self, tlevel):
s = self.children[0].gen_js(tlevel)
sm = self.s(" "+self.mode+" ")
s = s + sm + self.children[1].gen_js(tlevel)
return s
def extra_str(self):
s = ""
if self.type != None:
s += self.type.extra_str() + " "
s += self.mode
return s
def copy(self):
n2 = AssignNode(self[0], self[1])
n2.mode = self.mode
n2.flags = set(self.flags)
self.copy_basic(n2)
self.copy_children(n2)
return n2
def legacy_endstatem(c, c2):
ret = len(c2.strip()) > 0 and not c2.strip().endswith(";")
ret2 = not c2.strip().endswith("}") or type(c) in [ExprListNode, VarDeclNode, AssignNode, ExprNode, BinOpNode]
return ret and ret2
class StatementList (Node):
def __init__(self):
super(StatementList, self).__init__()
def gen_js(self, tlevel):
t = tab(tlevel)
t2 = tab(tlevel+1)
s = ""
for c in self.children:
if type(c) == StatementList:
c2 = c.gen_js(tlevel);
else:
self.s(t)
c2 = c.gen_js(tlevel+1)
#if tlevel == -1: continue
if 0: #XXX len(c2.strip()) == 0:
if self.smap != None:
self.smap.lexpos -= len(c2)+len(t)
while self.smap.segments[-1][0] >= self.smap.lexpos:
self.smap.segments.pop(-1)
continue
if legacy_endstatem(c, c2):
c2 += self.s(";")
c2 = t + c2
if not c2.endswith("\n"):
c2 += self.s("\n")
s += c2
return s
def copy(self):
n2 = StatementList()
self.copy_basic(n2)
self.copy_children(n2)
return n2
class FuncCallNode (Node):
def __init__(self, name_expr):
super(FuncCallNode, self).__init__()
self.template = None
self.add(name_expr)
def gen_js(self, tlevel):
s = self.children[0].gen_js(tlevel) + self.s("(")
if len(self.children) > 1:
s += self.children[1].gen_js(tlevel)
"""
for i, c in enumerate(self.children[1].children):
if i > 0: s += ", "
s += c.gen_js(tlevel)
"""
s += self.s(")")
return s
def extra_str(self):
s = ""
if self.template != None:
s += self.template.extra_str() + " "
s += self.children[0].extra_str()
return s
def copy(self):
n2 = FuncCallNode(self[0])
self.copy_basic(n2)
self.copy_children(n2)
return n2
class InitCallNode (FuncCallNode):
def __init__(self, name_expr):
super(InitCallNode, self).__init__(name_expr)
def copy(self):
n2 = InitCallNode(self[0])
self.copy_basic(n2)
self.copy_children(n2)
return n2
class TypedClassRef (Node):
def __init__(self, cls):
Node.__init__(self)
self.type = cls
class FunctionNode (StatementList):
def copy(self):
n2 = FunctionNode(self.name, self.lineno)
self.copy_basic(n2)
self.copy_children(n2)
n2.members = odict()
n2.functions = odict()
n2.class_type = self.class_type
n2.is_generator = self.is_generator
n2.class_parent = self.class_parent
n2.path = self.path
n2.ret = self.ret
n2.is_native = self.is_native
n2.is_anonymous = self.is_anonymous
return n2
#NEED TO GET RID OF THIS LINENO PARAMETER!
#(and member; both have been replaced by node.line/node.lexpos)
def __init__(self, name, lineno=0):
super(FunctionNode, self).__init__()
self.name = name
self.origname = name
self.is_anonymous = False
self.is_native = False
self.members = odict()
self.functions = odict() #this is local nested functions, not class members
self.ret = None
self.class_type = "func" #valid values: ["func", "method", "class", "array"]
self.type = None
self.is_generator = False
self.args = odict()
self.arg_is = odict()
self.path = None
self.class_parent = None
self.child_classes = odict()
self.is_builtin = False
self.logrecs = []
self.template = None
self.lrec_args = odict()
if type(lineno) != int:
self.lineno = lineno(1)
else:
self.lineno = lineno
def add_class_child(self, child):
if type(child) != FunctionNode:
raise JSError("Invalid argument for FunctionNode.add_class_child")
child.class_parent = self
self.child_classes[child.name] = child
def get_type_str(self):
s = self.name
if self.template != None:
s += self.template.get_type_str()
return s
def get_args(self):
args = []
for c in self[0]:
args.append(c.val)
self.args = args
return args
def get_path(self):
if self.path == None: return self.name
else: return self.path
def set_arg(self, arg, node):
self[0][self.arg_is[arg]] = node
node.parent = self[0]
self.args[arg] = node
def gen_js(self, tlevel):
t = tab(tlevel-1)
t2 = tab(tlevel)
if self.is_native: return ""
if self.name != "" and self.name != "(anonymous)":
s = "function %s("%self.name
else:
s = "function("
s = self.s(s)
for i, c in enumerate(self.children[0].children):
if i > 0:
s += self.s(", ")
s += c.gen_js(tlevel)
s += self.s(") {\n")
for c in self.children[1:]:
if type(c) != StatementList:
cd = self.s(t2) + c.gen_js(tlevel+1)
else:
cd = c.gen_js(tlevel)
#XXX if len(cd.strip()) == 0: continue
if len(cd.strip()) > 0 and not cd.strip().endswith("}") and not cd.strip().endswith(";"):
cd += self.s(";")
if not cd.endswith("\n"):
cd += self.s("\n")
s += cd
s += self.s(t+"}")
return s
def extra_str(self):
s = ""
if self.type != None:
if type(self.type) == str:
s += self.type + " "
else:
s += self.type.get_type_str() + " "
s += self.name
if self.template != None:
s += self.template.extra_str()
return s
class SwitchNode(Node):
def __init__(self, expr):
super(SwitchNode, self).__init__()
self.add(expr)
def copy(self):
n2 = SwitchNode(self[0])
self.copy_basic(n2)
self.copy_children(n2)
return n2
def gen_js(self, tlevel):
t = tab(tlevel-1)
t2 = tab(tlevel)
cs = self.children
s = self.s("switch (")
s += cs[0].gen_js(tlevel)
sadd = ") {\n"
s += self.s(sadd)
for c in cs[1:]:
self.s(t2)
s += t2 + c.gen_js(tlevel+1)
s += self.s(t + "}")
return s
class CaseNode(Node):
def __init__(self, expr):
super(CaseNode, self).__init__()
if expr != "default":
self.add(expr)
def copy(self):
n2 = CaseNode(self[0] if len(self) > 0 else "default")
self.copy_basic(n2)
self.copy_children(n2)
return n2
def gen_js(self, tlevel):
cs = self.children
s = self.s("case ")
s += cs[0].gen_js(tlevel)
s += self.s(":\n") + cs[1].gen_js(tlevel)
return s
class DefaultCaseNode(CaseNode):
def __init__(self):
super(DefaultCaseNode, self).__init__("default")
def copy(self):
        n2 = DefaultCaseNode()
self.copy_basic(n2)
self.copy_children(n2)
return n2
def gen_js(self, tlevel):
cs = self.children
s = self.s("default:\n")
s += cs[0].gen_js(tlevel)
return s
class WithNode(Node):
def __init__(self, expr):
super(WithNode, self).__init__()
self.add(expr)
def copy(self):
n2 = WithNode(self[0])
self.copy_basic(n2)
self.copy_children(n2)
return n2
def gen_js(self, tlevel):
t = tab(tlevel-1)
t2 = tab(tlevel)
s1 = self.s("with (")
s = s1 + self.children[0].gen_js(tlevel)
s += self.s(")")
if type(self.children[1]) != StatementList:
sadd = self.s("\n" + t2)
            s += sadd
s += self.children[1].gen_js(tlevel) + self.s(";");
else:
s += self.s(" {\n")
s += self.children[1].gen_js(tlevel+1)
s += self.s(t + "}")
return s
def extra_str(self):
return ""
class IfNode(Node):
def __init__(self, expr):
super(IfNode, self).__init__()
self.add(expr)
def copy(self):
n2 = IfNode(self[0])
self.copy_basic(n2)
self.copy_children(n2)
return n2
def gen_js(self, tlevel):
t = tab(tlevel-1)
t2 = tab(tlevel)
s1 = self.s("if (")
s = s1 + self.children[0].gen_js(tlevel)
        if type(self.children[1]) == ObjLitNode: #the grammars do mix a bit
sadd = self.s(") {\n" + t + "}")
s += sadd
elif type(self.children[1]) != StatementList:
sadd = self.s(")\n" + t2)
sadd += self.children[1].gen_js(tlevel) #+ self.s(";");
s += sadd
else:
sadd = self.s(") {\n")
sadd += self.children[1].gen_js(tlevel+1) + self.s(t + "}")
s += sadd
if len(self) > 2:
for c in self.children[2:]:
s += c.gen_js(tlevel)
return s
def extra_str(self):
return ""
class TryNode(Node):
def __init__(self):
super(TryNode, self).__init__()
def copy(self):
n2 = TryNode()
self.copy_basic(n2)
self.copy_children(n2)
return n2
def gen_js(self, tlevel):
t = tab(tlevel-1)
t2 = tab(tlevel)
s = self.s("try ")
#if len(self.children) == 0:
# c1 = s + self.s("{\n" + t + "}\n")
if type(self.children[0]) != StatementList:
c1 = s + self.s("\n") + "%s%s" % (t2, self.children[0].gen_js(tlevel))
else:
c1 = s + self.s("{\n")
c1 += self.children[0].gen_js(tlevel) + self.s(t) + self.s("}\n")
if len(self.children) > 1:
c2 = ""
for c in self.children[1:]:
c2 += self.s(t) + c.gen_js(tlevel)
else:
c2 = ""
return c1 + c2
def extra_str(self):
return ""
class CatchNode(Node):
def __init__(self, expr):
super(CatchNode, self).__init__()
self.add(expr)
def copy(self):
n2 = CatchNode(self[0])
self.copy_basic(n2)
self.copy_children(n2)
return n2
def extra_str(self):
return ""
def gen_js(self, tlevel):
t = tab(tlevel-1)
t2 = tab(tlevel)
s = self.s("catch (") + self.children[0].gen_js(tlevel) + self.s(") ")
if type(self.children[1]) != StatementList:
s += self.s("\n" + t2) + self.children[1].gen_js(tlevel) + self.s(";");
else:
s += self.s("{\n") + self.children[1].gen_js(tlevel+1) + self.s(t+"}")
return s
class WhileNode(Node):
def __init__(self, expr):
super(WhileNode, self).__init__()
self.add(expr)
def extra_str(self):
return ""
def copy(self):
n2 = WhileNode(self[0])
self.copy_basic(n2)
self.copy_children(n2)
return n2
def gen_js(self, tlevel):
t = tab(tlevel-1)
t2 = tab(tlevel)
if len(self.children) == 0:
return self.s("malformed while {\n")
s = self.s("while (") + self.children[0].gen_js(tlevel) + self.s(") {\n")
if len(self.children) == 1:
c = self.s("malformed while\n")
else:
if type(self.children[1]) != StatementList:
c = self.s(t2)
else:
c = ""
c += self.children[1].gen_js(tlevel)
"""
if c != "{}":
if type(self.children[1]) != StatementList:
c = t2 + c + "\n"
else:
c = ""
"""
s += c + self.s(t+"}\n")
return s
class ForCNode(Node):
def __init__(self, s1, s2, s3):
super(ForCNode, self).__init__()
self.add(s1)
self.add(s2)
self.add(s3)
def copy(self):
n2 = ForCNode(self[0], self[1], self[2])
self.copy_basic(n2)
self.copy_children(n2)
return n2
def gen_js(self, tlevel):
cs = self.children
c1 = cs[0].gen_js(tlevel)
s = c1 + self.s("; ") + cs[1].gen_js(tlevel)
s += self.s("; ") + cs[2].gen_js(tlevel)
return s
class ForInNode(Node):
def __init__(self, var, list):
self.of_keyword = "in"
super(ForInNode, self).__init__()
self.add(var)
self.add(list)
def copy(self):
n2 = ForInNode(self[0], self[1])
self.copy_basic(n2)
self.copy_children(n2)
return n2
def gen_js(self, tlevel):
        return self.children[0].gen_js(tlevel) + self.s(" in ") + self.children[1].gen_js(tlevel)
def extra_str(self):
return ""
def endline(node, s):
if not s.endswith("\n"):
s += node.s("\n")
return s
class ForLoopNode(Node):
"""
self[0] : loop expression
self[1] : statementlist
"""
def __init__(self, expr):
super(ForLoopNode, self).__init__()
self.add(expr)
def copy(self):
n2 = ForLoopNode(self[0])
self.copy_basic(n2)
self.copy_children(n2)
return n2
def extra_str(self):
return ""
def gen_js(self, tlevel):
t = tab(tlevel-1)
t2 = tab(tlevel)
s = self.s("for (") + self.children[0].gen_js(tlevel) + self.s(") {\n")
c = endline(self, self.children[1].gen_js(tlevel+1))
s += c
s += self.s(t+"}")
return s
class DoWhileNode(Node):
def __init__(self, expr):
super(DoWhileNode, self).__init__()
self.add(expr)
def copy(self):
n2 = DoWhileNode(self[0])
self.copy_basic(n2)
self.copy_children(n2)
return n2
def extra_str(self):
return ""
def gen_js(self, tlevel):
t = tab(tlevel-1)
t2 = tab(tlevel)
s = self.s("do {\n")
if type(self[1]) != StatementList:
s += self.s(t2);
c = self[1].gen_js(tlevel)
if type(self[1]) != StatementList:
c += self.s("\n")
s += c + self.s(t + "} while (") + self[0].gen_js(tlevel) + self.s(")")
return s
class ElseNode(Node):
def __init__(self, c=None):
super(ElseNode, self).__init__()
if c != None:
self.add(c)
def copy(self):
n2 = ElseNode()
self.copy_basic(n2)
self.copy_children(n2)
return n2
def extra_str(self):
return ""
def gen_js(self, tlevel):
t = tab(tlevel-1)
t2 = tab(tlevel)
s = self.s("else ")
if len(self.children) == 0:
return s + self.s("{\n%s}\n" % t)
if type(self.children[0]) == ObjLitNode: #the grammars do mix a bit
return s + self.s(" {\n" + t + "}")
elif type(self.children[0]) != StatementList:
return s + self.s("\n"+t2) + self.children[0].gen_js(tlevel)
else:
return s + self.s("{\n") + self.children[0].gen_js(tlevel) + self.s(t + "}\n")
class TrinaryCondNode(Node):
def __init__(self, s1, s2, s3):
super(TrinaryCondNode, self).__init__()
self.add(s1)
self.add(s2)
self.add(s3)
def copy(self):
n2 = TrinaryCondNode(self[0], self[1], self[2])
self.copy_basic(n2)
self.copy_children(n2)
return n2
def extra_str(self):
return ""
def gen_js(self, tlevel):
cs = self.children
return cs[0].gen_js(tlevel) + self.s(" ? ") + cs[1].gen_js(tlevel) + self.s(" : ") + cs[2].gen_js(tlevel)
class KeywordNew(Node):
def __init__(self, expr):
super(KeywordNew, self).__init__()
self.add(expr)
def copy(self):
n2 = KeywordNew(self[0])
self.copy_basic(n2)
self.copy_children(n2)
return n2
def gen_js(self, tlevel):
return self.s("new ") + self[0].gen_js(tlevel)
def extra_str(self):
return ""
class YieldNode (Node):
def __init__(self, expr):
super(YieldNode, self).__init__()
self.add(expr)
self.print_return = False
def copy(self):
n2 = YieldNode(self[0])
n2.print_return = self.print_return
self.copy_basic(n2)
self.copy_children(n2)
return n2
def extra_str(self):
return ""
def gen_js(self, tlevel):
if self.print_return:
return self.s("return ") + self.children[0].gen_js(tlevel)
else:
return self.s("yield ") + self.children[0].gen_js(tlevel)
class ReturnNode(Node):
def __init__(self, expr):
super(ReturnNode, self).__init__()
self.add(expr)
def copy(self):
n2 = ReturnNode(self[0])
self.copy_basic(n2)
self.copy_children(n2)
return n2
def extra_str(self):
return ""
def gen_js(self, tlevel):
return self.s("return ") + self.children[0].gen_js(tlevel)
class ThrowNode(Node):
def __init__(self, expr):
super(ThrowNode, self).__init__()
self.add(expr)
def copy(self):
n2 = ThrowNode(self[0])
self.copy_basic(n2)
self.copy_children(n2)
return n2
def gen_js(self, tlevel):
return self.s("throw ") + self.children[0].gen_js(tlevel);
def extra_str(self):
return ""
class IncDec(Node):
def __init__(self, expr):
super(IncDec, self).__init__()
self.add(expr)
def extra_str(self):
return ""
def copy(self):
n2 = IncDec(self[0])
self.copy_basic(n2)
self.copy_children(n2)
return n2
class PreInc(IncDec):
def gen_js(self, tlevel):
return self.s("++") + self.children[0].gen_js(tlevel)
def copy(self):
n2 = PreInc(self[0])
self.copy_basic(n2)
self.copy_children(n2)
return n2
class PostInc(IncDec):
def gen_js(self, tlevel):
return self.children[0].gen_js(tlevel) + self.s("++")
def copy(self):
n2 = PostInc(self[0])
self.copy_basic(n2)
self.copy_children(n2)
return n2
class PreDec(IncDec):
def gen_js(self, tlevel):
return self.s("--") + self.children[0].gen_js(tlevel)
def copy(self):
n2 = PreDec(self[0])
self.copy_basic(n2)
self.copy_children(n2)
return n2
class PostDec(IncDec):
def gen_js(self, tlevel):
return self.children[0].gen_js(tlevel) + self.s("--")
def copy(self):
n2 = PostDec(self[0])
self.copy_basic(n2)
self.copy_children(n2)
return n2
class ContinueNode (Node):
def gen_js(self, tlevel):
return self.s("continue")
def copy(self):
n2 = ContinueNode()
self.copy_basic(n2)
self.copy_children(n2)
return n2
class BreakNode (Node):
def gen_js(self, tlevel):
return self.s("break")
def copy(self):
n2 = BreakNode()
self.copy_basic(n2)
self.copy_children(n2)
return n2
class ClassMember (IdentNode):
def __init__(self, name):
IdentNode.__init__(self, name)
self.modifiers = set()
self.type = None;
def gen_js(self, tlevel):
s = ""
for m in self.modifiers:
s += self.s(m + " ")
s += self.s(self.val)
if len(self) > 0:
s += self.s(" = ") + self[0].gen_js(0)
return s
class MethodNode(FunctionNode):
def __init__(self, name, is_static=False):
FunctionNode.__init__(self, name, glob.g_line)
self.is_static = is_static
#self[0] : params
#self[1] : statementlist
def gen_js(self, tlevel):
s = ""
if self.is_static:
s += "static "
s += self.s(self.name + "(")
for i, c in enumerate(self[0]):
if i > 0: s += c.s(", ")
s += c.gen_js(0)
s += ") {\n"
s += self[1].gen_js(tlevel)
s += self.s(tab(tlevel-1) + "}")
return s
class MethodGetter(MethodNode):
def __init__(self, name, is_static=False):
MethodNode.__init__(self, name, is_static)
#getters do not take any function parameters,
#but since we ultimately inherit
#from FunctionNode we add an empty param list
#here.
self.add(ExprListNode([]))
def gen_js(self, tlevel):
s = self.s("get " + self.name + "(")
for i, c in enumerate(self[0]):
if i > 0: s += c.s(", ")
s += c.gen_js(0)
s += ") {\n"
s += self[1].gen_js(tlevel)
s += self.s(tab(tlevel-1) + "}")
return s
class MethodSetter(MethodNode):
def __init__(self, name, is_static=False):
MethodNode.__init__(self, name, is_static)
def gen_js(self, tlevel):
s = self.s("set " + self.name + "(")
for i, c in enumerate(self[0]):
if i > 0: s += c.s(", ")
s += c.gen_js(0)
s += ") {\n"
s += self[1].gen_js(tlevel)
s += self.s(tab(tlevel-1) + "}")
return s
class ClassNode(Node):
def __init__(self, name, parents):
Node.__init__(self)
self.name = name
self.parents = parents
def gen_js(self, tlevel):
t1 = tab(tlevel)
t2 = tab(tlevel+1)
s = self.s("class " + self.name + " ")
if self.parents != None and len(self.parents) > 0:
s += self.s("extends ")
for i, p in enumerate(self.parents):
if i > 0: s += self.s(", ")
s += p.gen_js(0)
s += self.s(" {\n")
for c in self:
s += t1 + c.gen_js(tlevel+1) + "\n"
s += "}"
return s
class TypedClassNode(Node):
def __init__(self, name, parent=None):
Node.__init__(self)
self.name = name
self.cls_parent = parent
self.getters = {}
self.setters = {}
self.methods = {}
self.props = {}
self.childmap = {}
self.size = None
def start(self, typespace):
"""
        propagate self.getters/setters/methods/props
"""
for c in self.children:
if type(c) == VarDeclNode:
if c.val in self.props:
typespace.error("duplicate property " + c.val, c)
self.props[c.val] = c
self.childmap[c.val] = c
elif type(c) == MethodNode:
if c.name in self.methods:
typespace.error("duplicate method " + c.name, c)
self.methods[c.name] = c
self.childmap[c.name] = c
elif type(c) == MethodGetter:
if c.name in self.getters:
typespace.error("duplicate getter " + c.name, c)
self.getters[c.name] = c
self.childmap[c.name] = c
elif type(c) == MethodSetter:
if c.name in self.setters:
typespace.error("duplicate setter " + c.name, c)
self.setters[c.name] = c
self.childmap[c.name] = c
g = self.getters; s = self.setters; m = self.methods; p = self.props
for k in g:
if k in m or k in p:
typespace.error(k + " is already defined", g[k])
for k in s:
if k in m or k in p:
typespace.error(k + " is already defined", s[k])
for k in m:
if k in g or k in s or k in p:
typespace.error(k + " is already defined", m[k])
for k in p:
if k in s or k in m or k in g:
typespace.error(k + " is already defined", p[k])
def extra_str(self):
if self.cls_parent != None:
return ("%s extends %s" % (self.name, self.cls_parent))
else:
return self.name
def gen_js(self, tlevel):
return ""
class FinallyNode (Node):
def __init__(self):
Node.__init__(self)
def gen_js(self, tlevel):
s = self.s("finally {\n")
s += self[0].gen_js(tlevel+1)
s += tab(tlevel) + "}\n"
return s
class ExportNode(Node):
def __init__(self, name, is_default=False): #first child is whats exported
Node.__init__(self)
self.name = name
self.bindname = name
self.is_default = is_default
def extra_str(self):
if self.bindname != self.name:
return self.name + " as " + self.bindname
return self.name
#just pass through
def gen_js(self, tlevel=0):
ret = ""
for c in self.children:
ret += c.gen_js(tlevel)
return ret
class ExportNameNode(Node):
def __init__(self): #children is list of ExportIdents
Node.__init__(self)
class ExportFromNode(Node):
def __init__(self, modname): #children are ExportIdent's of what to export
Node.__init__(self)
self.name = modname
class ExportIdent(IdentNode):
def __init__(self, name, binding=None):
if binding == None: binding = name
IdentNode.__init__(self, name)
self.bindname = binding
class ImportNode(Node):
'''
first node is always from clause
'''
def __init__(self):
Node.__init__(self)
self.add(StrLitNode(""))
class ImportDeclNode(Node):
def __init__(self, name, bindname=None):
Node.__init__(self);
self.import_all = False
if bindname != None:
self.bindname = bindname;
else:
self.bindname = name
self.name = name;
def extra_str(self):
return str(self.name) + " as " + str(self.bindname)
def gen_js(self, tlevel):
return ""
def copy(self):
        n2 = ImportDeclNode(self.name, self.bindname)
self.copy_basic(n2)
self.copy_children(n2)
return n2
def node_is_class(node):
if type(node) != FunctionNode:
return False
return node.class_type in ["class", "array"]
def func_is_class(node):
if type(node) != FunctionNode:
return False
return node.class_type in ["class", "array"]
def line_print(s, do_print=True):
lines = s.split("\n")
s2 = ""
for i, l in enumerate(lines):
s2 += "%d %s\n" % (i+1, l)
if do_print:
print(s2)
return s2
```
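Since every node class above implements `gen_js()`, trees built by hand serialize straight back to JavaScript. A hedged sketch, assuming `js_global.glob` provides usable line/file defaults outside of a parse:
```python
from js_ast import AssignNode, BinOpNode, IdentNode, NumLitNode

stmt = AssignNode(IdentNode("x"),
                  BinOpNode(IdentNode("y"), NumLitNode(2.0), "+"))
print(stmt.gen_js(0))   # -> x = y+2.0
```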
#### File: tools/extjs_cc/js_generators.py
```python
from js_process_ast import traverse, traverse_i, null_node, \
find_node, flatten_statementlists, \
kill_bad_globals
from js_ast import *
from js_cc import js_parse, unpack_for_c_loops, combine_if_else_nodes
import sys, os, time, math, struct, io, imp
typespace = None
debug_gen = False
class Frame (list):
def __init__(self, input=[], parent=None, node=None):
super(Frame, self).__init__(input)
self.parent = parent
self.node = node
self.locals = {}
self.leaf = False
self.pop_trystack = False
self.paths = []
def append(self, item):
if type(item) == Frame:
item.parent = self
else:
item.frame = self
super(Frame, self).append(item)
def prepend(self, item):
if type(item) == Frame:
item.parent = self
else:
item.frame = self
super(Frame, self).insert(0, item)
def replace(self, i1, i2):
self[self.index(i1)] = i2
if type(i2) == Frame:
i2.parent = self
else:
i2.frame = self
def insert(self, i, item):
if type(item) == Frame:
item.parent = self
else:
item.frame = self
super(Frame, self).insert(i, item)
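# Editor's note: Frame is a list subclass whose append/insert/replace keep
# back-references consistent -- nested Frames get .parent, plain AST nodes
# get .frame. For example:
#   root = Frame(node=some_function_node)
#   sub = Frame(node=some_if_node)
#   root.append(sub)       # sets sub.parent = root
#   root.append(stmt)      # sets stmt.frame = root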
def print_frames(frames, tlevel=0):
tstr = tab(tlevel)
tstr2 = tab(tlevel+1)
s = ""
for f in frames:
if type(f) == Frame:
if f.node != None:
nstr = "%s %d " % (f.node.get_line_str(), f.label)
else:
nstr = str(f.label) + " "
s += tstr + nstr + "{\n" + print_frames(f, tlevel+1)
s += tstr + "}\n";
else:
s += tstr + f.get_line_str() + "\n"
if tlevel == 0:
print(s)
return s
def visit_yields(node):
p = node
while not null_node(p) and type(p) != FunctionNode:
p = p.parent
if null_node(p):
typespace.error("yield keyword only valid within functions")
p.is_generator = True
def node_has_yield(node):
if type(node) == YieldNode:
return True
for c in node.children:
if type(c) == FunctionNode:
continue
ret = node_has_yield(c)
if ret: return True
return False
def visit_generators(node):
if not node.is_generator: return
def _remove_this(n):
if n.val != "this": return
if type(n.parent) != BinOpNode or n.parent.op != ".":
#typespace.error("Can only reference members of 'this' in generators");
n.val = "__gen_this2"
else:
n.val = "__gen_this2"
#n.parent.parent.replace(n.parent, n.parent[1])
def set_cur(n):
if type(n) in [IfNode, WhileNode,
DoWhileNode, ForLoopNode, CatchNode]:
n._cur = 1;
n._startcur = 1;
else:
n._cur = 0
n._startcur = 0
n._start = True
n._has_yield = node_has_yield(n)
for c in n:
set_cur(c)
def prior_if(n):
if n.parent == None: return None
sl = n.parent
i = sl.children.index(n)-1
while 1:
while i >= 0:
if type(sl[i]) == IfNode:
break
i -= 1
if i >= 0 or null_node(n.parent): break
i = sl.parent.children.index(sl);
sl = sl.parent;
if i < 0:
typespace.error("Orphaned else node", n)
sys.exit(-1)
return sl[i]
def prior_try(n):
if n.parent == None: return None
sl = n.parent
i = sl.children.index(n)-1
while 1:
while i >= 0:
if type(sl[i]) == TryNode:
break
i -= 1
if i >= 0 or null_node(n.parent): break
i = sl.parent.children.index(sl);
sl = sl.parent;
if i < 0:
typespace.error("Orphaned catch node", n)
sys.exit(-1)
return sl[i]
def is_stype(n):
ret = type(n) in stypes # and (n._has_yield or n.parent._has_yield)
return ret
if type(n) == CatchNode:
ret |= prior_try(n)._has_yield
if type(n) == ElseNode:
ret |= prior_if(n)._has_yield
if type(n) in [IfNode, ElseNode]:
p5 = n.parent
while not null_node(p5):
if hasattr(p5, "_has_yield") and p5._has_yield:
ret = True;
break
p5 = p5.parent
return ret
combine_if_else_nodes(node)
traverse(node, ForCNode, unpack_for_c_loops, exclude=[FunctionNode], copy_children=True);
traverse(node, IdentNode, _remove_this)
traverse(node, VarDeclNode, _remove_this)
frames = frame = Frame(node=node)
stack = [c for c in node.children[1:]]
stack.reverse()
stypes = set([ForLoopNode, WhileNode, DoWhileNode, IfNode,
ElseNode, TryNode, CatchNode])
for c in stack:
set_cur(c)
while len(stack) > 0:
n = stack.pop(-1)
if is_stype(n) or type(n) == StatementList:
if n._start:
if type(n) != StatementList:
f = Frame(node=n)
frame.append(f)
frame = f
n._start = False
if n._cur < len(n.children):
stack.append(n)
stack.append(n[n._cur])
n._cur += 1
elif type(n) != StatementList:
frame = frame.parent
else:
frame.append(n)
def compact_frames(frames):
i = 0
frm = None
while i < len(frames):
f1 = frames[i]
if type(f1) == YieldNode:
frm = None
if type(f1) != Frame:
if frm == None:
frm = Frame()
frames.insert(i, frm)
frm.parent = frames
i += 1
frames.remove(f1)
i -= 1
frm.append(f1)
else:
compact_frames(f1)
frm = None
if type(f1) == YieldNode:
frm = None
i += 1
def label_frames(frames, cur=None):
if cur == None: cur = [0]
frames.label = cur[0]
cur[0] += 1
for f in frames:
if type(f) == Frame:
if f.node != None:
f.node.frame = f
label_frames(f, cur)
else:
f.frame = f
def prop_frame_refs(node, f):
if hasattr(node, "frame"): f = node.frame
else: node.frame = f
for c in node.children:
prop_frame_refs(c, f)
def apply_frame_scope(n, scope, frames):
if type(n) == IdentNode:
if n.val in scope:
n.val = "scope.%s_%d" % (n.val, scope[n.val])
else:
p = n.parent
n2 = n
#check for implicit declarations within catch and loop nodes
while not null_node(p):
if type(p) in [CatchNode, WhileNode, ForLoopNode]: break
n2 = p
p = p.parent
if not null_node(p) and n2 == p[0]:
scope[n.val] = frames.label
n.val = "scope.%s_%d" % (n.val, scope[n.val])
elif type(n) == VarDeclNode:
n.local = False;
if "local" in n.modifiers: n.modifiers.remove("local")
if hasattr(n.parent, "_c_loop_node"):
frames = n.parent._c_loop_node.frame
#print("yay", n.parent._c_loop_node.frame.label)
if n.val not in scope:
scope[n.val] = frames.label
if n.val in scope:
n.val = "scope.%s_%d" % (n.val, scope[n.val])
for c in n.children:
#ignore expr functions, but not nested functions?
if type(c) == FunctionNode and type(c.parent) == AssignNode: continue
if type(n) == BinOpNode and n.op == "." and c == n[1] and type(c) == IdentNode:
continue
if type(n) == FuncCallNode and type(c) == IdentNode and c == n[0]:
continue
apply_frame_scope(c, scope, frames)
def frame_scope(frames, scope, depth=0):
frames.scope = scope
for f in frames:
ss = "-"
fstr = ""
if type(f) == Frame:
if f.node != None:
fstr = f.node.get_line_str()
else:
if type(f[0]) == Frame: fstr = f[0].node.get_line_str()
else: fstr = f[0].get_line_str()
if f.node != None:
ss = "+"
scope2 = dict(scope)
for i in range(f.node._startcur):
apply_frame_scope(f.node[i], scope2, f)
frame_scope(f, scope2, depth+1)
else:
frame_scope(f, scope, depth)
else:
fstr = f.get_line_str()
apply_frame_scope(f, scope, frames)
scope = {}
for a in node.children[0]:
scope[a.val] = 0
compact_frames(frames)
label_frames(frames)
prop_frame_refs(node, frames)
frame_scope(frames, scope)
#print_frames(frames)
def frames_validate(frames):
def gen_frame_validate(frames, tlevel=0):
s = ""
tstr = tab(tlevel+1)
tstr2 = tab(tlevel+2)
for f in frames:
if type(f) == Frame:
if f.node != None:
cs = f.node.children
                        f.node.children = f.node.children[:f.node._startcur]
f.node.add(ExprNode([]))
c = f.node.gen_js(tlevel+1).split("\n")[0].replace("{", "").replace("\n", "").replace("}", "").strip()
if c.endswith(";"): c = c[:-1]
s += tstr + c + " {\n"
f.node.children = cs
s += gen_frame_validate(f, tlevel+1)
if f.node != None:
s += tstr + "}\n"
else:
c = tstr + f.gen_js(tlevel+2)
s += c
if c.strip().endswith("}") == 0 and c.strip().endswith(";") == 0:
s += ";"
s += "\n"
if tlevel == 0:
c = node.gen_js(0).split("\n")[0] + "\n"
s = c + s + "}\n"
return s
#print(node.gen_js(0))
#print(scope)
#print_frames(frames)
s = gen_frame_validate(frames)
s2 = js_parse(s).gen_js(0).strip()
s = node.gen_js(0).strip()
s = js_parse(s, print_stack=False).gen_js(0).strip()
print(s==s2)
if s != s2:
import difflib
print(dir(difflib))
d = difflib.ndiff(s.split("\n"), s2.split("\n"))
ds = ""
for l in d:
ds += l + "\n"
#print(ds)
line_print(s)
line_print(s2)
#frames_validate(frames)
flatframes = []
def flatten_frames(frames):
flatframes.append(frames)
for f in frames:
if type(f) == Frame:
flatten_frames(f)
flatten_frames(frames)
#print([f.label for f in flatframes])
def frames_transform(frames, node2):
scope = frames.scope
node2 = FunctionNode(node.name, node.lineno)
node2.add(ExprListNode([]))
for c in node.children[0]:
node2[0].add(IdentNode(c.val))
frames2 = frames
for j, frames in enumerate(flatframes[1:]):
p = frames.parent
f = frames
frames.return_frame = 0
frames.return_frame_parent = 0
i = p.index(f)
while i >= len(p)-1 and p.parent != None:
f = p
p = p.parent
i = p.index(f)
if p.parent == None:
frames.return_frame = 0
frames.return_frame_parent = p.label
else:
frames.return_frame = p[i+1].label
frames.return_frame_parent = p.label
def f_name(f):
return "frame_%d" % f.label
def f_ref(f):
return "this.frame_%d" % f.label
def f_raw_next(f):
if f.parent == None:
f = Frame()
f.label = len(flatframes)
return f
while f.parent != None:
i = f.parent.index(f)+1
while i < len(f.parent):
if type(f.parent[i]) == Frame:
return f.parent[i]
i += 1
f = f.parent
f = Frame()
f.label = len(flatframes)
return f
def f_next(f, ignore_loops=False):
if f.parent == None:
if debug_gen:
print("no f.parent! make frame");
f = Frame()
f.label = len(flatframes)
return f
while f.parent != None:
i = f.parent.index(f)+1
while i < len(f.parent):
if type(f.parent[i]) == Frame:
if type(f.parent[i].node) not in [CatchNode, ElseNode]:
return f.parent[i]
i += 1
if not ignore_loops and f.parent != None and \
type(f.parent.node) in \
[WhileNode, DoWhileNode, ForLoopNode]:
if debug_gen:
print("looper!", f.label, f.parent.label)
return f.parent
f = f.parent
if debug_gen:
print("made frame!", len(flatframes))
f = Frame()
f.label = len(flatframes)
return f
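    # Division of labor between the two lookups above: f_raw_next returns the
    # literal next frame in document order, catch and else frames included,
    # while f_next skips catch/else frames and, unless ignore_loops is set,
    # re-enters a parent while/do-while/for frame so the loop body can repeat.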
def f_first(f):
for f2 in f:
if type(f2) == Frame:
return f2
#return f
def f_last(f):
return f[-1]
def has_parent(f, p):
while f != p and f != None:
f = f.parent
return f == p
def find_exit_points(f, p=None, vset=None):
stack = []
if p == None: p = f
if vset == None: vset = set()
lst = []
"""
lst = []
for f2 in f:
if type(f2) == Frame:
for f3 in f2.paths:
if type(f3) == Frame:
if not has_parent(f3, p) and f3.label not in vset:
lst.append(f3)
vset.add(f3.label)
lst += find_exit_points(f3, p, vset)
else:
continue
"""
for f2 in f.paths:
if not has_parent(f2, p) and f2.label not in vset:
lst.append(f)
vset.add(f.label)
else:
lst += find_exit_points(f2, p, vset)
for f in lst:
print(f.label)
#sys.exit()
return lst
tot = len(node)-1
for i in range(tot):
node.pop(1)
def param_var(k):
for c in node[0]:
val = c.gen_js(0)
val = val.strip()
k = k.strip()
if k == val: return True
return False
#build generator state data
scopestr = "{"
for k in scope:
if scopestr != "{": scopestr += ", ";
if param_var(k):
scopestr += "%s_%i : %s" % (k, scope[k], k);
else:
scopestr += "%s_%i : %s" % (k, scope[k], "undefined");
scopestr += "}"
node.add(js_parse("this.scope = $s;", [scopestr], start_node=AssignNode))
node.add(js_parse("this.ret = {done : false, value : undefined};", start_node=AssignNode))
node.add(js_parse("this.state = 1;", start_node=AssignNode))
node.add(js_parse("this.trystack = [];", start_node=AssignNode))
node.add(js_parse("""
this.next = function() {
var ret;
var stack = this.trystack;
try {
ret = this._next();
} catch (err) {
if (stack.length > 0) {
var item = stack.pop(stack.length-1);
this.state = item[0];
this.scope[item[1]] = err;
return this.next();
} else {
throw err;
}
}
return ret;
}""", start_node=AssignNode))
node.add(js_parse("""
this.push_trystack = function(catchstate, catchvar) {
this.trystack.push([catchstate, catchvar]);
}""", start_node=AssignNode))
node.add(js_parse("""
this.pop_trystack = function() {
this.trystack.pop(this.trystack.length-1);
}""", start_node=AssignNode))
#build next function
keynode = IdentNode("$__state");
sn = SwitchNode(keynode);
slist = js_parse("var $__ret = undefined; var $__state = this.state; var scope = this.scope;");
slist2 = StatementList()
slist2.add(sn)
wn = WhileNode(BinOpNode(IdentNode("$__state"), NumLitNode(len(flatframes)), "<"))
wn.add(slist2)
wn[1].add(js_parse("""
if ($__ret != undefined) {
break;
}
""", start_node=IfNode));
slist.add(wn);
slist.add(js_parse("""
if ($__ret != undefined) {
this.ret.value = $__ret.value;
} else {
this.ret.done = true;
this.ret.value = undefined;
}
this.state = $__state;
return this.ret;
"""));
next = js_parse("this._next = function() { };", start_node=AssignNode)
next[1].add(slist)
node.add(next)
sn.line = slist.line = node.line
sn.lexpos = slist.lexpos = node.lexpos
#find leaves
for f in flatframes:
if len(f) > 0:
f.leaf = True
for c in f:
if type(c) == Frame:
f.leaf = False
break
#move control frame of dowhile statements to
#after their statement body frames.
visit = set()
for i in range(len(flatframes)):
if i in visit: continue
f = flatframes[i]
if f.leaf or type(f.node) != DoWhileNode: continue
f2 = f_first(f)
if f2 == None: continue
last = f2.label
while (f2 != f_next(f) and f2 != f):
last = f2.label
f2 = f_next(f2)
last = ((last-1) if last > i else last) + 1
flatframes.pop(i);
flatframes.insert(last, f);
visit.add(last)
for i, f in enumerate(flatframes):
f.label = i
#set up case statements
for f in flatframes:
n2 = CaseNode(NumLitNode(f.label))
sl = StatementList()
if debug_gen:
sl.add(js_parse("console.log(\"in frame $s\");", [f.label]));
#set line/lexpos data
if f.node != None:
n2.line = f.node.line
n2.lexpos = f.node.lexpos
sl.line = f.node.line
sl.lexpos = f.node.lexpos
f.case_sl = sl
n2.add(sl)
#add to switch statement
sn.add(n2)
def set_linepos(n, line, lexpos):
n.line = line
n.lexpos = lexpos
for c in n:
set_linepos(c, line, lexpos)
for f in flatframes:
if f.leaf:
for c in f:
c.frame = f
else:
f.node.frame = f
#handle loop breaks/continues
visit = set()
def visit_breaks(n):
wn = n
if n in visit: return
visit.add(n)
while type(wn) not in [WhileNode, DoWhileNode, ForLoopNode]:
if type(wn) == SwitchNode:
typespace.error("Switches in generators not supported yet.", wn);
wn = wn.parent
if not wn:
typespace.error("Invalid break statement.", n);
if "frame" not in wn.__dict__:
return
f = wn.frame
i = n.parent.index(n)
n2 = js_parse("$s=$s;", ("$__state", f_next(f).label))
if "frame" in n.__dict__:
n.frame.insert(n.frame.index(n), n2)
else:
n.parent.insert(i, n2)
def visit_continues(n):
if n in visit: return
visit.add(n)
wn = n
while wn != None and (type(wn) not in [WhileNode, DoWhileNode, ForLoopNode]):
wn = wn.parent
if wn == None:
typespace.error("Invalid continue statement.", n);
if "frame" not in wn.__dict__:
return
f = wn.frame
i = n.parent.index(n)
n2 = js_parse("$s=$s;", ("$__state", f.label));
n3 = BreakNode();
visit.add(n3)
n.parent.remove(n)
n.frame.replace(n, n2)
n.frame.insert(n.frame.index(n2)+1, n3)
def handle_yields(node):
slist = js_parse("""$__ret = this.ret;""");
is_empty = type(node[0]) == ExprNode and len(node[0]) == 0
if is_empty:
slist.add(js_parse("""$s.value = undefined;""", ["$__ret"], start_node=AssignNode));
else:
slist.add(js_parse("""$s.value = $n;""", ["$__ret", node[0]], start_node=AssignNode))
slen = len(slist)
#print(slist)
if node in node.parent:
i = node.parent.index(node)
node.parent.remove(node)
for j in range(slen):
node.parent.insert(i, slist[slen-j-1])
i = node.frame.index(node)
node.frame.remove(node)
for j in range(slen):
node.frame.insert(i, slist[slen-j-1])
#handle loop breaks
for f in flatframes:
if not f.leaf: continue;
for c in f:
traverse(c, BreakNode, visit_breaks, exclude=FunctionNode)
pass
#handle loop continues
for f in flatframes:
if not f.leaf: continue;
for c in f:
traverse(c, ContinueNode, visit_continues, exclude=FunctionNode)
pass
#handle yields
for f in flatframes:
if not f.leaf: continue
for c in f:
traverse(c, YieldNode, handle_yields, exclude=FunctionNode);
def has_common_parent(n1, n2, p):
while n1 != p and n1 != None:
n1 = n1.parent
while n2 != p and n2 != None:
n2 = n2.parent
if n1 == n2 and n1 == p: return True
else: return False
#build control code
for f in flatframes:
if f.leaf: continue
n = f.node
sl = f.case_sl
if type(n) == IfNode:
f2 = f_first(f)
if f2 == None: #empty if node
f2 = Frame()
f2.label = len(flatframes)
if len(n) > 2:
f3 = n[2].frame
else:
f3 = f_next(f)
f.paths += [f2, f3]
n2 = js_parse("""
$s = ($n) ? $s : $s;
""", ["$__state", n[0], f2.label, f3.label]);
set_linepos(n2, n.line, n.lexpos);
sl.add(n2)
elif type(n) == ElseNode:
f2 = f_first(f)
if f2 == None: #empty else node
f2 = Frame()
f2.label = len(flatframes)
f.paths += [f2]
n2 = js_parse(";$s = $s;", ("$__state", str(f2.label)))
set_linepos(n2, n.line, n.lexpos);
sl.add(n2)
elif type(n) == WhileNode:
f.paths += [f_first(f), f_next(f, False)]
n2 = js_parse("""
$s = ($n) ? $s : $s;
""", ("$__state", n[0], f_first(f).label, f_next(f, False).label));
set_linepos(n2, n.line, n.lexpos);
sl.add(n2)
elif type(n) == ForLoopNode:
#okay, why did I say to ignore loops here?
f.paths += [f_first(f), f_next(f, False)]
if type(n[0]) == ForCNode:
n2 = js_parse("""
$s = ($n) ? $s : $s;
""", ("$__state", n[0][1], f_first(f).label, f_next(f, False).label));
set_linepos(n2, n.line, n.lexpos);
sl.add(n2)
else:
typespace.error("process_generators expects unpacked iterator for loops")
elif type(n) == DoWhileNode:
f.paths += [f_first(f), f_next(f, False)]
n2 = js_parse("""
$s = ($n) ? $s : $s;
""", ("$__state", n[0], f_first(f).label, f_next(f, False).label), start_node=AssignNode)
set_linepos(n2, n.line, n.lexpos)
sl.add(n2)
elif type(n) == TryNode:
f.paths += [f_first(f)]
cn = f_raw_next(f).node
if type(cn) != CatchNode:
typespace.error("Missing catch block", f.node)
ident = cn[0].gen_js(0).replace("scope.", "")
n2 = js_parse("$s = $s;", ("$__state", f_first(f).label), start_node=AssignNode)
n3 = js_parse("this.push_trystack($s, \"$s\");", [f_raw_next(f).label, ident])
set_linepos(n2, n.line, n.lexpos)
set_linepos(n3, n.line, n.lexpos)
sl.add(n2)
sl.add(n3)
elif type(n) == CatchNode:
f.paths += [f_first(f)]
n2 = js_parse("$s = $s;", ("$__state", f_first(f).label), start_node=AssignNode)
set_linepos(n2, n.line, n.lexpos)
sl.add(n2)
#build leaf code
for f in flatframes:
if not f.leaf: continue
sl = f.case_sl
for n in f:
sl.add(n)
f2 = f_next(f)
sl.add(js_parse(";$s=$s;", ("$__state", str(f2.label))))
f.paths += [f2]
#add in pop_trystack calls
for f in flatframes:
if type(f.node) != TryNode: continue
f2 = f_last(f)
ps = find_exit_points(f)
for f2 in ps:
f2.case_sl.add(js_parse("this.pop_trystack();"))
#add case breaks
for f in flatframes:
bn = BreakNode()
bn.line = f.case_sl.line
bn.lexpos = f.case_sl.lexpos
f.case_sl.add(bn);
#add final state case
cn = CaseNode(NumLitNode(len(flatframes)))
sl2 = StatementList()
sl2.add(BreakNode())
cn.add(sl2)
sn.add(cn)
#default case
df = DefaultCaseNode()
df.add(js_parse("console.log(\"Generator state error\"); console.trace();"))
df[0].add(BreakNode())
sn.add(df)
outernode = js_parse("""
function() {
var __gen_this2 = this;
function _generator_iter() {
}
return new _generator_iter();
}
""", start_node=FunctionNode);
#add a self-referencing [Symbol.iterator] method
n = js_parse("""
this[Symbol.iterator] = function() {
return this;
}
""");
for c in n:
node.add(c);
#and, a es5.1-style forEach method
n = js_parse("""
this.forEach = function(callback, thisvar) {
if (thisvar == undefined)
thisvar = self;
var _i = 0;
while (1) {
var ret = this.next();
if (ret == undefined || ret.done || (ret._ret != undefined && ret._ret.done))
break;
callback.call(thisvar, ret.value);
if (_i++ > 100) {
console.log("inf loop", ret);
break;
}
}
}
""");
for c in n:
node.add(c);
outernode.name = node.name;
if node.is_anonymous:
outernode.is_anonymous = True
outernode.replace(outernode[0], node[0])
node.parent.replace(node, outernode);
node2 = outernode[2]
cs = node[:]
for c in cs[1:]:
node2.add(c)
#print(outernode, "\n\n\n", outernode[2])
def bleh():
for frames in flatframes:
fname = f_name(frames)
n = js_parse("""
function $s1(scope) {
if (_do_frame_debug) console.log("in $s1");
}""", (fname), start_node=FunctionNode)
if type(n[1]) != StatementList:
n.replace(n[1], StatementList())
n = n[1]
func = n
while type(func) != FunctionNode:
func = func.parent
excl = (type(frames.node) == StatementList and type(frames.parent.node) == FunctionNode)
if frames.node != None and not excl and type(frames.node) != FunctionNode:
f = frames
sl = StatementList()
f.node[f.node._startcur] = sl
frames.funcnode = func
frames.subnode = frames.funcnode
local_frames = "["
totframes = 0
for i, f in enumerate(frames):
if type(f) != Frame:
frames.subnode.add(f)
frames.leaf = True
else:
frames.leaf = False
if len(local_frames) > 1: local_frames += ", "
local_frames += f_ref(f) #.replace("this.", "")
totframes += 1
if f.node != None and type(f.node) != FunctionNode:
if len(f.node.children) > f.node._startcur + 1:
do_conv(f.node, f)
if frames.leaf:
f2 = f_next(frames)
f2 = f2.label if f2 != -1 else -1
frames.subnode.add(js_parse("return [$i, undefined];", [f2], start_node=ReturnNode));
local_frames = "%s_frames = "%f_ref(frames) + local_frames + "];"
frames.frames = js_parse(local_frames)
frames.totframes = totframes
def build_next(f, parent=None):
if type(f) != Frame:
return
subnode = f.subnode
if f.label >= 0: # and f.label < 3:
n2 = js_parse("this.$s1 = 0;", [f_name(f)], start_node=AssignNode)
n2.replace(n2[1], f.funcnode)
f.funcnode.name = "(anonymous)"
f.funcnode.is_anonymous = True
node2.add(n2) #f.funcnode)
if f.totframes > 0:
if f.node != None and type(f.node) == WhileNode:
f2 = f_next(f)
f2 = f2.label if f2 != -1 else -1
n = js_parse("""
if (!"placeholder") {
return [$i1, undefined];
}
""", [f2])
if n == None:
typespace.error("internal error", subnode);
n2 = find_node(n, StrLitNode);
n2.parent.replace(n2, f.node[0])
subnode.add(n)
f2 = f_first(f);
n.add(js_parse("return [$i, undefined];", [f2.label], start_node=ReturnNode))
elif f.node != None and type(f.node) == TryNode:
n = StatementList()
if n == None:
typespace.error("internal error", subnode);
f3 = f_raw_next(f)
while f3 != -1 and type(f3.node) != CatchNode:
f3 = f_raw_next(f3);
if f3 == -1:
typespace.error("Orphaned try block", f.node)
f3name = "_nfothing"
if len(f3.node) > 0:
f3name = f3.node[0].gen_js(0).replace("scope.", "")
n.add(js_parse("""
this.trystack.push([$i, "$s"]);
""", [f3.label, f3name]))
f2 = f_first(f);
n.add(js_parse("return [$i, undefined];", [f2.label], start_node=ReturnNode))
subnode.add(n)
f2.pop_trystack = True
elif f.node != None and type(f.node) == IfNode:
f2 = f_first(f)
f1 = f_raw_next(f)
while type(f1.node) != ElseNode and f1.label != len(flatframes):
f1 = f_raw_next(f1)
if f1.label == len(flatframes):
f1 = f_next(f)
n = js_parse("""
if (!("placeholder")) {
return [$i1, undefined];
} else {
return [$i2, undefined];
}
""", [f1.label, f2.label]);
n2 = find_node(n, StrLitNode)
n2.parent.replace(n2, f.node[0].copy())
if n == None:
typespace.error("internal error", subnode);
f2 = f_first(f);
n.add(js_parse("return [$i, undefined];", [f2.label], start_node=ReturnNode))
subnode.add(n)
f2.pop_trystack = True
elif f.node != None and type(f.node) == ElseNode:
f2 = f_first(f)
f1 = f_raw_next(f)
while type(f1.node) != ElseNode and f1.label != len(flatframes):
f1 = f_raw_next(f1)
if f1.label == len(flatframes):
f1 = f_next(f)
n = js_parse("""
return [$i1, undefined];
""", [f2.label]);
if n == None:
typespace.error("internal error", subnode);
f2 = f_first(f);
subnode.add(n)
elif f.node != None and type(f.node) == CatchNode:
f2 = f_first(f)
n = js_parse("""
return [$i1, undefined];
""", [f2.label]);
if n == None:
typespace.error("internal error", subnode);
subnode.add(n)
elif f.node != None and type(f.node) == ForLoopNode:
f2 = f_first(f);
f3 = f_next(f)
                f3 = f3.label if f3 != -1 else -1
f2 = f2.label if f2 != -1 else -1
n = js_parse("""
if ($n) {
return [$i, undefined];
} else {
return [$i, undefined];
}
""", [f.node[0][1], f2, f3])
if n == None:
typespace.error("internal error", subnode);
subnode.add(n)
node2.insert(1, js_parse("""
this[Symbol.iterator] = function() {
return this;
}
""")[0])
for f in flatframes:
build_next(f, f.parent)
#process returns from within try nodes
for f in flatframes:
if f.parent != None and type(f.parent.node) == TryNode:
def visit_rets1(n2):
target = n2[0][0][0].val
isyield = n2[0][0][1].val
ni = n2.parent.index(n2)
if target >= f_next(f.parent).label:
n3 = js_parse("this.trystack.pop();")[0]
n2.parent.insert(ni, n3)
traverse(f.subnode, ReturnNode, visit_rets1, copy_children=True);
#process yields
for f in flatframes:
f2 = f.parent
set_yield = None
def visit_rets2(n2):
if set_yield != None:
#print(n2)
n2[0][0].replace(n2[0][0][1], set_yield);
set_yield = find_node(f.subnode, YieldNode);
if set_yield != None:
set_yield.parent.remove(set_yield);
set_yield = ArrayLitNode(ExprListNode([set_yield[0]]))
traverse(f.subnode, ReturnNode, visit_rets2, copy_children=True);
def find_parent_frame(f, ntypes, include_first=True):
p = f
if not include_first:
p = p.parent
while p != None:
if type(p.node) in ntypes:
return p
p = p.parent
return None
#process breaks
for f in flatframes:
f2 = f.parent
def visit_rets3(n2):
p = n2.parent
while not null_node(p) and p != f.subnode:
if type(p) in [WhileNode, DoWhileNode, ForLoopNode]: break
p = p.parent
if p != f.subnode and not null_node(p): return #break corresponds to a loop internal to this frame
p = find_parent_frame(f, [WhileNode, DoWhileNode, ForLoopNode], True)
if p == None:
typespace.error("Invalid break statement (switches within generators aren't supported yet)", n2)
f2 = f_next(p)
n3 = js_parse("return [$i, undefined];", [f2.label], start_node=ReturnNode);
n2.parent.replace(n2, n3)
traverse(f.subnode, BreakNode, visit_rets3, copy_children=True);
#process continues
for f in flatframes:
f2 = f.parent
def visit_rets3(n2):
p = n2.parent
while not null_node(p) and p != f.subnode:
p = p.parent
if p != f.subnode and not null_node(p): return #continue corresponds to a loop internal to this frame
p = f.parent
while p != None:
if type(p.node) in [WhileNode, DoWhileNode, ForLoopNode]:
break;
p = p.parent
if p == None:
typespace.error("Invalid continue statement")
n3 = js_parse("return [$i, undefined];", [p.label], start_node=ReturnNode);
n2.parent.replace(n2, n3)
traverse(f.subnode, ContinueNode, visit_rets3, copy_children=True);
firstnode = js_parse("if (this.first) {\n}", start_node=IfNode)
firstnode2 = js_parse("if (this.first) {\n}", start_node=IfNode)
firstnode.replace(firstnode[1], StatementList())
firstnode2.replace(firstnode2[1], StatementList())
flatframes[0].subnode.add(firstnode);
node2.insert(1, firstnode2[1]);
firstnode = firstnode[1]
firstnode2 = firstnode2[1]
args = list(node.children[0])
for i3 in range(len(args)):
argn = args[i3]
while type(argn) not in [IdentNode, VarDeclNode]:
argn = argn[0]
args[i3] = argn.val
scope = {}
for f in flatframes:
scope.update(f.scope)
s = "{"
j2 = 0
for j, v in enumerate(scope.keys()):
if j2 > 0: s += ", "
j2 += 1
if v in args:
s += "%s:%s" % ("%s_%s"%(v, scope[v]), v)
else:
s += "%s:undefined" % ("%s_%s"%(v, scope[v]))
s += "}"
s = "this.scope = %s;\n" % s
firstnode2.add(js_parse(s)[0])
#ensure all frames have returns
for f in flatframes:
if not find_node(f.subnode, ReturnNode):
f.subnode.add(js_parse("return [$i, undefined];", [f_next(f).label], start_node=ReturnNode));
framelist = "["
for i, f in enumerate(flatframes):
if i > 0: framelist += ", "
framelist += "this.frame_%i" % f.label
framelist = "this.frames = %s];"%framelist
node2.add(js_parse(framelist));
node2.add(js_parse("""
this.cur = 1;
this.trystack = new Array();
this.next = function() {
var ret;
while (this.cur < this.frames.length) {
try {
ret = this.frames[this.cur].call(this, this.scope);
} catch (_generator_error) {
if (this.trystack.length > 0) {
var ts1 = this.trystack.pop();
this.scope[ts1[1]] = _generator_error;
ret = [ts1[0], undefined];
} else {
throw _generator_error;
}
}
if (ret[0] == this.frames.length) {
return {done : true, value : undefined};
break;
}
if (ret[0] == this.cur) {
console.trace();
console.log("YEEK!")
return {done : true, value : undefined};
}
this.cur = ret[0];
if (ret[1] != undefined) {
return {value : ret[1][0], done : false};
} else {
return {value : undefined, done : false};
}
}
}
""", []))
node.parent.replace(node, node2)
def process_generators(result, tspace):
global typespace
typespace = tspace
traverse(result, YieldNode, visit_yields)
traverse(result, FunctionNode, visit_generators)
del_attrs = []
def cleanup_generator_garbage(n):
for a in del_attrs:
if hasattr(n, a):
delattr(n, a)
for c in n.children:
cleanup_generator_garbage(c)
cleanup_generator_garbage(result)
def process_generators_old(result, typespace):
def visit_yields(node):
p = node
while not null_node(p) and type(p) != FunctionNode:
p = p.parent
if null_node(p):
typespace.error("yield keyword only valid within functions")
p.is_generator = True
traverse(result, YieldNode, visit_yields)
def node_has_yield(node):
if type(node) == YieldNode:
return True
for c in node.children:
if type(c) == FunctionNode:
continue
ret = node_has_yield(c)
if ret: return True
return False
def visit_generators(node):
def print_frames(frames, tlevel=0):
tstr = tab(tlevel)
tstr2 = tab(tlevel+1)
s = ""
for f in frames:
if type(f) == Frame:
if f.node != None:
nstr = "%s %d " % (f.node.get_line_str(), f.label)
else:
nstr = str(f.label) + " "
s += tstr + nstr + "{\n" + print_frames(f, tlevel+1)
s += tstr + "}\n";
else:
s += tstr + f.get_line_str() + "\n"
if tlevel == 0:
print(s)
return s
if 0:
file = open("generator_test.html", "w")
file.write("""
<html><head><title>Generator Test</title></head>
<script>
FrameContinue = {1:1};
FrameBreak = {2:2};
""")
file.write(node2.gen_js(3).replace("yield", "return"))
file.write("""
j = 0;
for (var tst in new range(2, 8)) {
console.log(tst);
if (j > 10)
break;
j++;
}
</script>
</html>
""")
file.close()
#print(node2.gen_js(1))
#print_frames(frames2)
traverse(result, FunctionNode, visit_generators)
del_attrs = ["_cur", "_startcur", "frame", "return_frame", "pop_trystack"]
def cleanup_generator_garbage(n):
for a in del_attrs:
if hasattr(n, a):
delattr(n, a)
for c in n.children:
cleanup_generator_garbage(c)
cleanup_generator_garbage(result)
```
#### File: tools/extjs_cc/js_opcode_emit.py
```python
import traceback, sys
from js_ast import *
from js_global import glob, Glob
from js_typespace import *
from js_cc import *
from js_process_ast import *
from js_util_types import *
import os, os.path
from js_type_emit import resolve_types, types_match, templates_match
#rules for registers: 0 and 1 are reserved for our
#function calling convention; 2, 3, and 4 are used
#for unary, binary, and ternary expr operations.
#
#5 is used for shortjmp operations (e.g. if statements, loops, etc.)
MAX_REGISTER = 8
valid_ops = [
"PUSH",
"POP",
"PUSH_UNDEFINED",
"LOAD_FROM_REG",
"LOAD_LOCAL_STACK", #writes top of stack with another stack item, arg is offset
"WRITE_LOCAL_STACK", #writes another stack item with top of stack arg is offset
"LOAD_REG_REF",
"LOAD_REG_INT",
"LOAD_REG_PTR", #register codes all pop values from the stack
"LOAD_REG_PTR_CONST", #reads from argument, not stack
"LOAD_REG_EXTERN_PTR", #is turned into a LOAD_REG_PTR later by linker
"LOAD_REG_UNDEFINED", #load null/void/empty value
"LOAD_REG_FLOAT",
"LOAD_SYMBOL_PTR",
"LOAD_SYMBOL_INT",
"NATIVE_CALL",
"LOAD_SYMBOL_FLOAT",
"WRITE_SYMBOL_REF",
"WRITE_SYMBOL_INT",
"WRITE_SYMBOL_FLOAT",
"LOAD_REG_SYMBOL_PTR",
"LOAD_OPCODE_PTR", #REMEMBER TO RELINK THIS! use for calculating function calls and the like
"LOAD_REF", #loads ref from memory address in passed in register
"LOAD_CONST_REF",
"LOAD_CONST_INT",
"LOAD_CONST_FLOAT",
"LOAD_INT",
"WRITE_INT",
"INT_TO_FLOAT",
"UNDEFINED_TO_ZERO_INT",
"FLOAT_TO_INT",
"LOAD_FLOAT",
"PUSH_REF",
"SHORTJMP", #reads stack offset from its argument
"SHORTJMPTRUE", #jumps if current stack value is true
"SHORTJMPTRUE_REG", #jumps if a register value is true
"SHORTJMPFALSE", #jumps if current stack value is false
"SHORTJMPFALSE_REG", #jumps if a register value is false
"LONGJMP", #reads from a register
"PUSHTRY",
"POPTRY",
"THROW",
"INT_TO_FLOAT",
"FLOAT_TO_INT",
"ARRAY_REF",
"ARRAY_SET",
"ADD_INT",
"SUB_INT",
"MUL_INT",
"DIV_INT",
"MOD_INT",
"BITINV",
"BITAND",
"BITOR",
"BITXOR",
"LSHIFT",
"RSHIFT",
"NEGATE", #reads from register, writes to current stack position
"ADD_FLOAT", #math operations read from registers, but write to stack
"SUB_FLOAT",
"MUL_FLOAT",
"DIV_FLOAT",
"MOD_FLOAT",
"LTHAN_INT",
"GTHAN_INT",
"LTHANEQ_INT",
"GTHANEQ_INT",
"EQ_INT",
"NOTEQ_INT",
"NOT_INT",
"LTHAN_FLOAT",
"GTHAN_FLOAT",
"LTHANEQ_FLOAT",
"GTHANEQ_FLOAT",
"EQ_FLOAT",
"NOTEQ_FLOAT",
"AND",
"OR",
"ADD",
"SUB",
"MUL",
"DIV",
"IN",
]
valid_op_set = set(valid_ops)
class OPCODE:
def __init__(self, code, arg=None, comment=None, blocklevel=0, stacklevel=0):
self.code = code
self.arg = arg #not all opcodes have args
self.comment = comment
self.blocklevel = blocklevel
self.stacklevel = stacklevel
if code not in valid_op_set:
raise RuntimeError("Invalid opcode %s in js_opcode_emit.py"%code)
def __str__(self):
argstr = ""
if (self.arg != None):
argstr = " " + str(self.arg)
cstr = ""
if self.comment != None:
cstr += " //" + str(self.comment)
return tab(self.blocklevel) + str(self.stacklevel) + " " + self.code + argstr + cstr
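# Illustrative use (not from the original source): any code string outside
# valid_ops raises immediately, which catches emitter typos early.
#
#   OPCODE("PUSH", comment="begin assignment")   # ok
#   OPCODE("PSUH")                               # RuntimeError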
typespace = None
op_map = {
"+" : "ADD",
"-" : "SUB",
"/" : "DIV",
"*" : "MUL",
"&" : "BITAND",
"|" : "BITOR",
"<<" : "LSHIFT",
">>" : "RSHIFT",
"<" : "LTHAN",
"<=" : "LTHANEQ",
"==" : "EQ",
"=>" : "GTHANEQ",
">" : "GTHAN",
"||" : "OR",
"&&" : "AND",
"!" : "NOT"
}
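# e.g. node.op "+" on two ints becomes the opcode "ADD_INT" once BinOpNode
# below appends the "_" + type suffix; REF-typed operands keep the bare
# "ADD" name.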
class TypeEmitVisit(NodeVisit):
def __init__(self):
super(TypeEmitVisit, self).__init__()
self.codes = []
self.stacklevels = [0]
self.funcstack = []
self.blocklevel = 0
self.required_nodes = node_types
for n in set(self.required_nodes):
if isinstance(n, TypeNode):
self.required_nodes.remove(n)
def opcode(self, opcode, arg=None, comment=None):
if "PUSH" in opcode and opcode != "PUSHTRY":
self.stacklevels[-1] += 1
if "POP" in opcode and opcode != "POPTRY":
self.stacklevels[-1] -= 1
if len(self.stacklevels) > 0:
slvl = self.stacklevels[-1]
else:
slvl = 0
self.codes.append(OPCODE(opcode, arg, comment, self.blocklevel, slvl))
def StrLitNode(self, node, scope, emit, tlevel):
self.opcode("LOAD_CONST_REF", node.val, "string literal")
def NumLitNode(self, node, scope, emit, tlevel):
if type(node.val) == int:
t = "INT"
else:
t = "FLOAT"
self.opcode("LOAD_CONST_"+t, node.val, "%s literal"%t.lower())
def IdentNode(self, node, scope, emit, tlevel):
if node.val not in scope:
sys.stderr.write("IdentNode %s not in scope.\n"%node.val)
print(scope)
typespace.error("IdentNode %s not in scope.\n"%node.val, node)
obj = scope[node.val]
self._gen_var_read(node, scope, self._get_optype(node.type))
def convert_types(self, t1, t2):
if t1 == "FLOAT" and t2 == "INT":
self.opcode("INT_TO_FLOAT")
else:
self.opcode("FLOAT_TO_INT")
def BinOpNode(self, node, scope, emit, tlevel):
handle_nodescope_pre(node, scope)
if node.op != ".":
op = op_map[node.op]
t1 = self._get_optype(node[0].type)
t2 = self._get_optype(node[1].type)
emit(node[0], scope)
self.opcode("PUSH")
emit(node[1], scope)
if t1 != t2:
if t1 == "REF":
typespace.error("Cannot %s %s types with objects"%(op.lower(), t1.lower()))
if t2 == "REF":
typespace.error("Cannot %s %s types with objects"%(op.lower(), t1.lower()))
self.convert_types(t1, t2)
self.opcode("LOAD_REG_"+t1, 3)
self.opcode("POP")
self.opcode("LOAD_REG_"+t1, 2);
if t1 == "REF": t1 = ""
else: t1 = "_" + t1
self.opcode(op+t1)
else:
raise RuntimeError("member lookup not implemented")
        handle_nodescope_post(node, scope)
def NegateNode(self, node, scope, emit, tlevel):
emit(node[0], scope)
t = self._get_optype(node[0].type)
self.opcode("LOAD_REG_"+t, 2)
self.opcode("NEGATE", 2)
def TypeofNode(self, node, scope, emit, tlevel):
pass
def VarDeclNode(self, node, scope, emit, tlevel):
if node.local:
if len(self.stacklevels) == 0:
typespace.error("Global variable has local flag", node)
node.stack_off = self.stacklevels[-1]
self.opcode("PUSH", comment="declare %s"%node.val)
if node.val in scope and types_match(node.type, scope[node.val].type, typespace):
node.stack_off = scope[node.val].stack_off
if len(self.stacklevels) > 1:
node.local = True
else:
node.local = scope[node.val].local
scope[node.val] = node
if type(node[0]) != ExprNode or len(node[0]) > 0:
n = AssignNode(IdentNode(node.val), node[0], "=")
n.type = node.type
self.AssignNode(n, scope, emit, tlevel)
def _gen_var_read(self, var, scope, optype):
if type(var) not in [IdentNode, VarDeclNode]:
raise RuntimeError("Unimplemented var read/write for type %s"%str(type(var)))
if type(var) == IdentNode:
var = scope[var.val]
if var.local:
self.opcode("LOAD_LOCAL_STACK", var.stack_off-self.stacklevels[-1])
else:
if not optype.startswith("_"):
optype = "_" + optype
self.opcode("LOAD_SYMBOL"+optype, var.val)
def _gen_var_write(self, var, scope, optype):
if type(var) not in [IdentNode, VarDeclNode]:
raise RuntimeError("Unimplemented var read/write for type %s"%str(type(var)))
if type(var) == IdentNode:
var = scope[var.val]
if var.local:
self.opcode("WRITE_LOCAL_STACK", var.stack_off-self.stacklevels[-1])
else:
if not optype.startswith("_"):
optype = "_" + optype
self.opcode("WRITE_SYMBOL"+optype, var.val)
def AssignNode(self, node, scope, emit, tlevel):
self.opcode("PUSH", comment="begin assignment")
emit(node[1], scope);
if node.mode != "=":
op = op_map[node.mode[0]]
t = self._get_optype(node.type)
self.opcode("LOAD_REG_"+t, 2)
self.opcode("PUSH")
self._gen_var_read(node[0], scope, t)
self.opcode("LOAD_REG_"+t, 3)
if t == "REF": t = ""
else: t = "_"+t
self.opcode(op+t)
self._gen_var_write(node[0], scope, self._get_optype(node.type))
if node.mode != "=":
self.opcode("POP");
self.opcode("POP", comment="finish assignment")
def ForLoopNode(self, node, scope, emit, tlevel):
self.blocklevel += 1
handle_nodescope_pre(node, scope)
for c in node.children:
emit(c, scope)
        handle_nodescope_post(node, scope)
self.blocklevel -= 1
def WhileNode(self, node, scope, emit, tlevel):
self.blocklevel += 1
for c in node.children:
emit(c, scope)
self.blocklevel -= 1
def DoWhileNode(self, node, scope, emit, tlevel):
self.blocklevel += 1
for c in node.children:
emit(c, scope)
self.blocklevel -= 1
def ElseNode(self, node, scope, emit, tlevel):
self.blocklevel += 1
for c in node.children:
emit(c, scope)
self.blocklevel -= 1
def IfNode(self, node, scope, emit, tlevel):
self.opcode("PUSH", comment="---begin if")
emit(node[0], scope)
t = self._get_optype(node[0].type)
if t != "INT":
if t == "FLOAT":
self.opcode("FLOAT_TO_INT")
else:
self.opcode("UNDEFINED_TO_ZERO_INT")
self.opcode("LOAD_REG_"+t, 5)
self.opcode("POP")
self.opcode("SHORTJMPFALSE_REG", -1, "if")
jmpcode1 = len(self.codes)-1
self.blocklevel += 1
self.stacklevels.append(self.stacklevels[-1])
emit(node[1], scope)
self.stacklevels.pop(-1)
self.opcode("SHORTJMP", -1, "endif")
jmpcode3 = len(self.codes)-1
if len(node) == 3:
self.stacklevels.append(self.stacklevels[-1])
self.codes[jmpcode1].arg = [5, len(self.codes)-jmpcode1-1]
emit(node[2], scope)
self.stacklevels.pop(-1)
else:
self.codes[jmpcode1].arg = [5, len(self.codes)-jmpcode1-1]
self.codes[jmpcode3].arg = len(self.codes)-jmpcode3-1
self.blocklevel -= 1
def stacklevel(self, func_local=True):
if len(self.stacklevels) > 0:
if func_local:
return self.stacklevels[-1]
else:
lvl = 0
for sl in self.stacklevels:
lvl += sl
return lvl
else:
return 0
def FuncCallNode(self, node, scope, emit, tlevel):
#there are two cases here. one is calling a function
#that is the result of an expression (e.g. member lookup),
#the other is calling a named function, which pushes
#the value itself.
func = node.type
if len(node[1]) != len(func.children[0]):
typespace.error("Wrong number of function arguments", node);
for i, a in enumerate(func[0]):
a2 = node[1][i]
nt = a2.type
if type(a2) == FuncCallNode:
nt = a2.type.type
if type(nt) == IdentNode:
nt = TypeRefNode(nt.val)
if not types_match(a.type, nt, typespace):
typespace.error("Wrong type for argument %i."%(i+1), node);
#XXX REMEMBER TO RELINK THIS!
self.opcode("LOAD_OPCODE_PTR", -2, "return address")
jmpcode = len(self.codes)-1
for a in node[1]:
self.opcode("PUSH")
emit(a, scope)
if type(node[0]) == IdentNode:
self.opcode("LOAD_REG_SYMBOL_PTR", [node[0].val, 0]);
else:
self.opcode("PUSH")
emit(node[0], scope)
self.opcode("LOAD_REG_PTR", 0);
self.opcode("POP")
if func.is_native:
self.opcode("NATIVE_CALL", func.name)
else:
self.opcode("LONGJMP", 0, "call %s"%func.name);
self.codes[jmpcode].arg = len(self.codes)
#decrement locals offset.
#we do this here since the
#called function, not the caller,
#pops the arguments.
self.stacklevels[-1] -= len(func[0])
def FunctionNode(self, node, scope, emit, tlevel):
if node.is_native:
node.opcode_addr = -1
return
node.opcode_addr = len(self.codes)
self.blocklevel += 1
handle_nodescope_pre(node, scope)
self.funcstack.append(node)
self.stacklevels.append(0)
node.stack_start = self.stacklevel()
node.arg_codetypes = odict()
args = list(range(len(node.args)))
for a in node.arg_is:
args[node.arg_is[a]] = a
node.arglist = args
for i, k in enumerate(node.arglist):
a = node.args[k]
if type(a) == BuiltinTypeNode and a.type == "int":
atype = "INT"
            elif type(a) == BuiltinTypeNode and a.type == "float":
atype = "FLOAT"
else:
atype = "REF"
node.stack_start -= 1
node.children[0][i].stack_off = -(len(node.arglist)-i)
a.stack_off = -(len(node.arglist)-i);
a.local = True
if type(a) == VarDeclNode:
a.modifiers.add("local")
if "global" in a.modifiers:
a.modifiers.remove("global")
scope[k] = a
print(scope)
self.opcode("PUSH", comment="start " + node.name)
for c in node.children[1:]:
emit(c, scope)
self.opcode("POP")
if self.codes[-2].code != "LONGJMP":
while self.stacklevels[-1] > 0:
self.opcode("POP")
for a in node.arglist:
self.opcode("POP", comment=a)
#this is the undefined return case, so push a null ret value
self.opcode("LOAD_REG_PTR", 0)
self.opcode("POP"); self.opcode("PUSH_UNDEFINED")
self.opcode("LONGJMP", 0, comment="return from "+node.name)
handle_nodescope_post(node, scope)
self.stacklevels.pop(-1)
self.funcstack.pop(-1)
self.blocklevel -= 1
def _get_optype(self, node, add_u=False):
if type(node) == NumLitNode:
if type(node.val) == int: s = "INT"
elif type(node.val) == float: s = "FLOAT"
elif type(node) == BuiltinTypeNode and node.type == "int":
s = "INT"
elif type(node) == BuiltinTypeNode and node.type == "float":
s = "FLOAT"
else:
s = "REF"
if add_u and s != "REF":
s = "_" + s
return s
def ReturnNode(self, node, scope, emit, tlevel):
if len(node) > 0:
self.opcode("PUSH");
emit(node[0], scope)
func = self.funcstack[-1]
ntype = self._get_optype(self.funcstack[-1].type)
self.opcode("LOAD_REG_" + ntype, 1)
while self.stacklevels[-1] > 0:
self.opcode("POP")
for a in func.arglist:
self.opcode("POP", comment=a)
self.opcode("LOAD_REG_PTR", 0)
self.opcode("LOAD_FROM_REG", 1)
self.opcode("LONGJMP", 0, comment="return from "+func.name)
def WithNode(self, node, scope, emit, tlevel):
handle_nodescope_pre(node, scope)
for c in node.children:
emit(c, scope)
        handle_nodescope_post(node, scope)
def StatementList(self, node, scope, emit, tlevel):
for c in node.children:
emit(c, scope)
from js_opcode_exec import *
def link_symbols(codes):
for c in codes:
if c.code == "LOAD_REG_SYMBOL_PTR":
func = typespace.functions[c.arg[0]]
reg = c.arg[1]
c.code = "LOAD_REG_PTR_CONST"
c.arg = [c.arg[1], func.opcode_addr]
def code_to_int(codes):
for c in codes:
c.type = opcode_map[c.code]
def gen_opcode(node, typespace2):
global typespace
combine_if_else_nodes(node)
typespace = typespace2
resolve_types(node, typespace2)
visit = TypeEmitVisit()
visit.traverse(node, None)
link_symbols(visit.codes)
code_to_int(visit.codes)
i = 0
for c in visit.codes:
if glob.g_debug_opcode:
print("%03d %s" % (i, c))
c.i = i
i += 1
    from js_opcode_exec import Interpretor
machine = Interpretor()
machine.run_function(visit.codes, typespace.functions["main"], [1, 2.0])
def gen_opcode_files(rootnodes, typespace):
pass
if __name__ == "__main__":
lines = ["%s: %d"%(k, opcode_map[k]) for k in opcode_map]
lines.sort()
for l in lines:
print(l)
sys.exit()
```
#### File: tools/extjs_cc/js_opcode_exec.py
```python
import traceback, sys
from js_opcode_emit import MAX_REGISTER
from js_global import glob
opcode_map = {}
code = 0
def _gen_code():
global code
code += 1
return code - 1
opcode_map["PUSH"] = _gen_code()
opcode_map["POP"] = _gen_code()
opcode_map["PUSH_UNDEFINED"] = _gen_code()
opcode_map["LOAD_FROM_REG"] = _gen_code()
opcode_map["LOAD_LOCAL_STACK"] = _gen_code()
opcode_map["WRITE_LOCAL_STACK"] = _gen_code()
opcode_map["LOAD_REG_REF"] = _gen_code()
opcode_map["LOAD_REG_INT"] = _gen_code()
opcode_map["LOAD_REG_PTR"] = _gen_code()
opcode_map["LOAD_REG_EXTERN_PTR"] = _gen_code()
opcode_map["LOAD_REG_UNDEFINED"] = _gen_code()
opcode_map["LOAD_REG_FLOAT"] = _gen_code()
opcode_map["LOAD_SYMBOL_PTR"] = _gen_code()
opcode_map["LOAD_SYMBOL_INT"] = _gen_code()
opcode_map["NATIVE_CALL"] = _gen_code()
opcode_map["LOAD_SYMBOL_FLOAT"] = _gen_code()
opcode_map["WRITE_SYMBOL_REF"] = _gen_code()
opcode_map["WRITE_SYMBOL_INT"] = _gen_code()
opcode_map["WRITE_SYMBOL_FLOAT"] = _gen_code()
opcode_map["LOAD_REG_SYMBOL_PTR"] = _gen_code()
opcode_map["LOAD_OPCODE_PTR"] = _gen_code()
opcode_map["WRITE_REG_INT"] = _gen_code()
opcode_map["WRITE_REG_FLOAT"] = _gen_code()
opcode_map["WRITE_REG_REF"] = _gen_code()
opcode_map["WRITE_REG_PTR"] = _gen_code()
opcode_map["LOAD_REF"] = _gen_code()
opcode_map["WRITE_REF"] = _gen_code()
opcode_map["LOAD_CONST_REF"] = _gen_code()
opcode_map["LOAD_CONST_INT"] = _gen_code()
opcode_map["LOAD_CONST_FLOAT"] = _gen_code()
opcode_map["INT_TO_FLOAT"] = _gen_code()
opcode_map["UNDEFINED_TO_ZERO_INT"] = _gen_code()
opcode_map["FLOAT_TO_INT"] = _gen_code()
opcode_map["LOAD_FLOAT"] = _gen_code()
opcode_map["WRITE_REF_LOCAL"] = _gen_code()
opcode_map["WRITE_INT_LOCAL"] = _gen_code()
opcode_map["WRITE_FLOAT_LOCAL"] = _gen_code()
opcode_map["LOAD_REF_LOCAL"] = _gen_code()
opcode_map["LOAD_INT_LOCAL"] = _gen_code()
opcode_map["LOAD_FLOAT_LOCAL"] = _gen_code()
opcode_map["PUSH_REF"] = _gen_code()
opcode_map["SHORTJMP"] = _gen_code()
opcode_map["SHORTJMPTRUE"] = _gen_code()
opcode_map["SHORTJMPTRUE_REG"] = _gen_code()
opcode_map["SHORTJMPFALSE"] = _gen_code()
opcode_map["SHORTJMPFALSE_REG"] = _gen_code()
opcode_map["LONGJMP"] = _gen_code()
opcode_map["PUSHTRY"] = _gen_code()
opcode_map["POPTRY"] = _gen_code()
opcode_map["THROW"] = _gen_code()
opcode_map["INT_TO_FLOAT"] = _gen_code()
opcode_map["FLOAT_TO_INT"] = _gen_code()
opcode_map["ARRAY_REF"] = _gen_code()
opcode_map["ARRAY_SET"] = _gen_code()
opcode_map["ADD_INT"] = _gen_code()
opcode_map["SUB_INT"] = _gen_code()
opcode_map["MUL_INT"] = _gen_code()
opcode_map["DIV_INT"] = _gen_code()
opcode_map["MOD_INT"] = _gen_code()
opcode_map["BITINV"] = _gen_code()
opcode_map["BITAND"] = _gen_code()
opcode_map["BITOR"] = _gen_code()
opcode_map["BITXOR"] = _gen_code()
opcode_map["LSHIFT"] = _gen_code()
opcode_map["RSHIFT"] = _gen_code()
opcode_map["NEGATE"] = _gen_code()
opcode_map["ADD_FLOAT"] = _gen_code()
opcode_map["SUB_FLOAT"] = _gen_code()
opcode_map["MUL_FLOAT"] = _gen_code()
opcode_map["DIV_FLOAT"] = _gen_code()
opcode_map["MOD_FLOAT"] = _gen_code()
opcode_map["LTHAN_INT"] = _gen_code()
opcode_map["GTHAN_INT"] = _gen_code()
opcode_map["LTHANEQ_INT"] = _gen_code()
opcode_map["GTHANEQ_INT"] = _gen_code()
opcode_map["EQ_INT"] = _gen_code()
opcode_map["NOTEQ_INT"] = _gen_code()
opcode_map["NOT_INT"] = _gen_code()
opcode_map["LTHAN_FLOAT"] = _gen_code()
opcode_map["GTHAN_FLOAT"] = _gen_code()
opcode_map["LTHANEQ_FLOAT"] = _gen_code()
opcode_map["GTHANEQ_FLOAT"] = _gen_code()
opcode_map["EQ_FLOAT"] = _gen_code()
opcode_map["NOTEQ_FLOAT"] = _gen_code()
opcode_map["AND"] = _gen_code()
opcode_map["OR"] = _gen_code()
opcode_map["ADD"] = _gen_code()
opcode_map["SUB"] = _gen_code()
opcode_map["MUL"] = _gen_code()
opcode_map["DIV"] = _gen_code()
opcode_map["IN"] = _gen_code()
opcode_map["LOAD_REG_PTR_CONST"] = _gen_code()
rev_opcode_map = {}
for k in opcode_map:
rev_opcode_map[opcode_map[k]] = k
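# rev_opcode_map turns the integer back into its mnemonic, handy when
# inspecting raw code listings, e.g. rev_opcode_map[opcode_map["PUSH"]]
# evaluates to "PUSH".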
class StackItem:
def __init__(self, value=None):
self.value = value
def __str__(self):
return str(self.value)
def __repr__(self):
return str(self)
class Object:
def __init__(self):
self.init = None
self.type_name = ""
self.methods = {}
self.properties = {}
self.child_classes = []
self.class_parent = None
def __str__(self):
return "(obj)"
def __repr__(self):
return str(self)
class UndefinedType(Object):
def __str__(self):
return "None"
Undefined = UndefinedType()
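# Single shared sentinel: registers are initialized to this object and the
# conditional jumps below treat it as falsy (see SHORTJMPFALSE), rather than
# overloading Python's None.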
def do_print(machine, string):
print("print:", str(string))
def do_fstr(machine, f):
return str(f)
do_print.totarg = 1
do_fstr.totarg = 1
class Interpretor:
def __init__(self):
self.functions = {"print" : do_print, "fstr" : do_fstr} #native functions
self.globals = {}
self.stack = [StackItem()]
self.code = []
self.cur = 0
self.registers = [Undefined for x in range(MAX_REGISTER)]
self.error = 0
self.trystack = []
self.opfuncs = [0 for x in range(len(opcode_map)+2)]
for k in opcode_map:
if hasattr(self, k):
self.opfuncs[opcode_map[k]] = getattr(self, k)
def reset(self):
self.cur = 0
self.registers = [Undefined for x in range(MAX_REGISTER)]
self.stack = [StackItem()]
def run_function(self, code, funcnode, args):
self.reset()
self.code = code
self.stack.append(StackItem(-1))
for a in args:
self.stack.append(StackItem(a))
self.run(code, funcnode.opcode_addr, do_reset=False)
def run(self, code, entry, do_reset=True):
limit = 500
if do_reset:
self.reset()
self.code = code;
self.cur = entry;
print("\n")
print("starting stack:")
st = self.stack[:]
st.reverse()
for s in st:
print(" " + str(s.value))
print("\n")
def rev(lst):
l = lst[:]
l.reverse()
return str(l)
i = 0
code = self.code
while i < limit:
c = code[self.cur]
self.cur += 1
try:
self.opfuncs[c.type](c.arg)
except:
if glob.g_debug_opcode:
print("%03d %d %s %s | %s %s"%(c.i, c.code, str(c.arg), rev(self.stack[-4:len(self.stack)]), str(self.registers)))
traceback.print_stack()
traceback.print_exc()
sys.exit(-1)
if glob.g_debug_opcode:
print("%03d %s %s | %s %s"%(c.i, c.code, str(c.arg), rev(self.stack[-4:len(self.stack)]), str(self.registers)))
if self.cur < 0: break
i += 1
print("\n")
print("finished", i)
def PUSH(self, args=None):
self.stack.append(StackItem())
def POP(self, args=None):
return self.stack.pop(-1)
def PUSH_UNDEFINED(self, args):
        self.stack.append(StackItem(Undefined))
def LOAD_FROM_REG(self, args):
self.stack[-1].value = self.registers[args]
def LOAD_LOCAL_STACK(self, args):
#print(self.stack)
self.stack[-1].value = self.stack[args].value
def WRITE_LOCAL_STACK(self, args):
self.stack[args].value = self.stack[-1].value
def LOAD_REG_PTR_CONST(self, args):
self.registers[args[0]] = args[1]
def LOAD_REG_REF(self, args):
self.registers[args] = self.stack[-1].value
def LOAD_REG_INT(self, args):
self.registers[args] = self.stack[-1].value
def LOAD_REG_PTR(self, args):
self.registers[args] = self.stack[-1].value
def LOAD_REG_EXTERN_PTR(self, args):
raise RuntimeError("Opcode not fully processed")
def LOAD_REG_UNDEFINED(self, args):
self.registers[args] = Undefined
def LOAD_REG_FLOAT(self, args):
self.registers[args] = self.stack[-1].value
def LOAD_SYMBOL_PTR(self, args):
raise RuntimeError("Opcode not fully processed")
def LOAD_SYMBOL_INT(self, args):
raise RuntimeError("Opcode not fully processed")
def NATIVE_CALL(self, fname):
args = []
totarg = self.functions[fname].totarg
        for i in range(totarg):
args.append(self.POP(None).value)
ret = self.functions[fname](self, *args)
#return to calling code, with value ret
self.LOAD_REG_PTR(0) #save return value
self.stack[-1].value = ret
self.LONGJMP(0)
def LOAD_SYMBOL_FLOAT(self, args):
raise RuntimeError("Incomplete opcode")
def WRITE_SYMBOL_REF(self, args):
raise RuntimeError("Incomplete opcode")
def WRITE_SYMBOL_INT(self, args):
raise RuntimeError("Incomplete opcode")
def WRITE_SYMBOL_FLOAT(self, args):
raise RuntimeError("Incomplete opcode")
def LOAD_REG_SYMBOL_PTR(self, args):
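        # In the gen_opcode path this handler is effectively dead code:
        # link_symbols() rewrites LOAD_REG_SYMBOL_PTR into
        # LOAD_REG_PTR_CONST before the interpreter ever runs.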
self.registers[args] = self.stack
def LOAD_OPCODE_PTR(self, args):
self.stack[-1].value = args
def LOAD_REF(self, args):
self.stack[-1].value = args
def LOAD_CONST_REF(self, args):
self.stack[-1].value = args
def LOAD_CONST_INT(self, args):
self.stack[-1].value = args
def LOAD_CONST_FLOAT(self, args):
self.stack[-1].value = args
def INT_TO_FLOAT(self, args):
self.stack[-1].value = float(self.stack[-1].value)
def UNDEFINED_TO_ZERO_INT(self, args):
self.stack[-1].value = 0
def FLOAT_TO_INT(self, args):
self.stack[-1].value = int(self.stack[-1].value)
def PUSH_REF(self, args):
self.stack.append(StackItem(args))
def SHORTJMP(self, args):
self.cur += args
def SHORTJMPTRUE(self, args):
if self.stack[-1] not in [0, None, Undefined]:
self.cur += args
def SHORTJMPTRUE_REG(self, args):
        if self.registers[args[0]] not in [0, None, Undefined]:
            self.cur += args[1]
def SHORTJMPFALSE(self, args):
if self.stack[-1] in [0, None, Undefined]:
self.cur += args
def SHORTJMPFALSE_REG(self, args):
if self.registers[args[0]] in [0, None, Undefined]:
self.cur += args[1]
def LONGJMP(self, args):
self.cur = self.registers[args]
def PUSHTRY(self, args):
self.trystack.append(args)
def POPTRY(self, args):
self.trystack.pop()
def THROW(self, args):
self.throw_error(args)
def ARRAY_REF(self, args):
pass
def ARRAY_SET(self, args):
pass
def ADD_INT(self, args):
self.stack[-1].value = int(self.registers[2] + self.registers[3])
def SUB_INT(self, args):
self.stack[-1].value = int(self.registers[2] - self.registers[3])
def MUL_INT(self, args):
self.stack[-1].value = int(self.registers[2] * self.registers[3])
def DIV_INT(self, args):
self.stack[-1].value = int(self.registers[2] / self.registers[3])
def MOD_INT(self, args):
self.stack[-1].value = int(self.registers[2] % self.registers[3])
def BITINV(self, args):
pass
def BITAND(self, args):
self.stack[-1].value = self.registers[2] & self.registers[3]
def BITOR(self, args):
self.stack[-1].value = self.registers[2] | self.registers[3]
def BITXOR(self, args):
self.stack[-1].value = self.registers[2] ^ self.registers[3]
def LSHIFT(self, args):
self.stack[-1].value = self.registers[2] << self.registers[3]
def RSHIFT(self, args):
self.stack[-1].value = self.registers[2] >> self.registers[3]
def NEGATE(self, args):
pass
def ADD_FLOAT(self, args):
self.stack[-1].value = self.registers[2] + self.registers[3]
def SUB_FLOAT(self, args):
self.stack[-1].value = self.registers[2] - self.registers[3]
def MUL_FLOAT(self, args):
self.stack[-1].value = self.registers[2] * self.registers[3]
def DIV_FLOAT(self, args):
self.stack[-1].value = self.registers[2] / self.registers[3]
def MOD_FLOAT(self, args):
self.stack[-1].value = self.registers[2] % self.registers[3]
def LTHAN_INT(self, args):
self.stack[-1].value = self.registers[2] < self.registers[3]
def GTHAN_INT(self, args):
self.stack[-1].value = self.registers[2] > self.registers[3]
def LTHANEQ_INT(self, args):
self.stack[-1].value = self.registers[2] <= self.registers[3]
def GTHANEQ_INT(self, args):
self.stack[-1].value = self.registers[2] >= self.registers[3]
def EQ_INT(self, args):
self.stack[-1].value = self.registers[2] == self.registers[3]
def NOTEQ_INT(self, args):
self.stack[-1].value = self.registers[2] != self.registers[3]
def NOT_INT(self, args):
pass
def LTHAN_FLOAT(self, args):
self.stack[-1].value = self.registers[2] < self.registers[3]
def GTHAN_FLOAT(self, args):
self.stack[-1].value = self.registers[2] > self.registers[3]
def LTHANEQ_FLOAT(self, args):
self.stack[-1].value = self.registers[2] <= self.registers[3]
def GTHANEQ_FLOAT(self, args):
self.stack[-1].value = self.registers[2] >= self.registers[3]
def EQ_FLOAT(self, args):
self.stack[-1].value = self.registers[2] == self.registers[3]
def NOTEQ_FLOAT(self, args):
self.stack[-1].value = self.registers[2] != self.registers[3]
def AND(self, args):
self.stack[-1].value = self.registers[2] and self.registers[3]
def OR(self, args):
self.stack[-1].value = self.registers[2] or self.registers[3]
def ADD(self, args):
pass
def SUB(self, args):
pass
def MUL(self, args):
pass
def DIV(self, args):
pass
def IN(self, args):
pass
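# Minimal smoke test (illustrative; assumes `codes` was produced by
# TypeEmitVisit in js_opcode_emit and that the typespace defines "main"):
#
#   machine = Interpretor()
#   machine.run_function(codes, typespace.functions["main"], [1, 2.0])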
```
#### File: tools/extjs_cc/js_parse_perfstatic.py
```python
import ply.yacc as yacc
import sys, os, os.path
import traceback
# Get the token map from the lexer. This is required.
from js_global import glob
from js_ast import *
from js_lex import tokens, StringLit, HexInt
from ply.lex import LexToken, Lexer
#AST nodes that are used in intermediate stages of parsing,
#but are NEVER EVER in the final AST tree.
from js_parser_only_ast import *
from js_process_ast_parser_only import *
from js_parse import *
"""
This is a special "mode" that changes the syntax
into a statically typed language, optimized and
checked for writing high-performance code, yet one
that still compiles down to JavaScript.
"""
def p_statementlist(p):
''' statementlist : statement
| statement_nonctrl
| statementlist statement
| statementlist statement_nonctrl
|
'''
set_parse_globals(p);
if len(p) == 1:
p[0] = StatementList()
elif len(p) == 2:
n = StatementList()
n.add(p[1])
p[0] = n
elif len(p) == 3:
if type(p[1]) != StatementList:
p[0] = StatementList()
p[0].add(p[1])
p[0].add(p[2])
else:
p[0] = p[1]
if p[2] != None:
p[0].add(p[2])
def p_class(p):
'''class : CLASS ID template_opt class_tail'''
set_parse_globals(p)
tail = p[4]
heritage = tail[0]
cls = ClassNode(p[2], heritage)
for n in tail[1]:
cls.add(n)
p[0] = cls
if p[3] != None:
p[0].template = p[3]
def p_exprclass(p):
'''exprclass : CLASS id_opt class_tail'''
set_parse_globals(p)
tail = p[3]
heritage = tail[0]
if p[2] == None:
p[2] = "(anonymous)"
cls = ClassNode(p[2], heritage)
for n in tail[1]:
cls.add(n)
p[0] = expand_harmony_class(cls)
def p_class_tail(p):
'''class_tail : class_heritage_opt LBRACKET class_body_opt RBRACKET'''
set_parse_globals(p)
p[0] = [p[1], p[3]]
for i in range(2):
if p[0][i] == None:
p[0][i] = []
def p_class_list(p):
'''class_list : var_type
| class_list COMMA var_type
'''
set_parse_globals(p)
if len(p) == 2:
p[0] = [p[1]];
else:
p[0] = p[1];
if type(p[0]) != list:
p[0] = [p[0]]
p[0].append(p[3])
def p_class_heritage(p):
'''class_heritage : EXTENDS class_list'''
set_parse_globals(p)
p[0] = p[2]
def p_class_heritage_opt(p):
'''class_heritage_opt : class_heritage
|
'''
set_parse_globals(p)
if len(p) == 2:
p[0] = p[1]
def p_class_body_opt(p):
'''class_body_opt : class_element_list
|
'''
set_parse_globals(p)
if len(p) == 1:
p[0] = []
else:
p[0] = p[1]
if p[0] == None:
p[0] = []
def p_class_element_list(p):
'''class_element_list : class_element
| class_element_list class_element
'''
set_parse_globals(p)
if len(p) == 2:
p[0] = [p[1]]
else:
p[0] = p[1]
p[0].append(p[2])
def p_class_element(p):
'''class_element : method_def
| STATIC method_def
| class_var
'''
set_parse_globals(p)
if len(p) == 2:
p[0] = p[1]
else:
p[0] = p[2]
p[0].is_static = True
def p_class_var(p):
'''class_var : class_vartype ID SEMI
| class_vartype ID ASSIGN expr SEMI
'''
set_parse_globals(p)
p[0] = ClassMember(p[2])
if len(p) == 6:
p[0].add(p[4])
def p_basic_var_type(p):
'''
basic_var_type : BYTE
| INT
| SHORT
| FLOAT
| DOUBLE
| CHAR
'''
p[0] = BuiltinTypeNode(p[1])
def p_var_type2(p):
''' var_type2 : basic_var_type
| ID
| ID template_ref
'''
if len(p) == 2:
if type(p[1]) == str:
p[0] = TypeRefNode(p[1])
else:
p[0] = p[1]
else:
p[0] = TypeRefNode(p[1])
p[0].template = p[2]
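# e.g. (illustrative) "int" arrives through basic_var_type as a
# BuiltinTypeNode, while something like "MyList<float>" would take the
# `ID template_ref` branch and yield a TypeRefNode with .template set.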
def p_class_vartype(p):
'''class_vartype : var_type2
| prop_modifiers var_type2
'''
set_parse_globals(p)
if len(p) == 2:
p[0] = p[1]
else:
p[0] = p[2]
p[0].modifiers = p[1]
def p_prop_modifiers(p):
'''prop_modifiers : type_modifiers UNSIGNED
| type_modifiers SIGNED
| type_modifiers CONST
| STATIC
| UNSIGNED
| CONST
|
'''
set_parse_globals(p)
if len(p) == 2:
p[0] = set([p[1]])
else:
p[0] = p[1]
p[0].add(p[2])
def p_method(p):
'''method : ID LPAREN funcdeflist RPAREN func_type_opt LBRACKET statementlist_opt RBRACKET'''
set_parse_globals(p)
name = p[1]
params = p[3]
statementlist = p[7]
if statementlist == None:
statementlist = StatementList()
p[0] = MethodNode(name)
p[0].add(params)
p[0].add(statementlist)
if p[5] != None:
p[0].type = p[5]
def p_method_def(p):
#I don't want to make get/set exclusive parse tokens,
#so I'm going to enforce that here in the production function.
'''method_def : method
| ID ID LPAREN RPAREN func_type_opt LBRACKET statementlist_opt RBRACKET
| ID ID LPAREN setter_param_list RPAREN func_type_opt LBRACKET statementlist_opt RBRACKET
'''
set_parse_globals(p)
if len(p) == 2:
p[0] = p[1]
elif p[1] == "get" and len(p) == 9:
name = p[2]
p[0] = MethodGetter(name)
if p[7] == None: p[7] = StatementList()
p[0].add(p[7])
if p[5] != None:
p[0].type = p[5]
elif p[1] == "set" and len(p) == 10:
name = p[2]
p[0] = MethodSetter(name)
p[0].add(p[4])
if p[8] == None: p[8] = StatementList()
p[0].add(p[8])
if p[6] != None:
p[0].type = p[6]
else:
glob.g_error = True
glob.g_error_pre = p
print_err(p, True)
raise SyntaxError("Expected 'get' or 'set'");
def p_setter_param_list(p):
'''
setter_param_list : ID
'''
set_parse_globals(p)
p[0] = ExprListNode([VarDeclNode(ExprNode([]), name=p[1])])
_parser = yacc.yacc()
parser = Parser(_parser);
```
#### File: tools/scripts/gen_binomial_tables.py
```python
import gc, sys
from math import *
table = []
def binomial(n, k):
global table
if len(table) > n:
return table[n][k]
if k == 0.0 or k == n:
return 1
return binomial(n-1, k-1) + binomial(n-1, k);
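# e.g. binomial(4, 2) == 6; after the loop below fills `table`, any call with
# n < len(table) is a constant-time lookup instead of a recursive descent.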
print("\"use strict\";\nexport var binomial_table = [")
if len(sys.argv) > 1:
steps = int(sys.argv[1])
else:
steps = 64;
for i in range(steps):
arr = []
gc.collect()
sys.stderr.write("doing %i\n" % i);
for j in range(i+1):
arr.append(binomial(i, j))
table.append(arr)
add = "," if i != steps-1 else ""
print(" " + str(arr) + add)
print("];\n")
def bernstein(degree, s):
degree = max(floor(degree), 0.0)
half = floor(degree/2.0)
return binomial(degree, half)*pow(s, half)*pow(1.0-s, degree-half);
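# A quick sanity check of the two helpers above (comments only, hypothetical values):
#   binomial(4, 2) == 6
#   bernstein(4, 0.5) == binomial(4, 2) * 0.5**2 * 0.5**2 == 0.375
# i.e. bernstein(d, s) evaluates the middle Bernstein basis polynomial of degree d at s.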
print("//point at where bernstein basis floor(degree/2), degree is greatest.\n")
print("export var bernstein_offsets = [");
for i in range(steps):
if i < 3:
print(" 0.5,")
continue;
#find points closest to zero as well as maxima
s01 = 0.45 if i > 15 else 0.3;
s02 = 0.55;
s = 0.5
df = 0.0000001
for j in range(254):
b1 = bernstein(i, s)
b2 = bernstein(i, s+df)
b3 = bernstein(i, s+df*2)
d1 = (b2-b1)/df
d2 = (b3-b2)/df
d = (d2-d1)/df
if abs(d) < 0.00001: break
#if b1 != 0.0: b1 = 1/b1
s += -(d1/d)*0.5;
s = min(max(s, 0.0), 1.0)
for k in range(2):
s2 = s01 if k == 0 else s02
b1 = bernstein(i, s2)
b2 = bernstein(i, s2+df)
d = (b2-b1)/df;
#8
if abs(b1) < 0.00001: continue
if abs(d) == 0.0: continue #< 0.0001: break
fac = -(b1/d)*0.52
sys.stderr.write(" %f %f | %f %f\n" % (b1, b2, s01, s02));
if k==0:
s01 += fac
else:
s02 += fac
#print(d1, s)
add = "," if i != steps-1 else ""
print(" [" + str(s01) + ", " + str(s) + ", " + str(s02) + "]"+add)
print("];\n")
``` |
{
"source": "joeeeeey/nameko-tracer",
"score": 2
} |
#### File: nameko-tracer/nameko_tracer/dependency.py
```python
from collections import defaultdict
from datetime import datetime
import logging
import socket
from weakref import WeakKeyDictionary
from nameko.extensions import DependencyProvider
from nameko_tracer import adapters, constants, utils
logger = logging.getLogger(__name__)
class Tracer(DependencyProvider):
""" Entrypoint logging dependency
Logs call and result details about entrypoints fired.
"""
def __init__(self):
self.logger = None
self.adapter_types = defaultdict(lambda: adapters.DefaultAdapter)
self.worker_timestamps = WeakKeyDictionary()
def setup(self):
config = self.container.config.get(constants.CONFIG_KEY, {})
self.configure_adapter_types(constants.DEFAULT_ADAPTERS)
self.configure_adapter_types(
config.get(constants.ADAPTERS_CONFIG_KEY, {}))
self.logger = logging.getLogger(constants.LOGGER_NAME)
def configure_adapter_types(self, adapters_config):
for entrypoint_path, adapter_path in adapters_config.items():
entrypoint_class = utils.import_by_path(entrypoint_path)
adapter_class = utils.import_by_path(adapter_path)
self.adapter_types[entrypoint_class] = adapter_class
def adapter_factory(self, worker_ctx):
adapter_class = self.adapter_types[type(worker_ctx.entrypoint)]
extra = {'hostname': socket.gethostname()}
return adapter_class(self.logger, extra=extra)
def worker_setup(self, worker_ctx):
""" Log entrypoint call details
"""
timestamp = datetime.utcnow()
self.worker_timestamps[worker_ctx] = timestamp
try:
extra = {
'stage': constants.Stage.request,
'worker_ctx': worker_ctx,
'timestamp': timestamp,
}
adapter = self.adapter_factory(worker_ctx)
adapter.info(
'[%s] entrypoint call trace',
worker_ctx.call_id,
extra=extra)
except Exception:
logger.warning('Failed to log entrypoint trace', exc_info=True)
def worker_result(self, worker_ctx, result=None, exc_info=None):
""" Log entrypoint result details
"""
timestamp = datetime.utcnow()
worker_setup_timestamp = self.worker_timestamps[worker_ctx]
response_time = (timestamp - worker_setup_timestamp).total_seconds()
try:
extra = {
'stage': constants.Stage.response,
'worker_ctx': worker_ctx,
'result': result,
'exc_info_': exc_info,
'timestamp': timestamp,
'response_time': response_time,
}
adapter = self.adapter_factory(worker_ctx)
if exc_info:
adapter.warning(
'[%s] entrypoint result trace',
worker_ctx.call_id,
extra=extra)
else:
adapter.info(
'[%s] entrypoint result trace',
worker_ctx.call_id,
extra=extra)
except Exception:
logger.warning('Failed to log entrypoint trace', exc_info=True)
```
#### File: nameko-tracer/nameko_tracer/filters.py
```python
import abc
import logging
import re
from nameko_tracer import constants, utils
class BaseTruncateFilter(logging.Filter, abc.ABC):
default_entrypoints = []
def __init__(self, entrypoints=None, max_len=None):
entrypoints = entrypoints or self.default_entrypoints
self.entrypoints = [re.compile(r) for r in entrypoints]
self.max_len = max_len or 100
def filter(self, log_record):
data = getattr(log_record, constants.TRACE_KEY)
entrypoint_name = data.get(constants.ENTRYPOINT_NAME_KEY)
if any(regex.match(entrypoint_name) for regex in self.entrypoints):
data = self.truncate(data)
setattr(log_record, constants.TRACE_KEY, data)
return log_record
@abc.abstractmethod
def truncate(self, data):
""" Truncate and return the data
"""
class TruncateCallArgsFilter(BaseTruncateFilter):
""" Truncate serialized call arguments
If the truncation is applied, the call data is serialised to string
beforehand.
Example of a filter truncating call arguments of entrypoint methods
starting with "create" or "insert" to the length of 200 characters::
filter = TruncateRequestFilter(
entrypoints=['^create|^insert'], max_len=200)
"""
default_entrypoints = []
def truncate(self, data):
if constants.REQUEST_KEY not in data:
return data
call_args = utils.serialise_to_string(data[constants.REQUEST_KEY])
length = len(call_args)
if length > self.max_len:
data[constants.REQUEST_KEY] = call_args[:self.max_len]
truncated = True
else:
truncated = False
data[constants.REQUEST_TRUNCATED_KEY] = truncated
data[constants.REQUEST_LENGTH_KEY] = length
return data
class TruncateResponseFilter(BaseTruncateFilter):
""" Truncate serialized response data
If the truncation is applied, the call data is serialised to string
beforehand.
Example of a filter truncating return value of entrypoint methods
starting with "get" or "list" to the length of 200 characters::
filter = TruncateResponseFilter(
entrypoints=['^get|^list'], max_len=200)
"""
default_entrypoints = ['^get_|^list_|^query_']
def truncate(self, data):
if constants.RESPONSE_KEY not in data:
return data
result = utils.serialise_to_string(data[constants.RESPONSE_KEY])
length = len(result)
if length > self.max_len:
data[constants.RESPONSE_KEY] = result[:self.max_len]
truncated = True
else:
truncated = False
data[constants.RESPONSE_TRUNCATED_KEY] = truncated
data[constants.RESPONSE_LENGTH_KEY] = length
return data
TruncateRequestFilter = TruncateCallArgsFilter
class HealthCheckTraceFilter(logging.Filter):
def filter(self, record):
try:
return record.worker_ctx.entrypoint.url not in [
"/health-check",
"/health_check",
"/healthcheck",
]
except AttributeError:
return True
```
#### File: nameko-tracer/nameko_tracer/formatters.py
```python
import json
import logging
from functools import partial
from nameko_tracer import constants
def default(obj):
return str(obj)
serialise = partial(json.dumps, default=default)
class JSONFormatter(logging.Formatter):
""" Format trace data as JSON string
"""
def __init__(self, **option):
self.option = option
def format(self, record):
return serialise(getattr(record, constants.TRACE_KEY), **self.option)
PrettyJSONFormatter = partial(JSONFormatter, indent=4, sort_keys=True)
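# PrettyJSONFormatter() is shorthand for JSONFormatter(indent=4, sort_keys=True).
# Usage sketch (assumes a handler is attached to the tracer logger elsewhere):
#   handler.setFormatter(PrettyJSONFormatter())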
class ElasticsearchDocumentFormatter(JSONFormatter):
""" Format trace as JSON which can be fed to Elasticsearch as a document
Request and response data fields of the document are serialized as JSON
string before serialising the whole output.
"""
extra_serialise_keys = (
constants.CONTEXT_DATA_KEY,
constants.REQUEST_KEY,
constants.RESPONSE_KEY,
constants.EXCEPTION_ARGS_KEY)
def format(self, record):
trace = getattr(record, constants.TRACE_KEY)
for key in self.extra_serialise_keys:
if key in trace:
trace[key] = serialise(trace[key])
return serialise(trace)
``` |
{
"source": "joe-eklund/foreign-subs",
"score": 3
} |
#### File: fsubs/utils/auth.py
```python
import logging
import jwt
from datetime import datetime, timedelta
from fsubs.config.config import Config
LOGGER = logging.getLogger(__name__)
CONFIG = Config()
def create_access_token(*, data: dict, expires_delta: timedelta = None) -> bytes:
"""Create an access token for bearer authentication."""
to_encode = data.copy()
if expires_delta:
expire = datetime.utcnow() + expires_delta
else:
expire = datetime.utcnow() + timedelta(minutes=15)
to_encode.update({"exp": expire})
encoded_jwt = jwt.encode(to_encode, CONFIG['app']['jwt_secret'],
algorithm=CONFIG['app']['jwt_algorithm'])
return encoded_jwt
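# Usage sketch (hypothetical claims; CONFIG must supply 'jwt_secret' and 'jwt_algorithm'):
#   token = create_access_token(data={'sub': 'alice'}, expires_delta=timedelta(hours=1))
#   jwt.decode(token, CONFIG['app']['jwt_secret'],
#              algorithms=[CONFIG['app']['jwt_algorithm']])  # -> {'sub': 'alice', 'exp': ...}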
```
#### File: fsubs/utils/users.py
```python
import binascii
import hashlib
import logging
import os
from typing import Tuple
from fastapi import HTTPException
from fsubs.models.user import Access
LOGGER = logging.getLogger(__name__)
def hash_password(password: str) -> Tuple[str, str]:
"""
Salt and hash the given password.
:param password: The password to salt and hash.
:returns: A tuple of the salt and hashed password both encoded in ascii.
"""
LOGGER.debug('Hashing password with password...ha you wish.')
salt = os.urandom(32)
salt_ascii = binascii.hexlify(salt).decode().encode('ascii')
key = hashlib.pbkdf2_hmac(
'sha256',
password.encode('utf-8'),
salt_ascii,
100000,
)
return salt_ascii.decode(), binascii.hexlify(key).decode()
def verify_password(password: str, salt: str, key: str) -> bool:
"""
Verify the given password against the given salt and key.
    :param password: The password to check.
:param salt: The salt to use. Should be encoded in ascii.
:param key: The key to use. Should be encoded in ascii.
:returns: True if given a valid password, False otherwise.
"""
LOGGER.debug("Verifying password.")
new_key = hashlib.pbkdf2_hmac(
'sha256',
password.encode('utf-8'),
salt.encode('ascii'),
100000
)
return binascii.hexlify(new_key).decode() == key
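# Round-trip sketch (hypothetical password):
#   salt, key = hash_password('hunter2')
#   verify_password('hunter2', salt, key)   # -> True
#   verify_password('letmein', salt, key)   # -> False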
async def check_access(
user: dict,
username: str,
obj_to_check: dict = None,
level: Access = Access.basic,):
"""
Check that the given user has at least the given access level.
:param user: The user object to check.
:param username: The username of the user. Useful for error message if the user read resulted
in `None`.
:param obj_to_check: If supplied, and the given user matches the creator of the object, then
allow access even if the user doesn't have the required level.
:param level: The access level required.
:raises HTTPException: If no user was supplied or if the user doesn't have the required access.
"""
LOGGER.info(f'Checking {username} has at least {level} access.')
if not user:
raise HTTPException(
status_code=500,
detail=f'Unable to get user data for: {username}. Cannot proceed with action.')
if obj_to_check and obj_to_check.get('metadata', {}).get('created_by') == username:
return
if not user.access >= level:
raise HTTPException(
status_code=403,
detail=f'User {username} does not have at least level {level.name} to perform action.')
``` |
{
"source": "JoeEmp/performanc_testing_field",
"score": 3
} |
#### File: performanc_testing_field/com/pe_database.py
```python
from sqlalchemy import create_engine, event, MetaData
from sqlalchemy.ext.declarative import declarative_base
# Base = declarative_base()
class Base():
pass
def pymysql_patch():
    # A bit of mischief here:
    # we know we are using pymysql, so change the default cursor to the dict cursor.
    # Connection.cursor(cursor: Type[Cursor])
    # Create a new cursor to execute queries with.
    # :param cursor: The type of cursor to create;
    # one of Cursor, SSCursor, DictCursor, or SSDictCursor. None means use Cursor.
    from pymysql.cursors import Cursor, DictCursor
    Cursor = DictCursor  # note: this only rebinds a name local to the function
ENGINE = create_engine(
"mysql+pymysql://root:123456@localhost:3306/pe_test?charset=utf8mb4",
echo=False, isolation_level="READ UNCOMMITTED")
metadata = MetaData()
pymysql_patch()
class MyTable():
def __init__(self):
self.table = None
def create(self):
self.table.create(ENGINE, checkfirst=True)
@property
def name(self):
return self.table.name
def insert(self, *args, **kwargs):
return self.table.insert()
```
#### File: performanc_testing_field/dbmodules/base.py
```python
from sqlalchemy import text, event, delete, select, update, desc
from sqlalchemy.engine.base import Connection
from sqlalchemy.engine.result import ResultProxy, RowProxy
from com.pe_database import ENGINE
import logging
from datetime import datetime
from contextlib import contextmanager
def transaction(func):
def wrapper(self, *args, **kwargs):
try:
conn = self.connect()
with conn.begin():
return func(self, conn, *args, **kwargs)
except Exception as e:
logging.error(e)
return wrapper
class BaseDBServer():
option = {
"eq": '=',
"ne": '!=',
'lt': '<',
'gt': ">",
'le': '<=',
'ge': '>=',
'1': 'desc',
'0': 'asc'
}
@staticmethod
def connect() -> Connection:
return ENGINE.connect()
@staticmethod
def row2json(row: RowProxy) -> dict:
"""deal datetime to json error. """
keys, values = row.keys(), row.values()
new_row = {}
try:
for i in range(len(keys)):
if isinstance(values[i], datetime):
values[i] = datetime.strftime(
values[i], '%Y-%m-%d %H:%M:%S')
new_row[keys[i]] = values[i]
except Exception as e:
logging.error(e)
return new_row
    @staticmethod
    def row2dict(orm_row):
        d = {}
        for key, value in orm_row.__dict__.items():
            if '_sa_instance_state' == key:
                continue
            if isinstance(value, datetime):
                value = value.strftime('%Y-%m-%d %H:%M:%S')
            d[key] = value
        return d
def sql_execute(self, sql, param=None, conn=None) -> ResultProxy:
if not isinstance(sql, tuple) and not isinstance(sql, list):
sql = [sql]
if param and not isinstance(param, tuple) and not isinstance(param, list):
param = [param]
if not conn:
conn = self.connect()
for i in range(len(sql)):
if param:
ret = conn.execute(text(sql[i]).params(param[i]))
else:
ret = conn.execute(text(sql[i]))
return ret
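    # Usage sketch (hypothetical table and values):
    #   BaseDBServer().sql_execute('select * from users where username = :name',
    #                              {'name': 'lilei'})
    # A list of sql strings runs in order; the ResultProxy of the last one is returned.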
@transaction
def sql_transaction_execute(self, conn, sql, param=None) -> ResultProxy:
return self.sql_execute(sql, param, conn)
class CoreDBServer(BaseDBServer):
"""可进行简单的单表事务,和多表查询.
https://www.osgeo.cn/sqlalchemy/core/dml.html"""
def __init__(self, table=None):
self.table = table
@transaction
def dml_transaction(self, conn, dml_obj, *args) -> ResultProxy:
return self.dml_execute(dml_obj, conn, *args)
def dml_execute(self, dml_obj, conn=None, data_list=[], *args, **kwargs) -> ResultProxy:
try:
if not conn:
conn = self.connect()
if data_list:
return conn.execute(dml_obj, data_list)
else:
return conn.execute(dml_obj)
except Exception as e:
logging.error(e)
def insert(self, *args, **kwargs) -> ResultProxy:
insert_dml = self.table.insert().values(**kwargs)
return self.dml_transaction(dml_obj=insert_dml)
def many_insert(self, data_list) -> ResultProxy:
insert_dml_temp = self.table.insert()
return self.dml_execute(dml_obj=insert_dml_temp, data_list=data_list)
def clear(self) -> ResultProxy:
return self.dml_execute(delete(self.table))
    def update(self, wheres=[], table=None, **values):
        table = table or self.table
        where_texts = []
        for w in wheres:
            where_texts.append("%s %s %r" % (w[0], self.option[w[1]], w[2]))
        wt = ' AND '.join(where_texts)
        upt = table.update().where(text(wt)).values(**values)
        # run inside a transaction
        return self.dml_transaction(upt)
def select(self, wheres=[], sorts_by=[], offset=0, limit=1, tables=None) -> ResultProxy:
"""注意,过滤均为and. """
s = select(tables or [self.table])
s = s.offset(offset).limit(limit)
for w in wheres:
s = s.where(text("%s %s %r" % (w[0], self.option[w[1]], w[2])))
for sort in sorts_by:
if '1' == sort[1]:
s = s.order_by(desc(sort[0]))
elif '0' == sort[1]:
s = s.order_by(sort[0])
logging.debug(s)
return self.connect().execute(s)
```
#### File: JoeEmp/performanc_testing_field/env_init.py
```python
import unittest
import os
import logging
from dbmodules.base import BaseDBServer, CoreDBServer
from dbmodules.good import Good, GoodTable
from dbmodules.user import Users, UserTable
from dbmodules.order import Order, OrderTable
from faker import Faker
from com.pe_encrypt import md5_text
from faker.providers import BaseProvider
from random import choice, seed
import pymysql
from settings import *
from com.pe_database import metadata, ENGINE
root_dir = os.path.abspath(os.path.dirname(__file__))
log_file = os.path.join(root_dir, 'env_init.log')
user_data_file = os.path.join(root_dir, './tests/user.txt')
good_data_file = os.path.join(root_dir, './tests/good.txt')
db_file = os.path.join(root_dir, "petest.db")
seed(0)
logging.basicConfig(level=logging.WARNING,
format='%(asctime)s %(levelname)s %(filename)s %(funcName)s %(message)s',
filename=log_file
)
class GoodProvider(BaseProvider):
tags = ['', '低过老罗', '工厂价', '全网最低', ]
    names = ['肯尼亚AA水洗', '耶加雪菲水洗', '智能水壶', '小米手机', 'iPhone',
             '星际争霸2数字典藏版', '飞鹤奶粉', 'MacbookAir M1', '蜜桃猫手机壳',
             '星空', '蒙娜丽莎', '伏尔加河上的纤夫', '马拉之死', '这个需求做不了']
age = ['2020款', '2021款', '2022款', '']
def good_name(self):
good = choice(self.age)+" "+choice(self.names) + " "+choice(self.tags)
return good.strip(' ')
def create_schema():
    db = pymysql.connect("localhost", "root", "123456")
    try:
        db.cursor().execute("CREATE SCHEMA `%s` DEFAULT CHARACTER SET utf8mb4 ;" % MYSQL_SCHEMA)
        db.commit()
    except Exception as e:
        print(e)
        db.rollback()
    finally:
        db.close()
def set_time_zone():
    # fix the time zone
    db = pymysql.connect("localhost", "root", "123456")
    try:
        db.cursor().execute("set global time_zone = '+8:00';")
        db.commit()
    except Exception as e:
        print(e)
        db.rollback()
    finally:
        db.close()
class create_db_testcase(unittest.TestCase):
@classmethod
def setUpClass(cls):
create_schema()
set_time_zone()
def setUp(self):
suffix = self._testMethodName.split('_')[-2]
if 'user' == suffix.lower():
self.table = UserTable
elif 'good' == suffix.lower():
self.table = GoodTable
elif 'order' == suffix.lower():
self.table = OrderTable
self.table.drop(ENGINE, checkfirst=True)
self.table.create(ENGINE, checkfirst=True)
self.db = CoreDBServer(self.table)
# @unittest.skip('skip')
def test_create_user_table(self):
values = {"username": 'test_user', "password": '<PASSWORD>'}
self.db.insert(**values)
# @unittest.skip('skip')
def test_create_good_table(self):
values = {'name': 'test_good'}
self.db.insert(**values)
# @unittest.skip('skip')
def test_create_order_table(self):
values = {'username': 'lilei',
'order_no': '202001250159591234', 'good_ids': '[1,2]'}
self.db.insert(**values)
def tearDown(self):
self.db.clear()
return super().tearDown()
@classmethod
def tearDownClass(cls):
return super().tearDownClass()
class init_faker_data():
def __init__(self):
self.fake = Faker('zh-CN')
self.fake.add_provider(GoodProvider)
Faker.seed(0)
def add_user_data(self):
self.table = CoreDBServer(UserTable)
data_list = []
with open(user_data_file, 'w') as f:
name_set = set([])
for _ in range(10000):
while True:
cur_len = len(name_set)
username, password = self.fake.email(), self.fake.password()
name_set.add(username)
if len(name_set) > cur_len:
break
                data_list.append(
                    # the value here was redacted in the source; presumably the
                    # password was hashed with md5_text (imported above)
                    {"username": username, "password": md5_text(password)}
                )
f.write(username+','+password+os.linesep)
self.table.many_insert(data_list=data_list)
def add_good_data(self):
self.table = CoreDBServer(GoodTable)
data_list = []
with open(good_data_file, 'w') as f:
            for _ in range(1000):
                good_name, inventory, price = self.fake.good_name(
                ), self.fake.pyint(), self.fake.pyint()
                data_list.append(
                    {"name": good_name, "inventory": inventory, "price": price})
                f.write(good_name + "," + str(inventory) +
                        "," + str(price) + os.linesep)
self.table.many_insert(data_list=data_list)
if __name__ == "__main__":
import sys
if len(sys.argv) < 2:
unittest.main()
elif 'data' == sys.argv[1]:
data = init_faker_data()
data.add_user_data()
data.add_good_data()
```
#### File: performanc_testing_field/handlers/handle.py
```python
from tornado.web import RequestHandler
from com.pe_service_error import PeException, LOGIN_ERROR, PARAMETERS_ERROR, UNKNOW_ERROR
from com.pe_encrypt import sync_token
from dbmodules.user import UserTable
from time import time
import json
from dbmodules.base import BaseDBServer
class BaseHandlers(RequestHandler):
def is_login(self):
"""return payload or raise PeException use handler error """
token = self.request.headers.get('token', None)
if token:
ret = sync_token(token)
if ret.get('exp', 0) > time() and self.is_legal_user(ret.get('username', '')):
ret['code'] = 0
return ret
raise PeException(LOGIN_ERROR)
def is_legal_user(self, username):
sql = "select * from %s where username = %r" % (
UserTable.name, username)
return BaseDBServer().sql_execute(sql).first()
def finish(self, chunk=None):
if isinstance(chunk, dict):
# self.add_header('Content-Type','application/json')
chunk = json.dumps(chunk, ensure_ascii=False)
return super().finish(chunk=chunk)
```
#### File: performanc_testing_field/modules/login.py
```python
from modules.server import Server
from dbmodules.base import CoreDBServer
from dbmodules.user import UserTable
from com.pe_encrypt import get_token
import logging
from com.pe_service_error import PeException, UNKNOW_ERROR
class LoginServer(Server):
def __init__(self):
self.dbser = CoreDBServer(UserTable)
super().__init__()
def login(self, username, password):
"""return token or error msg. """
wheres = [
["username", 'eq', username]
]
row = self.dbser.select(wheres).first()
        if not row:
            logging.warning(
                'username:{} password:{} 用户不存在'.format(username, password))  # user does not exist
            return self.error_tips("不存在该用户")  # "no such user"
        if password == row['password']:
            return {'code': 0, 'token': get_token(username)}
        # any non-matching password falls through to the same error
        logging.warning(
            'username:{} password:{} 密码错误'.format(username, password))  # wrong password
        return self.error_tips('密码错误')  # "wrong password"
```
#### File: performanc_testing_field/modules/server.py
```python
from dbmodules.base import BaseDBServer
from dbmodules.user import UserTable
class Server():
@staticmethod
def base_tips(reason, tips_type='info', *args, **kwargs):
""" return response with code.
if you want to use custom code,you can use code parameters
"""
if 'info' == tips_type:
code = 0
elif "redirect" == tips_type:
code = 2
elif "warning" == tips_type:
code = 1
elif "error" == tips_type:
code = -1
else:
code = kwargs.pop('code', -1)
return {'code': code, 'msg': reason}
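    # e.g. base_tips('saved', 'info') -> {'code': 0, 'msg': 'saved'}
    #      base_tips('oops', 'custom', code=42) -> {'code': 42, 'msg': 'oops'}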
def warning_tips(self, reason):
return self.base_tips(reason, "warning")
def error_tips(self, reason):
return self.base_tips(reason, "error")
def redirect_tips(self, reason):
return self.base_tips(reason, "redirect")
```
#### File: performanc_testing_field/tests/con_api.py
```python
import requests
from settings import PORT
from dbmodules.base import CoreDBServer
from dbmodules.user import UserTable
from dbmodules.order import OrderTable
import logging
def pe_api(url, data=None, headers=None):
try:
ret = requests.post(url, headers=headers, data=data)
# print(ret.request.headers, ret.request.body)
return ret.json()
    except requests.exceptions.ConnectionError as e:
        print('网络错误')  # network error
        return
except Exception as e:
return (ret.request.headers, ret.request.body, ret.text)
def login():
row = CoreDBServer(UserTable).select().first()
data = {
'username': row['username'],
'password': row['password']
}
return pe_api('http://localhost:%s/jmeter/login' % PORT, data=data)
def exp_order(order_no):
"""过期订单(有效期为15分钟)."""
sql = 'update %s set create_time = date_sub(create_time, interval 15 minute) where order_no = %r' % (
OrderTable.name, order_no)
CoreDBServer().sql_execute(sql)
``` |
{
"source": "JoeEmp/pi_web_file_system",
"score": 2
} |
#### File: server/com/pi_error.py
```python
PARAMETERS_ERROR = {'code':-1,'msg':'参数错误'}  # parameter error
UNKNOW_ERROR = {'code':-1,'msg':'未知错误,请联系管理员'}  # unknown error, please contact the administrator
LOCAL_FAIL_ERROR = {'code':-1,'msg':'接口不存在'}  # endpoint does not exist
LOGIN_ERROR = {'code':2,'msg':'请重新登录'}  # please log in again
class pi_exception(BaseException):
def __init__(self, reason, *args, **kwargs):
self.reason = reason
super().__init__(*args, **kwargs)
```
#### File: server/com/utils.py
```python
import os
import pwd
import stat
import jwt
from time import time
from settings import SALT
from com.pi_error import pi_exception, UNKNOW_ERROR
import logging
def file_info(file, user):
user_info = get_user(user)
return {
"filename": file,
"is_can_read": user_can_read_file(file, user_info),
"is_can_write": user_can_write_file(file, user_info),
"is_dir": os.path.isdir(file)
}
def user_can_write_file(file, user_info):
return user_can_wrx_file(file, user_info, 'write')
def user_can_read_file(file, user_info):
return user_can_wrx_file(file, user_info, 'read')
def user_can_wrx_file(file, user_info, behave):
    s = os.stat(file)
    mode = s[stat.ST_MODE]
    # pick the owner/group/other permission bits matching the requested behaviour
    if 'write' == behave:
        usr_bit, grp_bit, oth_bit = stat.S_IWUSR, stat.S_IWGRP, stat.S_IWOTH
    elif "read" == behave:
        usr_bit, grp_bit, oth_bit = stat.S_IRUSR, stat.S_IRGRP, stat.S_IROTH
    elif "exec" == behave:
        usr_bit, grp_bit, oth_bit = stat.S_IXUSR, stat.S_IXGRP, stat.S_IXOTH
    return (
        ((s[stat.ST_UID] == user_info['uid']) and (mode & usr_bit > 0)) or
        ((s[stat.ST_GID] == user_info['gid']) and (mode & grp_bit > 0)) or
        (mode & oth_bit > 0)
    )
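# e.g. for a file with mode 0o644 owned by another user, user_can_read_file -> True
# (the "other" read bit is set) while user_can_write_file -> False.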
def get_user(user):
    user_info = pwd.getpwnam(user)
    return {
        "name": user_info.pw_name,
        "uid": user_info.pw_uid,
        "gid": user_info.pw_gid
    }
def gen_token(username):
"""get token. """
global SALT
headers = {"alg": "HS256"}
payload = {"username": username, 'exp': time()+(3600*24*1)}
token = jwt.encode(payload=payload, key=SALT,
algorithm='HS256', headers=headers)
return token
def sync_token(token):
global SALT
try:
return jwt.decode(token, key=SALT, verify=True, algorithms=['HS256'])
except Exception as e:
logging.error(e)
raise pi_exception(UNKNOW_ERROR)
```
#### File: server/handlers/index.py
```python
from tornado.web import RequestHandler, MissingArgumentError
import os
class index_handler(RequestHandler):
def get(self):
self.finish("<h1>Hello</h1>")
``` |
{
"source": "joeeoj/advent2021",
"score": 4
} |
#### File: src/day02/part01.py
```python
from typing import Tuple
from input_data import data
def count_coords(data: list[str]) -> Tuple[int, int]:
"""forward adds to horizontal, down adds to depth, up subtracts from depth. Return the sum of all given lines.
>>> count_coords(['forward 10', 'up 5', 'down 10'])
(10, 5)
>>> count_coords(['forward 1', 'forward 2', 'up 10', 'down 1'])
(3, -9)
"""
horizontal, depth = 0, 0
for row in data:
direction, s = row.split(' ')
n = int(s)
if direction == 'forward':
horizontal += n
elif direction == 'down':
depth += n
elif direction == 'up':
depth -= n
return horizontal, depth
def main() -> int:
h, d = count_coords(data)
return h * d
if __name__ == '__main__':
print(main())
``` |
{
"source": "joeeoj/adventofcode2020",
"score": 4
} |
#### File: adventofcode2020/day01/part1.py
```python
import functools
from operator import mul
import time
def simple_timer(func):
"""Simple decorator to give a rough estimate of calculation time. Thank you to RealPython for
the help -- https://realpython.com/primer-on-python-decorators/"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
start = time.perf_counter()
ans = func(*args, **kwargs)
print(f'{func.__name__!r} run in {time.perf_counter() - start:.6f}')
return ans
return wrapper
@simple_timer
def brute_force(data):
"""Loop through twice to get the answer. It ain't pretty but it works."""
for j in data:
for k in data:
if j == k:
continue
if j + k == 2020:
return (j * k)
return None
@simple_timer
def complement_calc(data):
    """Loop through once to find the complements, then take the intersection of the complements
    and the original list to find the pair that sums to 2020. Multiply for the final result."""
complements = set([2020 - row for row in data])
result = set(data) & complements
return mul(*[v for v in result])
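# e.g. with the AoC sample data [1721, 979, 366, 299, 675, 1456], the complements
# contain 2020-1721=299 and 2020-299=1721, so the intersection is {1721, 299}
# and the answer is 1721 * 299 = 514579.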
if __name__ == '__main__':
with open('input.txt') as f:
data = [int(i.strip()) for i in f.readlines()]
# quick enough
brute_force_ans = brute_force(data)
print(f'Brute force answer: {brute_force_ans}\n')
# but much faster
complement_ans = complement_calc(data)
print(f'Complement answer: {complement_ans}')
```
#### File: adventofcode2020/day02/part2.py
```python
def index_password_checker(line):
min_max, char, password = line.split(' ')
first_index, second_index = [int(i)-1 for i in min_max.split('-')]
char = char.replace(':', '')
char_count = password.count(char)
# bitwise exclusive or
if (password[first_index] == char) ^ (password[second_index] == char):
return True
return False
if __name__ == '__main__':
with open('input.txt') as f:
data = [row.strip() for row in f.readlines()]
correct_passwords = sum([index_password_checker(row) for row in data])
print(f'{correct_passwords:,} passwords are correct out of {len(data):} total passwords')
```
#### File: adventofcode2020/day03/part1.py
```python
def find_tree(row, index):
    if index >= len(row):
        row = row * ((index // len(row)) + 1)
if row[index] == '#':
return 1
return 0
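# e.g. find_tree('..#', 5) extends the pattern to '..#..#' and returns 1,
# mimicking the infinitely repeating tree map.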
if __name__ == '__main__':
with open('input.txt') as f:
data = [r.strip() for r in f.readlines()]
indices = range(0, len(data) * 3, 3)
tree_count = 0
for row, i in zip(data, indices):
tree_count += find_tree(row, i)
print(f'Total trees: {tree_count}')
```
#### File: adventofcode2020/day05/part1.py
```python
from typing import List, Tuple
def binary_split(input_range: List[int], part) -> List[int]:
mid = len(input_range) // 2
if part == 'F' or part == 'L':
return input_range[:mid]
elif part == 'B' or part == 'R':
return input_range[mid:]
else:
return None
def parse_pass(boarding_pass: str) -> Tuple[int]:
rows = list(range(128))
cols = list(range(8))
row_part = boarding_pass[:7]
col_part = boarding_pass[7:]
for c in row_part:
rows = binary_split(rows, c)
for c in col_part:
cols = binary_split(cols, c)
row, col = rows[0], cols[0]
seat_id = (row * 8) + col
return (row, col, seat_id)
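# e.g. parse_pass('FBFBBFFRLR') -> (44, 5, 357), the worked example from the puzzle text.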
if __name__ == '__main__':
with open('input.txt') as f:
data = [row.strip() for row in f.readlines()]
seats = [parse_pass(p) for p in data]
ans = max([s[2] for s in seats])
print(ans)
``` |
{
"source": "joeeoj/fantasy-football",
"score": 3
} |
#### File: fantasy-football/code/espn_draft_trends.py
```python
import argparse
import csv
import json
from math import floor
from typing import List
import requests
URL = 'https://fantasy.espn.com/apis/v3/games/ffl/seasons/{year}/segments/0/leaguedefaults/3?view=kona_player_info'
# ty -- https://reddit.com/im6mui
FILTERS = {
"players": {
"limit": 1_500,
"sortDraftRanks": {
"sortPriority": 100,
"sortAsc": True,
"value": "PPR"
}
}
}
HEADERS = {'x-fantasy-filter': json.dumps(FILTERS)}
POSITIONS = {
1: 'QB',
2: 'RB',
3: 'WR',
4: 'TE',
5: 'K',
16: 'DST',
}
# one-off fixes
# player_id, col, val
FIXES = [
    # ESPN lists Wesco (id 4039253) as a RB, which is wrong, so force TE here
(4039253, 'pos', 'TE'),
]
def download_data(year: int) -> List[dict]:
url = URL.format(year=year)
r = requests.get(url, headers=HEADERS)
return r.json().get('players')
def parse_player(p: dict) -> dict:
player = p.get('player')
ownership = player.get('ownership')
if player is None or ownership is None:
return None
ppr_auc_value = floor(player.get('draftRanksByRankType').get('PPR').get('auctionValue', 0))
avg_auc_value = floor(ownership.get('auctionValueAverage', 0))
ppr_rank = player.get('draftRanksByRankType').get('PPR').get('rank')
    avg_draft_pos = floor(ownership.get('averageDraftPosition'))
return {
'player_id': int(player.get('id')),
'name': player.get('fullName'),
'pos': POSITIONS.get(player.get('defaultPositionId')),
'ppr_rank': ppr_rank,
        'avg_draft_pos': avg_draft_pos,
        'pos_diff': avg_draft_pos - ppr_rank,
'ppr_auc_value': ppr_auc_value,
'avg_auc_value': avg_auc_value,
'auc_diff': avg_auc_value - ppr_auc_value,
}
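# e.g. a player with PPR rank 20 but an average draft position of 30 gets
# pos_diff 10, i.e. drafters let him fall 10 picks past his expert rank.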
def fix_errors(players: List[dict]) -> List[dict]:
"""Unfortunately not performant but it only needs to parse through 1k rows so not a big deal"""
search_keys = set([f[0] for f in FIXES])
output_list, to_fix = [], []
for p in players:
if p['player_id'] in search_keys:
to_fix.append(p)
else:
output_list.append(p)
for p in to_fix:
for pid, col, val in FIXES:
if p['player_id'] == pid:
p[col] = val
output_list.append(p)
return output_list
def write_to_csv(players: List[dict], fout: str) -> None:
with open(fout, 'wt') as f:
csvwriter = csv.DictWriter(f, fieldnames=players[0].keys())
csvwriter.writeheader()
csvwriter.writerows(players)
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='espn-trends', description='Download ESPN FF draft trends to csv',
formatter_class= argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('year', help='Year to download', type=int)
parser.add_argument('--fout', help='Output file name', default='espn_draft_trends_{year}.csv')
args = parser.parse_args()
players = download_data(args.year)
print(f'Total players: {len(players):,}')
    parsed = [player for p in players if (player := parse_player(p)) is not None]
print(f'Total parsed players: {len(parsed):,}')
fixed_players = fix_errors(parsed)
assert len(parsed) == len(fixed_players)
fout = args.fout.format(year=args.year) if '{year}' in args.fout else args.fout
write_to_csv(fixed_players, fout)
``` |
{
"source": "joeeoj/mlbcal",
"score": 2
} |
#### File: mlbcal/tests/test_data.py
```python
import datetime
import pytest
from mlbcal.utils.load import get_team_lookup_dict
TOTAL_MLB_TEAMS = 30
TEAMS = get_team_lookup_dict()
def test_lookup_total_teams():
assert len(TEAMS) == TOTAL_MLB_TEAMS
def test_known_team_in_lookup_file():
mariners = TEAMS.get('136')
assert 'sea' in mariners
assert 'seattle' in mariners
assert 'mariners' in mariners
``` |
{
"source": "joeeoj/ps5_links",
"score": 3
} |
#### File: joeeoj/ps5_links/main.py
```python
from functools import partial
import tkinter as tk
import webbrowser
PS5_URLS = {
'Sony': 'https://direct.playstation.com/en-us/consoles/console/playstation5-console.3005816',
'Sony (direct queue)': 'https://direct-queue.playstation.com/softblock/?c=sonyied&e=psdirectprodku1&t=https%3A%2F%2Fdirect.playstation.com%2Fen-us%2Fhardware%2Fps5&cid=en-US',
'Target': 'https://www.target.com/p/playstation-5-console/-/A-81114595',
'BestBuy': 'https://www.bestbuy.com/site/sony-playstation-5-console/6426149.p?skuId=6426149',
'Best Buy (bundles)': 'https://www.bestbuy.com/site/searchpage.jsp?_dyncharset=UTF-8&fs=saas&id=pcat17071&iht=y&keys=keys&ks=960&list=n&nrp=15&saas=saas&sc=Global&sp=-currentprice%20skuidsaas&st=ps5%20bundle&type=page&usc=All%20Categories',
'GameStop': 'https://www.gamestop.com/video-games/playstation-5/consoles',
'Walmart': 'https://www.walmart.com/ip/Sony-PlayStation-5/363472942',
'Amazon': 'https://www.amazon.com/dp/B08FC5L3RG',
'NewEgg': 'https://www.newegg.com/PlayStation-PS5-Systems/BrandSubCat/ID-1541-3762',
'Ant Online (bundle)': 'https://www.antonline.com/Sony/Electronics/Gaming_Devices/Gaming_Consoles/1409261',
    'Fred Meyer': 'https://www.fredmeyer.com/pr/playstation-5',
'Costco': 'https://www.costco.com/sony-playstation-5-gaming-console-bundle.product.100691489.html',
'Now In Stock': 'https://www.nowinstock.net/videogaming/consoles/sonyps5/',
}
HELPER_URLS = {
'Spieltimes YT': 'https://www.youtube.com/c/SpielTimes/videos',
'Blaze2K YT': 'https://www.youtube.com/c/Blaze2k/videos',
'Tweet Deck': 'https://tweetdeck.twitter.com/',
'Now In Stock': 'https://www.nowinstock.net/videogaming/consoles/sonyps5/',
}
INCOGNITO_PATH = f'"/Applications/Google Chrome.app/Contents/MacOS/Google Chrome" --args --new-window --incognito %s'
def open_normal(url):
webbrowser.get('chrome').open(url)
def open_incognito(url):
webbrowser.get(INCOGNITO_PATH).open_new(url)
if __name__ == '__main__':
window = tk.Tk()
window.title('PS5 Links')
reg_label = tk.Label(window, text="Regular websites").grid(row=0, column=0)
for i, (desc, url) in enumerate(HELPER_URLS.items(), start=1):
go_to_website = partial(open_normal, url)
button = tk.Button(
master=window,
text=desc,
width=16,
height=2,
bg="blue",
fg="yellow",
command=go_to_website
).grid(row=i, column=0)
incog_label = tk.Label(window, text="Incognito").grid(row=0, column=1)
for i, (desc, url) in enumerate(PS5_URLS.items(), start=1):
go_to_website = partial(open_incognito, url)
button = tk.Button(
master=window,
text=desc,
width=16,
height=2,
bg="blue",
fg="yellow",
command=go_to_website
).grid(row=i, column=1)
window.mainloop()
``` |
{
"source": "Joeeyy/app_crawler",
"score": 2
} |
#### File: Joeeyy/app_crawler/mul_app_crawler.py
```python
import json
import threading
import requests
from lxml import etree
import pymysql
from queue import Queue
import time
proxies = {"http": "http://127.0.0.1:8118","https": "http://127.0.0.1:8118",}
alphabet = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z','*']
cgInfoFile = "./cgInfoFile.txt"
base_url = "https://itunes.apple.com/"
targetCategory = "App Store"
targetCountry = "cn"
db_name="app_store"
table_name = "app_names_cn_4"
host = "localhost"
user = ""
pwd = ""
Boom=False
crawlThreadNum = 0
processFile = "process.json"
failed_requests=[]
failed_sqls=[]
def clean_name(name):
name = name.replace('\\','\\\\')
name = name.replace('\"','\\\"')
name = name.replace('\'','\\\'')
return name
class ioThread(threading.Thread):
def __init__(self, name, dataQueue):
super(ioThread, self).__init__()
self.name = name
self.dataQueue = dataQueue
self.db = pymysql.connect(host, user, pwd, db_name)
self.cursor = self.db.cursor()
def run(self):
while not Boom:
#print("%s %d"%(self.name,self.dataQueue.qsize()))
#print(crawlThreadNum, self.dataQueue.qsize(), self.dataQueue.empty())
if crawlThreadNum == 0 and self.dataQueue.empty():
print("%s crawlThread done, and no data wait for write, exit."%self.name)
break
try:
t = self.dataQueue.get(timeout=10)
for each in t[0]:
each = clean_name(each)
sql = 'insert into %s(app_name, genre_id) values("%s", %s)'%(table_name,each,t[1])
try:
self.cursor.execute(sql)
except:
failed_sqls.append(sql)
self.db.commit()
except:
pass
print("%s stopped"%self.name)
self.db.close()
f = open("failed_sqls.txt",'a')
for each in failed_sqls:
f.write(each+"\n")
f.close()
class crawlThread(threading.Thread):
def __init__(self, name, crawl_dict, genre_dict, dataQueue, lock):
super(crawlThread, self).__init__()
self.name = name
self.crawl_dict = crawl_dict
self.genre_dict = genre_dict
self.lock = lock
self.dataQueue = dataQueue
def run(self):
for genre_name, genre_id in self.genre_dict.items():
for a in alphabet:
apd = False
while not apd:
if self.crawl_dict[genre_name][a]['done']:
print("%s genre done"%self.name)
break
with self.lock:
current_page = self.crawl_dict[genre_name][a]['current_page']
if current_page == 0:
current_page += 1
self.crawl_dict[genre_name][a]['current_page'] += 1
self.crawl_dict[genre_name][a]['current_page'] += 1
url = base_url + targetCountry + "/genre/id" + genre_id + "?mt=8"+"&letter=%s"%a + "&page=%d"%current_page
apd = self.parseAUrl(url, genre_id)
if apd == None:
with self.lock:
self.crawl_dict[genre_name][a]['current_page'] -= 1
continue
if apd:
with self.lock:
self.crawl_dict[genre_name][a]['current_page'] -= 1
self.crawl_dict[genre_name][a]['done'] = True
        # the io threads exit once crawlThreadNum reaches 0 and the data queue is
        # drained, so the Boom flag does not need to be raised here
        global crawlThreadNum
        if crawlThreadNum == 1:
            print("last crawl thread: %s going to exit." % self.name)
            thread_processFile = self.name + "_" + processFile
            f = open(thread_processFile, 'w')
            f.write(json.dumps(self.crawl_dict))
            f.close()
            f = open('failed_requests.txt', 'w')
            for each in failed_requests:
                f.write("%s %s\n" % (each[1], each[0]))
            f.close()
with self.lock:
crawlThreadNum -= 1
def parseAUrl(self, url="",genre_id=0):
if url=="":
return None
print(self.name, url)
try:
response = requests.get(url,proxies=proxies, timeout=60)
except:
print("%s Error occurred."%self.name)
failed_requests.append((url,genre_id))
return None
status_code = response.status_code
html_text = response.text
print("%s status_code: %d"%(self.name,status_code))
html = etree.HTML(html_text)
        # main info block, split into left, middle and right columns
leftCol_texts = html.xpath('//div[@id="selectedcontent"]/div[@class="column first"]/ul/li/a/text()')
#leftCol_hrefs = html.xpath('//div[@id="selectedcontent"]/div[@class="column first"]/ul/li/a/@href')
middleCol_texts = html.xpath('//div[@id="selectedcontent"]/div[@class="column"]/ul/li/a/text()')
rightCol_texts = html.xpath('//div[@id="selectedcontent"]/div[@class="column last"]/ul/li/a/text()')
if len(leftCol_texts)==0:
return True
else:
self.dataQueue.put((leftCol_texts,genre_id))
if len(middleCol_texts)==0:
return True
else:
self.dataQueue.put((middleCol_texts,genre_id))
if len(rightCol_texts)==0:
return True
else:
self.dataQueue.put((rightCol_texts,genre_id))
return False
# cgInfo here is a json_str
def read_cgInfo():
f = open(cgInfoFile,'r')
cgInfo = f.read()
f.close()
return cgInfo
# returns a dict, in which cate_name is the key, and id is the value
def getCategories(cg_json=None):
category_dict = {}
if cg_json == None:
return category_dict
for key in cg_json.keys():
category_dict[cg_json[key]['name']] = key
return category_dict
# returns a dict, in which genre_name is the key, and id is the value
def getGenres(cg_json=None, category_dict=None, target=""):
genre_dict = {}
if cg_json==None or category_dict==None or target == "":
return genre_dict
if not category_dict.__contains__(target):
return genre_dict
for key in cg_json[category_dict[target]]['subgenres'].keys():
genre_dict[cg_json[category_dict[target]]['subgenres'][key]['name']] = key
return genre_dict
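# Assumed shape of cgInfoFile (a sketch, not verified against the live iTunes API):
#   {"36": {"name": "App Store", "subgenres": {"6018": {"name": "Books", ...}, ...}}}
# so getCategories(cg_json) -> {"App Store": "36"}
# and getGenres(cg_json, category_dict, "App Store") -> {"Books": "6018", ...}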
def createCrawlDict(genre_dict):
    '''
    crawl_dict = {
        genre: {
            'A': {
                'current_page': num,
                'done': False  # done, when crawl finished
            },
            'B': {
                ...
            },
            ...
        }
    }
    '''
crawl_dict = {}
for genre in genre_dict:
crawl_dict[genre]={}
for a in alphabet:
crawl_dict[genre][a]={}
crawl_dict[genre][a]['done']=False
crawl_dict[genre][a]['current_page']=0
return crawl_dict
def main():
print("multi-thread app crawler of apple app store ...")
dataQueue = Queue()
cg_json_str = read_cgInfo()
cg_json = json.loads(cg_json_str)
category_dict = getCategories(cg_json)
targetCategory_id = category_dict[targetCategory]
genre_dict = getGenres(cg_json, category_dict, targetCategory)
crawl_dict = createCrawlDict(genre_dict)
crawlThreads = []
lock = threading.Lock()
for i in range(10):
threadName = "crawlThread-%d"%i
thread = crawlThread(threadName, crawl_dict, genre_dict, dataQueue, lock)
thread.start()
crawlThreads.append(thread)
global crawlThreadNum
crawlThreadNum = len(crawlThreads)
ioThreads = []
for i in range(1):
threadName = "ioThread-%d"%i
thread = ioThread(threadName, dataQueue)
thread.start()
ioThreads.append(thread)
for thread in crawlThreads:
thread.join()
#crawlByCategory(genre_dict)
for thread in ioThreads:
thread.join()
if __name__ == '__main__':
main()
``` |
{
"source": "joefarrington/bloodbank_rl",
"score": 3
} |
#### File: bloodbank_rl/pyomo_models/model_constructors.py
```python
import pyomo.environ as pyo
import numpy as np
class PyomoModelConstructor:
def __init__(
self,
demand,
t_max=30,
a_max=3,
initial_inventory={1: 0, 2: 36},
fixed_order_cost=225,
variable_order_cost=650,
holding_cost=130,
emergency_procurement_cost=3250,
wastage_cost=650,
M=100,
additional_fifo_constraints=True,
weekly_policy=False,
shelf_life_at_arrival_dist=[0, 0, 1],
):
self.model = pyo.ConcreteModel()
# Check that `shelf_life_at_arrival_dist` sums to 1 and had right
# number of elements
assert (
len(shelf_life_at_arrival_dist) == a_max
), "`shelf_life_at_arrival_dist` must have number of elements equal to `a_max`"
assert (
np.sum(shelf_life_at_arrival_dist) == 1
), "`shelf_life_at_arrival_dist` must sum to 1"
self.shelf_life_at_arrival_dist = shelf_life_at_arrival_dist
self.model.T = pyo.RangeSet(1, t_max)
self.model.A = pyo.RangeSet(1, a_max)
self.weekly_policy = weekly_policy
if self.weekly_policy:
self.model.Wd = pyo.RangeSet(0, 6)
self.model.M = M
# Hydra doesn't support integer keys so convert here if needed
self.model.initial_inventory = {int(k): v for k, v in initial_inventory.items()}
self.additional_fifo_constraints = additional_fifo_constraints
self.model.demand = demand
self.model.CssF = fixed_order_cost
self.model.CssP = variable_order_cost
self.model.CssH = holding_cost
self.model.CssE = emergency_procurement_cost
self.model.CssW = wastage_cost
self.model.cons = pyo.ConstraintList()
def build_model(self):
self._add_common_variables()
if self.weekly_policy:
self._add_specific_variables_weekly()
else:
self._add_specific_variables()
self._add_cost_function()
self._add_common_constraints()
if self.weekly_policy:
self._add_specific_constraints_weekly()
else:
self._add_specific_constraints()
return self.model
def _add_common_variables(self):
self.model.OQ = pyo.Var(
self.model.T, domain=pyo.NonNegativeReals
) # Units ordered at end of day t
self.model.X = pyo.Var(
self.model.T, self.model.A, domain=pyo.NonNegativeReals
) # Units received at beginning of day t with shelf life a
self.model.DssR = pyo.Var(
self.model.T, self.model.A, domain=pyo.NonNegativeReals
) # Remaining demand on day t after using product with shelf life a days
self.model.IssB = pyo.Var(
self.model.T, self.model.A, domain=pyo.NonNegativeReals
) # On-hand inventory at the beginning of day t with shelf life a days
self.model.IssE = pyo.Var(
self.model.T, self.model.A, domain=pyo.NonNegativeReals
) # Inventory at the end of day t with shelf life a days
self.model.IP = pyo.Var(
self.model.T, domain=pyo.NonNegativeReals
) # Inventory position at the end of day t
self.model.E = pyo.Var(
self.model.T, domain=pyo.NonNegativeReals
) # Number of units obtained through emergency procurement on day t
self.model.W = pyo.Var(
self.model.T, domain=pyo.NonNegativeReals
) # Number of units wasted at the end of day t
self.model.Delta = pyo.Var(
self.model.T, domain=pyo.Binary
) # 1 if IP_t is less than s, 0 otherwise
self.model.F = pyo.Var(
self.model.T, domain=pyo.Binary
) # 1 is order placed on day t, 0 otherwise
if self.additional_fifo_constraints:
self.model.binDssR = pyo.Var(
self.model.T, self.model.A, domain=pyo.Binary
) # Binary flag if there is remaining demand on day t after using product with shelf life a days
def _add_cost_function(self):
# This does not include the extra cost we considered looking at the difference between s and S
self.model.fixed_cost = sum(
self.model.CssF * self.model.F[t] for t in self.model.T
)
self.model.variable_cost = sum(
self.model.CssP * self.model.OQ[t] for t in self.model.T
)
self.model.holding_cost = sum(
self.model.CssH * sum(self.model.IssE[t, a] for a in self.model.A if a > 1)
for t in self.model.T
)
self.model.wastage_cost = sum(
self.model.CssW * self.model.W[t] for t in self.model.T
)
self.model.shortage_cost = sum(
self.model.CssE * self.model.E[t] for t in self.model.T
)
self.model.objective = pyo.Objective(
expr=self.model.fixed_cost
+ self.model.variable_cost
+ self.model.holding_cost
+ self.model.wastage_cost
+ self.model.shortage_cost,
sense=pyo.minimize,
)
def _add_common_constraints(self):
# Equation 3
for t in self.model.T:
self.model.cons.add(self.model.OQ[t] <= self.model.M * self.model.F[t])
# Equation 4
# For now, not included, because review time never actually gets changed
# Equations 5 and 6
for t in self.model.T:
if t == 1:
self.model.cons.add(sum(self.model.X[t, a] for a in self.model.A) == 0)
else:
self.model.cons.add(
sum(self.model.X[t, a] for a in self.model.A)
== self.model.OQ[t - 1]
)
        # For the baseline setting, all inventory should have three useful days of life when received.
# This works fine for the baseline setting, but we do currently get some rounding issues when
# moving away from it.
for t in self.model.T:
for a in self.model.A:
if t == 1:
pass # covered in constraint above
elif self.shelf_life_at_arrival_dist[a - 1] == 0:
self.model.cons.add(self.model.X[t, a] == 0)
else:
self.model.cons.add(
self.model.X[t, a]
>= (
self.model.OQ[t - 1]
* self.shelf_life_at_arrival_dist[a - 1]
)
- 0.5
)
self.model.cons.add(
self.model.X[t, a]
<= (
self.model.OQ[t - 1]
* self.shelf_life_at_arrival_dist[a - 1]
)
+ 0.5
)
# Equations 7 and 8:
for t in self.model.T:
for a in self.model.A:
if a == 1:
self.model.cons.add(
self.model.demand[t]
- self.model.IssB[t, a]
- self.model.X[t, a]
== self.model.DssR[t, a] - self.model.IssE[t, a]
)
else:
self.model.cons.add(
self.model.DssR[t, a - 1]
- self.model.IssB[t, a]
- self.model.X[t, a]
== self.model.DssR[t, a] - self.model.IssE[t, a]
)
if self.additional_fifo_constraints:
# We need to enforce that only one variable on the RHS on equations 7 and 8 can be non-zero
# For that we need an extra binary variable e.g. Pauls-Worm (inventory control for a perishable product with non-stationary
# demand and service level constraints)
for t in self.model.T:
for a in self.model.A:
self.model.cons.add(
self.model.M * self.model.binDssR[t, a] >= self.model.DssR[t, a]
)
self.model.cons.add(
self.model.M * (1 - self.model.binDssR[t, a])
>= self.model.IssE[t, a]
)
# Equation 9
        # Amended to just sum over X for t < current t
# using u as t'
for t in self.model.T:
self.model.cons.add(
self.model.IP[t]
== sum(self.model.IssE[t, a] for a in self.model.A if a > 1)
+ sum(self.model.OQ[u] for u in self.model.T if u < t)
- sum(
self.model.X[u, a]
for a in self.model.A
for u in self.model.T
if u <= t
)
)
# Equation 16
# Paper says this should be in all, but no S for s,Q model, so specify where required
# Equation 17
for t in self.model.T:
if t == self.model.T[-1]:
pass
else:
for a in self.model.A:
if a == self.model.A[-1]:
pass
else:
self.model.cons.add(
self.model.IssB[t + 1, a] == self.model.IssE[t, a + 1]
)
# Equation 18
for t in self.model.T:
self.model.cons.add(self.model.E[t] == self.model.DssR[t, self.model.A[-1]])
# Equation 19
for t in self.model.T:
self.model.cons.add(self.model.W[t] == self.model.IssE[t, 1])
# Equation 20
for t in self.model.T:
self.model.cons.add(self.model.IssB[t, self.model.A[-1]] == 0)
# Equation 21
for a in self.model.A:
if a == self.model.A[-1]:
pass
else:
self.model.cons.add(
self.model.IssB[1, a] == self.model.initial_inventory[a]
)
def _add_specific_variables(self):
# Implement for each model
pass
def _add_specific_constraints(self):
# Impletement for each model
pass
def _add_specific_variables_weekly(self):
# Implement for each model
pass
def _add_specific_constraints_weekly(self):
# Impletement for each model
pass
@staticmethod
def policy_parameters():
# Implement for each model
pass
class sS_PyomoModelConstructor(PyomoModelConstructor):
def _add_specific_variables(self):
self.model.s = pyo.Var(
self.model.T, domain=pyo.NonNegativeReals
) # re-order point
self.model.S = pyo.Var(self.model.T, domain=pyo.NonNegativeReals)
def _add_specific_constraints(self):
# Equation 10
for t in self.model.T:
self.model.cons.add(
self.model.IP[t]
<= (self.model.s[t] - 1) + self.model.M * (1 - self.model.Delta[t])
)
# Equation 11
for t in self.model.T:
self.model.cons.add(
self.model.IP[t] >= self.model.s[t] - self.model.M * self.model.Delta[t]
)
# Equation 16
for t in self.model.T:
self.model.cons.add(self.model.S[t] >= self.model.s[t] + 1)
# Equation B-2
for t in self.model.T:
self.model.cons.add(
self.model.OQ[t]
<= (self.model.S[t] - self.model.IP[t])
+ self.model.M * (1 - self.model.Delta[t])
)
# Equation B-3
self.model.cons.add(
self.model.OQ[t]
>= (self.model.S[t] - self.model.IP[t])
- self.model.M * (1 - self.model.Delta[t])
)
# Equation B-4
self.model.cons.add(self.model.OQ[t] <= self.model.M * self.model.Delta[t])
def _add_specific_variables_weekly(self):
self.model.s = pyo.Var(self.model.Wd, domain=pyo.NonNegativeReals)
self.model.S = pyo.Var(self.model.Wd, domain=pyo.NonNegativeReals)
def _add_specific_constraints_weekly(self):
# Equation 10
for t in self.model.T:
self.model.cons.add(
self.model.IP[t]
<= (self.model.s[(t - 1) % 7] - 1)
+ self.model.M * (1 - self.model.Delta[t])
)
# Equation 11
for t in self.model.T:
self.model.cons.add(
self.model.IP[t]
>= self.model.s[(t - 1) % 7] - self.model.M * self.model.Delta[t]
)
# Equation 16, but taking into account
# that each weekday should have its own parameter
for w in self.model.Wd:
self.model.cons.add(self.model.S[w] >= self.model.s[w] + 1)
# Equation B-2
for t in self.model.T:
self.model.cons.add(
self.model.OQ[t]
<= (self.model.S[(t - 1) % 7] - self.model.IP[t])
+ self.model.M * (1 - self.model.Delta[t])
)
# Equation B-3
self.model.cons.add(
self.model.OQ[t]
>= (self.model.S[(t - 1) % 7] - self.model.IP[t])
- self.model.M * (1 - self.model.Delta[t])
)
# Equation B-4
self.model.cons.add(self.model.OQ[t] <= self.model.M * self.model.Delta[t])
@staticmethod
def policy_parameters():
return ["s", "S"]
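# Minimal usage sketch (hypothetical demand series and solver; any MILP solver
# supported by pyomo, e.g. 'cbc' or 'gurobi', could be substituted):
#   demand = {t: 10 for t in range(1, 31)}
#   constructor = sS_PyomoModelConstructor(demand, t_max=30)
#   model = constructor.build_model()
#   pyo.SolverFactory('cbc').solve(model)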
class sQ_PyomoModelConstructor(PyomoModelConstructor):
def _add_specific_variables(self):
self.model.s = pyo.Var(
self.model.T, domain=pyo.NonNegativeReals
) # re-order point
self.model.Q = pyo.Var(
self.model.T, domain=pyo.NonNegativeReals
) # order-up to level
def _add_specific_constraints(self):
# Constraints for s, Q model
# Equation 10
for t in self.model.T:
self.model.cons.add(
self.model.IP[t]
<= (self.model.s[t] - 1) + self.model.M * (1 - self.model.Delta[t])
)
# Equation 11
for t in self.model.T:
self.model.cons.add(
self.model.IP[t] >= self.model.s[t] - self.model.M * self.model.Delta[t]
)
# Constraint C-2
for t in self.model.T:
self.model.cons.add(
self.model.OQ[t]
<= self.model.Q[t] + self.model.M * (1 - self.model.Delta[t])
)
# Constraint C-3
for t in self.model.T:
self.model.cons.add(
self.model.OQ[t]
>= self.model.Q[t] - self.model.M * (1 - self.model.Delta[t])
)
        # Constraint C-4
for t in self.model.T:
self.model.cons.add(self.model.OQ[t] <= self.model.M * self.model.Delta[t])
def _add_specific_variables_weekly(self):
self.model.s = pyo.Var(self.model.Wd, domain=pyo.NonNegativeReals)
self.model.Q = pyo.Var(
self.model.Wd, domain=pyo.NonNegativeReals
) # order-up to level
def _add_specific_constraints_weekly(self):
# Constraints for s, Q model
# Equation 10
for t in self.model.T:
self.model.cons.add(
self.model.IP[t]
<= (self.model.s[(t - 1) % 7] - 1)
+ self.model.M * (1 - self.model.Delta[t])
)
# Equation 11
for t in self.model.T:
self.model.cons.add(
self.model.IP[t]
>= self.model.s[(t - 1) % 7] - self.model.M * self.model.Delta[t]
)
# Constraint C-2
for t in self.model.T:
self.model.cons.add(
self.model.OQ[t]
<= self.model.Q[(t - 1) % 7] + self.model.M * (1 - self.model.Delta[t])
)
# Constraint C-3
for t in self.model.T:
self.model.cons.add(
self.model.OQ[t]
>= self.model.Q[(t - 1) % 7] - self.model.M * (1 - self.model.Delta[t])
)
        # Constraint C-4
for t in self.model.T:
self.model.cons.add(self.model.OQ[t] <= self.model.M * self.model.Delta[t])
@staticmethod
def policy_parameters():
return ["s", "Q"]
class sSaQ_PyomoModelConstructor(PyomoModelConstructor):
def _add_specific_variables(self):
self.model.s = pyo.Var(
self.model.T, domain=pyo.NonNegativeReals
) # re-order point
self.model.S = pyo.Var(self.model.T, domain=pyo.NonNegativeReals)
self.model.Q = pyo.Var(
self.model.T, domain=pyo.NonNegativeReals
) # order-up to level
self.model.alpha = pyo.Var(self.model.T, domain=pyo.NonNegativeReals)
self.model.delta = pyo.Var(
self.model.T, domain=pyo.Binary
) # 1 if IP_t is less than a, 0 otherwise
def _add_specific_constraints(self):
# Equation 10
for t in self.model.T:
self.model.cons.add(
self.model.IP[t]
<= (self.model.s[t] - 1) + self.model.M * (1 - self.model.Delta[t])
)
# Equation 11
for t in self.model.T:
self.model.cons.add(
self.model.IP[t] >= self.model.s[t] - self.model.M * self.model.Delta[t]
)
# Equation 12
for t in self.model.T:
self.model.cons.add(
self.model.IP[t]
<= (self.model.alpha[t] - 1) + self.model.M * (1 - self.model.delta[t])
)
# Equation 13
for t in self.model.T:
self.model.cons.add(
self.model.IP[t]
>= self.model.alpha[t] - self.model.M * self.model.delta[t]
)
# Equation 14 - linearised into A-1 to A-5
## Equation A-1
for t in self.model.T:
self.model.cons.add(
self.model.OQ[t]
<= self.model.Q[t]
+ self.model.M * self.model.delta[t]
+ self.model.M * (1 - self.model.Delta[t])
)
## Equation A-2
for t in self.model.T:
self.model.cons.add(
self.model.OQ[t]
>= self.model.Q[t]
- self.model.M * self.model.delta[t]
- self.model.M * (1 - self.model.Delta[t])
)
## Equation A-3
for t in self.model.T:
self.model.cons.add(
self.model.OQ[t]
<= (self.model.S[t] - self.model.IP[t])
+ self.model.M * (1 - self.model.delta[t])
+ self.model.M * (1 - self.model.Delta[t])
)
## Equation A-4
for t in self.model.T:
self.model.cons.add(
self.model.OQ[t]
>= (self.model.S[t] - self.model.IP[t])
- self.model.M * (1 - self.model.delta[t])
- self.model.M * (1 - self.model.Delta[t])
)
## Equation A-5
for t in self.model.T:
self.model.cons.add(self.model.OQ[t] <= self.model.M * self.model.Delta[t])
# Equation 15
for t in self.model.T:
self.model.cons.add(self.model.s[t] >= self.model.alpha[t] + 1)
# Equation 16
for t in self.model.T:
self.model.cons.add(self.model.S[t] >= self.model.s[t] + 1)
def _add_specific_variables_weekly(self):
self.model.s = pyo.Var(self.model.Wd, domain=pyo.NonNegativeReals)
self.model.S = pyo.Var(self.model.Wd, domain=pyo.NonNegativeReals)
self.model.Q = pyo.Var(
self.model.Wd, domain=pyo.NonNegativeReals
) # order-up to level
self.model.alpha = pyo.Var(self.model.Wd, domain=pyo.NonNegativeReals)
self.model.delta = pyo.Var(
self.model.T, domain=pyo.Binary
) # 1 if IP_t is less than a, 0 otherwise
def _add_specific_constraints_weekly(self):
# Equation 10
for t in self.model.T:
self.model.cons.add(
self.model.IP[t]
<= (self.model.s[(t - 1) % 7] - 1)
+ self.model.M * (1 - self.model.Delta[t])
)
# Equation 11
for t in self.model.T:
self.model.cons.add(
self.model.IP[t]
>= self.model.s[(t - 1) % 7] - self.model.M * self.model.Delta[t]
)
# Equation 12
for t in self.model.T:
self.model.cons.add(
self.model.IP[t]
<= (self.model.alpha[(t - 1) % 7] - 1)
+ self.model.M * (1 - self.model.delta[t])
)
# Equation 13
for t in self.model.T:
self.model.cons.add(
self.model.IP[t]
>= self.model.alpha[(t - 1) % 7] - self.model.M * self.model.delta[t]
)
        # Equation 14 - linearised into A-1 to A-5
## Equation A-1
for t in self.model.T:
self.model.cons.add(
self.model.OQ[t]
<= self.model.Q[(t - 1) % 7]
+ self.model.M * self.model.delta[t]
+ self.model.M * (1 - self.model.Delta[t])
)
## Equation A-2
for t in self.model.T:
self.model.cons.add(
self.model.OQ[t]
>= self.model.Q[(t - 1) % 7]
- self.model.M * self.model.delta[t]
- self.model.M * (1 - self.model.Delta[t])
)
## Equation A-3
for t in self.model.T:
self.model.cons.add(
self.model.OQ[t]
<= (self.model.S[(t - 1) % 7] - self.model.IP[t])
+ self.model.M * (1 - self.model.delta[t])
+ self.model.M * (1 - self.model.Delta[t])
)
## Equation A-4
for t in self.model.T:
self.model.cons.add(
self.model.OQ[t]
>= (self.model.S[(t - 1) % 7] - self.model.IP[t])
- self.model.M * (1 - self.model.delta[t])
- self.model.M * (1 - self.model.Delta[t])
)
## Equation A-5
for t in self.model.T:
self.model.cons.add(self.model.OQ[t] <= self.model.M * self.model.Delta[t])
# Equation 15
for w in self.model.Wd:
self.model.cons.add(self.model.s[w] >= self.model.alpha[w] + 1)
# Equation 16, but taking into account
# that each weekday should have its own parameter
for w in self.model.Wd:
self.model.cons.add(self.model.S[w] >= self.model.s[w] + 1)
@staticmethod
def policy_parameters():
return ["s", "S", "alpha", "Q"]
class sSbQ_PyomoModelConstructor(PyomoModelConstructor):
def _add_specific_variables(self):
self.model.s = pyo.Var(
self.model.T, domain=pyo.NonNegativeReals
) # re-order point
self.model.S = pyo.Var(self.model.T, domain=pyo.NonNegativeReals)
self.model.Q = pyo.Var(
self.model.T, domain=pyo.NonNegativeReals
) # order-up to level
self.model.beta = pyo.Var(self.model.T, domain=pyo.NonNegativeReals)
self.model.nu = pyo.Var(
self.model.T, domain=pyo.Binary
) # 1 if IP_t is less than beta, 0 otherwise
def _add_specific_constraints(self):
# Equation 10
for t in self.model.T:
self.model.cons.add(
self.model.IP[t]
<= (self.model.s[t] - 1) + self.model.M * (1 - self.model.Delta[t])
)
# Equation 11
for t in self.model.T:
self.model.cons.add(
self.model.IP[t] >= self.model.s[t] - self.model.M * self.model.Delta[t]
)
# Equation 16
for t in self.model.T:
self.model.cons.add(self.model.S[t] >= self.model.s[t] + 1)
# Equation 26
for t in self.model.T:
self.model.cons.add(
self.model.IP[t]
<= (self.model.beta[t] - 1) + self.model.M * (1 - self.model.nu[t])
)
# Equation 27
for t in self.model.T:
self.model.cons.add(
self.model.IP[t] >= self.model.beta[t] - self.model.M * self.model.nu[t]
)
        # Equation 28 - linearised into A-6 to A-10
## Equation A-6
for t in self.model.T:
self.model.cons.add(
self.model.OQ[t]
<= self.model.Q[t]
+ self.model.M * (1 - self.model.nu[t])
+ self.model.M * (1 - self.model.Delta[t])
)
## Equation A-7
for t in self.model.T:
self.model.cons.add(
self.model.OQ[t]
>= self.model.Q[t]
- self.model.M * (1 - self.model.nu[t])
- self.model.M * (1 - self.model.Delta[t])
)
## Equation A-8
for t in self.model.T:
self.model.cons.add(
self.model.OQ[t]
<= (self.model.S[t] - self.model.IP[t])
+ (self.model.M * self.model.nu[t])
+ self.model.M * (1 - self.model.Delta[t])
)
## Equation A-9
for t in self.model.T:
self.model.cons.add(
self.model.OQ[t]
>= (self.model.S[t] - self.model.IP[t])
- (self.model.M * self.model.nu[t])
- self.model.M * (1 - self.model.Delta[t])
)
## Equation A-10
for t in self.model.T:
self.model.cons.add(self.model.OQ[t] <= self.model.M * self.model.Delta[t])
# Equation 29
for t in self.model.T:
self.model.cons.add(self.model.s[t] >= self.model.beta[t] + 1)
def _add_specific_variables_weekly(self):
self.model.s = pyo.Var(self.model.Wd, domain=pyo.NonNegativeReals)
self.model.S = pyo.Var(self.model.Wd, domain=pyo.NonNegativeReals)
self.model.Q = pyo.Var(
self.model.Wd, domain=pyo.NonNegativeReals
) # order-up to level
self.model.beta = pyo.Var(self.model.Wd, domain=pyo.NonNegativeReals)
self.model.nu = pyo.Var(
self.model.T, domain=pyo.Binary
) # 1 if IP_t is less than b, 0 otherwise
def _add_specific_constraints_weekly(self):
# Equation 10
for t in self.model.T:
self.model.cons.add(
self.model.IP[t]
<= (self.model.s[(t - 1) % 7] - 1)
+ self.model.M * (1 - self.model.Delta[t])
)
# Equation 11
for t in self.model.T:
self.model.cons.add(
self.model.IP[t]
>= self.model.s[(t - 1) % 7] - self.model.M * self.model.Delta[t]
)
# Equation 16, but taking into account
# that each weekday should have its own parameter
for w in self.model.Wd:
self.model.cons.add(self.model.S[w] >= self.model.s[w] + 1)
# Equation 26
for t in self.model.T:
self.model.cons.add(
self.model.IP[t]
<= (self.model.beta[(t - 1) % 7] - 1)
+ self.model.M * (1 - self.model.nu[t])
)
# Equation 27
for t in self.model.T:
self.model.cons.add(
self.model.IP[t]
>= self.model.beta[(t - 1) % 7] - self.model.M * self.model.nu[t]
)
        # Equation 28 - linearised into A-6 to A-10
## Equation A-6
for t in self.model.T:
self.model.cons.add(
self.model.OQ[t]
<= self.model.Q[(t - 1) % 7]
+ self.model.M * (1 - self.model.nu[t])
+ self.model.M * (1 - self.model.Delta[t])
)
## Equation A-7
for t in self.model.T:
self.model.cons.add(
self.model.OQ[t]
>= self.model.Q[(t - 1) % 7]
- self.model.M * (1 - self.model.nu[t])
- self.model.M * (1 - self.model.Delta[t])
)
## Equation A-8
for t in self.model.T:
self.model.cons.add(
self.model.OQ[t]
<= (self.model.S[(t - 1) % 7] - self.model.IP[t])
+ (self.model.M * self.model.nu[t])
+ self.model.M * (1 - self.model.Delta[t])
)
## Equation A-9
for t in self.model.T:
self.model.cons.add(
self.model.OQ[t]
>= (self.model.S[(t - 1) % 7] - self.model.IP[t])
- (self.model.M * self.model.nu[t])
- self.model.M * (1 - self.model.Delta[t])
)
## Equation A-10
for t in self.model.T:
self.model.cons.add(self.model.OQ[t] <= self.model.M * self.model.Delta[t])
# Equation 29
for w in self.model.Wd:
self.model.cons.add(self.model.s[w] >= self.model.beta[w] + 1)
@staticmethod
def policy_parameters():
return ["s", "S", "beta", "Q"]
# Classic order-up-to (base-stock) policy; the weekly variant uses one parameter per weekday
class S_PyomoModelConstructor(PyomoModelConstructor):
def _add_specific_variables(self):
self.model.S = pyo.Var(self.model.T, domain=pyo.NonNegativeReals)
def _add_specific_constraints(self):
# Equation 10
for t in self.model.T:
self.model.cons.add(
self.model.IP[t]
<= (self.model.S[t] - 1) + self.model.M * (1 - self.model.Delta[t])
)
# Equation 11
for t in self.model.T:
self.model.cons.add(
self.model.IP[t] >= self.model.S[t] - self.model.M * self.model.Delta[t]
)
# Equation B-2
for t in self.model.T:
self.model.cons.add(
self.model.OQ[t]
<= (self.model.S[t] - self.model.IP[t])
+ self.model.M * (1 - self.model.Delta[t])
)
        # Equation B-3
        for t in self.model.T:
            self.model.cons.add(
                self.model.OQ[t]
                >= (self.model.S[t] - self.model.IP[t])
                - self.model.M * (1 - self.model.Delta[t])
            )
        # Equation B-4
        for t in self.model.T:
            self.model.cons.add(self.model.OQ[t] <= self.model.M * self.model.Delta[t])
def _add_specific_variables_weekly(self):
self.model.S = pyo.Var(self.model.Wd, domain=pyo.NonNegativeReals)
def _add_specific_constraints_weekly(self):
# Equation 10
for t in self.model.T:
self.model.cons.add(
self.model.IP[t]
<= (self.model.S[(t - 1) % 7] - 1)
+ self.model.M * (1 - self.model.Delta[t])
)
# Equation 11
for t in self.model.T:
self.model.cons.add(
self.model.IP[t]
>= self.model.S[(t - 1) % 7] - self.model.M * self.model.Delta[t]
)
# Equation B-2
for t in self.model.T:
self.model.cons.add(
self.model.OQ[t]
<= (self.model.S[(t - 1) % 7] - self.model.IP[t])
+ self.model.M * (1 - self.model.Delta[t])
)
        # Equation B-3
        for t in self.model.T:
            self.model.cons.add(
                self.model.OQ[t]
                >= (self.model.S[(t - 1) % 7] - self.model.IP[t])
                - self.model.M * (1 - self.model.Delta[t])
            )
        # Equation B-4
        for t in self.model.T:
            self.model.cons.add(self.model.OQ[t] <= self.model.M * self.model.Delta[t])
@staticmethod
def policy_parameters():
return ["S"]
```
#### File: bloodbank_rl/pyomo_models/stochastic_model_runner.py
```python
import numpy as np
import pandas as pd
import pyomo.environ as pyo
import mpisppy.utils.sputils as sputils
from mpisppy.opt.ef import ExtensiveForm
from pathlib import Path
import os
import sys
path_root = Path(os.path.abspath(__file__)).parents[2]
sys.path.append(str(path_root))
from bloodbank_rl.environments.platelet_bankSR import PoissonDemandProviderSR
import bloodbank_rl.pyomo_models.model_constructors as pyomo_mc
class PyomoModelRunner:
def __init__(
self,
model_constructor,
model_constructor_params,
n_scenarios,
demand_provider,
demand_provider_kwargs=None,
scenario_name_start=0, # Used this as starting seed for Pyomo experiments with sim data
solver_string="gurobi_persistent",
solver_options={"LogFile": "gurobi.log", "OutputFlag": 1, "LogToConsole": 0},
log=None,
):
self.model_constructor = model_constructor
self.model_constructor_params = model_constructor_params
self.n_scenarios = n_scenarios
self.demand_provider = demand_provider
self.demand_provider_kwargs = demand_provider_kwargs
self.scenario_name_start = scenario_name_start
self.solver_string = solver_string
self.solver_options = solver_options
self.all_scenario_names = [
f"{i+self.scenario_name_start}" for i in range(0, self.n_scenarios)
]
self.checks_to_perform = self._determine_checks_to_perform()
self.log = log
def scenario_creator(self, scenario_name):
if self.demand_provider_kwargs:
prov = self.demand_provider(
**self.demand_provider_kwargs, seed=int(scenario_name)
)
else:
prov = self.demand_provider(seed=int(scenario_name))
prov.reset()
demand = {
t: prov.generate_demand()
for t in range(1, self.model_constructor_params["t_max"] + 1)
}
model = self.model_constructor(
demand=demand, **self.model_constructor_params
).build_model()
# Telling it which decisions belong to first stage - for us this could be all our policy parameters
# because we can't change them during a trajectory
first_stage_params = self._get_first_stage_decision_params(model)
sputils.attach_root_node(model, 0, first_stage_params)
# If we don't specify, assume that all equally likely
model._mpisppy_probability = 1.0 / self.n_scenarios
return model
def _get_first_stage_decision_params(self, model):
if self.model_constructor.policy_parameters() == ["s", "S"]:
return [model.s, model.S]
elif self.model_constructor.policy_parameters() == ["s", "Q"]:
return [model.s, model.Q]
elif self.model_constructor.policy_parameters() == ["s", "S", "alpha", "Q"]:
return [model.s, model.S, model.alpha, model.Q]
elif self.model_constructor.policy_parameters() == ["s", "S", "beta", "Q"]:
return [model.s, model.S, model.beta, model.Q]
elif self.model_constructor.policy_parameters() == ["S"]:
return [model.S]
else:
raise ValueError("Policy parameters not recognised")
def solve_program(self):
options = {"solver": self.solver_string}
self.ef = ExtensiveForm(
options=options,
all_scenario_names=self.all_scenario_names,
scenario_creator=self.scenario_creator,
)
self.results = self.ef.solve_extensive_form(solver_options=self.solver_options)
objval = self.ef.get_objective_value()
return objval
def construct_results_dfs(self):
self.results_list = []
self.costs_df = pd.DataFrame(
columns=[
"Seed",
"Variable cost",
"Holding cost",
"Fixed cost",
"Wastage cost",
"Shortage cost",
]
)
for tup in self.ef.scenarios():
scen = tup[0]
if self.demand_provider_kwargs:
prov = self.demand_provider(
**self.demand_provider_kwargs, seed=int(scen)
)
else:
prov = self.demand_provider(seed=int(scen))
prov.reset()
demand = {
t: prov.generate_demand()
for t in range(1, self.model_constructor_params["t_max"] + 1)
}
model = tup[1]
# Add common variables to output
res_dicts = [
{
"opening_inventory": [
round(model.IssB[t, a](), 0) for a in model.A
],
"received": [round(model.X[t, a](), 0) for a in model.A],
"demand": round(demand[t], 0),
"DSSR": [round(model.DssR[t, a](), 0) for a in model.A],
"wastage": round(model.W[t](), 0),
"shortage": round(model.E[t](), 0),
"closing inventory": [
round(model.IssE[t, a](), 0) for a in model.A
],
"inventory position": round(model.IP[t](), 0),
"order quantity": round(model.OQ[t](), 0),
}
for t in model.T
]
            # Add policy parameters to results
            for res_dict, t in zip(res_dicts, model.T):
                for param in self.model_constructor.policy_parameters():
                    param_var = getattr(model, param)
                    if self.model_constructor_params["weekly_policy"]:
                        param_value = param_var[(t - 1) % 7]()
                    else:
                        param_value = param_var[t]()
                    res_dict[f"{param}"] = round(param_value, 0)
self.results_list.append(pd.DataFrame(res_dicts))
# Record the costs for each scenario and store in a single Pandas DataFrame
scen_costs_dict = {
"Seed": scen,
"Variable cost": round(model.variable_cost(), 0),
"Holding cost": round(model.holding_cost(), 0),
"Fixed cost": round(model.fixed_cost(), 0),
"Wastage cost": round(model.wastage_cost(), 0),
"Shortage cost": round(model.shortage_cost(), 0),
}
            self.costs_df = pd.concat(
                [self.costs_df, pd.DataFrame([scen_costs_dict])], ignore_index=True
            )
if self.log is not None:
self.log.info(f"##### Scenario {scen} #####")
self.log.info(f"Variable cost: {round(model.variable_cost(),0)}")
self.log.info(f"Holding cost: {round(model.holding_cost(),0)}")
self.log.info(f"Fixed cost: {round(model.fixed_cost(),0)}")
self.log.info(f"Wastage cost: {round(model.wastage_cost(),0)}")
self.log.info(f"Shortage cost: {round(model.shortage_cost(),0)}")
else:
print(f"##### Scenario {scen} #####")
# For now, also print the costs as useful for debugging
print(f"Variable cost: {round(model.variable_cost(),0)}")
print(f"Holding cost: {round(model.holding_cost(),0)}")
print(f"Fixed cost: {round(model.fixed_cost(),0)}")
print(f"Wastage cost: {round(model.wastage_cost(),0)}")
print(f"Shortage cost: {round(model.shortage_cost(),0)}")
def save_results(self, directory_path_string):
for scen, df in zip(self.all_scenario_names, self.results_list):
filename = Path(directory_path_string) / f"scenario_{scen}_output.csv"
df.to_csv(filename)
filename = Path(directory_path_string) / f"all_costs.csv"
self.costs_df.to_csv(filename)
def check_outputs(self, directory_path_string):
self.results_of_checks_list = []
for scen, scenario_df in zip(self.all_scenario_names, self.results_list):
# Ensure that entries in columns with array values are numpy arrays
array_cols = ["opening_inventory", "received", "DSSR", "closing inventory"]
for col in array_cols:
scenario_df[f"{col}"] = scenario_df[f"{col}"].apply(
lambda x: np.array(x)
)
# Do a merge to easily run checks where we look at consecutive rows
merged_results = pd.concat(
[
scenario_df,
scenario_df.loc[:, ["opening_inventory", "received"]]
.shift(-1)
.add_prefix("next_"),
],
axis=1,
)
# Run the necessary checks
out_df = pd.DataFrame()
for f in self.checks_to_perform:
res = merged_results.apply(f, axis=1)
out_df = pd.concat([out_df, res], axis=1)
# Print the number of rows with failure and store
# the results if any failures for a scenario
fail_check_rows = out_df[~out_df.all(axis=1)]
n_rows_with_fail = fail_check_rows.shape[0]
if self.log is not None:
self.log.info(
f"Scenario {scen}: {n_rows_with_fail} rows with a failed check"
)
else:
print(f"Scenario {scen}: {n_rows_with_fail} rows with a failed check")
if n_rows_with_fail > 0:
filename = Path(directory_path_string) / f"scenario_{scen}_checks.csv"
out_df.to_csv(filename)
self.results_of_checks_list.append(out_df)
### Functions for checking the output is consistent with constraints ###
# TODO: Could run a check that policy params same in each scenario
def _determine_checks_to_perform(self):
checks_to_run = [
self._check_wastage,
self._check_shortage,
self._check_inventory_during_day,
self._check_no_max_age_opening_inventory,
self._check_close_to_next_open_inventory,
self._check_order_to_next_received,
]
if self.model_constructor.policy_parameters() == ["s", "S"]:
return checks_to_run + [self._check_sS]
elif self.model_constructor.policy_parameters() == ["s", "Q"]:
return checks_to_run + [self._check_sQ]
elif self.model_constructor.policy_parameters() == ["s", "S", "alpha", "Q"]:
return checks_to_run + [self._check_sSaQ]
elif self.model_constructor.policy_parameters() == ["s", "S", "beta", "Q"]:
return checks_to_run + [self._check_sSbQ]
elif self.model_constructor.policy_parameters() == ["S"]:
return checks_to_run + [self._check_S]
else:
raise ValueError("Policy parameters not recognised")
# High level wastage check
def _check_wastage(self, row):
return pd.Series(
{
"check_wastage": row["wastage"]
== max(
0, row["opening_inventory"][0] + row["received"][0] - row["demand"]
)
}
)
# High level shortage check
def _check_shortage(self, row):
return pd.Series(
{
"check_shortage": row["shortage"]
== max(
0,
row["demand"]
- row["opening_inventory"].sum()
- row["received"].sum(),
)
}
)
# Check closing inventory
def _calculate_remaining_stock_and_demand(self, row):
total_remaining_demand = row["demand"]
inventory = row["opening_inventory"] + row["received"]
remaining_demand = np.zeros_like(inventory)
for idx, stock in enumerate(inventory):
demand_filled = min(total_remaining_demand, stock)
remaining_stock = stock - demand_filled
total_remaining_demand = total_remaining_demand - demand_filled
inventory[idx] = remaining_stock
remaining_demand[idx] = total_remaining_demand
return inventory, remaining_demand
def _check_inventory_during_day(self, row):
(
calc_closing_inventory,
calc_remaining_demand,
) = self._calculate_remaining_stock_and_demand(row)
return pd.Series(
{
"check_closing_inventory": (
row["closing inventory"] == calc_closing_inventory
).all(),
"check_DSSR": (row["DSSR"] == calc_remaining_demand).all(),
"check_inventory_position": row["inventory position"]
== row["closing inventory"][1:].sum(),
}
)
def _check_no_max_age_opening_inventory(self, row):
return pd.Series(
{"check_no_max_age_opening_inventory": row["opening_inventory"][-1] == 0}
)
def _check_close_to_next_open_inventory(self, row):
if row["next_opening_inventory"] is np.nan:
return pd.Series({"check_close_to_next_open_inventory": None})
else:
return pd.Series(
{
"check_close_to_next_open_inventory": (
row["closing inventory"][1:]
== row["next_opening_inventory"][:-1]
).all()
}
)
def _check_order_to_next_received(self, row):
if row["next_received"] is np.nan:
return pd.Series({"check_order_to_next_received": None})
else:
return pd.Series(
{
"check_order_to_next_received": row["order quantity"]
== row["next_received"].sum()
}
)
def _check_sS(self, row):
S_gt_s = row["S"] >= row["s"] + 1
if row["inventory position"] < row["s"]:
order_quantity_to_params = (
row["order quantity"] == row["S"] - row["inventory position"]
)
else:
order_quantity_to_params = row["order quantity"] == 0
return pd.Series(
{
"check_sS_S_gt_s": S_gt_s,
"check_sS_order_quantity_to_params": order_quantity_to_params,
}
)
def _check_S(self, row):
if row["inventory position"] < row["S"]:
order_quantity_to_params = (
row["order quantity"] == row["S"] - row["inventory position"]
)
else:
order_quantity_to_params = row["order quantity"] == 0
return pd.Series(
{"check_S_order_quantity_to_params": order_quantity_to_params,}
)
def _check_sQ(self, row):
if row["inventory position"] < row["s"]:
order_quantity_to_params = row["order quantity"] == row["Q"]
else:
order_quantity_to_params = row["order quantity"] == 0
return pd.Series(
{"check_sQ_order_quantity_to_params": order_quantity_to_params}
)
def _check_sSaQ(self, row):
S_gt_s = row["S"] >= row["s"] + 1
s_gt_a = row["s"] >= row["alpha"] + 1
if row["inventory position"] < row["alpha"]:
order_quantity_to_params = (
row["order quantity"] == row["S"] - row["inventory position"]
)
elif row["inventory position"] < row["s"]:
order_quantity_to_params = row["order quantity"] == row["Q"]
else:
order_quantity_to_params = row["order quantity"] == 0
return pd.Series(
{
"check_sSaQ_S_gt_s": S_gt_s,
"check_sSaQ_s_gt_a": s_gt_a,
"check_sSaQ_order_quantity_to_params": order_quantity_to_params,
}
)
def _check_sSbQ(self, row):
S_gt_s = row["S"] >= row["s"] + 1
s_gt_b = row["s"] >= row["beta"] + 1
if row["inventory position"] < row["beta"]:
order_quantity_to_params = row["order quantity"] == row["Q"]
elif row["inventory position"] < row["s"]:
order_quantity_to_params = (
row["order quantity"] == row["S"] - row["inventory position"]
)
else:
order_quantity_to_params = row["order quantity"] == 0
return pd.Series(
{
"check_sSbQ_S_gt_s": S_gt_s,
"check_sSbQ_s_gt_b": s_gt_b,
"check_sSbQ_order_quantity_to_params": order_quantity_to_params,
}
)
```
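For orientation, here is a minimal usage sketch of `PyomoModelRunner`. The method names and the `model_constructor_params` keys (`t_max`, `weekly_policy`) come from the class above; the specific constructor class name, scenario count, and output directory are illustrative assumptions rather than values fixed by this repository.

```python
# Minimal sketch, assuming the modules above are importable and a solver
# such as Gurobi is installed and licensed.
from bloodbank_rl.pyomo_models.stochastic_model_runner import PyomoModelRunner
from bloodbank_rl.environments.platelet_bankSR import PoissonDemandProviderSR
import bloodbank_rl.pyomo_models.model_constructors as pyomo_mc

runner = PyomoModelRunner(
    model_constructor=pyomo_mc.sS_PyomoModelConstructor,  # assumed class name
    model_constructor_params={"t_max": 28, "weekly_policy": False},  # illustrative
    n_scenarios=10,
    demand_provider=PoissonDemandProviderSR,
)

objective = runner.solve_program()  # builds and solves the extensive form
runner.construct_results_dfs()      # per-scenario results and cost summary
runner.save_results("./output")     # one CSV per scenario plus all_costs.csv
runner.check_outputs("./output")    # re-checks outputs against the policy logic
```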
#### File: bloodbank_rl/tianshou_utils/policies.py
```python
from tianshou.policy import A2CPolicy, PPOPolicy
from typing import Any, Dict, List, Optional, Type
from tianshou.data import Batch
# Normal key structure for output doesn't work with MLFlow logging nicely
# loss used as a key and also parent
class A2CPolicyforMLFlow(A2CPolicy):
def learn(
self, batch: Batch, batch_size: int, repeat: int, **kwargs: Any
) -> Dict[str, List[float]]:
loss_dict = super().learn(batch, batch_size, repeat, **kwargs)
output_loss_dict = {}
output_loss_dict["loss"] = loss_dict["loss"]
output_loss_dict["loss_component/actor"] = loss_dict["loss/actor"]
output_loss_dict["loss_component/vf"] = loss_dict["loss/vf"]
output_loss_dict["loss_component/ent"] = loss_dict["loss/ent"]
return output_loss_dict
class PPOPolicyforMLFlow(PPOPolicy):
def learn(
self, batch: Batch, batch_size: int, repeat: int, **kwargs: Any
) -> Dict[str, List[float]]:
loss_dict = super().learn(batch, batch_size, repeat, **kwargs)
output_loss_dict = {}
output_loss_dict["loss"] = loss_dict["loss"]
output_loss_dict["loss_component/clip"] = loss_dict["loss/clip"]
output_loss_dict["loss_component/vf"] = loss_dict["loss/vf"]
output_loss_dict["loss_component/ent"] = loss_dict["loss/ent"]
return output_loss_dict
``` |
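The flattened keys returned above are plain metric names, which is what makes them straightforward to push to a tracker. A minimal sketch of the intended consumption, assuming an active MLflow setup (the numeric values are placeholders, not real training output):

```python
import mlflow

# Shaped like the dict returned by PPOPolicyforMLFlow.learn(); tianshou's
# learn() returns a list of per-update losses under each key.
output_loss_dict = {
    "loss": [0.42, 0.40],
    "loss_component/clip": [0.30, 0.29],
    "loss_component/vf": [0.10, 0.09],
    "loss_component/ent": [0.02, 0.02],
}

with mlflow.start_run():
    for key, values in output_loss_dict.items():
        mlflow.log_metric(key, sum(values) / len(values))
```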
{
"source": "joefarrington/MIMIC_Extract",
"score": 2
} |
#### File: joefarrington/MIMIC_Extract/mimic_direct_extract.py
```python
import numpy as np
import pandas as pd
from pandas import DataFrame
import psycopg2
from sklearn import metrics
from datetime import datetime
from datetime import timedelta
import sys
import os
import time
from os import environ
from os.path import isfile, isdir, splitext
import argparse
import cPickle
import numpy.random as npr
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from datapackage_io_util import (
load_datapackage_schema,
load_sanitized_df_from_csv,
save_sanitized_df_to_csv,
sanitize_df,
)
# Output filenames
static_filename = 'static_data.csv'
static_columns_filename = 'static_colnames.txt'
dynamic_filename = 'vitals_hourly_data.csv'
columns_filename = 'vitals_colnames.txt'
subjects_filename = 'subjects.npy'
times_filename = 'fenceposts.npy'
dynamic_hd5_filename = 'vitals_hourly_data.h5'
dynamic_hd5_filt_filename = 'all_hourly_data.h5'
codes_filename = 'C.npy'
codes_hd5_filename = 'C.h5'
idx_hd5_filename = 'C_idx.h5'
outcome_filename = 'outcomes_hourly_data.csv'
outcome_hd5_filename = 'outcomes_hourly_data.h5'
outcome_columns_filename = 'outcomes_colnames.txt'
# SQL command params
dbname = 'mimic'
schema_name = 'mimiciii'
ID_COLS = ['subject_id', 'hadm_id', 'icustay_id']
ITEM_COLS = ['itemid', 'label', 'LEVEL1', 'LEVEL2']
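# add_outcome_indicators: given all intervention intervals for a single stay
# (one groupby group), expand them into an hourly 0/1 'on' indicator covering
# hours 0..max_hours; add_blank_indicators builds the all-zero equivalent.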
def add_outcome_indicators(out_gb):
subject_id = out_gb['subject_id'].unique()[0]
hadm_id = out_gb['hadm_id'].unique()[0]
icustay_id = out_gb['icustay_id'].unique()[0]
max_hrs = out_gb['max_hours'].unique()[0]
on_hrs = set()
for index, row in out_gb.iterrows():
on_hrs.update(range(row['starttime'], row['endtime'] + 1))
off_hrs = set(range(max_hrs + 1)) - on_hrs
on_vals = [0]*len(off_hrs) + [1]*len(on_hrs)
hours = list(off_hrs) + list(on_hrs)
return pd.DataFrame({'subject_id': subject_id, 'hadm_id':hadm_id,
'hours_in':hours, 'on':on_vals}) #icustay_id': icustay_id})
def add_blank_indicators(out_gb):
subject_id = out_gb['subject_id'].unique()[0]
hadm_id = out_gb['hadm_id'].unique()[0]
#icustay_id = out_gb['icustay_id'].unique()[0]
max_hrs = out_gb['max_hours'].unique()[0]
hrs = range(max_hrs + 1)
vals = list([0]*len(hrs))
return pd.DataFrame({'subject_id': subject_id, 'hadm_id':hadm_id,
'hours_in':hrs, 'on':vals})#'icustay_id': icustay_id,
def get_values_by_name_from_df_column_or_index(data_df, colname):
""" Easily get values for named field, whether a column or an index
Returns
-------
values : 1D array
"""
try:
values = data_df[colname]
except KeyError as e:
if colname in data_df.index.names:
values = data_df.index.get_level_values(colname)
else:
raise e
return values
def continuous_outcome_processing(out_data, data, icustay_timediff):
"""
Args
----
out_data : pd.DataFrame
index=None
Contains subset of icustay_id corresp to specific sessions where outcome observed.
data : pd.DataFrame
index=icustay_id
Contains full population of static demographic data
Returns
-------
out_data : pd.DataFrame
"""
out_data['intime'] = out_data['icustay_id'].map(data['intime'].to_dict())
out_data['outtime'] = out_data['icustay_id'].map(data['outtime'].to_dict())
out_data['max_hours'] = out_data['icustay_id'].map(icustay_timediff)
out_data['starttime'] = out_data['starttime'] - out_data['intime']
out_data['starttime'] = out_data.starttime.apply(lambda x: x.days*24 + x.seconds//3600)
out_data['endtime'] = out_data['endtime'] - out_data['intime']
out_data['endtime'] = out_data.endtime.apply(lambda x: x.days*24 + x.seconds//3600)
out_data = out_data.groupby(['icustay_id'])
return out_data
#
def fill_missing_times(df_by_sid_hid_itemid):
    max_hour = df_by_sid_hid_itemid.index.get_level_values('max_hours')[0]
missing_hours = list(set(range(max_hour+1)) - set(df_by_sid_hid_itemid['hours_in'].unique()))
# Add rows
sid = df_by_sid_hid_itemid.subject_id.unique()[0]
hid = df_by_sid_hid_itemid.hadm_id.unique()[0]
icustay_id = df_by_sid_hid_itemid.icustay_id.unique()[0]
itemid = df_by_sid_hid_itemid.itemid.unique()[0]
filler = pd.DataFrame({'subject_id':[sid]*len(missing_hours),
'hadm_id':[hid]*len(missing_hours),
'icustay_id':[icustay_id]*len(missing_hours),
'itemid':[itemid]*len(missing_hours),
'hours_in':missing_hours,
'value':[np.nan]*len(missing_hours),
'max_hours': [max_hour]*len(missing_hours)})
return pd.concat([df_by_sid_hid_itemid, filler], axis=0)
def save_pop(
data_df, outPath, static_filename, pop_size_int,
static_data_schema, host=None
):
# Connect to local postgres version of mimic
# Serialize to disk
csv_fpath = os.path.join(outPath, static_filename)
save_sanitized_df_to_csv(csv_fpath, data_df, static_data_schema)
"""
# Lower cost to doing this conversion and of serializing it afterwards
# (http://matthewrocklin.com/blog/work/2015/03/16/Fast-Serialization)
data['admission_type'] = data['admission_type'].astype('category')
data['gender'] = data['gender'].astype('category')
data['first_careunit'] = data['first_careunit'].astype('category')
data['ethnicity'] = data['ethnicity'].astype('category')
# Process the timestamps
data['intime'] = pd.to_datetime(data['intime']) #, format="%m/%d/%Y"))
data['outtime'] = pd.to_datetime(data['outtime'])
data['admittime'] = pd.to_datetime(data['admittime'])
data['dischtime'] = pd.to_datetime(data['dischtime'])
data['deathtime'] = pd.to_datetime(data['deathtime'])
# Serialize to disk
data.to_csv(os.path.join(outPath, static_filename))
"""
return data_df
# From Dave's approach!
def get_variable_mapping(mimic_mapping_filename):
# Read in the second level mapping of the itemids
var_map = DataFrame.from_csv(mimic_mapping_filename, index_col=None).fillna('').astype(str)
var_map.ITEMID = var_map.ITEMID.astype(int)
var_map.M_E_APPENDIXB = var_map.M_E_APPENDIXB.astype(int)
var_map = var_map.ix[(var_map['LEVEL2'] != '') & (var_map.COUNT>0)]
var_map = var_map.ix[(var_map.STATUS == 'ready')]
var_map = var_map.ix[(var_map.M_E_APPENDIXB == 1)]
return var_map
def get_variable_ranges(range_filename):
# Read in the second level mapping of the itemid, and take those values out
columns = [ 'LEVEL2', 'OUTLIER LOW', 'VALID LOW', 'IMPUTE', 'VALID HIGH', 'OUTLIER HIGH' ]
to_rename = dict(zip(columns, [ c.replace(' ', '_') for c in columns ]))
to_rename['LEVEL2'] = 'VARIABLE'
var_ranges = DataFrame.from_csv(range_filename, index_col=None)
var_ranges = var_ranges[columns]
var_ranges.rename_axis(to_rename, axis=1, inplace=True)
var_ranges = var_ranges.drop_duplicates(subset='VARIABLE', keep='first')
var_ranges['VARIABLE'] = map(str.lower, var_ranges['VARIABLE'])
var_ranges.set_index('VARIABLE', inplace=True)
var_ranges = var_ranges.ix[var_ranges.notnull().all(axis=1)]
return var_ranges
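# Each UNIT_CONVERSIONS entry is (variable-name substring, unit substring or None,
# range-check predicate or None, conversion function). standardize_units() below
# applies convert_fn to rows whose name matches and whose unit string or value
# range indicates the non-standard unit.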
UNIT_CONVERSIONS = [
('weight', 'oz', None, lambda x: x/16.*0.45359237),
('weight', 'lbs', None, lambda x: x*0.45359237),
('fraction inspired oxygen', None, lambda x: x > 1, lambda x: x/100.),
('oxygen saturation', None, lambda x: x <= 1, lambda x: x*100.),
('temperature', 'f', lambda x: x > 79, lambda x: (x - 32) * 5./9),
('height', 'in', None, lambda x: x*2.54),
]
def standardize_units(X, name_col='itemid', unit_col='valueuom', value_col='value', inplace=True):
if not inplace: X = X.copy()
name_col_vals = get_values_by_name_from_df_column_or_index(X, name_col)
unit_col_vals = get_values_by_name_from_df_column_or_index(X, unit_col)
try:
name_col_vals = name_col_vals.str
unit_col_vals = unit_col_vals.str
except:
print("Can't call *.str")
print(name_col_vals)
print(unit_col_vals)
raise
#name_filter, unit_filter = [
# (lambda n: col.contains(n, case=False, na=False)) for col in (name_col_vals, unit_col_vals)
#]
# TODO(mmd): Why does the above not work, but the below does?
name_filter = lambda n: name_col_vals.contains(n, case=False, na=False)
unit_filter = lambda n: unit_col_vals.contains(n, case=False, na=False)
for name, unit, rng_check_fn, convert_fn in UNIT_CONVERSIONS:
name_filter_idx = name_filter(name)
needs_conversion_filter_idx = name_filter_idx & False
if unit is not None: needs_conversion_filter_idx |= name_filter(unit) | unit_filter(unit)
if rng_check_fn is not None: needs_conversion_filter_idx |= rng_check_fn(X[value_col])
idx = name_filter_idx & needs_conversion_filter_idx
X.loc[idx, value_col] = convert_fn(X[value_col][idx])
return X
def range_unnest(df, col, out_col_name=None, reset_index=False):
assert len(df.index.names) == 1, "Does not support multi-index."
if out_col_name is None: out_col_name = col
col_flat = pd.DataFrame(
[[i, x] for i, y in df[col].iteritems() for x in range(y+1)],
columns=[df.index.names[0], out_col_name]
)
if not reset_index: col_flat = col_flat.set_index(df.index.names[0])
return col_flat
# TODO(mmd): improve args
def save_numerics(
data, X, I, var_map, var_ranges, outPath, dynamic_filename, columns_filename, subjects_filename,
times_filename, dynamic_hd5_filename, group_by_level2, apply_var_limit, min_percent
):
assert len(data) > 0 and len(X) > 0, "Must provide some input data to process."
var_map = var_map[
['LEVEL2', 'ITEMID', 'LEVEL1']
].rename_axis(
{'LEVEL2': 'LEVEL2', 'LEVEL1': 'LEVEL1', 'ITEMID': 'itemid'}, axis=1
).set_index('itemid')
X['value'] = pd.to_numeric(X['value'], 'coerce')
    X = X.astype({k: int for k in ID_COLS})
to_hours = lambda x: max(0, x.days*24 + x.seconds // 3600)
X = X.set_index('icustay_id').join(data[['intime']])
X['hours_in'] = (X['charttime'] - X['intime']).apply(to_hours)
X.drop(columns=['charttime', 'intime'], inplace=True)
X.set_index('itemid', append=True, inplace=True)
# Pandas has a bug with the below for small X
#X = X.join([var_map, I]).set_index(['label', 'LEVEL1', 'LEVEL2'], append=True)
X = X.join(var_map).join(I).set_index(['label', 'LEVEL1', 'LEVEL2'], append=True)
standardize_units(X, name_col='LEVEL1', inplace=True)
if apply_var_limit > 0:
X = apply_variable_limits(X, var_ranges, 'LEVEL2')
group_item_cols = ['LEVEL2'] if group_by_level2 else ITEM_COLS
X = X.groupby(ID_COLS + group_item_cols + ['hours_in']).agg(['mean', 'std', 'count'])
X.columns = X.columns.droplevel(0)
X.columns.names = ['Aggregation Function']
data['max_hours'] = (data['outtime'] - data['intime']).apply(to_hours)
# TODO(mmd): Maybe can just create the index directly?
missing_hours_fill = range_unnest(data, 'max_hours', out_col_name='hours_in', reset_index=True)
missing_hours_fill['tmp'] = np.NaN
# TODO(mmd): The below is a bit wasteful.
#itemids = var_map.join(I['label']).reset_index()[group_item_cols].drop_duplicates()
#itemids['tmp'] = np.NaN
#missing_hours_fill = missing_hours_fill.merge(itemids, on='tmp', how='outer')
fill_df = data.reset_index()[ID_COLS].join(missing_hours_fill.set_index('icustay_id'), on='icustay_id')
fill_df.set_index(ID_COLS + ['hours_in'], inplace=True)
    # Pivot table drops NaN columns, so you lose anything uniformly NaN.
X = X.unstack(level = group_item_cols)
X.columns = X.columns.reorder_levels(order=group_item_cols + ['Aggregation Function'])
#X = X.reset_index().pivot_table(index=ID_COLS + ['hours_in'], columns=group_item_cols, values=X.columns)
X = X.reindex(fill_df.index)
#X.columns = X.columns.droplevel(0).reorder_levels(order=[1, 0])
#if group_by_level2:
# X.columns.names = ['LEVEL2', 'Aggregation Function'] # Won't work with ungrouped!
#else:
# X.columns.names = ['itemid', 'Aggregation Function']
# X.columms = X.MultiIndex.from_frame(X[ITEM_COLS])
X = X.sort_index(axis=0).sort_index(axis=1)
print "Shape of X : ", X.shape
# Turn back into columns
if columns_filename is not None:
col_names = [str(x) for x in X.columns.values]
with open(os.path.join(outPath, columns_filename), 'w') as f: f.write('\n'.join(col_names))
# Get the max time for each of the subjects so we can reconstruct!
if subjects_filename is not None:
np.save(os.path.join(outPath, subjects_filename), data['subject_id'].as_matrix())
if times_filename is not None:
np.save(os.path.join(outPath, times_filename), data['max_hours'].as_matrix())
#fix nan in count to be zero
idx = pd.IndexSlice
if group_by_level2:
X.loc[:, idx[:, 'count']] = X.loc[:, idx[:, 'count']].fillna(0)
else:
X.loc[:, idx[:,:,:,:, 'count']] = X.loc[:, idx[:,:,:,:, 'count']].fillna(0)
# Drop columns that have very few recordings
n = round((1-min_percent/100.0)*X.shape[0])
drop_col = []
for k in X.columns:
if k[-1] == 'mean':
if X[k].isnull().sum() > n:
drop_col.append(k[:-1])
X = X.drop(columns = drop_col)
########
if dynamic_filename is not None: np.save(os.path.join(outPath, dynamic_filename), X.as_matrix())
if dynamic_hd5_filename is not None: X.to_hdf(os.path.join(outPath, dynamic_hd5_filename), 'X')
return X
def save_icd9_codes(data, codes, outPath, codes_filename, codes_h5_filename):
codes.set_index(ID_COLS, inplace=True)
codes.to_hdf(os.path.join(outPath, codes_h5_filename), 'C')
return codes
def save_outcome(
data, dbname, schema_name, outPath, outcome_filename, outcome_hd5_filename,
outcome_columns_filename, outcome_schema, host=None, password=None
):
""" Retrieve outcomes from DB and save to disk
Vent and vaso are both there already - so pull the start and stop times from there! :)
Returns
-------
Y : Pandas dataframe
Obeys the outcomes data spec
"""
icuids_to_keep = get_values_by_name_from_df_column_or_index(data, 'icustay_id')
icuids_to_keep = set([str(s) for s in icuids_to_keep])
# Add a new column called intime so that we can easily subtract it off
data = data.reset_index()
data = data.set_index('icustay_id')
data['intime'] = pd.to_datetime(data['intime']) #, format="%m/%d/%Y"))
data['outtime'] = pd.to_datetime(data['outtime'])
icustay_timediff_tmp = data['outtime'] - data['intime']
icustay_timediff = pd.Series([timediff.days*24 + timediff.seconds//3600
for timediff in icustay_timediff_tmp], index=data.index.values)
# Setup access to PSQL db
query_args = {'dbname': dbname}
    if host is not None: query_args['host'] = host
    if password is not None: query_args['password'] = password
con = psycopg2.connect(**query_args)
cur = con.cursor()
# Query on ventilation data
cur.execute('SET search_path to ' + schema_name)
query = """
select i.subject_id, i.hadm_id, v.icustay_id, v.ventnum, v.starttime, v.endtime
FROM icustay_detail i
INNER JOIN ventdurations v ON i.icustay_id = v.icustay_id
where v.icustay_id in ({icuids})
and v.starttime between intime and outtime
and v.endtime between intime and outtime;
""".format(icuids=','.join(icuids_to_keep))
vent_data = pd.read_sql_query(query, con)
vent_data = continuous_outcome_processing(vent_data, data, icustay_timediff)
vent_data = vent_data.apply(add_outcome_indicators)
vent_data.rename(columns = {'on':'vent'}, inplace=True)
vent_data = vent_data.reset_index()
    # Get the patients without the intervention in there too, so that we can
    # add blank (all-zero) indicator rows for them
ids_with = vent_data['icustay_id']
ids_with = set(map(int, ids_with))
ids_all = set(map(int, icuids_to_keep))
ids_without = (ids_all - ids_with)
#ids_without = map(int, ids_without)
# Create a new fake dataframe with blanks on all vent entries
out_data = data.copy(deep=True)
out_data = out_data.reset_index()
out_data = out_data.set_index('icustay_id')
out_data = out_data.iloc[out_data.index.isin(ids_without)]
out_data = out_data.reset_index()
out_data = out_data[['subject_id', 'hadm_id', 'icustay_id']]
out_data['max_hours'] = out_data['icustay_id'].map(icustay_timediff)
# Create all 0 column for vent
out_data = out_data.groupby('icustay_id')
out_data = out_data.apply(add_blank_indicators)
out_data.rename(columns = {'on':'vent'}, inplace=True)
out_data = out_data.reset_index()
# Concatenate all the data vertically
Y = pd.concat([vent_data[['subject_id', 'hadm_id', 'icustay_id', 'hours_in', 'vent']],
out_data[['subject_id', 'hadm_id', 'icustay_id', 'hours_in', 'vent']]],
axis=0)
# Start merging all other interventions
table_names = ['vasopressordurations', 'adenosinedurations', 'dobutaminedurations', 'dopaminedurations', 'epinephrinedurations', 'isupreldurations',
'milrinonedurations', 'norepinephrinedurations', 'phenylephrinedurations', 'vasopressindurations']
column_names = ['vaso', 'adenosine', 'dobutamine', 'dopamine', 'epinephrine', 'isuprel',
'milrinone', 'norepinephrine', 'phenylephrine', 'vasopressin']
# TODO(mmd): This section doesn't work. What is its purpose?
for t, c in zip(table_names, column_names):
# TOTAL VASOPRESSOR DATA
cur.execute('SET search_path to ' + schema_name)
query = """
select i.subject_id, i.hadm_id, v.icustay_id, v.vasonum, v.starttime, v.endtime
FROM icustay_detail i
INNER JOIN {table} v ON i.icustay_id = v.icustay_id
where v.icustay_id in ({icuids})
and v.starttime between intime and outtime
and v.endtime between intime and outtime;
""".format(icuids=','.join(icuids_to_keep), table=t)
new_data = pd.read_sql_query(query,con)
new_data = continuous_outcome_processing(new_data, data, icustay_timediff)
new_data = new_data.apply(add_outcome_indicators)
new_data.rename(columns = {'on':c}, inplace=True)
new_data = new_data.reset_index()
        # c may not be in new_data if we are only extracting a subset of the
        # population, in which case c was never performed.
if not c in new_data:
print "Column ", c, " not in data."
continue
Y = Y.merge(
new_data[['subject_id', 'hadm_id', 'icustay_id', 'hours_in', c]],
on=['subject_id', 'hadm_id', 'icustay_id', 'hours_in'],
how='left'
)
# Sort the values
Y.fillna(0, inplace=True)
Y[c] = Y[c].astype(int)
#Y = Y.sort_values(['subject_id', 'icustay_id', 'hours_in']) #.merge(df3,on='name')
Y = Y.reset_index(drop=True)
print 'Extracted ' + c + ' from ' + t
tasks=["colloid_bolus", "crystalloid_bolus", "nivdurations"]
for task in tasks:
cur.execute('SET search_path to ' + schema_name)
if task=='nivdurations':
query = """
select i.subject_id, i.hadm_id, v.icustay_id, v.starttime, v.endtime
FROM icustay_detail i
INNER JOIN {table} v ON i.icustay_id = v.icustay_id
where v.icustay_id in ({icuids})
and v.starttime between intime and outtime
and v.endtime between intime and outtime;
""".format(icuids=','.join(icuids_to_keep), table=task)
else:
query = """
select i.subject_id, i.hadm_id, v.icustay_id, v.charttime AS starttime,
v.charttime AS endtime
FROM icustay_detail i
INNER JOIN {table} v ON i.icustay_id = v.icustay_id
where v.icustay_id in ({icuids})
and v.charttime between intime and outtime
""".format(icuids=','.join(icuids_to_keep), table=task)
new_data = pd.read_sql_query(query, con=con)
if new_data.shape[0] == 0:
continue
new_data = continuous_outcome_processing(new_data, data, icustay_timediff)
new_data = new_data.apply(add_outcome_indicators)
new_data.rename(columns = {'on':task}, inplace=True)
new_data = new_data.reset_index()
new_data.to_csv('new_task.csv')
Y = Y.merge(
new_data[['subject_id', 'hadm_id', 'icustay_id', 'hours_in', task]],
on=['subject_id', 'hadm_id', 'icustay_id', 'hours_in'],
how='left'
)
# Sort the values
Y.fillna(0, inplace=True)
Y[task] = Y[task].astype(int)
Y = Y.reset_index(drop=True)
print 'Extracted ' + task
# TODO: ADD THE RBC/PLT/PLASMA DATA
# TODO: ADD DIALYSIS DATA
# TODO: ADD INFECTION DATA
cur.close()
con.close()
Y = Y.filter(items=['subject_id', 'hadm_id', 'icustay_id', 'hours_in', 'vent'] + column_names + tasks)
Y.subject_id = Y.subject_id.astype(int)
Y.icustay_id = Y.icustay_id.astype(int)
Y.hours_in = Y.hours_in.astype(int)
Y.vent = Y.vent.astype(int)
Y.vaso = Y.vaso.astype(int)
y_id_cols = ID_COLS + ['hours_in']
Y = Y.sort_values(y_id_cols)
Y.set_index(y_id_cols, inplace=True)
print 'Shape of Y : ', Y.shape
# SAVE AS NUMPY ARRAYS AND TEXT FILES
#np_Y = Y.as_matrix()
#np.save(os.path.join(outPath, outcome_filename), np_Y)
# Turn back into columns
df = Y.reset_index()
df = sanitize_df(df, outcome_schema)
csv_fpath = os.path.join(outPath, outcome_filename)
save_sanitized_df_to_csv(csv_fpath, df, outcome_schema)
col_names = list(df.columns.values)
col_names = col_names[3:]
with open(os.path.join(outPath, outcome_columns_filename), 'w') as f:
f.write('\n'.join(col_names))
# TODO(mmd): Why does df have the index? Is sanitize making multiindex?
# SAVE THE DATA AS A PANDAS OBJECT
# TODO(<NAME>): Why writing out Y after you've separately sanitized df?
Y.to_hdf(os.path.join(outPath, outcome_hd5_filename), 'Y')
return df
# Apply the variable limits to remove things
# TODO(mmd): controlled printing.
def apply_variable_limits(df, var_ranges, var_names_index_col='LEVEL2'):
idx_vals = df.index.get_level_values(var_names_index_col)
non_null_idx = ~df.value.isnull()
var_names = set(idx_vals)
var_range_names = set(var_ranges.index.values)
for var_name in var_names:
var_name_lower = var_name.lower()
if var_name_lower not in var_range_names:
print("No known ranges for %s" % var_name)
continue
outlier_low_val, outlier_high_val, valid_low_val, valid_high_val = [
var_ranges.loc[var_name_lower, x] for x in ('OUTLIER_LOW','OUTLIER_HIGH','VALID_LOW','VALID_HIGH')
]
running_idx = non_null_idx & (idx_vals == var_name)
outlier_low_idx = (df.value < outlier_low_val)
outlier_high_idx = (df.value > outlier_high_val)
valid_low_idx = ~outlier_low_idx & (df.value < valid_low_val)
valid_high_idx = ~outlier_high_idx & (df.value > valid_high_val)
var_outlier_idx = running_idx & (outlier_low_idx | outlier_high_idx)
var_valid_low_idx = running_idx & valid_low_idx
var_valid_high_idx = running_idx & valid_high_idx
df.loc[var_outlier_idx, 'value'] = np.nan
df.loc[var_valid_low_idx, 'value'] = valid_low_val
df.loc[var_valid_high_idx, 'value'] = valid_high_val
n_outlier = sum(var_outlier_idx)
n_valid_low = sum(var_valid_low_idx)
n_valid_high = sum(var_valid_high_idx)
if n_outlier + n_valid_low + n_valid_high > 0:
print(
"%s had %d / %d rows cleaned:\n"
" %d rows were strict outliers, set to np.nan\n"
" %d rows were low valid outliers, set to %.2f\n"
" %d rows were high valid outliers, set to %.2f\n"
"" % (
var_name,
n_outlier + n_valid_low + n_valid_high, sum(running_idx),
n_outlier, n_valid_low, valid_low_val, n_valid_high, valid_high_val
)
)
return df
def plot_variable_histograms(col_names, df):
# Plot some of the data, just to make sure it looks ok
for c, vals in df.iteritems():
n = vals.dropna().count()
if n < 2: continue
# get median, variance, skewness
med = vals.dropna().median()
var = vals.dropna().var()
skew = vals.dropna().skew()
# plot
        fig = plt.figure(figsize=(13, 6))
vals.dropna().plot.hist(bins=100, label='HIST (n={})'.format(n))
# fake plots for KS test, median, etc
plt.plot([], label=' ',color='lightgray')
plt.plot([], label='Median: {}'.format(format(med,'.2f')),
color='lightgray')
plt.plot([], label='Variance: {}'.format(format(var,'.2f')),
color='lightgray')
plt.plot([], label='Skew: {}'.format(format(skew,'.2f')),
                 color='lightgray')
# add title, labels etc.
plt.title('{} measurements in ICU '.format(str(c)))
plt.xlabel(str(c))
plt.legend(loc="upper left", bbox_to_anchor=(1,1),fontsize=12)
plt.xlim(0, vals.quantile(0.99))
fig.savefig(os.path.join(outPath, (str(c) + '_HIST_.png')), bbox_inches='tight')
# Main, where you can call what makes sense.
if __name__ == '__main__':
# Construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument('--out_path', type=str, default= '/scratch/{}/phys_acuity_modelling/data'.format(os.environ['USER']),
help='Enter the path you want the output')
ap.add_argument('--resource_path',
type=str,
default=os.path.expandvars("$MIMIC_EXTRACT_CODE_DIR/resources/"))
ap.add_argument('--extract_pop', type=int, default=1,
help='Whether or not to extract population data: 0 - no extraction, ' +
'1 - extract if not present in the data directory, 2 - extract even if there is data')
ap.add_argument('--extract_numerics', type=int, default=1,
help='Whether or not to extract numerics data: 0 - no extraction, ' +
'1 - extract if not present in the data directory, 2 - extract even if there is data')
ap.add_argument('--extract_outcomes', type=int, default=1,
help='Whether or not to extract outcome data: 0 - no extraction, ' +
'1 - extract if not present in the data directory, 2 - extract even if there is data')
ap.add_argument('--extract_codes', type=int, default=1,
help='Whether or not to extract ICD9 codes: 0 - no extraction, ' +
'1 - extract if not present in the data directory, 2 - extract even if there is data')
ap.add_argument('--pop_size', type=int, default=0,
help='Size of population to extract')
ap.add_argument('--exit_after_loading', type=int, default=0)
ap.add_argument('--var_limits', type=int, default=1,
help='Whether to create a version of the data with variable limits included. ' +
'1 - apply variable limits, 0 - do not apply variable limits')
ap.add_argument('--plot_hist', type=int, default=1,
help='Whether to plot the histograms of the data')
ap.add_argument('--psql_host', type=str, default=None,
help='Postgres host. Try "/var/run/postgresql/" for Unix domain socket errors.')
ap.add_argument('--psql_password', type=str, default=None, help='Postgres password.')
ap.add_argument('--group_by_level2', action='store_false', dest='group_by_level2', default=True,
help='Do group by level2.')
ap.add_argument('--min_percent', type=float, default=0.0,
                    help='Minimum percentage of rows that must be observations for each numeric column. ' +
                         'min_percent = 1 means columns with more than 99 percent NaN will be removed')
ap.add_argument('--min_age', type=int, default=15,
help='Minimum age of patients to be included')
ap.add_argument('--min_duration', type=int, default=12,
help='Minimum hours of stay to be included')
ap.add_argument('--max_duration', type=int, default=240,
help='Maximum hours of stay to be included')
#############
# Parse args
args = vars(ap.parse_args())
for key in sorted(args.keys()):
print key, args[key]
if not isdir(args['resource_path']):
raise ValueError("Invalid resource_path: %s" % args['resource_path'])
mimic_mapping_filename = os.path.join(args['resource_path'], 'itemid_to_variable_map.csv')
range_filename = os.path.join(args['resource_path'], 'variable_ranges.csv')
# Load specs for output tables
static_data_schema = load_datapackage_schema(
os.path.join(args['resource_path'], 'static_data_spec.json'))
outcome_data_schema = load_datapackage_schema(
os.path.join(args['resource_path'], 'outcome_data_spec.json'))
if not isdir(args['out_path']):
print 'ERROR: OUTPATH %s DOES NOT EXIST' % args['out_path']
sys.exit()
else:
outPath = args['out_path']
# Modify the filenames
if args['pop_size'] > 0:
pop_size = str(args['pop_size'])
static_filename = splitext(static_filename)[0] + '_' + pop_size + splitext(static_filename)[1]
dynamic_filename = splitext(dynamic_filename)[0] + '_' + pop_size + splitext(dynamic_filename)[1]
#columns_filename = splitext(columns_filename)[0] + '_' + pop_size + splitext(columns_filename)[1]
subjects_filename = splitext(subjects_filename)[0] + '_' + pop_size + splitext(subjects_filename)[1]
times_filename = splitext(times_filename)[0] + '_' + pop_size + splitext(times_filename)[1]
dynamic_hd5_filename = splitext(dynamic_hd5_filename)[0] + '_' + pop_size + splitext(dynamic_hd5_filename)[1]
outcome_filename = splitext(outcome_filename)[0] + '_' + pop_size + splitext(outcome_filename)[1]
dynamic_hd5_filt_filename = splitext(dynamic_hd5_filt_filename)[0] + '_' + pop_size + splitext(dynamic_hd5_filt_filename)[1]
outcome_hd5_filename = splitext(outcome_hd5_filename)[0] + '_' + pop_size + splitext(outcome_hd5_filename)[1]
#outcome_columns_filename = splitext(outcome_columns_filename)[0] + '_' + pop_size + splitext(outcome_columns_filename)[1]
codes_filename = splitext(codes_filename)[0] + '_' + pop_size + splitext(codes_filename)[1]
codes_hd5_filename = splitext(codes_hd5_filename)[0] + '_' + pop_size + splitext(codes_hd5_filename)[1]
idx_hd5_filename = splitext(idx_hd5_filename)[0] + '_' + pop_size + splitext(idx_hd5_filename)[1]
query_args = {'dbname': dbname}
if args['psql_host'] is not None: query_args['host'] = args['psql_host']
if args['psql_password'] is not None: query_args['password'] = args['psql_password']
#############
# Population extraction
data = None
    if (args['extract_pop'] == 0 or args['extract_pop'] == 1) and isfile(os.path.join(outPath, static_filename)):
data = DataFrame.from_csv(os.path.join(outPath, static_filename))
data = sanitize_df(data, static_data_schema)
"""
data['admission_type'] = data['admission_type'].astype('category')
data['gender'] = data['gender'].astype('category')
data['first_careunit'] = data['first_careunit'].astype('category')
data['ethnicity'] = data['ethnicity'].astype('category')
data['intime'] = pd.to_datetime(data['intime']) #, format="%m/%d/%Y"))
data['outtime'] = pd.to_datetime(data['outtime'])
data['admittime'] = pd.to_datetime(data['admittime'])
data['dischtime'] = pd.to_datetime(data['dischtime'])
data['deathtime'] = pd.to_datetime(data['deathtime'])
"""
elif (args['extract_pop'] == 1 & (not isfile(os.path.join(outPath, static_filename)))) | (args['extract_pop'] == 2):
con = psycopg2.connect(**query_args)
cur = con.cursor()
pop_size_string = ''
if args['pop_size'] > 0:
pop_size_string = 'LIMIT ' + str(args['pop_size'])
min_age_string = str(args['min_age'])
min_dur_string = str(args['min_duration'])
max_dur_string = str(args['max_duration'])
min_day_string = str(float(args['min_duration'])/24)
cur.execute('SET search_path to ' + schema_name)
query = \
"""
select distinct i.subject_id, i.hadm_id, i.icustay_id,
i.gender, i.admission_age as age, a.insurance,
a.deathtime, i.ethnicity, i.admission_type, s.first_careunit,
CASE when a.deathtime between i.intime and i.outtime THEN 1 ELSE 0 END AS mort_icu,
CASE when a.deathtime between i.admittime and i.dischtime THEN 1 ELSE 0 END AS mort_hosp,
i.hospital_expire_flag,
i.hospstay_seq, i.los_icu,
i.admittime, i.dischtime,
i.intime, i.outtime
FROM icustay_detail i
INNER JOIN admissions a ON i.hadm_id = a.hadm_id
INNER JOIN icustays s ON i.icustay_id = s.icustay_id
WHERE s.first_careunit NOT like 'NICU'
and i.hadm_id is not null and i.icustay_id is not null
and i.hospstay_seq = 1
and i.icustay_seq = 1
and i.admission_age >= {min_age}
and i.los_icu >= {min_day}
and (i.outtime >= (i.intime + interval '{min_dur} hours'))
and (i.outtime <= (i.intime + interval '{max_dur} hours'))
ORDER BY subject_id
{limit}
;
""".format(limit=pop_size_string, min_age=min_age_string, min_dur=min_dur_string,
max_dur=max_dur_string, min_day=min_day_string)
data_df = pd.read_sql_query(query, con)
cur.close()
con.close()
data_df = sanitize_df(data_df, static_data_schema)
data = save_pop(data_df, outPath, static_filename, args['pop_size'], static_data_schema)
if data is None: print 'SKIPPED static_data'
else: print "loaded static_data"
#############
# If there is numerics extraction
X = None
    if (args['extract_numerics'] == 0 or args['extract_numerics'] == 1) and isfile(os.path.join(outPath, dynamic_hd5_filename)):
X = pd.read_hdf(os.path.join(outPath, dynamic_hd5_filename))
elif (args['extract_numerics'] == 1 & (not isfile(os.path.join(outPath, dynamic_hd5_filename)))) | (args['extract_numerics'] == 2):
print "Extracting vitals data..."
start_time = time.time()
########
# Step 1) Get the set of variables we want for the patients we've identified!
icuids_to_keep = get_values_by_name_from_df_column_or_index(data, 'icustay_id')
icuids_to_keep = set([str(s) for s in icuids_to_keep])
data = data.copy(deep=True).reset_index().set_index('icustay_id')
# Select out SID, TIME, ITEMID, VALUE form each of the sources!
var_map = get_variable_mapping(mimic_mapping_filename)
var_ranges = get_variable_ranges(range_filename)
chartitems_to_keep = var_map.loc[var_map['LINKSTO'] == 'chartevents'].ITEMID
chartitems_to_keep = set([ str(i) for i in chartitems_to_keep ])
# labitems_to_keep = var_map.loc[var_map['LINKSTO'] == 'labevents'].ITEMID
# labitems_to_keep = set([ str(i) for i in labitems_to_keep ])
con = psycopg2.connect(**query_args)
cur = con.cursor()
print " starting db query with %d subjects..." % (len(icuids_to_keep))
cur.execute('SET search_path to ' + schema_name)
query = \
"""
select c.subject_id, i.hadm_id, c.icustay_id, c.charttime, c.itemid, c.value, valueuom
FROM icustay_detail i
INNER JOIN chartevents c ON i.icustay_id = c.icustay_id
where c.icustay_id in ({icuids})
and c.itemid in ({chitem})
and c.charttime between intime and outtime
and c.error is distinct from 1
and c.valuenum is not null
;
""".format(icuids=','.join(icuids_to_keep), chitem=','.join(chartitems_to_keep)) #, lbitem=','.join(labitems_to_keep))
# UNION ALL
# select distinct i.subject_id, i.hadm_id, i.icustay_id, l.charttime, l.itemid, l.value, valueuom
# FROM icustay_detail i
# INNER JOIN labevents l ON i.hadm_id = l.hadm_id
# where i.icustay_id in ({icuids})
# and l.itemid in ({lbitem})
# and l.charttime between (intime - interval '6' hour) and outtime
# and l.valuenum > 0 -- lab values cannot be 0 and cannot be negative
X = pd.read_sql_query(query, con)
itemids = set(X.itemid.astype(str))
query_d_items = \
"""
SELECT itemid, label, dbsource, linksto, category, unitname
FROM d_items
WHERE itemid in ({itemids})
;
""".format(itemids=','.join(itemids))
I = pd.read_sql_query(query_d_items, con).set_index('itemid')
cur.close()
con.close()
print " db query finished after %.3f sec" % (time.time() - start_time)
#Save the raw data to check codes etc
X.to_hdf(os.path.join(outPath, dynamic_hd5_filt_filename), 'raw_chartevents')
X = save_numerics(
data, X, I, var_map, var_ranges, outPath, dynamic_filename, columns_filename, subjects_filename,
times_filename, dynamic_hd5_filename, group_by_level2=args['group_by_level2'], apply_var_limit=args['var_limits'],
min_percent=args['min_percent']
)
if X is None: print "SKIPPED vitals_hourly_data"
else: print "LOADED vitals_hourly_data"
#############
# If there is codes extraction
C = None
if ( (args['extract_codes'] == 0) or (args['extract_codes'] == 1) ) and isfile(os.path.join(outPath, codes_hd5_filename)):
C = pd.read_hdf(os.path.join(outPath, codes_hd5_filename))
elif ( (args['extract_codes'] == 1) and (not isfile(os.path.join(outPath, codes_hd5_filename))) ) or (args['extract_codes'] == 2):
hadm_ids_to_keep = get_values_by_name_from_df_column_or_index(data, 'hadm_id')
hadm_ids_to_keep = set([str(hadm_id) for hadm_id in hadm_ids_to_keep])
con = psycopg2.connect(**query_args)
cur = con.cursor()
cur.execute('SET search_path to ' + schema_name)
# TODO(mmd): skipping some icd9_codes means that position in the list of icd9_codes doesn't mean the same
        # thing to every row.
query = \
"""
SELECT
i.icustay_id, d.subject_id, d.hadm_id,
array_agg(d.icd9_code ORDER BY seq_num ASC) AS icd9_codes
FROM mimiciii.diagnoses_icd d INNER JOIN icustays i
ON i.hadm_id = d.hadm_id AND i.subject_id = d.subject_id
WHERE d.hadm_id IN ({hadm_ids}) AND seq_num IS NOT NULL
GROUP BY i.icustay_id, d.subject_id, d.hadm_id
;
""".format(hadm_ids = ','.join(hadm_ids_to_keep))
codes = pd.read_sql_query(query,con)
cur.close()
con.close()
C = save_icd9_codes(data, codes, outPath, codes_filename, codes_hd5_filename)
if C is None: print "SKIPPED codes_data"
else: print "LOADED codes_data"
#############
# If there is outcome extraction
Y = None
    if (args['extract_outcomes'] == 0 or args['extract_outcomes'] == 1) and isfile(os.path.join(outPath, outcome_hd5_filename)):
Y = pd.read_hdf(os.path.join(outPath, outcome_hd5_filename))
    elif (args['extract_outcomes'] == 1 and not isfile(os.path.join(outPath, outcome_hd5_filename))) or (args['extract_outcomes'] == 2):
Y = save_outcome(
data, dbname, schema_name, outPath, outcome_filename, outcome_hd5_filename,
        outcome_columns_filename, outcome_data_schema, host=args['psql_host'], password=args['psql_password']
)
print(X.shape, X.index.names, X.columns.names)
print(Y.shape, Y.index.names, Y.columns.names, Y.columns)
print(C.shape, C.index.names, C.columns.names)
print(data.shape, data.index.names, data.columns.names)
if args['exit_after_loading']:
sys.exit()
shared_idx = X.index
shared_sub = list(X.index.get_level_values('icustay_id').unique())
#X = X.loc[shared_idx]
# TODO(mmd): Why does this work?
Y = Y.loc[shared_idx]
# Problems start here.
C = C.loc[shared_idx]
data = data.loc[shared_sub]
data = data.reset_index().set_index(ID_COLS)
# Map the lowering function to all column names
X.columns = pd.MultiIndex.from_tuples(
[tuple((str(l).lower() for l in cols)) for cols in X.columns], names=X.columns.names
)
if args['group_by_level2']:
var_names = list(X.columns.get_level_values('LEVEL2'))
else:
var_names = list(X.columns.get_level_values('itemid'))
Y.columns = map(lambda x: str(x).lower(), Y.columns)
out_names = list(Y.columns.values[3:])
C.columns = map(str.lower, C.columns)
icd_names = list(C.columns.values[1:])
data.columns = map(lambda x: str(x).lower(), data.columns)
static_names = list(data.columns.values[3:])
print('Shape of X : ', X.shape)
print('Shape of Y : ', Y.shape)
print('Shape of C : ', C.shape)
print('Shape of static : ', data.shape)
print('Variable names : ', ",".join(var_names))
print('Output names : ', ",".join(out_names))
print('ICD9 names : ', ",".join(icd_names))
print('Static data : ', ",".join(static_names))
X.to_hdf(os.path.join(outPath, dynamic_hd5_filt_filename), 'vitals_labs')
Y.to_hdf(os.path.join(outPath, dynamic_hd5_filt_filename), 'interventions')
C.to_hdf(os.path.join(outPath, dynamic_hd5_filt_filename), 'c_df')
data.to_hdf(os.path.join(outPath, dynamic_hd5_filt_filename), 'patients', format='table')
#fencepost.to_hdf(os.path.join(outPath, dynamic_hd5_filt_filename), 'fencepost')
#############
#X.to_hdf(os.path.join(outPath, dynamic_hd5_filt_filename), 'X')
#print 'FINISHED VAR LIMITS'
X_mean = X.iloc[:, X.columns.get_level_values(-1)=='mean']
X_mean.to_hdf(os.path.join(outPath, dynamic_hd5_filt_filename), 'vitals_labs_mean')
#TODO: Log the variables that are in 0-1 space, like
#to_log = ['fio2', 'glucose']
#for feat in to_log:
# X[feat] = x[feat].apply(np.log)
#############
# Plot the histograms
if args['plot_hist'] == 1:
plot_variable_histograms(var_names, X)
#############
# Print the total proportions!
rows, vars = X.shape
print('')
for l, vals in X.iteritems():
ratio = 1.0 * vals.dropna().count() / rows
print(str(l) + ': ' + str(round(ratio, 3)*100) + '% present')
#############
# Print the per subject proportions!
df = X.groupby(['subject_id']).count()
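# Editor's note (hedged): after this groupby, df has one row per subject and one
# column per (variable, aggregation) pair; each entry counts that subject's
# non-null hourly values, which the loop below compares against k.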
for k in [1, 2, 3]:
print('% of subjects had at least ' + str(k) + ' present')
d = df >= k
d = d.sum(axis=0)
d = d / len(df)
d = d.reset_index()
for index, row in d.iterrows():
print(str(index) + ': ' + str(round(row[0], 3)*100) + '%')
print('\n')
print('Done!')
``` |
{
"source": "Joefdez/gravitaionalLensing1",
"score": 3
} |
#### File: Joefdez/gravitaionalLensing1/source_generators.py
```python
import numpy as np
def square_source(rr, xsize, ysize, lum=0):
source = np.zeros([xsize, ysize])
xhalf,yhalf = xsize/2., ysize/2.
for ii in range(xsize):
for jj in range(ysize):
if abs(xhalf - ii)<=rr and abs(yhalf - jj)<=rr:
source[ii,jj] = lum
return source
def circular_source(rr, xsize, ysize, lum=0):
source = np.zeros([xsize, ysize])
r2=rr**2 #Calculate square of radius of source
xhalf,yhalf=xsize/2.,ysize/2. #Calculate coordinates of center
for i in range(xsize): #Scan through frame to find points which are to be included in source
for j in range(ysize):
if (xhalf-i)**2+(yhalf-j)**2<r2:
source[i,j]=1. #Change zeros to ones in source
return source #Return desired source
def discs_source(rr, xsize, ysize, lum=0, ndiscs=5): # ndiscs must be 5 to match the brightness steps below
source = np.zeros([xsize, ysize])
space = rr/ndiscs
rrs = np.arange( space, (rr + rr/ndiscs) , rr/ndiscs)
rr2 = rrs*rrs
lums = np.zeros(ndiscs)
for ii in range(ndiscs):
lums[ii]= 2**ii
xhalf,yhalf=xsize/2.,ysize/2. #Calculate coordinates of center
for i in range(xsize): #Scan through frame to find points to be included in source
for j in range(ysize):
if (xhalf-i)**2+(yhalf-j)**2<=rr2[0]:
source[i,j]=64.
elif (xhalf-i)**2+(yhalf-j)**2>rr2[0] and (xhalf-i)**2+(yhalf-j)**2<=rr2[1]:
source[i,j]=32.
elif (xhalf-i)**2+(yhalf-j)**2>rr2[1] and (xhalf-i)**2+(yhalf-j)**2<=rr2[2]:
source[i,j]=8.
elif (xhalf-i)**2+(yhalf-j)**2>rr2[2] and (xhalf-i)**2+(yhalf-j)**2<=rr2[3]:
source[i,j]=2.
elif (xhalf-i)**2+(yhalf-j)**2>rr2[3] and (xhalf-i)**2+(yhalf-j)**2<=rr2[4]:
source[i,j]=1.
return source
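# --- Editor's note: hedged usage sketch, not part of the original module. ---
# The 100x100 frame and 20-pixel radius below are illustrative assumptions.
if __name__ == "__main__":
    box = square_source(20., 100, 100, lum=1.)
    disc = circular_source(20., 100, 100)
    rings = discs_source(20., 100, 100)
    print("pixels lit: box=%d disc=%d rings=%d" % ((box > 0).sum(), (disc > 0).sum(), (rings > 0).sum()))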
```
#### File: Joefdez/gravitaionalLensing1/sources_oop.py
```python
from source_generators import * # includes numpy import, np
import matplotlib.pylab as plt
class modelSource ():
'Source class to represent objects to be lensed'
def __init__(self, name, stype, side, radius=0.0, aspectRatio = 1.0, maxLum = 1.0):
""" Constructor method """
self.name = name
self.type = stype
self.aspectRatio = aspectRatio
self.maxLum = maxLum
if aspectRatio == 1.0:
self.xsize, self.ysize = side, side
else:
self.xsize, self.ysize = side, side*aspectRatio
self.radius = radius
if stype == "square":
self.view = square_source( radius, self.xsize, self.ysize, maxLum )
elif stype == "circular":
self.view = circular_source( radius, self.xsize, self.ysize)
elif stype == "discs":
self.view = discs_source( radius, self.xsize, self.ysize)
self.lensedView = None
print "Source array " + self.name + " generated."
def plotSource(self):
""" Plot the source """
plt.figure('lens') #Declare figure
ax1=plt.axes() #Declare axis
ax1.xaxis.set_ticklabels([]) #Remove ticks
ax1.yaxis.set_ticklabels([])
#plt.figtext(-2.5, -2.5, pn)
#plt.title(pn,loc='center')
plt.imshow(self.view)
class imageSource():
'Class for handling actual images as sources'
def __init__(self, file):
    """Constructor stub: store the image path; loading is left unimplemented."""
    #Remember to open and close properly
    self.file = file
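# --- Editor's note: hedged usage sketch, not part of the original module. ---
# Builds and plots a small square source; the 200-pixel frame and 30-pixel
# radius are illustrative assumptions.
if __name__ == "__main__":
    demo = modelSource("demo", "square", 200, radius=30., maxLum=1.)
    demo.plotSource()
    plt.show()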
``` |
{
"source": "Joefdez/joelib",
"score": 2
} |
#### File: joelib/physics/afterglow_dynamics.py
```python
from numpy import *
from joelib.constants.constants import cc, mp
from joelib.physics.afterglow_properties import normT, adabatic_index
def sound_speed(gam, beta): # Sound speed prescription following LMR18
TTs = normT(gam, beta)
ada = adabatic_index(TTs)
return cc*(ada*(ada-1.)*(gam-1.)/(1+ ada*(gam-1.)))**(1./2.)
def dthetadr(gamma, RR, theta, nn, aa):
return 1./(RR*gamma**(1.+aa)*theta**(aa))
def dmdr(gamma, RR, thetaE, theta, nn, aa):
t1 = (1./3.)*RR**2.*sin(theta)/(gamma**(1.+aa)*theta**(aa)) # First term: change in swept-up mass due to the change in solid angle
t2 = (cos(thetaE)-cos(theta))*RR**2. # Second term: change in swept-up mass due to radial expansion
return 2.*pi*nn*mp*(t1+t2)
def dgdm(M0, gam, mm):
beta = sqrt(1-1./gam**2.)
TT = normT(gam, beta)
ada = adabatic_index(TT)
#numerator = -4.*pi*jet.nn*mp*rr**2. * ( ada*(gam**2.-1)-(ada-1)*gam*beta**2 )
#denominator = jet.EE/(jet.Gam0*cc**2.) + 4./3.*pi*jet.nn*mp*rr**3.*(2.*ada*gam-(ada-1)*(1.+gam**(-2)))
numerator = -10.**mm*log(10)*( ada*(gam**2.-1)-(ada-1)*gam*beta**2 )
denominator = M0 + 10.**mm*(2.*ada*gam-(ada-1)*(1.+gam**(-2)))
#print denominator
return numerator/denominator
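# Editor's note (hedged): dgdm implements the energy-conservation ODE of
# Pe'er (2012) rewritten in the variable mm = log10(m):
#   dGamma/dmm = -ln(10)*10**mm*(ghat*(Gamma**2 - 1) - (ghat - 1)*Gamma*beta**2)
#                / (M0 + 10**mm*(2*ghat*Gamma - (ghat - 1)*(1 + Gamma**(-2))))
# where ghat is the adiabatic index, m the swept-up mass and M0 the ejecta mass.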
# Functions from G&P 2012
def dMdr_GP12(Gamma, mm, RR, theta, nn):
return 2.*pi*(theta*RR)**2.*mp*nn
def dgdR_GP12(Gamma, mm, RR, theta, nn):
dmdr = dMdr_GP12(Gamma, mm, RR, theta, nn)
return -1.*Gamma/(2.*mm) * dmdr
##########################################################################################################################################################################################
############################################################################### DYNAMICS ACCORDING TO ASAF PE'ER #########################################################################
def solver_collimated_shell(M0, gamma0, angExt0, RRs, nn, steps):
"""
Solver for dynamics of relativistic, collimated shell.
"""
gammas, TTs, = zeros(steps), zeros(steps)
#print gamma0
gammas[0] = gamma0
MMs = angExt0/3. * nn * mp * RRs**3.
for ii in range(1, steps):
# First, calculate the evolution of Gamma, theta, and swept up mass
delM = log10(MMs[ii]) - log10(MMs[ii-1])
mms = MMs[ii-1]
gamma = gammas[ii-1]
k1_gamma = delM*dgdm(M0, gamma, log10(mms))
k2_gamma = delM*dgdm(M0, gamma + 0.5*k1_gamma, log10(mms)+0.5*delM)
k3_gamma = delM*dgdm(M0, gamma + 0.5*k2_gamma, log10(mms)+0.5*delM)
k4_gamma = delM*dgdm(M0, gamma + k3_gamma, log10(mms)+delM)
#print k1_gamma, k2_gamma, k3_gamma, k4_gamma
gammas[ii] = gamma + (1./6.) * (k1_gamma + 2 * k2_gamma + 2 * k3_gamma + k4_gamma)
# Next calculate the on-axis time for a distant observer
betas = sqrt(1.-gammas**(-2.))
integrand = 1./(cc*gammas**2.*betas*(1.+betas))
TTs[0] = RRs[0]/(cc*gammas[0]**2.*betas[0]*(1.+betas[0]))
for ii in range(1,steps):
TTs[ii] = trapz(integrand[0:ii+1], RRs[0:ii+1]) + TTs[0]
return gammas, betas, MMs, TTs
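# --- Editor's note: hedged usage sketch, not part of the original module. ---
# All numbers are illustrative assumptions: a spherical-equivalent E = 1e50 erg
# blast wave with Gamma0 = 100 decelerating in an n = 1 cm^-3 medium.
def _example_collimated_run():
    Gam0, EE, nn = 100., 1.e50, 1.
    M0 = EE/(Gam0*cc**2.)            # ejecta mass from E = Gamma0*M0*c^2
    RRs = logspace(14., 19., 500)    # radial grid in cm
    return solver_collimated_shell(M0, Gam0, 4.*pi, RRs, nn, 500)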
def solver_expanding_shell(M0, gamma0, thetaE, theta0, RRs, nn, aa, steps, angExt0, cells, Rd, withSpread=True):
"""
Solver for dynamics of laterally expanding shell.
"""
# First evolve swept up mass and theta, at fixed Lorentz factor
#print shape(gamma0), shape(theta0)
gammas, thetas, MMs, TTs, angExts = zeros(steps), zeros(steps), zeros(steps), zeros(steps), zeros(steps)
gammas[0], thetas[0], angExts[0] = gamma0, theta0, angExt0
MMs[0] = angExts[0]/3. * nn * mp * RRs[0]**3.
for ii in range(1, steps):
# First, calculate the evolution of Gamma, theta, and swept up mass
RRn, RR = RRs[ii], RRs[ii-1]
delR = RRn-RR
theta, mms = thetas[ii-1], MMs[ii-1]
gamma = gammas[ii-1]
if (theta<pi) and (RR>Rd):
k1_theta = delR*dthetadr(gamma, RR, theta, nn, aa)
k1_mms = delR*dmdr(gamma, RR, thetaE, theta, nn, aa)/cells
k2_theta = delR*dthetadr(gamma, RR+0.5*delR, theta + 0.5*k1_theta, nn, aa)
k2_mms = delR*dmdr(gamma, RR+0.5*delR, thetaE, theta + 0.5*k1_theta, nn, aa)/cells
k3_theta = delR*dthetadr(gamma, RR+0.5*delR, theta + 0.5*k2_theta, nn, aa)
k3_mms = delR*dmdr(gamma, RR+0.5*delR, thetaE, theta + 0.5*k2_theta, nn, aa)/cells
k4_theta = delR*dthetadr(gamma, RR + delR, theta + k3_theta, nn, aa)
k4_mms = delR*dmdr(gamma, RR+delR, thetaE, theta + k3_theta, nn, aa)/cells
thetas[ii] = theta + (1./6.) * (k1_theta + 2 * k2_theta + 2 * k3_theta + k4_theta)
MMs[ii] = mms + (1./6.) * (k1_mms + 2 * k2_mms + 2 * k3_mms + k4_mms)
else: # If the shell is already spherical, stop considering lateral expansion in the swept-up mass
MMs[ii] = mms + 2.*pi*(cos(thetaE)-cos(theta))*RR**2.*delR*nn*mp/cells
thetas[ii] = theta
delM = log10(MMs[ii]) - log10(mms)
k1_gamma = delM*dgdm(M0, gamma, log10(mms))
k2_gamma = delM*dgdm(M0, gamma + 0.5*k1_gamma, log10(mms)+0.5*delM)
k3_gamma = delM*dgdm(M0, gamma + 0.5*k2_gamma, log10(mms)+0.5*delM)
k4_gamma = delM*dgdm(M0, gamma + k3_gamma, log10(mms)+delM)
#print k1_gamma, k2_gamma, k3_gamma, k4_gamma
gammas[ii] = gamma + (1./6.) * (k1_gamma + 2 * k2_gamma + 2 * k3_gamma + k4_gamma)
# Next calculate the on-axis time for a distant observer
betas = sqrt(1.-gammas**(-2.))
if withSpread:
betas = sqrt(1.-gammas**(-2.))
dThetadr = concatenate([zeros(1), diff(thetas)/diff(RRs)])
dR = concatenate([zeros(1), diff(RRs)])
integrand = 1./(cc*betas) * sqrt(1.+RRs**2.*dThetadr**2.) - 1./(cc)
TTs[0] = RRs[0]/(cc*betas[0])* (sqrt(1.+RRs[0]**2.*dThetadr[0]**2.)) - RRs[0]/cc
else:
integrand = 1./(cc*gammas**2.*betas*(1.+betas))
TTs[0] = RRs[0]/(cc*gammas[0]**2.*betas[0]*(1.+betas[0]))
for ii in range(1,steps):
TTs[ii] = trapz(integrand[0:ii+1], RRs[0:ii+1]) + TTs[0]
# Just to finish off, calculate the solid angle extent of each ring
angExts = 2.*pi*(1.-cos(thetas))/cells
return gammas, betas, thetas, MMs, TTs, angExts # NB: thetas are half-opening angles
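# --- Editor's note: hedged usage sketch, not part of the original module. ---
# Illustrative assumptions only: a single cone of half-opening angle 0.1 rad,
# expansion-law exponent aa = 1 and a single cell.
def _example_expanding_run():
    Gam0, EE, nn, theta0 = 100., 1.e50, 1., 0.1
    M0 = EE/(Gam0*cc**2.)
    angExt0 = 2.*pi*(1.-cos(theta0))                             # cone solid angle
    Rd = (3./angExt0/(cc**2.*mp)*EE/(nn*Gam0**2.))**(1./3.)      # deceleration radius
    RRs = logspace(14., 19., 500)
    return solver_expanding_shell(M0, Gam0, 0., theta0, RRs, nn, 1., 500, angExt0, 1, Rd)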
##########################################################################################################################################################################################
############################################################################### DYNAMICS LIKE BM #########################################################################
def BMsolver_collimated_shell(M0, gamma0, angExt0, RRs, nn, steps):
"""
Solver for dynamics of a relativistic, collimated shell (no lateral expansion).
"""
gammas, TTs, = zeros(steps), zeros(steps)
#print gamma0
gammas[0] = gamma0
MMs = angExt0/3. * nn * mp * RRs**3.
for ii in range(1, steps):
# First, calculate the evolution of Gamma, theta, and swept up mass
delM = log10(MMs[ii]) - log10(MMs[ii-1])
mms = MMs[ii-1]
gamma = gammas[ii-1]
k1_gamma = delM*dgdm(M0, gamma, log10(mms))
k2_gamma = delM*dgdm(M0, gamma + 0.5*k1_gamma, log10(mms)+0.5*delM)
k3_gamma = delM*dgdm(M0, gamma + 0.5*k2_gamma, log10(mms)+0.5*delM)
k4_gamma = delM*dgdm(M0, gamma + k3_gamma, log10(mms)+delM)
#print k1_gamma, k2_gamma, k3_gamma, k4_gamma
gammas[ii] = gamma + (1./6.) * (k1_gamma + 2 * k2_gamma + 2 * k3_gamma + k4_gamma)
# Next calculate the on-axis time for a distant observer
betas = sqrt(1.-gammas**(-2.))
integrand = 1./(cc*gammas**2.*betas*(1.+betas))
TTs[0] = RRs[0]/(cc*gammas[0]**2.*betas[0]*(1.+betas[0]))
for ii in range(1,steps):
TTs[ii] = trapz(integrand[0:ii+1], RRs[0:ii+1]) + TTs[0]
return gammas, betas, MMs, TTs
def BMsolver_expanding_shell(M0, gamma0, thetaE, theta0, RRs, nn, aa, steps, angExt0, cells):
"""
Solver for dynamics of laterally expanding shell.
Lateral expansion implemented as in GP12 or LMR18.
"""
# First evolve swept up mass and theta, at fixed Lorentz factor
#print shape(gamma0), shape(theta0)
gammas, thetas, MMs, TTs, angExts = zeros(steps), zeros(steps), zeros(steps), zeros(steps), zeros(steps)
gammas[0], thetas[0], angExts[0] = gamma0, theta0, angExt0
MMs[0] = angExts[0]/3. * nn * mp * RRs[0]**3.
for ii in range(1, steps):
# First, calculate the evolution of Gamma, theta, and swept up mass
RRn, RR = RRs[ii], RRs[ii-1]
delR = RRn-RR
theta, mms = thetas[ii-1], MMs[ii-1]
gamma = gammas[ii-1]
if (theta<pi): #and (gamma<=4.):
k1_theta = delR*dthetadr(gamma, RR, theta, nn, aa)
k1_mms = delR*dmdr(gamma, RR, thetaE, theta, nn, aa)/cells
k2_theta = delR*dthetadr(gamma, RR+0.5*delR, theta + 0.5*k1_theta, nn, aa)
k2_mms = delR*dmdr(gamma, RR+0.5*delR, thetaE, theta + 0.5*k1_theta, nn, aa)/cells
k3_theta = delR*dthetadr(gamma, RR+0.5*delR, theta + 0.5*k2_theta, nn, aa)
k3_mms = delR*dmdr(gamma, RR+0.5*delR, thetaE, theta + 0.5*k2_theta, nn, aa)/cells
k4_theta = delR*dthetadr(gamma, RR + delR, theta + k3_theta, nn, aa)
k4_mms = delR*dmdr(gamma, RR+delR, thetaE, theta + k3_theta, nn, aa)/cells
thetas[ii] = theta + (1./6.) * (k1_theta + 2 * k2_theta + 2 * k3_theta + k4_theta)
MMs[ii] = mms + (1./6.) * (k1_mms + 2 * k2_mms + 2 * k3_mms + k4_mms)
else: # If the shell is already spherical, stop considering lateral expansion in the swept-up mass
MMs[ii] = mms + 2.*pi*(cos(thetaE)-cos(theta))*RR**2.*delR*nn*mp/cells
thetas[ii] = theta
delM = log10(MMs[ii]) - log10(mms)
k1_gamma = delM*dgdm(M0, gamma, log10(mms))
k2_gamma = delM*dgdm(M0, gamma + 0.5*k1_gamma, log10(mms)+0.5*delM)
k3_gamma = delM*dgdm(M0, gamma + 0.5*k2_gamma, log10(mms)+0.5*delM)
k4_gamma = delM*dgdm(M0, gamma + k3_gamma, log10(mms)+delM)
#print k1_gamma, k2_gamma, k3_gamma, k4_gamma
gammas[ii] = gamma + (1./6.) * (k1_gamma + 2 * k2_gamma + 2 * k3_gamma + k4_gamma)
# Next calculate the on-axis time for a distant observer
"""
betas = sqrt(1.-gammas**(-2.))
integrand = 1./(cc*gammas**2.*betas*(1.+betas))
TTs[0] = RRs[0]/(cc*gammas[0]**2.*betas[0]*(1.+betas[0]))
#dtdr = dthetadr(gammas, RRs, thetas, nn, aa)
#TTs[0] = RRs[0]*sqrt(1.+ (RRs[0]*dtdr[0])**2)/(cc*betas[0]) - RRs[0]/cc
#integrand = sqrt(1.+ (RRs*dtdr)**2)/(cc*betas)
for ii in range(1,steps):
TTs[ii] = trapz(integrand[0:ii+1], RRs[0:ii+1]) + TTs[0]
"""
betas = sqrt(1.-gammas**(-2.))
dThetadr = concatenate([zeros(1), diff(thetas)/diff(RRs)])
dR = concatenate([zeros(1), diff(RRs)])
#integrand1 = 1./(cc*Gams**2.*Betas*(1.+Betas))
integrand = 1./(cc*betas) * sqrt(1.+RRs**2.*dThetadr**2.) - 1./(cc)
#TTs_ne[0] = RRs[0]/(cc*Gams[0]**2.*Betas[0]*(1.+Betas[0]))
TTs[0] = RRs[0]/(cc*betas[0])* (sqrt(1.+RRs[0]**2.*dThetadr[0]**2.)) - RRs[0]/cc
for ii in range(1,steps):
#TTs_ne[ii] = trapz(integrand1[0:ii+1], RRs[0:ii+1]) + TTs_ne[0]
TTs[ii] = trapz(integrand[0:ii+1], RRs[0:ii+1]) + TTs[0]
#TTs = TTs - RRs/cc
# Just to finish off, calculate the solid angle extent of each ring
angExts = 2.*pi*(1.-cos(thetas))/cells
return gammas, betas, thetas, MMs, TTs, angExts # NB: thetas are half-opening angles
############################################################################### DYNAMICS LIKE BM #########################################################################
##########################################################################################################################################################################################
"""
def solver_expanding_shell(M0, gamma0, thetaE, theta0, initJoAngle, RRs, nn, aa, steps, angExt0, cells):
Solver for dynamics of laterally expaning shell.
Lateral expansion implemented as in GP12 or LMR18
# First evolve swept up mass and theta, at fixed Lorentz factor
#print shape(gamma0), shape(theta0)
gammas, thetas, MMs, TTs, angExts = zeros(steps), zeros(steps), zeros(steps), zeros(steps), zeros(steps)
thetasOut = zeros(steps)
gammas[0], thetas[0], thetasOut[0], angExts[0] = gamma0, initJoAngle, theta0, angExt0
MMs[0] = angExts[0]/3. * nn * mp * RRs[0]**3.
print(cos(thetaE)- cos(thetasOut[0]))/cells
for ii in range(1, steps):
# First, calculate the evolution of Gamma, theta, and swept up mass
RRn, RR = RRs[ii], RRs[ii-1]
delR = RRn-RR
theta, mms = thetas[ii-1], MMs[ii-1]
thetaOut = thetasOut[ii-1] + (theta-thetas[0])
gamma = gammas[ii-1]
print(thetaE, thetasOut[ii], theta, MMs[ii-1], MMs[ii])
if theta<pi:
k1_theta = delR*dthetadr(gamma, RR, theta, nn, aa)
k1_mms = delR*dmdr(gamma, RR, thetaE, thetaOut, nn, aa)/cells
k2_theta = delR*dthetadr(gamma, RR+0.5*delR, theta + 0.5*k1_theta, nn, aa)
k2_mms = delR*dmdr(gamma, RR+0.5*delR, thetaE, thetaOut + 0.5*k1_theta, nn, aa)/cells
k3_theta = delR*dthetadr(gamma, RR+0.5*delR, theta + 0.5*k2_theta, nn, aa)
k3_mms = delR*dmdr(gamma, RR+0.5*delR, thetaE, thetaOut + 0.5*k2_theta, nn, aa)/cells
k4_theta = delR*dthetadr(gamma, RR + delR, theta + k3_theta, nn, aa)
k4_mms = delR*dmdr(gamma, RR+delR, thetaE, thetaOut + k3_theta, nn, aa)/cells
thetas[ii] = theta + (1./6.) * (k1_theta + 2 * k2_theta + 2 * k3_theta + k4_theta)
#thetasOut[ii] = thetaOut + (1./6.) * (k1_theta + 2 * k2_theta + 2 * k3_theta + k4_theta)
MMs[ii] = mms + (1./6.) * (k1_mms + 2 * k2_mms + 2 * k3_mms + k4_mms)
else: # If the shell is already spherical, stop considering lateral expansion in the swept-up mass
MMs[ii] = mms + 2.*pi*(cos(thetaE)-cos(thetaOut))*RR**2.*delR*nn*mp/cells
thetas[ii] = theta
delM = log10(MMs[ii]) - log10(mms)
k1_gamma = delM*dgdm(M0, gamma, log10(mms))
k2_gamma = delM*dgdm(M0, gamma + 0.5*k1_gamma, log10(mms)+0.5*delM)
k3_gamma = delM*dgdm(M0, gamma + 0.5*k2_gamma, log10(mms)+0.5*delM)
k4_gamma = delM*dgdm(M0, gamma + k3_gamma, log10(mms)+delM)
#print k1_gamma, k2_gamma, k3_gamma, k4_gamma
gammas[ii] = gamma + (1./6.) * (k1_gamma + 2 * k2_gamma + 2 * k3_gamma + k4_gamma)
# Next calculate the on-axis time for a distant observer
betas = sqrt(1.-gammas**(-2.))
integrand = 1./(cc*gammas**2.*betas*(1.+betas))
TTs[0] = RRs[0]/(cc*gammas[0]**2.*betas[0]*(1.+betas[0]))
#dtdr = dthetadr(gammas, RRs, thetas, nn, aa)
#TTs[0] = RRs[0]*sqrt(1.+ (RRs[0]*dtdr[0])**2)/(cc*betas[0]) - RRs[0]/cc
#integrand = sqrt(1.+ (RRs*dtdr)**2)/(cc*betas)
for ii in range(1,steps):
TTs[ii] = trapz(integrand[0:ii+1], RRs[0:ii+1]) + TTs[0]
#TTs = TTs - RRs/cc
# Just to finish off, calculate the solid angle extent of each ring
angExts = 2.*pi*(1.-cos(thetas))/cells
return gammas, betas, thetas, MMs, TTs, angExts # Return 2*thetas, because theta is half the opening angle
"""
def obsTime_onAxis_integrated(RRs, Gams, Betas):
"""
Very crude numerical integration to obtain the on-axis observer time
"""
TTs = zeros(len(Betas))
integrand = 1./(cc*Gams**2.*Betas*(1.+Betas))
TTs[0] = RRs[0]/(cc*Gams[0]**2.*Betas[0]*(1.+Betas[0]))
for ii in range(1,len(Betas)):
TTs[ii] = trapz(integrand[0:ii+1], RRs[0:ii+1]) + TTs[0]
return TTs
def obsTime_onAxis_LE_integrated(RRs, thetas, Gams, Betas):
"""
Very crude numerical integration to obtain the on-axis observer time
accounting for lateral expansion
"""
TTs_ee = zeros(len(Betas))
dthetadr = concatenate([zeros(1), diff(thetas)/diff(RRs)])
dR = concatenate([zeros(1), diff(RRs)])
#integrand1 = 1./(cc*Gams**2.*Betas*(1.+Betas))
integrand2 = 1./(cc*Betas) * sqrt(1.+RRs**2.*dthetadr**2.) - 1./(cc)
#TTs_ne[0] = RRs[0]/(cc*Gams[0]**2.*Betas[0]*(1.+Betas[0]))
TTs_ee[0] = RRs[0]/(cc*Betas[0])* (sqrt(1.+RRs[0]**2.*dthetadr[0]**2.)) - RRs[0]/cc
for ii in range(1,len(Betas)):
#TTs_ne[ii] = trapz(integrand1[0:ii+1], RRs[0:ii+1]) + TTs_ne[0]
TTs_ee[ii] = trapz(integrand2[0:ii+1], RRs[0:ii+1]) + TTs_ee[0]
return TTs_ee #TTs_ne, TTs_ee
def obsTime_offAxis_General_NEXP(RR, TT, theta):
return TT + RR/cc * (1.-cos(theta))
def obsTime_offAxis_General_EXP(RRs, TTs, costhetas):
delTTs = zeros(len(RRs))
delTTs[0] = RRs[0] * (1.-costhetas[0])
for ii in range(1, len(RRs)):
delTTs[ii] = trapz((1.-costhetas[0:ii+1]), RRs[0:ii+1]) + delTTs[0]
return TTs + delTTs/cc
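# Editor's note (hedged): for a laterally expanding shell the extra light-travel
# delay is accumulated along the trajectory,
#   T_obs(R) = T(R) + (1/c) * integral_0^R (1 - cos(theta(R'))) dR',
# which reduces to the constant-theta NEXP expression above.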
```
#### File: joelib/physics/grb_observables.py
```python
from numpy import *
from joelib.constants.constants import cc, mp, me, qe, sigT, sTd
from tqdm import *
from joelib.physics.afterglow_dynamics import obsTime_offAxis_General_NEXP, obsTime_offAxis_General_EXP
from joelib.physics.afterglow_properties import *
from scipy.interpolate import interp1d
from scipy.interpolate import griddata as gdd
from matplotlib.pylab import *
from scipy.ndimage import gaussian_filter
# Utilities and functions for calculating GRB observables, such as light curves for different components of the emission and synthetic images ##########
def obsangle(thetas, phis, alpha_obs):
"""
Return the cosine of the observer angle for the different shock-wave segments
and an observer at an angle alpha_obs with respect to the jet axis
(contained in the yz plane).
"""
#u_obs_x, u_obs_y, u_obs_z = 0., sin(alpha_obs), cos(alpha_obs)
u_obs_y, u_obs_z = sin(alpha_obs), cos(alpha_obs)
#seg_x =
seg_y = sin(thetas)*sin(phis)
seg_z = cos(thetas)
#return arccos(u_obs_x*seg_x + u_obs_y*seg_y + u_obs_z*seg_z)
return u_obs_y*seg_y + u_obs_z*seg_z
def obsangle_cj(thetas, phis, alpha_obs):
"""
Return the cosine of the observer angle for the different shock-wave
segments in the counter-jet and an observer at an angle alpha_obs with respect to the jet axis
(contained in the yz plane).
"""
#u_obs_x, u_obs_y, u_obs_z = 0., sin(alpha_obs), cos(alpha_obs)
u_obs_y, u_obs_z = sin(alpha_obs), cos(alpha_obs)
#seg_x =
seg_y = sin(pi-thetas)*sin(phis)
seg_z = cos(pi-thetas)
#return arccos(u_obs_x*seg_x + u_obs_y*seg_y + u_obs_z*seg_z)
return u_obs_y*seg_y + u_obs_z*seg_z
def dopplerFactor(cosa, beta):
"""
Calculate the doppler factors of the different jethead segments
cosa -> cosine of the observation angle, obtained using obsangle
"""
return (1.-beta)/(1.-beta*cosa)
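# --- Editor's note: hedged illustration, not part of the original module. ---
# The segment angles (theta = 0.05 rad, phi = 0) and viewing angle (0.2 rad)
# are illustrative assumptions.
def _example_doppler():
    cosa = obsangle(0.05, 0., 0.2)   # cosine of the segment-observer angle
    beta = 0.99                      # segment speed in units of c
    return dopplerFactor(cosa, beta)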
def light_curve_peer_TH(jet, pp, alpha_obs, obsFreqs, DD, rangeType, timeD, Rb):
# Takes top hat jet as input parameter!
if rangeType=='range':
tt0, ttf, num = timeD
lt0 = log10(tt0*sTd) # Convert to seconds and then logspace
ltf = log10(ttf*sTd) # Convert to seconds and then logspace
tts = logspace(lt0, ltf+(ltf-lt0)/num, num) # Timeline on which the flux is evaluated.
elif rangeType=='discrete':
tts, num = timeD*sTd, len(timeD) # Convert from days to seconds, as in the SJ version
if type(obsFreqs)==float:
obsFreqs = array([obsFreqs])
#calpha = obsangle(jet.cthetas, jet.cphis, alpha_obs)
#alpha = arccos(calpha)
#calpha_cj = obsangle_cj(jet.cthetas, jet.cphis, alpha_obs)
#alpha_cj = arccos(calpha_cj)
# if (ttf>max_Tobs or ttf>max_Tobs_cj):
# print("ttf larger than maximum observable time. Adjusting value. ")
# ttf = min(max_Tobs, max_Tobs_cj)
light_curve = zeros([len(obsFreqs), num])
light_curve_RS = zeros([len(obsFreqs), num])
light_curve_CJ = zeros([len(obsFreqs), num])
for ii in tqdm(range(jet.ncells)):
#for ii in range(jet.ncells):
layer = jet.layer[ii]
#print(layer, type(layer))
#theta_cell = jet.cthetas[layer-1]
phi_cell = jet.cphis[ii]
#calpha, calpha_cj = obsangle(theta_cell, phi_cell, alpha_obs), obsangle_cj(theta_cell, phi_cell, alpha_obs)
#alpha, alpha_cj = arccos(calpha), arccos(calpha_cj)
onAxisTint = interp1d(jet.RRs, jet.TTs)
if jet.aa >= 0:
# For laterally expanding shells
#theta_cellR = ones([jet.steps])*jet.cthetas[0,layer-1] + 0.5*arcsin(sin(jet.thetas[:, layer-1])-sin(jet.initJoAngle))
#cthetas_cell = jet.cthetas[:,layer-1]
cthetas_cell = jet.cthetas[0,layer-1]
calphaR, calphaR_cj = obsangle(cthetas_cell, phi_cell, alpha_obs), obsangle_cj(cthetas_cell, phi_cell, alpha_obs)
#alphaR, alphaR_cj = arccos(calphaR), arccos(calphaR_cj)
#alphaRI, alphaR_cjI = interp1d(jet.RRs, alphaR), interp1d(jet.RRs, alphaR_cj)
#calphaRI, calphaR_cjI = interp1d(jet.RRs, calphaR), interp1d(jet.RRs, calphaR_cj)
#calphaR =
ttobs = jet.TTs + jet.RRs/cc * (1.-calphaR)
ttobs_cj = jet.TTs + jet.RRs/cc*(1.-calphaR_cj)
#ttobs = obsTime_offAxis_General_NEXP(jet.RRs, jet.TTs, alphaR)
#print(ttobs.min())/sTd
#ttobs_cj = obsTime_offAxis_General_EXP(jet.RRs, jet.TTs, calphaR_cj)
else:
# For collimated shells
calpha, calpha_cj = obsangle(jet.cthetas[0,layer-1], phi_cell, alpha_obs), obsangle_cj(jet.cthetas[0,layer-1], phi_cell, alpha_obs)
alpha, alpha_cj = arccos(calpha), arccos(calpha_cj)
ttobs = obsTime_offAxis_General_NEXP(jet.RRs, jet.TTs, alpha)
ttobs_cj = obsTime_offAxis_General_NEXP(jet.RRs, jet.TTs, alpha_cj)
#ttobs = obsTime_offAxis_UR(jet.RRs, jet.TTs, jet.Betas, alpha)
filTM = where(tts<=max(ttobs))[0]
filTm = where(tts[filTM]>=min(ttobs))[0]
filTM_cj = where(tts<=max(ttobs_cj))[0]
filTm_cj = where(tts[filTM_cj]>=min(ttobs_cj))[0]
#print shape(filTM_cj[filTM_cj])
#print shape(filTM_cj[filTM_cj])
#print(len(tts[filT]))
Rint = interp1d(ttobs, jet.RRs)
Robs = Rint(tts[filTM][filTm])
#Robs = Rint(tts)
GamObs = jet.GamInt(Robs)
BetaObs = sqrt(1.-GamObs**(-2.))
#if jet.evolution == 'adiabatic':
# onAxisTobs = obsTime_onAxis_adiabatic(Robs, BetaObs)
#elif jet.evolution == 'peer':
# onAxisTobs = obsTime_onAxis_integrated(Robs, GamObs, BetaObs)
onAxisTobs = onAxisTint(Robs)
thetaObs = jet.cthetasI[layer-1](Robs)
calpha = obsangle(thetaObs, phi_cell, alpha_obs)
#calpha = cos(alpha)
#angExt = jet.angExtI(Robs)
nE = jet.neI(Robs)
Rint_cj = interp1d(ttobs_cj, jet.RRs)
Robs_cj= Rint_cj(tts[filTM_cj][filTm_cj])
GamObs_cj = jet.GamInt(Robs_cj)
BetaObs_cj = sqrt(1.-GamObs_cj**(-2.))
onAxisTobs_cj = onAxisTint(Robs_cj)
thetaObs_cj = jet.cthetasI[layer-1](Robs_cj)
calpha_cj = obsangle_cj(thetaObs_cj, phi_cell, alpha_obs)
#angExt_cj = jet.angExtI(Robs_cj)
nE_cj = jet.neI(Robs_cj)
#if jet.aa>=0:
# alpha# = , alpha_cj = alphaRI(Robs), alphaR_cjI(Robs)
# calpha, calpha_cj = calphaRI(Robs), calphaR_cjI(Robs)
#else:
# alpha, alpha_cj = ones(len(Robs))*alpha, ones(len(Robs))*alpha_cj
# calpha, calpha_cj = ones(len(Robs))*calpha, ones(len(Robs))*calpha_cj
# Forward shock stuff, principal jet
"""
Bfield = Bfield_modified(GamObs, BetaObs, jet.nn, jet.epB)
gamMobs, nuMobs = minGam_modified(GamObs, jet.epE, jet.epB, jet.nn, pp, Bfield, jet.Xp)
gamCobs, nuCobs = critGam_modified(GamObs, jet.epE, jet.epB, jet.nn, pp, Bfield, onAxisTobs)
Fnuobs = fluxMax_modified(Robs, GamObs, nE, Bfield, jet.PhiP)
"""
nuMobs = jet.nuMI(Robs)
nuCobs = jet.nuCI(Robs)
Fnuobs = jet.FnuMaxI(Robs)
# Forward shock, counter-jet stuff
"""
Bfield_cj = Bfield_modified(GamObs_cj, BetaObs_cj, jet.nn, jet.epB)
gamMobs_cj, nuMobs_cj = minGam_modified(GamObs_cj, jet.epE, jet.epB, jet.nn, pp, Bfield_cj, jet.Xp)
gamCobs_cj, nuCobs_cj = critGam_modified(GamObs_cj, jet.epE, jet.epB, jet.nn, pp, Bfield_cj, onAxisTobs_cj)
Fnuobs_cj = fluxMax_modified(Robs_cj, GamObs_cj, nE_cj, Bfield_cj, jet.PhiP)
"""
nuMobs_cj = jet.nuMI(Robs_cj)
nuCobs_cj = jet.nuCI(Robs_cj)
Fnuobs_cj = jet.FnuMaxI(Robs_cj)
# Reverse shock stuff
nuM_RS, nuC_RS, Fnu_RS = params_tt_RS(jet, onAxisTobs, Rb)
dopFacs = dopplerFactor(calpha, BetaObs)
#afac = angExt/maximum(angExt*ones(num)[filTM][filTm], 2.*pi*(1.-cos(1./GamObs)))
dopFacs_cj = dopplerFactor(calpha_cj, BetaObs_cj)
#afac_cj = angExt_cj/maximum(angExt_cj*ones(num)[filTM_cj][filTm_cj], 2.*pi*(1.-cos(1./GamObs_cj)))
for freq in obsFreqs:
fil1, fil2 = where(nuMobs<=nuCobs)[0], where(nuMobs>nuCobs)[0]
fil3, fil4 = where(nuM_RS<=nuC_RS)[0], where(nuM_RS>nuC_RS)[0]
fil5, fil6 = where(nuMobs_cj<=nuCobs_cj)[0], where(nuMobs_cj>nuCobs_cj)[0]
freqs = freq/dopFacs # Calculate the rest-frame frequencies corresponding to the observed frequency
freqs_cj = freq/dopFacs_cj
light_curve[obsFreqs==freq, filTM[filTm][fil1]] = light_curve[obsFreqs==freq, filTM[filTm][fil1]] + (
(GamObs[fil1]*(1.-BetaObs[fil1]*calpha[fil1]))**(-3.) * FluxNuSC_arr(jet.pp, nuMobs[fil1], nuCobs[fil1], Fnuobs[fil1], freqs[fil1]))#*calpha
#light_curve[obsFreqs==freq, :] = light_curve[obsFreqs==freq, :] + (
# (GamObs*(1.-BetaObs*calpha))**(-3.) * FluxNuSC_arr(jet.pp, nuMobs, nuCobs, Fnuobs, freqs))#*calpha
if len(fil2[fil2])>0:
light_curve[obsFreqs==freq, filTM[filTm][fil2]] = light_curve[obsFreqs==freq, filTM[filTm][fil2]] + (
(GamObs[fil2]*(1.-BetaObs[fil2]*calpha[fil2]))**(-3.) * FluxNuSC_arr(jet.pp, nuMobs[fil2], nuCobs[fil2], Fnuobs[fil2], freqs[fil2]))#*calpha
#light_curve_RS[obsFreqs==freq, filTM[filTm][fil3]] = light_curve_RS[obsFreqs==freq, filTM[filTm][fil3]] + (
# (GamObs[fil3]*(1.-BetaObs[fil3]*calpha[fil3]))**(-3.) * FluxNuSC_arr(jet.pp, nuM_RS[fil3], nuC_RS[fil3], Fnu_RS[fil3], freqs[fil3]))#*calpha
#light_curve_RS[obsFreqs==freq, filTM[filTm][fil4]] = light_curve_RS[obsFreqs==freq, filTM[filTm][fil4]] + (
# afac[fil4] * dopFacs[fil4]**3. * FluxNuFC_arr(jet, nuM_RS[fil4], nuC_RS[fil4], Fnu_RS[fil4], freqs[fil4]))*calpha
light_curve_CJ[obsFreqs==freq, filTM_cj[filTm_cj][fil5]] = light_curve_CJ[obsFreqs==freq, filTM_cj[filTm_cj][fil5]] + (
(GamObs_cj[fil5]*(1.-BetaObs_cj[fil5]*calpha_cj[fil5]))**(-3.) * FluxNuSC_arr(jet.pp, nuMobs_cj[fil5], nuCobs_cj[fil5], Fnuobs_cj[fil5], freqs_cj[fil5]))#*calpha
if len(fil6[fil6])>0:
light_curve_CJ[obsFreqs==freq, filTM_cj[filTm_cj][fil6]] = light_curve_CJ[obsFreqs==freq, filTM_cj[filTm_cj][fil6]] + (
(GamObs_cj[fil6]*(1.-BetaObs_cj[fil6]*calpha_cj[fil6]))**(-3.) * FluxNuSC_arr(jet.pp, nuMobs_cj[fil6], nuCobs_cj[fil6], Fnuobs_cj[fil6], freqs_cj[fil6]))#*calpha
#return tts, 2.*light_curve, 2.*light_curve_RS
return tts, light_curve/(DD**2.), light_curve_RS/(DD**2.), light_curve_CJ/(DD**2.)
def light_curve_peer_SJ(jet, pp, alpha_obs, obsFreqs, DD, rangeType, timeD, Rb):
# Takes structured jet as input parameter!
if rangeType=='range':
tt0, ttf, num = timeD
lt0 = log10(tt0*sTd) # Convert to seconds and then logspace
ltf = log10(ttf*sTd) # Convert to seconds and then logspace
tts = logspace(lt0, ltf+(ltf-lt0)/num, num) # Timeline on which the flux is evaluated.
elif rangeType=='discrete':
tts, num = timeD*sTd, len(timeD)
if type(obsFreqs)!=ndarray:
obsFreqs = array([obsFreqs])
#calpha = obsangle(jet.cthetas, jet.cphis, alpha_obs)
#alpha = arccos(calpha)
#calpha_cj = obsangle_cj(jet.cthetas, jet.cphis, alpha_obs)
#alpha_cj = arccos(calpha_cj)
# if (ttf>max_Tobs or ttf>max_Tobs_cj):
# print "ttf larger than maximum observable time. Adjusting value. "
# ttf = min(max_Tobs, max_Tobs_cj)
light_curve = zeros([len(obsFreqs), num])
light_curve_RS = zeros([len(obsFreqs), num])
light_curve_CJ = zeros([len(obsFreqs), num])
#f, a = subplots()
#f2, a2 = subplots()
#for ii in tqdm(range(jet.ncells)):
for ii in range(jet.ncells):
layer = jet.layer[ii]
if jet.cell_Gam0s[layer-1] <= 1.+1e-5:
continue
#print(layer, type(layer))
#theta_cell = jet.cthetas[layer-1]
phi_cell = jet.cphis[ii]
#calpha, calpha_cj = obsangle(theta_cell, phi_cell, alpha_obs), obsangle_cj(theta_cell, phi_cell, alpha_obs)
#alpha, alpha_cj = arccos(calpha), arccos(calpha_cj)
#print(layer-1)
GamInt = jet.GamInt[layer-1]
onAxisTint = jet.TTInt[layer-1]
"""
if jet.aa >= 0:
# For laterally expanding shells
#theta_cellR = ones([jet.steps])*jet.cthetas[0,layer-1] + 0.5*arcsin(sin(jet.thetas[:, layer-1])-sin(jet.initJoAngle))
theta_cellR = jet.cthetas[:,layer-1]
calphaR, calphaR_cj = obsangle(theta_cellR, phi_cell, alpha_obs), obsangle_cj(theta_cellR, phi_cell, alpha_obs)
#alphaR, alphaR_cj = arccos(calphaR), arccos(calphaR_cj)
#alphaRI, alphaR_cjI = interp1d(jet.RRs, alphaR), interp1d(jet.RRs, alphaR_cj)
#ttobs = obsTime_offAxis_General_EXP(jet.RRs, jet.TTs[:,layer-1], calphaR)
#ttobs_cj = obsTime_offAxis_General_EXP(jet.RRs, jet.TTs[:,layer-1], calphaR_cj)
#cthetas_cell = jet.cthetas[:,layer-1]
cthetas_cell = jet.cthetas0[layer-1]
ttobs = jet.TTs[:, layer-1] + jet.RRs/cc * (1.-calphaR)
ttobs_cj = jet.TTs[:,layer-1] + jet.RRs/cc* (1.-calphaR_cj)
else:
# For collimated shells
calpha, calpha_cj = obsangle(jet.cthetas0[layer-1], phi_cell, alpha_obs), obsangle_cj(jet.cthetas0[layer-1], phi_cell, alpha_obs)
alpha, alpha_cj = arccos(calpha), arccos(calpha_cj)
ttobs = obsTime_offAxis_General_NEXP(jet.RRs, jet.TTs[:,layer-1], alpha)
ttobs_cj = obsTime_offAxis_General_NEXP(jet.RRs, jet.TTs[:,layer-1], alpha_cj)
"""
theta_cellR = jet.cthetas0[layer-1]
calphaR, calphaR_cj = obsangle(theta_cellR, phi_cell, alpha_obs), obsangle_cj(theta_cellR, phi_cell, alpha_obs)
ttobs = jet.TTs[:, layer-1] + jet.RRs/cc * (1.-calphaR)
#ttobs_cj = jet.TTs[:,layer-1] + jet.RRs/cc* (1.-calphaR_cj)
#ttobs = obsTime_offAxis_UR(jet.RRs, jet.TTs, jet.Betas, alpha)
filTM = where(tts<=max(ttobs))[0]
filTm = where(tts[filTM]>=min(ttobs))[0]
#filTM_cj = where(tts<=max(ttobs_cj))[0]
#filTm_cj = where(tts[filTM_cj]>=min(ttobs_cj))[0]
#print shape(filTM_cj[filTM_cj])
#print shape(filTM_cj[filTM_cj])
#print(len(tts[filT]))
Rint = interp1d(ttobs, jet.RRs, copy=False)
Robs = Rint(tts[filTM][filTm])
GamObs = GamInt(Robs)
BetaObs = sqrt(1.-GamObs**(-2.))
#if jet.evolution == 'adiabatic':
# onAxisTobs = obsTime_onAxis_adiabatic(Robs, BetaObs)
#elif jet.evolution == 'peer':
# onAxisTobs = obsTime_onAxis_integrated(Robs, GamObs, BetaObs)
#onAxisTobs = onAxisTint(Robs)
#thetaObs = jet.cthetasI[layer-1](Robs)
#thetaObs = jet.cthetas0[layer-1]
calpha = calphaR # obsangle(thetaObs, phi_cell, alpha_obs)
nE = jet.neI[layer-1](Robs)
#angExt = jet.angExtI[layer-1](Robs)
#Rint_cj = interp1d(ttobs_cj, jet.RRs)
#Robs_cj= Rint(tts[filTM_cj][filTm_cj])
#GamObs_cj = jet.GamInt[layer-1](Robs_cj)
#BetaObs_cj = sqrt(1.-GamObs_cj**(-2.))
#onAxisTobs_cj = onAxisTint(Robs_cj)
#thetaObs_cj = jet.cthetasI[layer-1](Robs_cj)
#calpha_cj = obsangle_cj(thetaObs_cj, phi_cell, alpha_obs)
#nE_cj = jet.neI[layer-1](Robs_cj)
#angExt_cj = jet.angExtI[layer-1](Robs_cj)
#if jet.aa>=0:
# alpha, alpha_cj = alphaRI(Robs), alphaR_cjI(Robs)
# calpha, calpha_cj = cos(alpha), cos(alpha_cj)
#else:
#print(ii, layer-1, alpha*180/pi, shape(filTM[filTM]), shape(filTM[filTM][filTm]), onAxisTobs[0]/sTd, onAxisTobs[-1]/sTd)
# alpha, alpha_cj = ones(len(Robs))*alpha, ones(len(Robs))*alpha_cj
# calpha, calpha_cj = ones(len(Robs))*calpha, ones(len(Robs))*calpha_cj
# Forward shock stuff, principal jet
"""
Bfield = Bfield_modified(GamObs, BetaObs, jet.nn, jet.epB)
gamMobs, nuMobs = minGam_modified(GamObs, jet.epE, jet.epB, jet.nn, pp, Bfield, jet.Xp)
gamCobs, nuCobs = critGam_modified(GamObs, jet.epE, jet.epB, jet.nn, pp, Bfield, onAxisTobs)
nuMobs, nuCobs = nuMobs*(1.-BetaObs)/(1.-BetaObs*calpha), nuCobs*(1.-BetaObs)/(1.-BetaObs*calpha)
Fnuobs = fluxMax_modified(Robs, GamObs, nE, Bfield, jet.PhiP)
a.loglog(tts[filTM][filTm]/sTd, nuMobs)
a2.loglog(tts[filTM][filTm], GamObs)
"""
nuMobs = jet.nuMI[layer-1](Robs)
nuCobs = jet.nuCI[layer-1](Robs)
Fnuobs = jet.FnuMax[layer-1](Robs)
#nuMobs_cj = jet.nuMI[layer-1](Robs_cj)
#nuCobs_cj = jet.nuCI[layer-1](Robs_cj)
#Fnuobs_cj = jet.FnuMax[layer-1](Robs_cj)
# Forward shock, counter-jet stuff
#Bfield_cj = Bfield_modified(GamObs_cj, BetaObs_cj, jet.nn, jet.epB)
#gamMobs_cj, nuMobs_cj = minGam_modified(GamObs_cj, jet.epE, jet.epB, jet.nn, pp, Bfield_cj, jet.Xp)
#gamCobs_cj, nuCobs_cj = critGam_modified(GamObs_cj, jet.epE, jet.epB, jet.nn, pp, Bfield_cj, onAxisTobs_cj)
#Fnuobs_cj = fluxMax_modified(Robs_cj, GamObs_cj, nE_cj, Bfield_cj, jet.PhiP)
# Reverse shock stuff
#nuM_RS, nuC_RS, Fnu_RS = params_tt_RS_SJ(jet, onAxisTobs, layer-1, Rb)
dopFacs = dopplerFactor(calpha, BetaObs)
#afac = angExt/maximum(angExt*ones(num), 2.*pi*(1.-cos(1./GamObs)))
#afac = maximum(ones(num), thetaObs**2*GamObs**2)
#dopFacs_cj = dopplerFactor(calpha_cj, BetaObs_cj)
#afac_cj = angExt_cj/maximum(angExt_cj*ones(num)[filTM_cj][filTm_cj], 2.*pi*(1.-cos(1./GamObs_cj)))
for freq in obsFreqs:
fil1, fil2 = where(nuMobs<=nuCobs)[0], where(nuMobs>nuCobs)[0]
#fil3, fil4 = where(nuM_RS<=nuC_RS)[0], where(nuM_RS>nuC_RS)[0]
#fil5, fil6 = where(nuMobs_cj<=nuCobs_cj)[0], where(nuMobs_cj>nuCobs_cj)[0]
freqs = freq/dopFacs # Calculate the rest-frame frequencies corresponding to the observed frequency
#freqs_cj = freq/dopFacs_cj
#freqs = freq*ones(len(dopFacs))
light_curve[obsFreqs==freq, filTM[filTm][fil1]] = light_curve[obsFreqs==freq, filTM[filTm][fil1]] + (
(GamObs[fil1]*(1.-BetaObs[fil1]*calpha))**(-3.) * FluxNuSC_arr(jet.pp, nuMobs[fil1], nuCobs[fil1], Fnuobs[fil1], freqs[fil1]))
#if len(fil2[fil2])>0:
# light_curve[obsFreqs==freq, filTM[filTm][fil2]] = light_curve[obsFreqs==freq, filTM[filTm][fil2]] + (
# (GamObs[fil2]*(1.-BetaObs[fil2]*calpha[fil2]))**(-3.) * FluxNuFC_arr(jet.pp, nuMobs[fil2], nuCobs[fil2], Fnuobs[fil2], freqs[fil2]))#*calpha
#light_curve_RS[obsFreqs==freq, filTM[filTm][fil3]] = light_curve_RS[obsFreqs==freq, filTM[filTm][fil3]] + (
# (GamObs[fil3]*(1.-BetaObs[fil3]*calpha))**(-3.) * FluxNuSC_arr(jet.pp, nuM_RS[fil3], nuC_RS[fil3], Fnu_RS[fil3], freqs[fil3]))#*calpha
#light_curve_RS[obsFreqs==freq, filTM[filTm][fil4]] = light_curve_RS[obsFreqs==freq, filTM[filTm][fil4]] + (
# afac[fil4] * dopFacs[fil4]**3. * FluxNuFC_arr(jet, nuM_RS[fil4], nuC_RS[fil4], Fnu_RS[fil4], freqs[fil4]))*calpha
#light_curve_CJ[obsFreqs==freq, filTM_cj[filTm_cj][fil5]] = light_curve_CJ[obsFreqs==freq, filTM_cj[filTm_cj][fil5]] + (
# (GamObs_cj[fil5]*(1.-BetaObs_cj[fil5]*calpha_cj))**(-3.) * FluxNuSC_arr(jet.pp, nuMobs_cj[fil5], nuCobs_cj[fil5], Fnuobs_cj[fil5], freqs_cj[fil5]))#*calpha
#return tts, 2.*light_curve, 2.*light_curve_RS
return tts, light_curve/(DD**2.), light_curve_RS/(DD**2.), light_curve_CJ/(DD**2.)
def flux_at_time_SJ(jet, alpha_obs, tt_obs, freq, DD):
ncells = jet.ncells
#calpha = zeros([2*jet.ncells])
#alpha = zeros([2*jet.ncells])
TTs, RRs, Gams= zeros(2*ncells), zeros(2*ncells), zeros(2*ncells)
thetas, thetas0 = zeros(2*ncells), zeros(ncells)
#nuMs, nuCs, fluxes = zeros(2.*self.ncells), zeros(2.*self.ncells), zeros(2.*self.ncells)
#fluxes = zeros(2*ncells)
calphas = zeros(2*ncells)
nE = zeros(2*ncells)
for ii in tqdm(range(jet.ncells)):
layer = jet.layer[ii]
phi_cell = jet.cphis[ii]
#onAxisTint = interp1d(jet.RRs, jet.TTs[:,layer-1])
"""
if jet.aa >= 0:
#cthetas_cell = jet.cthetas[:,layer-1]
calphaR, calphaR_cj = obsangle(jet.cthetas[:,layer-1], phi_cell, alpha_obs), obsangle_cj(jet.cthetas[:,layer-1], phi_cell, alpha_obs)
ttobs = jet.TTs[:,layer-1] + jet.RRs/cc * (1.-calphaR)
#ttobs_cj = obsTime_offAxis_General_EXP(jet.RRs, jet.TTs[:,layer-1], calphaR_cj)
ttobs_cj = jet.TTs[:,layer-1] + jet.RRs/cc * (1.-calphaR_cj)
else:
calpha, calpha_cj = obsangle(jet.cthetas[:,layer-1], phi_cell, alpha_obs), obsangle_cj(jet.cthetas[:,layer-1], phi_cell, alpha_obs)
alpha, alpha_cj = arccos(calpha), arccos(calpha_cj)
ttobs = obsTime_offAxis_General_NEXP(jet.RRs, jet.TTs[:,layer-1], alpha)
ttobs_cj = obsTime_offAxis_General_NEXP(jet.RRs, jet.TTs[:,layer-1], alpha_cj)
"""
calphaR, calphaR_cj = obsangle(jet.cthetas[:,layer-1], phi_cell, alpha_obs), obsangle_cj(jet.cthetas[:,layer-1], phi_cell, alpha_obs)
ttobs = jet.TTs[:,layer-1] + jet.RRs/cc * (1.-calphaR)
#ttobs_cj = obsTime_offAxis_General_EXP(jet.RRs, jet.TTs[:,layer-1], calphaR_cj)
ttobs_cj = jet.TTs[:,layer-1] + jet.RRs/cc * (1.-calphaR_cj)
Rint = interp1d(ttobs, jet.RRs)
Rint_cj = interp1d(ttobs_cj, jet.RRs)
Robs, Robs_cj = Rint(tt_obs), Rint_cj(tt_obs)
RRs[ii], RRs[ii+ncells] = Robs, Robs_cj
Gams[ii], Gams[ii+ncells] = jet.GamInt[layer-1](Robs), jet.GamInt[layer-1](Robs_cj)
TTs[ii], TTs[ii+ncells] = jet.TTInt[layer-1](Robs), jet.TTInt[layer-1](Robs_cj)
thetas[ii], thetas[ii+ncells] = jet.cthetasI[layer-1](Robs), jet.cthetasI[layer-1](Robs_cj)
thetas0[ii] = jet.cthetas0[layer-1]
nE[ii], nE[ii+ncells] = jet.neI[layer-1](Robs), jet.neI[layer-1](Robs_cj)
Betas = sqrt(1.-Gams**(-2))
calphas[:ncells], calphas[ncells:] = obsangle(thetas[:ncells], jet.cphis, alpha_obs), obsangle_cj(thetas[ncells:], jet.cphis, alpha_obs)
#alphas = arccos(calphas)
Bfield = Bfield_modified(Gams, Betas, jet.nn, jet.epB)
gamMobs, nuMobs = minGam_modified(Gams, jet.epE, jet.epB, jet.nn, jet.pp, Bfield, jet.Xp)
gamCobs, nuCobs = critGam_modified(Gams, jet.epE, jet.epB, jet.nn, jet.pp, Bfield, TTs)
Fnuobs = fluxMax_modified(Robs, Gams, nE, Bfield, jet.PhiP)
dopFacs= dopplerFactor(calphas, Betas)
obsFreqs = freq/dopFacs
#print(shape(nuMobs), shape(nuCobs), shape(Fnuobs), shape(obsFreqs))
fluxes = 1./(DD**2.) * (Gams*(1.-Betas*calphas))**(-3.) * FluxNuSC_arr(jet.pp, nuMobs, nuCobs, Fnuobs, obsFreqs)
return fluxes, thetas, thetas0, calphas, Gams
def skymapTH(jet, alpha_obs, tt_obs, freq):
ncells = jet.ncells
#calpha = zeros([2*jet.ncells])
#alpha = zeros([2*jet.ncells])
TTs, RRs, Gams= zeros(2*ncells), zeros(2*ncells), zeros(2*ncells)
thetas = zeros(2*ncells)
#nuMs, nuCs, fluxes = zeros(2.*self.ncells), zeros(2.*self.ncells), zeros(2.*self.ncells)
#fluxes = zeros(2*ncells)
calphas = zeros(2*ncells)
im_xxs, im_yys = zeros(2*ncells), zeros(2*ncells)
nE = zeros(2*ncells)
for ii in tqdm(range(jet.ncells)):
layer = jet.layer[ii]
phi_cell = jet.cphis[ii]
calphaR, calphaR_cj = obsangle(jet.cthetas[:,layer-1], phi_cell, alpha_obs), obsangle_cj(jet.cthetas[:,layer-1], phi_cell, alpha_obs)
ttobs = jet.TTs + jet.RRs/cc * (1.-calphaR)
#ttobs_cj = obsTime_offAxis_General_EXP(jet.RRs, jet.TTs[:,layer-1], calphaR_cj)
ttobs_cj = jet.TTs + jet.RRs/cc * (1.-calphaR_cj)
Rint = interp1d(ttobs, jet.RRs)
Rint_cj = interp1d(ttobs_cj, jet.RRs)
Robs, Robs_cj = Rint(tt_obs), Rint_cj(tt_obs)
RRs[ii], RRs[ii+ncells] = Robs, Robs_cj
Gams[ii], Gams[ii+ncells] = jet.GamInt(Robs), jet.GamInt(Robs_cj)
TTs[ii], TTs[ii+ncells] = jet.TTInt(Robs), jet.TTInt(Robs_cj)
thetas[ii], thetas[ii+ncells] = jet.cthetasI[layer-1](Robs), jet.cthetasI[layer-1](Robs_cj)
nE[ii], nE[ii+ncells] = jet.neI[layer-1](Robs), jet.neI[layer-1](Robs_cj)
Betas = sqrt(1.-Gams**(-2))
calphas[:ncells], calphas[ncells:] = obsangle(thetas[:ncells], jet.cphis, alpha_obs), obsangle_cj(thetas[ncells:], jet.cphis, alpha_obs)
#alphas = arccos(calphas)
# Principal jet
im_xxs[:ncells] = -1.*cos(alpha_obs)*sin(thetas[:ncells])*sin(jet.cphis) + sin(alpha_obs)*cos(thetas[:ncells])
im_yys[:ncells] = sin(thetas[:ncells])*cos(jet.cphis)
# Counter jet
im_xxs[ncells:] = -1.*cos(alpha_obs)*sin(pi-thetas[ncells:])*sin(jet.cphis) + sin(alpha_obs)*cos(pi-thetas[ncells:])
im_yys[ncells:] = sin(pi-thetas[ncells:])*cos(jet.cphis)
Bfield = Bfield_modified(Gams, Betas, jet.nn, jet.epB)
gamMobs, nuMobs = minGam_modified(Gams, jet.epE, jet.epB, jet.nn, jet.pp, Bfield, jet.Xp)
gamCobs, nuCobs = critGam_modified(Gams, jet.epE, jet.epB, jet.nn, jet.pp, Bfield, TTs)
Fnuobs = fluxMax_modified(Robs, Gams, nE, Bfield, jet.PhiP)
dopFacs= dopplerFactor(calphas, Betas)
obsFreqs = freq/dopFacs
#print(shape(nuMobs), shape(nuCobs), shape(Fnuobs), shape(obsFreqs))
fluxes = 1./(abs(calphas)*RRs**2.) * (Gams*(1.-Betas*calphas))**(-3.) * FluxNuSC_arr(jet.pp, nuMobs, nuCobs, Fnuobs, obsFreqs)
return fluxes, RRs*im_xxs, RRs*im_yys, RRs, Gams, calphas, TTs
def skymapSJ(jet, alpha_obs, tt_obs, freq, velocity=False):
ncells = jet.ncells
#calpha = zeros([2*jet.ncells])
#alpha = zeros([2*jet.ncells])
TTs, RRs, Gams= zeros(2*ncells), zeros(2*ncells), zeros(2*ncells)
thetas, calphasR = zeros(2*ncells), zeros(2*ncells)
#nuMs, nuCs, fluxes = zeros(2.*self.ncells), zeros(2.*self.ncells), zeros(2.*self.ncells)
#fluxes = zeros(2*ncells)
calphas = zeros(2*ncells)
im_xxs, im_yys = zeros(2*ncells), zeros(2*ncells)
nE = zeros(2*ncells)
if velocity:
velX = zeros(2*ncells)
velY = zeros(2*ncells)
for ii in tqdm(range(jet.ncells)):
layer = jet.layer[ii]
phi_cell = jet.cphis[ii]
#onAxisTint = interp1d(jet.RRs, jet.TTs[:,layer-1])
"""
if jet.aa >= 0:
#cthetas_cell = jet.cthetas[:,layer-1]
calphaR, calphaR_cj = obsangle(jet.cthetas[:,layer-1], phi_cell, alpha_obs), obsangle_cj(jet.cthetas[:,layer-1], phi_cell, alpha_obs)
ttobs = jet.TTs[:,layer-1] + jet.RRs/cc * (1.-calphaR)
#ttobs_cj = obsTime_offAxis_General_EXP(jet.RRs, jet.TTs[:,layer-1], calphaR_cj)
ttobs_cj = jet.TTs[:,layer-1] + jet.RRs/cc * (1.-calphaR_cj)
else:
calpha, calpha_cj = obsangle(jet.cthetas[:,layer-1], phi_cell, alpha_obs), obsangle_cj(jet.cthetas[:,layer-1], phi_cell, alpha_obs)
alpha, alpha_cj = arccos(calpha), arccos(calpha_cj)
ttobs = obsTime_offAxis_General_NEXP(jet.RRs, jet.TTs[:,layer-1], alpha)
ttobs_cj = obsTime_offAxis_General_NEXP(jet.RRs, jet.TTs[:,layer-1], alpha_cj)
"""
#calphaR, calphaR_cj = obsangle(jet.cthetas[:,layer-1], phi_cell, alpha_obs), obsangle_cj(jet.cthetas[:,layer-1], phi_cell, alpha_obs)
calphasR[ii], calphasR[ii+ncells] = obsangle(jet.cthetas0[layer-1], phi_cell, alpha_obs), obsangle_cj(jet.cthetas0[layer-1], phi_cell, alpha_obs)
ttobs = jet.TTs[:,layer-1] + jet.RRs/cc * (1.-calphasR[ii])
#ttobs_cj = obsTime_offAxis_General_EXP(jet.RRs, jet.TTs[:,layer-1], calphaR_cj)
ttobs_cj = jet.TTs[:,layer-1] + jet.RRs/cc * (1.-calphasR[ii+ncells])
Rint = interp1d(ttobs, jet.RRs, copy=False)
Rint_cj = interp1d(ttobs_cj, jet.RRs, copy=False)
Robs = Rint(tt_obs)
Robs_cj = Rint_cj(tt_obs)
RRs[ii], RRs[ii+ncells] = Robs, Robs_cj
Gams[ii], Gams[ii+ncells] = jet.GamInt[layer-1](Robs), jet.GamInt[layer-1](Robs_cj)
TTs[ii], TTs[ii+ncells] = jet.TTInt[layer-1](Robs), jet.TTInt[layer-1](Robs_cj)
thetas[ii], thetas[ii+ncells] = jet.cthetasI[layer-1](Robs), jet.cthetasI[layer-1](Robs_cj)
nE[ii], nE[ii+ncells] = jet.neI[layer-1](Robs), jet.neI[layer-1](Robs_cj)
Betas = sqrt(1.-Gams**(-2))
calphas[:ncells], calphas[ncells:] = obsangle(thetas[:ncells], jet.cphis, alpha_obs), obsangle_cj(thetas[ncells:], jet.cphis, alpha_obs)
#alphas = arccos(calphas)
# Principal jet
im_xxs[:ncells] = -1.*cos(alpha_obs)*sin(thetas[:ncells])*sin(jet.cphis) + sin(alpha_obs)*cos(thetas[:ncells])
im_yys[:ncells] = sin(thetas[:ncells])*cos(jet.cphis)
# Counter jet
im_xxs[ncells:] = -1.*cos(alpha_obs)*sin(pi-thetas[ncells:])*sin(jet.cphis) + sin(alpha_obs)*cos(pi-thetas[ncells:])
im_yys[ncells:] = sin(pi-thetas[ncells:])*cos(jet.cphis)
Bfield = Bfield_modified(Gams, Betas, jet.nn, jet.epB)
gamMobs, nuMobs = minGam_modified(Gams, jet.epE, jet.epB, jet.nn, jet.pp, Bfield, jet.Xp)
gamCobs, nuCobs = critGam_modified(Gams, jet.epE, jet.epB, jet.nn, jet.pp, Bfield, TTs)
Fnuobs = fluxMax_modified(Robs, Gams, nE, Bfield, jet.PhiP)
#dopFacs= dopplerFactor(calphas, Betas)
dopFacs = dopplerFactor(calphasR, Betas)
obsFreqs = freq/dopFacs
#print(shape(nuMobs), shape(nuCobs), shape(Fnuobs), shape(obsFreqs))
fluxes = 1./(abs(calphasR)*RRs**2.) * (Gams*(1.-Betas*calphasR))**(-3.) * FluxNuSC_arr(jet.pp, nuMobs, nuCobs, Fnuobs, obsFreqs)
return fluxes, RRs*im_xxs, RRs*im_yys, RRs, Gams, calphas, TTs
def skyMap_to_Grid(fluxes, xxs, yys, nx, ny=1, fac=1, scale=False, inter='linear'):
# Function maps the coordinate output of the skymap functions to a grid.
# Basically a wrapper for gdd and mgrid
if (ny==1 and not scale):
ny = nx
elif (scale):
dX = xxs.max()-xxs.min()
dY = yys.max()-yys.min()
fac = max(dX,dY)/min(dX,dY)
print(fac)
if(dY>=dX):
ny = round(fac*nx)
else:
ny = nx
nx = round(fac*nx)
else:
pass
nx = complex(0,nx)
ny = complex(0,ny)
grid_x, grid_y = mgrid[xxs.min():xxs.max():nx, yys.min():yys.max():ny]
image = gdd(array([xxs,yys]).T*fac, fluxes,
(grid_x*fac, grid_y*fac), method=inter, fill_value=0)
# RETURNS ARRAY WITH NX ROWS AND NY COLUMNS (i.e. each row, which represents a horizontal position in x, has ny divisions)
return grid_x[:,0], grid_y[0,:], image
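# --- Editor's note: hedged usage sketch, not part of the original module. ---
# Grids 1000 synthetic flux points onto a 64x64 image; the Gaussian flux
# profile is an illustrative assumption.
def _example_grid():
    from numpy.random import randn
    xxs, yys = randn(1000), randn(1000)
    fluxes = exp(-(xxs**2. + yys**2.))
    return skyMap_to_Grid(fluxes, xxs, yys, 64)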
def lateral_distributions(image, collapse_axis):
if collapse_axis == 'x':
points = image.shape[1]
latAvDist = array([image[:,ii].mean() for ii in range(points)])
latMaxDist = array([image[:,ii].max() for ii in range(points)])
elif collapse_axis == 'y':
points = image.shape[0]
latAvDist = array([image[ii,:].mean() for ii in range(points)])
latMaxDist = array([image[ii,:].max() for ii in range(points)])
return latAvDist, latMaxDist
def image_slice(fluxes, xxs, yys, position, nn=50, axis='y', inter='linear', fac=1):
nn = complex(0,nn)
if axis=='y':
grid_x, grid_y = mgrid[position:position:1j, yys.min():yys.max():nn]
else:
grid_x, grid_y = mgrid[xxs.min():xxs.max():nn, position:position:1j]
slice = gdd(array([xxs,yys]).T*fac, fluxes,
(grid_x*fac, grid_y*fac), method=inter, fill_value=0)
return grid_x, grid_y, slice
```
#### File: joelib/physics/jethead_expansion.py
```python
from numpy import *
#import joelib.constants.constants as ctns
from joelib.constants.constants import cc, mp
from joelib.physics.grb_observables import *
from joelib.physics.afterglow_properties import *
from joelib.physics.afterglow_dynamics import *
from scipy.interpolate import interp1d
#from tqdm import tqdm
#from jethead import jetHeadUD, jetHeadGauss
#################################################################################################################################################################
######################### Evolution of the blast wave as given in Pe'er 2012 including a simple description of lateral expansion ################################
#################################################################################################################################################################
def cellsInLayer(ii):
"""
Return number of cells in layer ii
"""
return (2*ii+1)
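# Editor's note (hedged): with 2*ii + 1 cells in layer ii, the total over
# nlayers layers is the sum of the first nlayers odd numbers, i.e. nlayers**2.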
class jetHeadUD():
def __init__(self, EE, Gam0, nn, epE, epB, pp, steps, Rmin, Rmax,
evolution, nlayers, initJoAngle, aa=-1, shell_type='thin',
Rb=1., withSpread=True):
self.evolution = evolution
self.nlayers = nlayers
self.initJoAngle = initJoAngle
self.__totalCells()
#self.__get_thetas()
self.EE = EE
self.Gam0 = Gam0
self.Beta0 = sqrt(1.-Gam0**(-2.))
self.nn = nn
self.epE, self.epB, self.pp = epE, epB, pp
self.Rmin, self.Rmax = Rmin, Rmax
self.aa = aa # Labels lateral expansion model
self.angExt0 = 2.*pi*(1.-cos(initJoAngle/2.))/self.ncells
self.steps = steps
self.shell_type = shell_type
self.withSpread = withSpread
self.Xp = Xint(pp)
self.PhiP = PhiPint(pp)
self.__correct_energy()
self.__shell_evolution()
self.__shell_division()
self.__make_layers()
self.__peakParamsRS()
#self.__get_thetas()
#self.__make_layers()
#self.__totalCells()
self.__cell_size()
def __correct_energy(self): # Change energy from spherical equivalent (inherited from parent class) to energy of segment
self.EE = self.EE*self.angExt0/(4.*pi)
self.MM0 = self.EE/(self.Gam0*cc**2.) # This mass is the mass of the ejecta
def __shell_evolution(self):
self.Rd = (3./self.angExt0 * 1./(cc**2.*mp) *
self.EE/(self.nn*self.Gam0**2.))**(1./3.)
self.Td = self.Rd*(1.-self.Beta0)/(cc*self.Beta0)
self.RRs = logspace(log10(self.Rmin), log10(self.Rmax), self.steps)
self.Gams, self.mms = zeros([len(self.RRs)]), zeros([len(self.RRs)])
#self.thetas, self.angExt = zeros([len(self.RRs)]), zeros([len(self.RRs)])
#self.thetas[0], self.angExt[0] = self.initJoAngle/.2, 2*pi*(1.-cos(self.initJoAngle/.2))
self.TTs = zeros([len(self.RRs)])
if self.aa>=0 :
if self.evolution == 'peer':
# self.MMs contains the swept-up ISM mass, not to be confused with the ejecta mass self.MM0
#self.Gams, self.Betas, self.joAngle, self.MMs, self.TTs, __ = solver_GP12(
# self.MM0, self.Gam0, 0., self.initJoAngle/2., self.RRs, self.nn,
# self.aa, self.steps, self.angExt0, self.ncells, withSpread = self.withSpread)
self.Gams, self.Betas, self.joAngle, self.MMs, self.TTs, __ = solver_expanding_shell(
self.MM0, self.Gam0, 0., self.initJoAngle/2., self.RRs, self.nn,
self.aa, self.steps, self.angExt0, self.ncells, self.Rd, withSpread = self.withSpread)
elif self.evolution == 'BM':
self.Gams, self.Betas, self.joAngle, self.MMs, self.TTs, __ = BMsolver_expanding_shell(
self.MM0, self.Gam0, 0., self.initJoAngle/2., self.RRs, self.nn, self.aa, self.steps, self.angExt0, self.ncells)
self.joAngle = 2*self.joAngle
else:
if self.evolution == 'peer':
self.Gams, self.Betas, self.MMs, self.TTs = solver_collimated_shell(
self.MM0, self.Gam0, self.angExt0, self.RRs, self.nn, self.steps)
elif self.evolution == 'BM':
self.Gams, self.Betas, self.MMs, self.TTs = BMsolver_collimated_shell(
self.MM0, self.Gam0, self.angExt0, self.RRs, self.nn, self.steps)
#self.angExt = ones([len(self.RRs)])*self.angExt0/self.ncells
self.joAngle = ones([len(self.RRs)])*self.initJoAngle
#self.joAngle = self.thetas[-1]
self.Bfield = Bfield_modified(self.Gams, self.Betas, self.nn, self.epB)
self.gM, self.nuM = minGam_modified(self.Gams, self.epE, self.epB, self.nn, self.pp, self.Bfield, self.Xp)
self.gC, self.nuC = critGam_modified(self.Gams, self.epE, self.epB, self.nn, self.pp, self.Bfield, self.TTs)
self.FnuMax = fluxMax_modified(self.RRs, self.Gams, self.MMs/mp, self.Bfield, self.PhiP)
self.GamInt = interp1d(self.RRs, self.Gams)
self.gamMI, self.gamCI = interp1d(self.RRs, self.gM), interp1d(self.RRs, self.gC)
self.nuMI, self.nuCI = interp1d(self.RRs, self.nuM), interp1d(self.RRs, self.nuC)
self.FnuMaxI = interp1d(self.RRs, self.FnuMax)
self.neI = interp1d(self.RRs, self.MMs/mp) # Interpolated number of electrons as a function of R for the flux calculation
#self.angExtI = interp1d(self.RRs, self.angExt)
def get_thetas(self, joAngle):
fac = arange(0,self.nlayers+1)/float(self.nlayers) # Numerical factor for use during execution
thetas = 2.*arcsin(fac*sin(joAngle/4.)) # Calculate the propagation angle with respect to jet axis
cthetas = 0.5*(thetas[1:]+thetas[:-1])
return thetas, cthetas
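# Editor's note (hedged): the edges above satisfy sin(theta_i/2) = (i/nlayers)*sin(joAngle/4),
# so the solid angle enclosed by edge i, 4*pi*sin(theta_i/2)**2, grows like i**2 and
# layer ii contains a solid angle proportional to 2*ii + 1 (exactly cellsInLayer(ii)),
# meaning every cell subtends the same solid angle.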
def __shell_division(self):
thetas = zeros([self.steps, self.nlayers+1])
cthetas = zeros([self.steps, self.nlayers])
cthetasI = []
thetas0, cthetas0 = self.get_thetas(self.initJoAngle)
for ii in range(self.steps):
thetas[ii,:], cthetas[ii,:] = self.get_thetas(self.joAngle[ii])
#cthetas[ii,:] = cthetas0[:] + 0.5*(self.joAngle[ii]-self.initJoAngle)
for jj in range(self.nlayers):
cthetasI.append(interp1d(self.RRs, cthetas[:,jj]))
self.thetas, self.cthetas, self.cthetasI = thetas, cthetas, cthetasI
def __make_layers(self):
self.layer = array([])
self.phis = array([])
self.cphis = array([])
#self.cthetas0 = array([])
for ii in range(self.nlayers): # Loop over layers and populate the arrays
num = cellsInLayer(ii)
self.phis = append(self.phis, arange(0,num+1)*2.*pi/num) # Phi value of the edges
self.layer = append(self.layer,ones(num)*(ii+1)) # Layer on which the cells are
#self.cthetas0 = append(self.cthetas0,ones(num)*0.5*(self.thetas0[ii]+self.thetas0[ii+1])) # Central theta values of the cells
self.cphis = append(self.cphis,(arange(0,num)+0.5)*2.*pi/num ) # Central phi values of the cells
self.layer = self.layer.astype('int')
def __totalCells(self):
tot = 0
for ii in range(0,self.nlayers):
tot = tot + cellsInLayer(ii)
#tot = tot + int(round(cellsInLayer(ii)/2))
self.ncells = tot
def __cell_size(self):
self.angExt = 2.*pi*(1.-cos(self.joAngle/2.))/self.ncells
def __peakParamsRS(self):
Gam0 = self.Gam0
# These need to be scaled by the corresponding factor of Rb when calculating the light curve
if self.shell_type=='thin':
#print("Settig up thin shell")
#self.RSpeak_nuM = 9.6e14 * epE**2. * epB**(1./2.) * nn**(1./2) * Gam0**2.
#self.RSpeak_nuC = 4.0e16 * epB**(-3./2.) * EE**(-2./3.) * nn**(-5./6.) * Gam0**(4./3.)
#self.RSpeak_Fnu = 5.2 * DD**(-2.) * epB**(1./2.) * EE * nn**(1./2.) * Gam0
self.RSpeak_nuM = self.nuMI(self.Rd)/(Gam0**2) #* self.Rb**(1./2.)
self.RSpeak_nuC = self.nuCI(self.Rd) #* self.Rb**(-3./2.)*
self.RSpeak_Fnu = Gam0*self.FnuMaxI(self.Rd)# * self.Rb**(1./2.)*
class jetHeadGauss():
def __init__(self, EEc0, Gamc0, nn, epE, epB, pp, steps, Rmin, Rmax,
evolution, nlayers, initJoAngle, coAngle, aa, structure='gaussian',
kk=0, shell_type='thin', Rb=1., withSpread=True):
self.nlayers = nlayers
self.steps = steps
self.EEc0 = EEc0
self.Gamc0 = Gamc0
self.Rmin, self.Rmax = Rmin, Rmax
self.nlayers = nlayers
self.coAngle = coAngle
thetaMax = 2.*sqrt(-2.*self.coAngle**2. * log(1e-8/(self.Gamc0-1.)))
self.structure = structure
self.kk = kk
self.initJoAngle = min(initJoAngle, thetaMax) # Make sure that Gamma > 1 throughout the jet
self.nn = nn
self.epE, self.epB, self.pp = epE, epB, pp
self.aa = aa
self.Xp = Xint(pp)
self.PhiP = PhiPint(pp)
self.shell_type = shell_type
self.withSpread = withSpread
self.__totalCells()
self.angExt0 = 2.*pi*(1.-cos(initJoAngle/2.))/self.ncells
self.thetas0, self.cthetas0 = self.get_thetas(self.initJoAngle)
self.__correct_energy()
self.__energies_and_LF()
self.__make_layers()
self.cell_Rds = (3./(4.*pi) * 1./(cc**2.*mp) *
self.cell_EEs/(self.nn*self.cell_Gam0s**2.))**(1./3.)
self.cell_Tds = self.cell_Rds/(cc*self.cell_Beta0s) * (1.-self.cell_Beta0s)
if self.Rmin>0.01 * self.cell_Rds.min(): self.Rmin = 0.01 * self.cell_Rds.min()
self.Gams, self.mms = zeros([self.steps, self.nlayers]), zeros([self.steps, self.nlayers])
self.Betas = zeros([self.steps, self.nlayers])
self.TTs = zeros([self.steps, self.nlayers])
self.theta_edges, self.cthetas = zeros([self.steps, self.nlayers]), zeros([self.steps, self.nlayers])
self.joAngles = zeros([self.steps, self.nlayers])
self.__shell_evolution()
#self.__shell_division()
self.__thetas_interpolation()
self.__peakParamsRS_struc()
def __totalCells(self):
tot = 0
for ii in range(0,self.nlayers):
tot = tot + cellsInLayer(ii)
#tot = tot + int(round(cellsInLayer(ii)/2))
self.ncells = tot
"""
def __cell_size(self):
self.angExt0 = 2.*pi*(1.-cos(self.joAngle/2.))/self.ncells
"""
def get_thetas_division(self, layer):
facs = arange(layer, layer+2)/float(self.nlayers)
#thetas = 2.*arcsin(facs*sin(self.joAngles[:,layer-1]))
#cthetas = 0.5*(thetas[:,0]+thetas[:,1])
cthetas = 0.5*(2.*arcsin(facs[0]*sin(self.joAngles[:,layer-1]/2.)) + 2.*arcsin(facs[1]*sin(self.joAngles[:,layer-1]/2.)))
return cthetas
def get_thetas(self, joAngle):
fac = arange(0,self.nlayers+1)/float(self.nlayers) # Numerical factor for use during execution
thetas = 2.*arcsin(fac*sin(joAngle/4.)) # Calculate the propagation angle with respect to jet axis
cthetas = 0.5*(thetas[1:]+thetas[:-1])
return thetas, cthetas
def __shell_division(self):
thetas = zeros([self.steps, self.nlayers+1])
cthetas = zeros([self.steps, self.nlayers])
for ii in range(self.steps):
cthetas[ii,:] = self.cthetas0 + (self.joAngles[ii,:]-self.initJoAngle)
#self.thetas0, self.cthetas0 = thetas[0,:], cthetas[0,:]
self.theta_edges0 = thetas[0,:] # Initial outmost edges of each cell
#self.thetas, self.cthetas = thetas, cthetas
def __make_layers(self):
self.layer = array([])
self.phis = array([])
self.cphis = array([])
#self.cthetas0 = array([])
for ii in range(self.nlayers): # Loop over layers and populate the arrays
num = cellsInLayer(ii)
self.phis = append(self.phis, arange(0,num+1)*2.*pi/num) # Phi value of the edges
self.layer = append(self.layer,ones(num)*(ii+1)) # Layer on which the cells are
#self.cthetas0 = append(self.cthetas0,ones(num)*0.5*(self.thetas0[ii]+self.thetas0[ii+1])) # Central theta values of the cells
self.cphis = append(self.cphis,(arange(0,num)+0.5)*2.*pi/num ) # Central phi values of the cells
self.layer = self.layer.astype('int')
def __correct_energy(self): # Change energy from spherical equivalent (inherited from parent class) to energy of segment
self.EEc0 = self.EEc0*self.angExt0/(4.*pi)
self.MMc0 = self.EEc0/(self.Gamc0*cc**2.)
def __energies_and_LF(self):
if self.structure=='gaussian':
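# Gaussian structure: E(theta) ~ exp(-theta^2/coAngle^2) while
# Gamma(theta) - 1 ~ exp(-theta^2/(2*coAngle^2))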
#AngFacs = exp(-1.*self.cthetas**2./(2.*self.coAngle**2.))
self.cell_EEs = self.EEc0 * exp(-1.*self.cthetas0**2./(self.coAngle**2.)) # Just for testing
#self.cell_EEs = self.EE * exp(-1.*self.cthetas**2./(self.coAngle**2.))
#print shape(self.cthetas0)
self.cell_Gam0s = 1.+(self.Gamc0-1)*exp(-1.*self.cthetas0**2./(2.*self.coAngle**2.))
#self.cell_Gam0s[self.cell_Gam0s<=1.+1e-6] == 1.+1.e-6
elif self.structure=='power-law':
self.cell_EEs = zeros(self.nlayers)
self.cell_Gam0s = zeros(self.nlayers)
self.cell_EEs[self.cthetas0<=self.coAngle] = self.EEc0
self.cell_Gam0s[self.cthetas0<=self.coAngle] = self.Gamc0
wings = self.cthetas0>self.coAngle
self.cell_EEs[wings] = self.EEc0*(self.cthetas0[wings]/self.coAngle)**(-1.*self.kk)
self.cell_Gam0s[wings] = 1. + (self.Gamc0-1.)*(self.cthetas0[wings]/self.coAngle)**(-1.*self.kk)
self.cell_Beta0s = sqrt(1.-(self.cell_Gam0s)**(-2.))
#self.cell_Beta0s[self.cell_Beta0s<=1e-6] = 1.e-6
self.cell_MM0s = self.cell_EEs/(self.cell_Gam0s*cc**2.)
def __thetas_interpolation(self):
cthetasI = []
for jj in range(self.nlayers):
cthetasI.append(interp1d(self.RRs, self.cthetas[:,jj]))
self.cthetasI = cthetasI
def __shell_evolution(self):
self.RRs = logspace(log10(self.Rmin), log10(self.Rmax), self.steps)
self.Gams, self.mms = zeros([len(self.RRs), len(self.cthetas0)]), zeros([len(self.RRs), len(self.cthetas0)])
self.thetas, self.angExt = zeros([len(self.RRs), len(self.cthetas0)]), zeros([len(self.RRs), len(self.cthetas0)])
self.TTs = zeros([len(self.RRs), len(self.cthetas0)])
self.Bfield = []
self.TTInt = []
self.GamInt = []
self.neI = []
self.angExtI = []
self.gamMI, self.gamCI, self.nuMI, self.nuCI = [], [], [], []
self.FnuMax = []
for ii in range(self.nlayers):
if self.aa>=0 :
#print shape(self.theta_edges0)
CIL = cellsInLayer(ii)
self.Gams[:,ii], self.Betas[:,ii], self.joAngles[:,ii], self.mms[:,ii], self.TTs[:,ii], self.angExt[:,ii] = solver_expanding_shell(
self.cell_MM0s[ii], self.cell_Gam0s[ii], 0., self.initJoAngle/2., self.RRs, self.nn, self.aa,
self.steps, self.angExt0, self.ncells, self.cell_Rds[ii], withSpread = self.withSpread)
#self.Gams[:,ii], self.Betas[:,ii], self.joAngles[:,ii], self.mms[:,ii], self.TTs[:,ii], __ = solver_GP12(
# self.cell_MM0s[ii], self.cell_Gam0s[ii], 0., self.initJoAngle/2., self.RRs, self.nn, self.aa,
# self.steps, self.angExt0, self.ncells, withSpread = self.withSpread)
#self.cthetas[:,ii] = self.cthetas0[ii] + 0.5*(self.theta_edges[:,ii] + self.theta_edges0[ii])
self.cthetas[:,ii] = self.get_thetas_division(ii)
#self.cthetas[:,ii] = self.cthetas0[ii] + 0.5*(self.joAngles[:,ii]-self.initJoAngle)
else:
#print shape(self.cell_Gam0s), shape(self.cell_Gam0s[ii])
self.Gams[:,ii], self.Betas[:,ii], self.mms[:,ii], self.TTs[:,ii] = solver_collimated_shell(
self.cell_MM0s[ii], self.cell_Gam0s[ii], self.angExt0, self.RRs, self.nn, self.steps)
self.cthetas[:,ii] = self.cthetas0[ii]
self.TTInt.append(interp1d(self.RRs, self.TTs[:,ii], copy=False))
self.GamInt.append(interp1d(self.RRs, self.Gams[:,ii], copy=False))
#self.angExt = ones([len(self.RRs)])*self.angExt0/self.ncells
self.neI.append(interp1d(self.RRs, self.mms[:,ii]/mp, copy=False)) # Interpolated number of electrons as a function of R for the flux calculation
self.angExtI.append(interp1d(self.RRs, self.angExt[:,ii], copy=False))
Bf = Bfield_modified(self.Gams[:,ii], self.Betas[:,ii], self.nn, self.epB)
self.Bfield.append(interp1d(self.RRs, Bf, copy=False))
gM, fM = minGam_modified(self.Gams[:,ii], self.epE, self.epB, self.nn, self.pp, Bf, self.Xp)
self.gamMI.append(interp1d(self.RRs, gM, copy=False))
self.nuMI.append(interp1d(self.RRs, fM, copy=False))
gC, fC = critGam_modified(self.Gams[:,ii], self.epE, self.epB, self.nn, self.pp, Bf, self.TTs[:,ii])
self.gamCI.append(interp1d(self.RRs, gC, copy=False))
self.nuCI.append(interp1d(self.RRs, fC, copy=False))
Fmax = fluxMax_modified(self.RRs, self.Gams[:,ii], self.mms[:,ii]/mp, Bf, self.PhiP)
self.FnuMax.append(interp1d(self.RRs, Fmax, copy=False))
def __peakParamsRS_struc(self):
RSpeak_nuM_struc = zeros(self.ncells)
RSpeak_nuC_struc = zeros(self.ncells)
RSpeak_Fnu_struc = zeros(self.ncells)
if self.shell_type=='thin':
#print("Setting up thin shell")
for ii in range(self.nlayers):
#self.RSpeak_nuM = 9.6e14 * epE**2. * epB**(1./2.) * nn**(1./2) * Gam0**2.
#self.RSpeak_nuC = 4.0e16 * epB**(-3./2.) * EE**(-2./3.) * nn**(-5./6.) * Gam0**(4./3.)
#self.RSpeak_Fnu = 5.2 * DD**(-2.) * epB**(1./2.) * EE * nn**(1./2.) * Gam0
Rd, Td = self.cell_Rds[ii], self.cell_Tds[ii]
#print Rd
#print shape(self.RRs), shape(self.Gams)
GamsInt = interp1d(self.RRs[:], self.Gams[:,ii])
Gam0 = GamsInt(Rd)
Beta0 = sqrt(1.-Gam0**(-2.))
Bf = Bfield_modified(Gam0, Beta0, self.nn, self.epB)
gamM, nuM = minGam_modified(Gam0, self.epE, self.epB, self.nn, self.pp, Bf, self.Xp)
gamC, nuC = critGam_modified(Gam0, self.epE, self.epB, self.nn, self.pp, Bf, Td)
Fnu = fluxMax_modified(Rd, Gam0, self.nn, Bf, self.PhiP)
RSpeak_nuM_struc[ii] = nuM/(Gam0**2)
RSpeak_nuC_struc[ii] = nuC
RSpeak_Fnu_struc[ii] = Gam0*Fnu
self.RSpeak_nuM_struc = RSpeak_nuM_struc #self.Rb**(1./2.)*RSpeak_nuM_struc
self.RSpeak_nuC_struc = RSpeak_nuC_struc #self.Rb**(-3./2.)*RSpeak_nuC_struc
self.RSpeak_Fnu_struc = RSpeak_Fnu_struc #self.Rb**(1./2.)*RSpeak_Fnu_struc
``` |
{
"source": "joefearnley/checklist",
"score": 2
} |
#### File: backend/account/serializers.py
```python
from django.contrib.auth.models import User, Group
from rest_framework import serializers
from items.serializers import ItemSerializer
class UserSerializer(serializers.HyperlinkedModelSerializer):
items = ItemSerializer(many=True, read_only=False)
class Meta:
model = User
fields = ['url', 'username', 'email', 'groups', 'items']
class GroupSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Group
fields = ['url', 'name']
class AccountSerializer(serializers.HyperlinkedModelSerializer):
email = serializers.EmailField(required=True,)
password = serializers.CharField(required=True, write_only=True,)
class Meta:
model = User
fields = ['id', 'username', 'email', 'password']
def create(self, validated_data):
user = User.objects.create_user(**validated_data)
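# create_user already hashes the password, so the set_password call below
# re-hashes the same raw value (redundant but harmless)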
user.set_password(validated_data['password'])
user.save()
return user
```
#### File: backend/items/views.py
```python
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from rest_framework.views import APIView
from rest_framework import viewsets, status, permissions
from rest_framework.decorators import action
from .models import Item
from .serializers import ItemSerializer
import datetime
class IsOwner(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
return obj.user == request.user
class ItemViewSet(viewsets.ModelViewSet):
permission_classes = [IsAuthenticated, IsOwner]
queryset = Item.objects.all().order_by('-created')
serializer_class = ItemSerializer
def list(self, request):
qs = self.get_queryset().filter(user=request.user)
serializer = self.get_serializer(qs, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
def create(self, request, *args, **kwargs):
serializer = self.serializer_class(data=request.data)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
return Response(serializer.data, status=status.HTTP_201_CREATED)
def perform_create(self, serializer):
serializer.save(user=self.request.user)
@action(detail=False, methods=['get'], url_path='upcoming', name='Upcoming Items')
def upcoming(self, request, *args, **kwargs):
qs = self.get_queryset().filter(
user=request.user,
complete=False,
due_date__gte=datetime.datetime.now(),
due_date__lte=datetime.datetime.now() + datetime.timedelta(weeks=1)
)[:5]
serializer = self.get_serializer(qs, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
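# Example request (assuming the router registers this viewset under 'items'):
#   GET /items/upcoming/  ->  at most five incomplete items due within the next week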
``` |
{
"source": "joe-fennell/parthenium-predict",
"score": 3
} |
#### File: parthenium-predict/predict/predict.py
```python
import numpy as np
import os
import xarray as xr
import pickle
DEFAULT_MODEL = 'trained_classifier_2020-06-11T12_20_05.045413.pl'
DEFAULT_MODEL_PATH = os.path.join(os.path.dirname(__file__),
'models',
DEFAULT_MODEL)
def predict_NetCDF(dataset, feature_descriptor, pipeline=None,
scl_include=[4, 5, 7], predict_proba=False,
predict_proba_class=1):
"""
Maps an xarray S2 dataset to an sklearn-ready array and applies a pipeline.
Parameters
----------
dataset : xarray.Dataset
NetCDF xarray object containing all bands needed for classifier
feature_descriptor : dict
key is a dimension in `dataset` and value is a list of coordinates
pipeline : dask_ml.wrappers.ParallelPostFit
trained pipeline instance containing the classifier/model
scl_include : list
list of SCL codes to leave unmasked
predict_proba : bool
If True return array of probability for class predict_proba_class
predict_proba_class : int
class to generate probability for if predict_proba is True. Ignored
otherwise.
"""
def _make_mask(mask):
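# build a boolean mask that is True only where the SCL code is in scl_include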
_mask = mask.copy()
_mask -= _mask
for s in scl_include:
_mask = _mask + mask.where(mask == s, 0)
return _mask != 0
if pipeline is None:
with open(DEFAULT_MODEL_PATH, 'rb') as modelFile:
pipeline = pickle.load(modelFile)
# generate a subset of dates, bands, positions
subset = dataset.sel(**feature_descriptor)
feature_dims = set(feature_descriptor.keys())
pix_dims = set(subset.dims).difference(feature_dims)
# stack data into features and observations
subset = subset.stack(obs=tuple(pix_dims), feats=tuple(feature_dims))
mask = _make_mask(subset.SCL)
reflectance = (subset.reflectance.astype(float)/10000).transpose(
'obs', ...)
# remove masked values from final array
input_ = reflectance.where(mask).dropna('obs')
# generate output array
out = xr.DataArray(np.empty(reflectance.shape[0]) * np.nan,
coords={'obs': reflectance.obs},
dims='obs')
if predict_proba:
out[mask] = pipeline.predict_proba(input_)[:, predict_proba_class]
else:
out[mask] = pipeline.predict(input_)
return out.unstack()
def generate_test_DataSet():
"""
Generate test S2 data
"""
shape = (12, 12, 10, 2)
bands = ['B02', 'B03', 'B04', 'B05', 'B06',
'B07', 'B08', 'B8A', 'B11', 'B12']
reflectance = xr.DataArray(np.random.randint(1, 10000, shape),
dims=['y', 'x', 'band', 'date'],
coords={'y': np.arange(12),
'x': np.arange(12),
'band': bands}
)
SCL = xr.DataArray(np.repeat(np.arange(12), 12*2).
reshape((12, 12, 2)),
dims=['y', 'x', 'date'],
coords={'y': np.arange(12),
'x': np.arange(12)})
return xr.Dataset({'reflectance': reflectance, 'SCL': SCL})
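# Example usage (a hypothetical sketch; calling predict_NetCDF without a
# pipeline assumes the bundled default model file exists on disk):
#     ds = generate_test_DataSet()
#     labels = predict_NetCDF(ds, {'band': ['B04', 'B08']})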
``` |
{
"source": "joeferg425/ws281x_lightberries",
"score": 4
} |
#### File: ws281x_lightberries/Examples/FunctionGUI.py
```python
import time
import multiprocessing
import multiprocessing.queues
from tkinter.colorchooser import askcolor
import tkinter as tk
from LightBerries.LightControls import LightController
from LightBerries.LightPixels import Pixel
from LightBerries.LightPatterns import ConvertPixelArrayToNumpyArray, PixelArray
# the number of pixels in the light string
PIXEL_COUNT = 100
# GPIO pin to use for PWM signal
GPIO_PWM_PIN = 18
# DMA channel
DMA_CHANNEL = 10
# frequency to run the PWM signal at
PWM_FREQUENCY = 800000
# brightness of LEDs in range [0.0, 1.0]
BRIGHTNESS = 0.75
# to understand the rest of these arguments read their
# documentation: https://github.com/rpi-ws281x/rpi-ws281x-python
GAMMA = None
LED_STRIP_TYPE = None
INVERT = False
PWM_CHANNEL = 0
class LightsProcess:
"""Handles LightBerries functions in a seperate process."""
def __init__(self, app) -> None:
"""Handles LightBerries functions in a seperate process.
Args:
app: the tkinter app
"""
self.app = app
self.inQ = multiprocessing.Queue(2)
self.outQ = multiprocessing.Queue(2)
self.process = multiprocessing.Process(
target=LightsProcess.mainLoop, args=[self, self.inQ, self.outQ]
)
self.process.start()
def __del__(self) -> None:
"""Cleans up ws281X memory."""
self.process.terminate()
print("goodbye")
def mainLoop(self, inQ, _):
"""The main loop.
Args:
inQ: multiprocess queue for getting input
_: the output queue (unused)
"""
try:
# set up LightBerries controller
lightControl = LightController(
ledCount=PIXEL_COUNT,
pwmGPIOpin=GPIO_PWM_PIN,
channelDMA=DMA_CHANNEL,
frequencyPWM=PWM_FREQUENCY,
channelPWM=PWM_CHANNEL,
invertSignalPWM=INVERT,
gamma=GAMMA,
stripTypeLED=LED_STRIP_TYPE,
ledBrightnessFloat=BRIGHTNESS,
debug=True,
)
# create virtual LED array
lightControl.setVirtualLEDArray(ConvertPixelArrayToNumpyArray(PixelArray(PIXEL_COUNT)))
lightControl.copyVirtualLedsToWS281X()
lightControl.refreshLEDs()
time.sleep(0.05)
count = PIXEL_COUNT
color = 0
duration = 0
pattern = self.app.patternChoices[0]
function = self.app.functionChoices[0]
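# queue messages are tuples: ("go",), ("color", int), ("count", int),
# ("duration", seconds), ("function", name), ("pattern", name)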
while True:
# check for user input
msg = None
try:
msg = inQ.get()
except Exception:
pass
if msg is not None:
print(msg)
if msg[0] == "go":
try:
# reset LightBerry controller
lightControl.reset()
# get color pattern method by name, run it
getattr(lightControl, pattern)()
# get function method by name, run it
getattr(lightControl, function)()
# set duration
lightControl.secondsPerMode = duration
# run
lightControl.run()
# turn lights off when (if) method exits
lightControl.off()
lightControl.copyVirtualLedsToWS281X()
lightControl.refreshLEDs()
time.sleep(0.05)
except Exception as ex:
print(ex)
elif msg[0] == "color":
try:
color = msg[1]
print("setting color")
# turn all LEDs off, then set them to new color
lightControl.virtualLEDArray[:] *= 0
lightControl.virtualLEDArray[:] += Pixel(color).array
lightControl.copyVirtualLedsToWS281X()
lightControl.refreshLEDs()
time.sleep(0.05)
except Exception as ex:
print(ex)
elif msg[0] == "count":
try:
count = msg[1]
# turn off all LEDs
if count < lightControl.privateLEDCount:
lightControl.virtualLEDArray[:] *= 0
lightControl.copyVirtualLedsToWS281X()
lightControl.refreshLEDs()
time.sleep(0.05)
# create new LightBerry controller with new pixel count in
# underlying ws281x object
lightControl = LightController(
ledCount=count,
pwmGPIOpin=GPIO_PWM_PIN,
channelDMA=DMA_CHANNEL,
frequencyPWM=PWM_FREQUENCY,
channelPWM=PWM_CHANNEL,
invertSignalPWM=INVERT,
gamma=GAMMA,
stripTypeLED=LED_STRIP_TYPE,
ledBrightnessFloat=BRIGHTNESS,
debug=True,
)
lightControl.secondsPerMode = duration
lightControl.virtualLEDArray[:] += Pixel(color).array
lightControl.copyVirtualLedsToWS281X()
lightControl.refreshLEDs()
time.sleep(0.05)
except Exception as ex:
print(ex)
elif msg[0] == "duration":
try:
duration = msg[1]
except Exception as ex:
print(ex)
elif msg[0] == "function":
try:
function = msg[1]
except Exception as ex:
print(ex)
elif msg[0] == "pattern":
try:
pattern = msg[1]
except Exception as ex:
print(ex)
time.sleep(0.001)
except KeyboardInterrupt:
pass
except Exception as ex:
print(ex)
lightControl.__del__()
time.sleep(0.05)
class App:
"""The application object for tkinter GUI."""
def __init__(self) -> None:
"""The application object for tkinter GUI."""
# create tkinter GUI
self.root = tk.Tk()
self.ledCountInt = tk.IntVar()
self.ledCountlabel = tk.Label(text="LED Count")
self.ledCountlabel.grid(row=0, column=0)
self.ledCountslider = tk.Scale(
self.root, from_=0, to=500, variable=self.ledCountInt, orient="horizontal"
)
self.ledCountslider.grid(row=0, column=1)
self.ledCountPressed = False
self.ledCounttext = tk.Entry(self.root, textvariable=self.ledCountInt)
self.ledCounttext.grid(row=0, column=2)
self.ledCountInt.set(PIXEL_COUNT)
self.colorInt = tk.IntVar()
self.colorString = tk.StringVar()
self.colorlabel = tk.Label(text="Color")
self.colorlabel.grid(row=1, column=0)
self.colorslider = tk.Scale(
self.root, from_=0, to=0xFFFFFF, variable=self.colorInt, orient="horizontal"
)
self.colorslider.grid(row=1, column=1)
self.colortext = tk.Entry(self.root, textvariable=self.colorString)
self.colortext.grid(row=1, column=2)
self.colorbutton = tk.Button(text="Select Color", command=self.getColor)
self.colorbutton.grid(row=1, column=3)
self.functionString = tk.StringVar()
self.functionChoices = [f for f in dir(LightController) if f[:11] == "useFunction"]
self.functionChoices.sort()
self.functionString.set(self.functionChoices[0])
self.functionDropdown = tk.OptionMenu(self.root, self.functionString, *self.functionChoices)
self.functionDropdown.grid(row=2, column=1)
self.patternString = tk.StringVar()
self.patternChoices = [f for f in dir(LightController) if f[:8] == "useColor"]
self.patternChoices.sort()
self.patternString.set(self.patternChoices[0])
self.patternDropdown = tk.OptionMenu(self.root, self.patternString, *self.patternChoices)
self.patternDropdown.grid(row=2, column=2)
self.durationInt = tk.IntVar()
self.durationInt.set(10)
self.durationLabel = tk.Label(text="Duration (Seconds)")
self.durationLabel.grid(row=3, column=1)
self.durationText = tk.Entry(self.root, textvariable=self.durationInt)
self.durationText.grid(row=3, column=2)
self.buttonGo = tk.Button(self.root, height=1, width=10, text="Go", command=self.goNow)
self.buttonGo.grid(row=3, column=3)
self.root.protocol("WM_DELETE_WINDOW", self.destroy)
self.root.title("LightBerries GUI")
# create separate process for controlling lights
self.lights = LightsProcess(self)
# connect callbacks to GUI widgets/controls
self.colorInt.trace("w", lambda name, index, mode, var=self.colorInt: self.updateColor(var.get()))
self.colorString.trace(
"w", lambda name, index, mode, var=self.colorString: self.updateColorHex(var.get())
)
self.ledCountInt.trace(
"w", lambda name, index, mode, var=self.ledCountInt: self.updateLEDCount(var.get())
)
self.functionString.trace(
"w", lambda name, index, mode, var=self.functionString: self.updateFunction(var.get())
)
self.patternString.trace(
"w", lambda name, index, mode, var=self.patternString: self.updatePattern(var.get())
)
self.durationInt.trace(
"w", lambda name, index, mode, var=self.durationInt: self.updateDuration(var.get())
)
try:
self.lights.inQ.put_nowait(("count", PIXEL_COUNT))
self.lights.inQ.put_nowait(("duration", self.durationInt.get()))
except multiprocessing.queues.Full:
pass
self.updateColor(0xFF0000)
# run the GUI thread
self.root.mainloop()
def destroy(self) -> None:
"""Destroy this object cleanly."""
self.root.destroy()
self.__del__()
def __del__(self) -> None:
"""Destroy this object cleanly."""
del self.lights
def goNow(self):
"""Go."""
try:
self.lights.inQ.put_nowait(("go",))
except multiprocessing.queues.Full:
pass
def getColor(self) -> None:
"""Get a color, pass it through multiprocess queue."""
color = askcolor()
if color[1] is None:  # askcolor returns (None, None) when the dialog is cancelled
return
color = int(color[1][1:], 16)
self.colorInt.set(color)
try:
self.lights.inQ.put_nowait(("color", color))
except multiprocessing.queues.Full:
pass
def updateFunction(self, function: str) -> None:
"""Update the selected function, pass it through multiprocess queue.
Args:
function: the function name
"""
try:
self.lights.inQ.put_nowait(("function", function))
except multiprocessing.queues.Full:
pass
def updatePattern(self, pattern: str) -> None:
"""Update the selected pattern, pass it through multiprocess queue.
Args:
pattern: the pattern name
"""
try:
self.lights.inQ.put_nowait(("pattern", pattern))
except multiprocessing.queues.Full:
pass
def updateDuration(self, duration: float) -> None:
"""Update the selected duration, pass it through multiprocess queue.
Args:
duration: the duration in seconds
"""
try:
self.lights.inQ.put_nowait(("duration", duration))
except multiprocessing.queues.Full:
pass
def updateLEDCount(self, count: int) -> None:
"""Update the selected LED count, pass it through multiprocess queue.
Args:
count: the LED count
"""
try:
count = int(count)
self.lights.inQ.put_nowait(("count", count))
except multiprocessing.queues.Full:
pass
def updateColor(self, color: int) -> None:
"""Update color of all LEDs.
Args:
color: the LED colors
"""
if self.root.focus_get() != self.colortext:
self.colorString.set(f"{color:06X}")
try:
self.lights.inQ.put_nowait(("color", color))
except multiprocessing.queues.Full:
pass
def updateColorHex(self, color: str) -> None:
"""Update color of all LEDs.
Args:
color: the LED colors
"""
try:
color = int(color, 16)
self.colorInt.set(color)
except ValueError:
pass  # ignore partial or invalid hex while the user is typing
if __name__ == "__main__":
theApp = App()
del theApp
```
#### File: ws281x_lightberries/Examples/PixelGUI.py
```python
import time
import multiprocessing
import multiprocessing.queues
from tkinter.colorchooser import askcolor
import tkinter as tk
import LightBerries.LightPixels
from LightBerries.LightControls import LightController
from LightBerries.LightPixels import Pixel
from LightBerries.LightPatterns import ConvertPixelArrayToNumpyArray, SolidColorArray
# the number of pixels in the light string
PIXEL_COUNT = 196
# GPIO pin to use for PWM signal
GPIO_PWM_PIN = 18
# DMA channel
DMA_CHANNEL = 10
# frequency to run the PWM signal at
PWM_FREQUENCY = 800000
# brightness of LEDs in range [0.0, 1.0]
BRIGHTNESS = 0.75
# to understand the rest of these arguments read their
# documentation: https://github.com/rpi-ws281x/rpi-ws281x-python
GAMMA = None
LED_STRIP_TYPE = None
INVERT = False
PWM_CHANNEL = 0
LightBerries.LightPixels.DEFAULT_PIXEL_ORDER = LightBerries.LightPixels.EnumLEDOrder.RGB
class LightsProcess:
"""Handles LightBerries functions in a seperate process."""
selfObject = None
appObject = None
def __init__(self, app) -> None:
"""Handles LightBerries functions in a seperate process.
Args:
app: the tkinter app
"""
LightsProcess.selfObject = self
LightsProcess.appObject = app
self.inQ = multiprocessing.Queue(2)
self.outQ = multiprocessing.Queue(2)
self.process = multiprocessing.Process(target=LightsProcess.mainLoop, args=[self.inQ, self.outQ])
self.process.start()
def __del__(self) -> None:
"""Clean up memory."""
self.process.terminate()
@classmethod
def mainLoop(cls, inQ, _):
"""The main loop.
Args:
inQ: multiprocess queue for getting input
_: the output queue (unused)
"""
try:
# create LightBerry controller
lightControl = LightController(
ledCount=PIXEL_COUNT,
pwmGPIOpin=GPIO_PWM_PIN,
channelDMA=DMA_CHANNEL,
frequencyPWM=PWM_FREQUENCY,
channelPWM=PWM_CHANNEL,
invertSignalPWM=INVERT,
gamma=GAMMA,
stripTypeLED=LED_STRIP_TYPE,
ledBrightnessFloat=BRIGHTNESS,
debug=True,
)
lightControl.setVirtualLEDArray(
ConvertPixelArrayToNumpyArray(
SolidColorArray(arrayLength=PIXEL_COUNT, color=LightBerries.LightPixels.PixelColors.OFF)
)
)
lightControl.copyVirtualLedsToWS281X()
lightControl.refreshLEDs()
# run loop forever
while True:
# check for new user input
msg = None
try:
msg = inQ.get()
except Exception:
pass
if msg is not None:
print(msg)
if msg[0] == "color":
try:
index, color = msg[1:]
print("setting color")
lightControl.virtualLEDArray[index] = Pixel(
color, order=LightBerries.LightPixels.EnumLEDOrder.RGB
).array
lightControl.copyVirtualLedsToWS281X()
lightControl.refreshLEDs()
time.sleep(0.05)
except Exception as ex:
print(ex)
time.sleep(0.001)
except KeyboardInterrupt:
pass
except Exception as ex:
print(ex)
lightControl.__del__()
time.sleep(0.05)
class App:
"""The application for tkinter."""
def __init__(self) -> None:
"""The application for tkinter."""
# create tkinter GUI. This GUI could really use some help
self.root = tk.Tk()
self.canvas = tk.Canvas(self.root)
self.canvas.pack(side=tk.RIGHT, fill="both", expand=True)
self.scrollbarY = tk.Scrollbar(self.canvas, command=self.canvas.yview, orient=tk.VERTICAL)
self.scrollbarY.pack(side=tk.RIGHT, fill="y")
self.scrollbarX = tk.Scrollbar(self.canvas, command=self.canvas.xview, orient=tk.HORIZONTAL)
self.scrollbarX.pack(side=tk.BOTTOM, fill="x")
self.mainFrame = tk.Frame(self.canvas)
self.canvas.create_window((0, 0), window=self.mainFrame, anchor="nw")
self.canvas.configure(yscrollcommand=self.scrollbarY.set)
self.canvas.configure(xscrollcommand=self.scrollbarX.set)
self.mainFrame.pack(fill="both", anchor=tk.NW, expand=True)
self.mainFrame.rowconfigure(1, weight=1)
self.mainFrame.columnconfigure(0, weight=1)
self.mainFrame.columnconfigure(0, weight=1)
self.mainFrame.columnconfigure(1, weight=1)
self.mainFrame.columnconfigure(2, weight=1)
self.mainFrame.columnconfigure(3, weight=1)
self.mainFrame.columnconfigure(4, weight=1)
self.mainFrame.columnconfigure(5, weight=1)
self.mainFrame.columnconfigure(6, weight=1)
# update scrollregion after starting 'mainloop'
# when all widgets are in canvas
self.canvas.bind("<Configure>", lambda _: self.onConfigure())
self.rowInt = tk.IntVar(value=3)
self.rowString = tk.StringVar()
self.columnInt = tk.IntVar(value=3)
self.columnString = tk.StringVar()
self.rowlabel = tk.Label(
self.mainFrame,
text="Row Count",
)
self.rowlabel.grid(
row=0,
column=0,
sticky="news",
)
self.rowInput = tk.Entry(
self.mainFrame,
textvariable=self.rowString,
)
self.rowInput.grid(
row=0,
column=1,
sticky="news",
)
self.rowString.set(str(self.rowInt.get()))
self.columnLabel = tk.Label(
self.mainFrame,
text="Column Count",
)
self.columnLabel.grid(
row=0,
column=2,
sticky="news",
)
self.columnInput = tk.Entry(
self.mainFrame,
textvariable=self.columnString,
)
self.columnInput.grid(
row=0,
column=3,
sticky="news",
)
self.columnString.set(str(self.columnInt.get()))
self.configureButton = tk.Button(
self.mainFrame,
text="Configure",
command=self.configureLightBerries,
)
self.configureButton.grid(
row=0,
column=4,
sticky="news",
)
self.root.bind("<Return>", lambda event: self.configureLightBerries())
self.leftClickColorBtn = tk.Button(
self.mainFrame, bg="black", fg="white", text="Left-Click\nColor", width=5, height=2
)
self.leftClickColorBtn.grid(
row=0,
column=5,
sticky="news",
)
self.leftClickColorBtn.ledIndex = None
self.leftClickColorBtn.bind("<Button-1>", self.getColor)
self.rightClickColorBtn = tk.Button(
self.mainFrame, bg="black", fg="white", text="Right-Click\nColor", width=5, height=2
)
self.rightClickColorBtn.grid(
row=0,
column=6,
sticky="news",
)
self.rightClickColorBtn.ledIndex = None
self.rightClickColorBtn.bind("<Button-1>", self.getColor)
self.rightClickColorBtn.bind("<Button-3>", self.getColor)
self.buttonFrame = tk.Frame(
self.mainFrame,
)
self.buttonFrame.grid(
row=1,
column=0,
columnspan=5,
sticky="news",
)
self.lights = LightsProcess(self)
self.root.protocol("WM_DELETE_WINDOW", self.destroy)
self.root.title("LightBerries Pixel Color Chooser")
self.root.mainloop()
def onConfigure(self):
"""Configure the convas widget."""
# update scrollregion after starting 'mainloop'
# when all widgets are in canvas
self.canvas.configure(scrollregion=self.canvas.bbox("all"))
def configureLightBerries(self):
"""Configure LightBerries."""
counter = 0
try:
for row in range(int(self.rowString.get())):
self.buttonFrame.rowconfigure(row, weight=1)
for column in range(int(self.columnString.get())):
self.buttonFrame.columnconfigure(column, weight=1)
btn = tk.Button(
self.buttonFrame, bg="black", fg="white", text=str(counter), width=5, height=2
)
btn.grid(
row=row,
column=column,
sticky="nw",
)
btn.bind("<Button-1>", self.getColor)
btn.bind("<Button-3>", self.getColor2)
btn.grid(column=column, row=row, sticky="nw")
btn.ledIndex = counter
counter += 1
self.configureButton["state"] = "disabled"
except Exception:
pass
def destroy(self) -> None:
"""Destroy this object."""
self.root.destroy()
self.__del__()
def getColor(self, event) -> None:
"""Get a color from user, pass it to LightBerries.
Args:
event: tkinter widget event object
"""
if event.widget.ledIndex is None:
color = askcolor(event.widget["background"])
if color[1] is not None:  # askcolor returns (None, None) when cancelled
color = int(color[1][1:], 16)
colorHex = f"#{color:06X}"
event.widget.configure(bg=colorHex)
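# use the bitwise complement of the chosen color for the button text so it
# stays readable against the new background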
invertColor = 0xFFFFFF - color
invertColorHex = "#" + f"{invertColor:06X}"[-6:]
event.widget.configure(fg=invertColorHex)
else:
color = self.leftClickColorBtn["background"]
event.widget.configure(bg=color)
invertColor = self.leftClickColorBtn["foreground"]
event.widget.configure(fg=invertColor)
try:
color = int(color[1:], 16)
self.lights.inQ.put_nowait(("color", event.widget.ledIndex, color))
except multiprocessing.queues.Full:
pass
def getColor2(self, event) -> None:
"""Get a color from user, pass it to LightBerries.
Args:
event: tkinter widget event object
"""
color = self.rightClickColorBtn["background"]
event.widget.configure(bg=color)
invertColor = self.rightClickColorBtn["foreground"]
event.widget.configure(fg=invertColor)
try:
color = int(color[1:], 16)
self.lights.inQ.put_nowait(("color", event.widget.ledIndex, color))
except multiprocessing.queues.Full:
pass
def __del__(self) -> None:
"""Destroy the object cleanly."""
del self.lights
if __name__ == "__main__":
theApp = App()
del theApp
```
#### File: ws281x_lightberries/Examples/Simulate.py
```python
import time
import multiprocessing
import multiprocessing.queues
import tkinter as tk
import matplotlib.pyplot as plt
from numpy import double
from LightBerries.LightControls import LightController
from LightBerries.LightPixels import Pixel
class LightOutput:
"""Outputs audio FFT to light controller object."""
def __init__(
self,
lightQ: multiprocessing.Queue,
plotQ: multiprocessing.Queue,
tkQ: multiprocessing.Queue,
exitQ: multiprocessing.Queue,
) -> None:
"""Outputs audio FFT to light controller object.
Args:
lightQ: multiprocessing queue for receiving data
plotQ: multiprocessing queue for sending data
tkQ: multiprocessing queue for sending data
exitQ: multiprocessing queue for sending data
"""
self.lightQ = lightQ
self.plotQ = plotQ
self.tkQ = tkQ
self.exitQ = exitQ
self.delay = 0.1
self.lightController = None
self.func = ""
self.colr = ""
# run routine
self.run()
def update(self):
"""Update the gui."""
# print("led refresh")
self.plotQ.put(self.lightController.virtualLEDArray)
time.sleep(self.delay)
def run(self):
"""Run the process."""
try:
ledCount = None
while ledCount is None:
try:
ledCount = self.lightQ.get_nowait()
except multiprocessing.queues.Empty:
pass
self.lightController = LightController(
ledCount,
18,
10,
800000,
simulate=True,
refreshCallback=lambda: self.update(),
)
# print("started lights")
while True:
try:
msg = self.lightQ.get_nowait()
# print(msg)
if isinstance(msg, int):
self.lightController.secondsPerMode = msg
if isinstance(msg, (float, double)):
self.delay = msg
elif isinstance(msg, str):
if len(msg) > 8 and msg[:8] == "useColor":
# print(msg)
self.colr = msg
elif len(msg) > 11 and msg[:11] == "useFunction":
# print(msg)
self.func = msg
elif len(msg) > 1 and msg == "go":
# print("run it")
self.lightController.reset()
self.lightController.off()
self.lightController.refreshLEDs()
getattr(self.lightController, self.colr)()
getattr(self.lightController, self.func)()
self.tkQ.put("running")
self.lightController.run()
self.tkQ.put("done")
except multiprocessing.queues.Empty:
pass
except KeyboardInterrupt:
pass
except Exception as ex:
print(f"Error in {LightOutput.__name__}: {str(ex)}")
finally:
# clean up the LightBerry object
self.lightController.off()
self.lightController.copyVirtualLedsToWS281X()
self.lightController.refreshLEDs()
# pause for object destruction
time.sleep(0.2)
# put any data in queue, this will signify "exit" status
self.exitQ.put("quit")
# double-check deletion
del self.lightController
class PlotOutput:
"""Plots audio FFT to matplotlib's pyplot graphic."""
def __init__(
self,
plotQ: multiprocessing.Queue,
tkQ: multiprocessing.Queue,
exitQ: multiprocessing.Queue,
) -> None:
"""Plots audio FFT to matplotlib's pyplot graphic.
Args:
plotQ: multiprocessing queue for receiving data
tkQ: multiprocessing queue for sending data
exitQ: multiprocessing queue for sending data
"""
self.plotQ = plotQ
self.tkQ = tkQ
self.exitQ = exitQ
self.buttonCallback = None
plt.ion()
self.run()
def run(self):
"""Run the process."""
try:
while True:
try:
# try to get new data
array = self.plotQ.get_nowait()
self.tkQ.put([Pixel(rgb).hexstr for rgb in array])
except multiprocessing.queues.Empty:
pass
except KeyboardInterrupt:
pass
except Exception as ex:
print(f"Error in {PlotOutput.__name__}: {str(ex)}")
finally:
self.exitQ.put("quit")
class App:
"""The application for tkinter."""
def __init__(self) -> None:
"""The application for tkinter."""
# create tkinter GUI. This GUI could really use some help
self.lightQ: multiprocessing.Queue = multiprocessing.Queue()
self.plotQ: multiprocessing.Queue = multiprocessing.Queue()
self.tkQ: multiprocessing.Queue = multiprocessing.Queue()
self.exitQ: multiprocessing.Queue = multiprocessing.Queue()
# create process objects
self.lightProcess = multiprocessing.Process(
target=LightOutput,
args=(
self.lightQ,
self.plotQ,
self.tkQ,
self.exitQ,
),
)
self.guiProcess = multiprocessing.Process(
target=PlotOutput,
args=(
self.plotQ,
self.tkQ,
self.exitQ,
),
)
# start the selected process
self.lightProcess.start()
self.guiProcess.start()
self.ledCount = None
self.buttons = []
self.running = False
self.root = tk.Tk()
self.canvas = tk.Canvas(self.root)
self.canvas.pack(side=tk.RIGHT, fill="both", expand=True)
self.scrollbarY = tk.Scrollbar(self.canvas, command=self.canvas.yview, orient=tk.VERTICAL)
self.scrollbarY.pack(side=tk.RIGHT, fill="y")
self.scrollbarX = tk.Scrollbar(self.canvas, command=self.canvas.xview, orient=tk.HORIZONTAL)
self.scrollbarX.pack(side=tk.BOTTOM, fill="x")
self.mainFrame = tk.Frame(self.canvas)
self.canvas.create_window((0, 0), window=self.mainFrame, anchor="nw")
self.canvas.configure(yscrollcommand=self.scrollbarY.set)
self.canvas.configure(xscrollcommand=self.scrollbarX.set)
self.mainFrame.pack(fill="both", anchor=tk.NW, expand=True)
self.mainFrame.rowconfigure(1, weight=1)
self.mainFrame.columnconfigure(0, weight=1)
# self.mainFrame.columnconfigure(0, weight=1)
self.mainFrame.columnconfigure(1, weight=1)
self.mainFrame.columnconfigure(2, weight=1)
self.mainFrame.columnconfigure(3, weight=1)
self.mainFrame.columnconfigure(4, weight=1)
self.ledCountInt = tk.IntVar()
self.ledCountlabel = tk.Label(
self.mainFrame,
text="LED Count",
)
self.ledCountlabel.grid(
row=0,
column=0,
)
self.ledCounttext = tk.Entry(
self.mainFrame,
textvariable=self.ledCountInt,
)
self.ledCounttext.grid(
row=0,
column=1,
)
self.ledCountInt.set(100)
self.functionString = tk.StringVar()
self.functionChoices = [f for f in dir(LightController) if f[:11] == "useFunction"]
self.functionChoices.sort()
self.functionString.set(self.functionChoices[0])
self.functionDropdown = tk.OptionMenu(
self.mainFrame,
self.functionString,
*self.functionChoices,
)
self.functionDropdown.grid(
row=0,
column=2,
)
self.patternString = tk.StringVar()
self.patternChoices = [f for f in dir(LightController) if f[:8] == "useColor"]
self.patternChoices.sort()
self.patternString.set(self.patternChoices[0])
self.patternDropdown = tk.OptionMenu(
self.mainFrame,
self.patternString,
*self.patternChoices,
)
self.patternDropdown.grid(
row=0,
column=3,
)
self.durationInt = tk.IntVar()
self.durationInt.set(10)
self.durationLabel = tk.Label(
self.mainFrame,
text="Test Duration (Seconds)",
)
self.durationLabel.grid(
row=0,
column=4,
)
self.durationText = tk.Entry(
self.mainFrame,
textvariable=self.durationInt,
)
self.durationText.grid(
row=0,
column=5,
)
self.delayFloat = tk.DoubleVar()
self.delayFloat.set(0.05)
self.delayLabel = tk.Label(
self.mainFrame,
text="Refresh Delay (Seconds)",
)
self.delayLabel.grid(
row=0,
column=6,
)
self.delayText = tk.Entry(
self.mainFrame,
textvariable=self.delayFloat,
)
self.delayText.grid(
row=0,
column=7,
)
self.buttonGo = tk.Button(
self.mainFrame,
height=1,
width=10,
text="Go",
command=self.configureLightBerries,
)
self.buttonGo.grid(
row=0,
column=8,
)
self.root.bind("<Return>", lambda event: self.configureLightBerries())
self.buttonFrame = tk.Frame(
self.mainFrame,
)
self.buttonFrame.grid(
row=1,
column=0,
columnspan=9,
sticky="news",
)
self.root.protocol("WM_DELETE_WINDOW", self.destroy)
self.root.title("LightBerries LED GUI Simulator (function parameters not included)")
self.root.after(1, self.checkQ)
self.root.mainloop()
def checkQ(self):
"""Method for checking whether other processes have sent us data."""
self.root.after(1, self.checkQ)
try:
data = self.tkQ.get_nowait()
if isinstance(data, str):
if data == "running":
self.running = True
self.ledCounttext["state"] = "disabled"
self.buttonGo["state"] = "disabled"
elif data == "done":
self.running = False
self.ledCounttext["state"] = "normal"
self.buttonGo["state"] = "normal"
else:
for index, btn in enumerate(self.buttons):
btn.configure(bg="#" + data[index])
# print(f"app got data: {len(data)}")
except multiprocessing.queues.Empty:
pass
def onConfigure(self):
"""Configure the convas widget."""
# update scrollregion after starting 'mainloop'
# when all widgets are in canvas
self.canvas.configure(scrollregion=self.canvas.bbox("all"))
def configureLightBerries(self):
"""Configure LightBerries."""
# print("configuring")
if self.ledCount is None:
ledCount = int(self.ledCountInt.get())
for column in range(ledCount):
self.buttonFrame.columnconfigure(column, weight=1)
btn = tk.Button(self.buttonFrame, bg="black", fg="white", width=1, height=1)
btn.grid(
row=1,
column=column,
sticky="nw",
)
self.buttons.append(btn)
self.lightQ.put_nowait(ledCount)
self.ledCount = ledCount
# print("sending data")
if self.running is False:
self.lightQ.put(self.durationInt.get())
self.lightQ.put(self.delayFloat.get())
self.lightQ.put(self.patternString.get())
self.lightQ.put(self.functionString.get())
self.lightQ.put("go")
def destroy(self) -> None:
"""Destroy this object."""
self.root.destroy()
self.__del__()
def __del__(self) -> None:
"""Destroy the object cleanly."""
pass
if __name__ == "__main__":
theApp = App()
del theApp
```
#### File: ws281x_lightberries/LightBerries/LightControls.py
```python
import time
import random
import logging
from typing import (
Callable,
Dict,
List,
Optional,
Union,
Any,
)
from nptyping import NDArray
import numpy as np
from LightBerries import LightPatterns
from LightBerries.RpiWS281xPatch import rpi_ws281x
from LightBerries.LightBerryExceptions import LightControlException
from LightBerries.LightPixels import Pixel, PixelColors
from LightBerries.LightStrings import LightString
from LightBerries.LightFunctions import LightFunction, LEDFadeType, RaindropStates, SpriteState, ThingMoves
from LightBerries.LightPatterns import (
SolidColorArray,
ConvertPixelArrayToNumpyArray,
RepeatingColorSequenceArray,
ColorTransitionArray,
RainbowArray,
RepeatingRainbowArray,
ReflectArray,
DefaultColorSequence,
DEFAULT_BACKGROUND_COLOR,
)
LOGGER = logging.getLogger("LightBerries")
DEFAULT_REFRESH_DELAY = 50
class LightController:
"""This library wraps the rpi_ws281x library and provides some lighting functions.
See https://github.com/rpi-ws281x/rpi-ws281x-python for questions about rpi_ws281x library.
Quick Start:
1: Create a LightController object specifying ledCount:int, pwmGPIOpin:int,
channelDMA:int, frequencyPWM:int
lights = LightController(10, 18, 10, 800000)
2: Choose a color pattern
lights.useColorRainbow()
3: Choose a function
lights.useFunctionCylon()
4: Choose a duration to run
lights.secondsPerMode = 60
5: Run
lights.run()
"""
def __init__(
self,
ledCount: int = 100,
pwmGPIOpin: int = 18,
channelDMA: int = 10,
frequencyPWM: int = 800000,
invertSignalPWM: bool = False,
ledBrightnessFloat: float = 0.75,
channelPWM: int = 0,
stripTypeLED: Any = None,
gamma: Any = None,
debug: bool = False,
verbose: bool = False,
refreshCallback: Callable = None,
simulate: bool = False,
) -> None:
"""Create a LightController object for running patterns across a rpi_ws281x LED string.
Args:
ledCount: the number of Pixels in your string of LEDs
pwmGPIOpin: the GPIO pin number your lights are hooked up to
(18 is a good choice since it does PWM)
channelDMA: the DMA channel to use (5 is a good option)
frequencyPWM: try 800,000
invertSignalPWM: set true to invert the PWM signal
ledBrightnessFloat: set to a value between 0.0 (OFF) and 1.0 (ON).
Lower values tend to introduce more flicker
channelPWM: defaults to 0, see https://github.com/rpi-ws281x/rpi-ws281x-python
stripTypeLED: see https://github.com/rpi-ws281x/rpi-ws281x-python
gamma: see https://github.com/rpi-ws281x/rpi-ws281x-python
debug: set true for some debugging messages
verbose: set true for even more information
refreshCallback: callback method is called whenever new LED values are sent to LED string
simulate: only call refreshCallback, dont use GPIO
Raises:
SystemExit: if exiting
KeyboardInterrupt: if user quits
LightControlException: if something bad happens
"""
try:
# configure logging
if debug is True or verbose is True:
LOGGER.setLevel(logging.DEBUG)
if verbose is True:
LOGGER.setLevel(5)
self.simulate = simulate
# create ws281x pixel strip
pixelStrip = None
if self.simulate is False:
pixelStrip = rpi_ws281x.PixelStrip(
pin=pwmGPIOpin,
dma=channelDMA,
num=ledCount,
freq_hz=frequencyPWM,
channel=channelPWM,
invert=invertSignalPWM,
gamma=gamma,
strip_type=stripTypeLED,
brightness=int(255 * ledBrightnessFloat),
)
# wrap pixel strip in my own interface object
self.ws28xxLightString: Optional[LightString] = LightString(
pixelStrip=pixelStrip, ledCount=ledCount, simulate=self.simulate
)
# initialize instance variables
self.privateLEDCount: int = len(self.ws28xxLightString)
self.virtualLEDArray: NDArray[(3, Any), np.int32] = SolidColorArray(
arrayLength=self.privateLEDCount,
color=PixelColors.OFF,
)
self.virtualLEDIndexArray: NDArray[(Any,), np.int32] = np.array(
range(len(self.ws28xxLightString))
)
self.privateOverlayDict: Dict[int, NDArray[(3,), np.int32]] = {}
self.privateVirtualLEDCount: int = len(self.virtualLEDArray)
self.privateVirtualLEDIndexCount: int = len(self.virtualLEDIndexArray)
self.privateLastModeChange: float = time.time() - 1000
self.privateNextModeChange: float = time.time()
self.privateRefreshDelay: float = 0.001
self.privateSecondsPerMode: float = 120.0
self.privateBackgroundColor: NDArray[(3,), np.int32] = PixelColors.OFF.array
self.privateColorSequence: NDArray[(3, Any), np.int32] = ConvertPixelArrayToNumpyArray([])
self.privateColorSequenceCount: int = 0
self.privateColorSequenceIndex: int = 0
self.privateLoopForever: bool = False
self.privateLightFunctions: List[LightFunction] = []
# give LightFunction class a pointer to this class
LightFunction.Controller = self
self.refreshCallback: Callable = refreshCallback
# initialize stuff
self.reset()
except SystemExit:
raise
except KeyboardInterrupt:
raise
except Exception as ex:
LOGGER.exception(
"%s.%s Exception: %s",
self.__class__.__name__,
"__init__",
ex,
)
raise LightControlException(str(ex)).with_traceback(ex.__traceback__)
def __del__(
self,
) -> None:
"""Disposes of the rpi_ws281x object (if it exists) to prevent memory leaks.
Raises:
SystemExit: if exiting
KeyboardInterrupt: if user quits
LightControlException: if something bad happens
"""
try:
if hasattr(self, "_LEDArray") and self.ws28xxLightString is not None:
self.off()
self.copyVirtualLedsToWS281X()
self.refreshLEDs()
self.ws28xxLightString.__del__()
self.ws28xxLightString = None
except SystemExit:
raise
except KeyboardInterrupt:
raise
except Exception as ex:
LOGGER.exception(
"%s.%s Exception: %s",
self.__class__.__name__,
self.__del__.__name__,
ex,
)
raise LightControlException(str(ex)).with_traceback(ex.__traceback__)
@property
def virtualLEDCount(self) -> int:
"""The number of virtual LEDs. These include ones that won't display.
Returns:
the number of virtual LEDs
"""
return self.privateVirtualLEDCount
@property
def realLEDCount(self) -> int:
"""The number of LEDs in the LED string.
Returns:
the number of actual LEDs in the string (as configured)
"""
return self.privateLEDCount
@property
def refreshDelay(
self,
) -> float:
"""The delay between starting LED refreshes.
Returns:
the delay between refreshes
"""
return self.privateRefreshDelay
@refreshDelay.setter
def refreshDelay(
self,
delay: float,
) -> None:
"""Set the refresh delay.
Args:
delay: the delay in seconds
"""
self.privateRefreshDelay = float(delay)
@property
def backgroundColor(
self,
) -> NDArray[(3,), np.int32]:
"""The defined background, or "Off" color for the LED string.
Returns:
the rgb value
"""
return self.privateBackgroundColor
@backgroundColor.setter
def backgroundColor(
self,
color: NDArray[(3,), np.int32],
) -> None:
"""Set the background color.
Args:
color: an RGB value
"""
self.privateBackgroundColor = Pixel(color).array
@property
def secondsPerMode(
self,
) -> float:
"""The number of seconds to run the configuration.
Returns:
the seconds to run the current configuration
"""
return self.privateSecondsPerMode
@secondsPerMode.setter
def secondsPerMode(
self,
seconds: float,
) -> None:
"""Set the seconds per mode.
Args:
seconds: the number of seconds
"""
self.privateSecondsPerMode = float(seconds)
@property
def colorSequence(
self,
) -> NDArray[(3, Any), np.int32]:
"""The sequence of RGB values to use for generating patterns when using the functions.
Returns:
the sequence of RGB values
"""
return self.privateColorSequence
@colorSequence.setter
def colorSequence(
self,
colorSequence: NDArray[(3, Any), np.int32],
) -> None:
"""Set the color sequence.
Args:
colorSequence: the sequence of RGB values
"""
self.privateColorSequence = np.copy(ConvertPixelArrayToNumpyArray(colorSequence))
self.colorSequenceCount = len(self.privateColorSequence)
self.colorSequenceIndex = 0
@property
def colorSequenceCount(
self,
) -> int:
"""The number of colors in the defined sequence.
Returns:
the number of LEDs in the sequence
"""
return self.privateColorSequenceCount
@colorSequenceCount.setter
def colorSequenceCount(
self,
colorSequenceCount: int,
) -> None:
"""Set the Color sequence count.
Args:
colorSequenceCount: the number of colors in the sequence
"""
self.privateColorSequenceCount = colorSequenceCount
@property
def colorSequenceIndex(
self,
) -> int:
"""The index we are on in the current color sequence.
Returns:
the current index into the color sequence
"""
return self.privateColorSequenceIndex
@colorSequenceIndex.setter
def colorSequenceIndex(
self,
colorSequenceIndex: int,
) -> None:
"""Set the color sequence index.
Args:
colorSequenceIndex: the new index
"""
self.privateColorSequenceIndex = colorSequenceIndex
@property
def colorSequenceNext(
self,
) -> NDArray[(3,), np.int32]:
"""Get the next color in the sequence.
Returns:
the next RGB value
"""
temp = self.colorSequence[self.colorSequenceIndex]
self.colorSequenceIndex += 1
if self.colorSequenceIndex >= self.colorSequenceCount:
self.colorSequenceIndex = 0
if isinstance(temp, Pixel):
return temp.array
else:
return temp
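# Example: with colorSequence = [RED, GREEN], successive reads return
# RED, GREEN, RED, GREEN, ... because the index wraps back to zero.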
@property
def functionList(self) -> List[LightFunction]:
"""The list of function objects that will be used to modify the light pattern.
Returns:
the list of functions
"""
return self.privateLightFunctions
@property
def overlayDictionary(self) -> Dict[int, Any]:
"""The list of indices and associated colors to temporarily assign LEDs.
Returns:
the dictionary of LEDs and values
"""
return self.privateOverlayDict
def getColorMethodsList(self) -> List[str]:
"""Get the list of methods in this class (by name) that set the color sequence.
Returns:
a list of method name strings
"""
attrs = list(dir(self))
colors = [c for c in attrs if c[:8] == "useColor"]
colors.sort()
return colors
def getFunctionMethodsList(self) -> List[str]:
"""Get the list of methods in this class (by name) that set the color functions.
Returns:
a list of method name strings
"""
attrs = list(dir(self))
functions = [f for f in attrs if f[:11] == "useFunction"]
functions.sort()
return functions
def reset(
self,
) -> None:
"""Reset class variables to default state.
Raises:
SystemExit: if exiting
KeyboardInterrupt: if user quits
LightControlException: if something bad happens
"""
try:
LOGGER.debug("%s.%s:", self.__class__.__name__, self.reset.__name__)
self.privateLightFunctions = []
if self.virtualLEDCount > self.realLEDCount:
self.setVirtualLEDArray(self.virtualLEDArray[: self.realLEDCount])
elif self.virtualLEDCount < self.realLEDCount:
array = LightPatterns.SolidColorArray(arrayLength=self.realLEDCount, color=PixelColors.OFF)
array[: self.virtualLEDCount] = self.virtualLEDArray
self.setVirtualLEDArray(array)
except SystemExit:
raise
except KeyboardInterrupt:
raise
except Exception as ex:
LOGGER.exception(
"%s.%s Exception: %s",
self.__class__.__name__,
self.reset.__name__,
ex,
)
raise LightControlException(str(ex)).with_traceback(ex.__traceback__)
def setVirtualLEDArray(
self,
ledArray: Union[List[Pixel], NDArray[(3, Any), np.int32]],
) -> None:
"""Assign a sequence of pixel data to the LED.
Args:
ledArray: array of RGB values
Raises:
SystemExit: if exiting
KeyboardInterrupt: if user quits
LightControlException: if something bad happens
"""
try:
# make sure the passed LED array is the correct type
if isinstance(ledArray, list):
_ledArray = ConvertPixelArrayToNumpyArray(ledArray)
elif isinstance(ledArray, np.ndarray):
_ledArray = ledArray
else:
_ledArray = SolidColorArray(arrayLength=self.realLEDCount, color=self.backgroundColor)
# check assignment length
if len(_ledArray) >= self.realLEDCount:
self.virtualLEDArray = _ledArray
else:
self.virtualLEDArray[: len(_ledArray)] = _ledArray
# assign new LED array to virtual LEDs
self.privateVirtualLEDCount = len(self.virtualLEDArray)
# set our indices for virtual LEDs
self.privateVirtualLEDIndexCount = self.virtualLEDCount
# create array of index values for manipulation if needed
self.virtualLEDIndexArray = np.arange(self.virtualLEDCount)
# if the array is smaller than the actual light strand, make our entire strand addressable
if self.privateVirtualLEDIndexCount < self.realLEDCount:
self.privateVirtualLEDIndexCount = self.realLEDCount
self.virtualLEDIndexArray = np.arange(self.privateVirtualLEDIndexCount)
self.virtualLEDArray = np.concatenate(
(
self.virtualLEDArray,
np.array(
[PixelColors.OFF.tuple for i in range(self.realLEDCount - self.virtualLEDCount)]
),
)
)
except SystemExit:
raise
except KeyboardInterrupt:
raise
except Exception as ex:
LOGGER.exception(
"%s.%s Exception: %s",
self.__class__.__name__,
self.setVirtualLEDArray.__name__,
ex,
)
raise LightControlException(str(ex)).with_traceback(ex.__traceback__)
def copyVirtualLedsToWS281X(
self,
) -> None:
"""Sets each Pixel in the rpi_ws281x object to the buffered array value.
Raises:
SystemExit: if exiting
KeyboardInterrupt: if user quits
LightControlException: if something bad happens
"""
try:
# callback function to do work
def SetPixel(irgb):
i = irgb[0]
rgb = irgb[1]
if i < self.realLEDCount:
self.ws28xxLightString[i] = rgb
# fast method of calling the callback method on each index of LED array
list(
map(
SetPixel,
enumerate(
self.virtualLEDArray[self.virtualLEDIndexArray][
np.where(self.virtualLEDIndexArray < self.realLEDCount)
]
),
)
)
except SystemExit:
raise
except KeyboardInterrupt:
raise
except Exception as ex:
LOGGER.exception(
"%s.%s Exception: %s",
self.__class__.__name__,
self.copyVirtualLedsToWS281X.__name__,
ex,
)
raise LightControlException(str(ex)).with_traceback(ex.__traceback__)
def refreshLEDs(
self,
) -> None:
"""Display current LED buffer.
Raises:
SystemExit: if exiting
KeyboardInterrupt: if user quits
LightControlException: if something bad happens
"""
try:
# call light string's refresh method to send the communications out to the addressable LEDs
if isinstance(self.refreshCallback, Callable):
self.refreshCallback()
if self.ws28xxLightString is not None:
self.ws28xxLightString.refresh()
except SystemExit:
raise
except KeyboardInterrupt:
raise
except Exception as ex:
LOGGER.exception(
"%s.%s Exception: %s",
self.__class__.__name__,
self.refreshLEDs.__name__,
ex,
)
raise LightControlException(str(ex)).with_traceback(ex.__traceback__)
def off(
self,
) -> None:
"""Set all Pixels to RGD background color.
Raises:
SystemExit: if exiting
KeyboardInterrupt: if user quits
LightControlException: if something bad happens
"""
try:
# clear all current values
self.virtualLEDArray *= 0
# set to background color
self.virtualLEDArray[:] += self.backgroundColor
except SystemExit:
raise
except KeyboardInterrupt:
raise
except Exception as ex:
LOGGER.exception(
"%s.%s Exception: %s",
self.__class__.__name__,
self.off.__name__,
ex,
)
raise LightControlException(str(ex)).with_traceback(ex.__traceback__)
def _runFunctions(
self,
) -> None:
"""Run each function in the configured function list.
Raises:
SystemExit: if exiting
KeyboardInterrupt: if user quits
LightControlException: if something bad happens
"""
try:
# invoke the function pointer saved in the light data object
for function in self.privateLightFunctions:
function.runFunction(function)
except SystemExit:
raise
except KeyboardInterrupt:
raise
except Exception as ex:
LOGGER.exception(
"%s.%s Exception: %s",
self.__class__.__name__,
self._runFunctions.__name__,
ex,
)
raise LightControlException(str(ex)).with_traceback(ex.__traceback__)
def _copyOverlays(
self,
):
"""Copy overlays directly to output array, bypassing the buffer.
Raises:
SystemExit: if exiting
KeyboardInterrupt: if user quits
LightControlException: if something bad happens
"""
try:
# iterate over the dictionary key-value pairs, assign LED values
# directly to output buffer skipping the virtual LED copies.
# This ensures that overlays are temporary and get overwritten
# next refresh.
for index, ledValue in self.privateOverlayDict.items():
self.ws28xxLightString[index] = ledValue
self.privateOverlayDict = {}
except SystemExit:
raise
except KeyboardInterrupt:
raise
except Exception as ex:
LOGGER.exception(
"%s.%s Exception: %s",
self.__class__.__name__,
self._copyOverlays.__name__,
ex,
)
raise LightControlException(str(ex)).with_traceback(ex.__traceback__)
def getRandomIndex(
self,
) -> int:
"""Retrieve a random Pixel index.
Returns:
a random index into the virtual LED buffer
Raises:
SystemExit: if exiting
KeyboardInterrupt: if user quits
LightControlException: if something bad happens
"""
try:
return random.randint(0, (self.virtualLEDCount - 1))
except SystemExit:
raise
except KeyboardInterrupt:
raise
except Exception as ex:
LOGGER.exception(
"%s.%s Exception: %s",
self.__class__.__name__,
self.getRandomIndex.__name__,
ex,
)
raise LightControlException(str(ex)).with_traceback(ex.__traceback__)
def getRandomIndices(
self,
count: int,
) -> NDArray[(Any), np.int32]:
"""Retrieve a random list of Pixel indices.
Args:
count: the number of random indices to get
Returns:
a list of random indices into the virtual LED buffer
Raises:
SystemExit: if exiting
KeyboardInterrupt: if user quits
LightControlException: if something bad happens
"""
try:
temp = []
for _ in range(count):
temp.append(self.getRandomIndex())
return np.array(temp)
except SystemExit:
raise
except KeyboardInterrupt:
raise
except Exception as ex:
LOGGER.exception(
"%s.%s Exception: %s",
self.__class__.__name__,
self.getRandomIndices.__name__,
ex,
)
raise LightControlException(str(ex)).with_traceback(ex.__traceback__)
def getRandomDirection(self) -> int:
"""Get a random one or negative one to determine direction for light functions.
Returns:
one or negative one, randomly
"""
return [-1, 1][random.randint(0, 1)]
def getRandomBoolean(self) -> bool:
"""Get a random true or false value.
Returns:
True or False, randomly
"""
return [True, False][random.randint(0, 1)]
def fadeColor(
self, color: NDArray[(3,), np.int32], colorNext: NDArray[(3,), np.int32], fadeCount: int
) -> NDArray[(3,), np.int32]:
"""Fade an LED's color by the given amount and return the new RGB value.
Args:
color: current color
colorNext: desired color
fadeCount: amount to adjust each RGB value by
Returns:
new RGB value
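Example (illustrative doctest; assumes a configured instance named lights
and numpy imported as np):
>>> lights.fadeColor(np.array([255, 0, 0]), np.array([0, 0, 255]), 100)
array([155,   0, 100])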
"""
# copy it to make sure we dont change the original by reference
_color: NDArray[(3,), np.int32] = np.copy(color)
# loop through RGB values
for rgbIndex in range(len(_color)):
# the values closest to the target color might match already
if _color[rgbIndex] != colorNext[rgbIndex]:
# subtract or add as appropriate in order to get closer to target color
if _color[rgbIndex] - fadeCount > colorNext[rgbIndex]:
_color[rgbIndex] -= fadeCount
elif _color[rgbIndex] + fadeCount < colorNext[rgbIndex]:
_color[rgbIndex] += fadeCount
else:
_color[rgbIndex] = colorNext[rgbIndex]
return _color
def run(self):
"""Run the configured color pattern and function either forever or for self.secondsPerMode.
Raises:
SystemExit: if exiting
KeyboardInterrupt: if user quits
LightControlException: if something bad happens
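Example (illustrative sketch; assumes a configured instance named lights):
lights.useColorRainbow()     # pick a color sequence
lights.useFunctionMarquee()  # pick a light function
lights.run()                 # animate until secondsPerMode elapses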
"""
try:
LOGGER.debug("%s.%s:", self.__class__.__name__, self.run.__name__)
# set start time
self.privateLastModeChange = time.time()
# set a target time to change
if self.secondsPerMode is None:
self.privateNextModeChange = self.privateLastModeChange + (random.uniform(30, 120))
else:
self.privateNextModeChange = self.privateLastModeChange + (self.secondsPerMode)
# loop
while time.time() < self.privateNextModeChange or self.privateLoopForever:
try:
# run the selected functions using LightFunction object callbacks
self._runFunctions()
# copy the resulting RGB values to the ws28xx LED buffer
self.copyVirtualLedsToWS281X()
# copy temporary changes (not buffered in this class) to the ws28xx LED buffer
self._copyOverlays()
# tell the ws28xx controller to transmit the new data
self.refreshLEDs()
except KeyboardInterrupt:
raise
except SystemExit:
raise
except Exception as ex:
LOGGER.exception("_Run Loop Error: %s", (str(ex),))
raise LightControlException(str(ex)).with_traceback(ex.__traceback__)
self.privateLastModeChange = time.time()
if self.secondsPerMode is None:
self.privateNextModeChange = self.privateLastModeChange + (random.uniform(30, 120))
else:
self.privateNextModeChange = self.privateLastModeChange + (self.secondsPerMode)
except SystemExit:
raise
except KeyboardInterrupt:
raise
except Exception as ex:
LOGGER.exception(
"%s.%s Exception: %s",
self.__class__.__name__,
self.run.__name__,
ex,
)
raise LightControlException(str(ex)).with_traceback(ex.__traceback__)
def useColorSingle(
self,
foregroundColor: Pixel = None,
backgroundColor: Pixel = None,
) -> None:
"""Sets the the color sequence used by light functions to a single color of your choice.
Args:
foregroundColor: the color that each pixel will be set to
backgroundColor: the "off" color
Raises:
SystemExit: if exiting
KeyboardInterrupt: if user quits
LightControlException: if something bad happens
"""
try:
LOGGER.debug("\n%s.%s:", self.__class__.__name__, self.useColorSingle.__name__)
# defaults
_sequence: NDArray[(Any, 3), np.int32] = DefaultColorSequence()
_foregroundColor: NDArray[(3,), np.int32] = _sequence[random.randint(0, len(_sequence) - 1)]
_backgroundColor = DEFAULT_BACKGROUND_COLOR.array
# use the passed in color
if foregroundColor is not None:
_foregroundColor = Pixel(foregroundColor).array
# use the passed in color
if backgroundColor is not None:
_backgroundColor = Pixel(backgroundColor).array
# assign temporary values to instance variables
self.backgroundColor = _backgroundColor
self.colorSequence = ConvertPixelArrayToNumpyArray([_foregroundColor])
except SystemExit:
raise
except KeyboardInterrupt:
raise
except Exception as ex:
LOGGER.exception(
"%s.%s Exception: %s",
self.__class__.__name__,
self.useColorSingle.__name__,
ex,
)
raise LightControlException(str(ex)).with_traceback(ex.__traceback__)
def useColorSinglePseudoRandom(
self,
backgroundColor: Pixel = None,
) -> None:
"""Sets the the color sequence used by light functions to a single random named color.
Args:
backgroundColor: the "off" color
Raises:
SystemExit: if exiting
KeyboardInterrupt: if user quits
LightControlException: if something bad happens
"""
try:
LOGGER.debug("\n%s.%s:", self.__class__.__name__, self.useColorSinglePseudoRandom.__name__)
_backgroundColor: NDArray[(3,), np.int32] = DEFAULT_BACKGROUND_COLOR.array
# set background color
if backgroundColor is not None:
_backgroundColor = Pixel(backgroundColor).array
self.backgroundColor = _backgroundColor
# set the color sequence
self.colorSequence = ConvertPixelArrayToNumpyArray([PixelColors.pseudoRandom()])
except SystemExit:
raise
except KeyboardInterrupt:
raise
except Exception as ex:
LOGGER.exception(
"%s.%s Exception: %s",
self.__class__.__name__,
self.useColorSinglePseudoRandom.__name__,
ex,
)
raise LightControlException(str(ex)).with_traceback(ex.__traceback__)
def useColorSingleRandom(
self,
backgroundColor: Pixel = None,
) -> None:
"""Sets the the color sequence to a single random RGB value.
Args:
backgroundColor: the "off" color
Raises:
SystemExit: if exiting
KeyboardInterrupt: if user quits
LightControlException: if something bad happens
"""
try:
LOGGER.debug("\n%s.%s:", self.__class__.__name__, self.useColorSingleRandom.__name__)
_backgroundColor: NDArray[(3,), np.int32] = DEFAULT_BACKGROUND_COLOR.array
# set the background color to the default values
if backgroundColor is not None:
_backgroundColor = Pixel(backgroundColor).array
self.backgroundColor = _backgroundColor
# set the color sequence to a single random value
self.colorSequence = ConvertPixelArrayToNumpyArray([PixelColors.random()])
except SystemExit:
raise
except KeyboardInterrupt:
raise
except Exception as ex:
LOGGER.exception(
"%s.%s Exception: %s",
self.__class__.__name__,
self.useColorSingleRandom.__name__,
ex,
)
raise LightControlException(str(ex)).with_traceback(ex.__traceback__)
def useColorSequence(
self,
colorSequence: List[Pixel] = None,
backgroundColor: Pixel = None,
) -> None:
"""Sets the the color sequence used by light functions to one of your choice.
Args:
colorSequence: list of colors in the pattern
backgroundColor: the "off" color
Raises:
SystemExit: if exiting
KeyboardInterrupt: if user quits
LightControlException: if something bad happens
"""
try:
LOGGER.debug("\n%s.%s:", self.__class__.__name__, self.useColorSequence.__name__)
_backgroundColor: NDArray[(3,), np.int32] = DEFAULT_BACKGROUND_COLOR.array
_colorSequence: NDArray[(Any, 3), np.int32] = DefaultColorSequence()
# set the color sequence to the default one for this month, or use the passed in argument
if colorSequence is not None:
_colorSequence = [Pixel(p) for p in colorSequence]
# assign the background color its default value
if backgroundColor is not None:
_backgroundColor = Pixel(backgroundColor).array
self.backgroundColor = _backgroundColor
# set the color sequence
self.colorSequence = ConvertPixelArrayToNumpyArray(_colorSequence)
except KeyboardInterrupt:
raise
except SystemExit:
raise
except Exception as ex:
LOGGER.exception(
"%s.%s Exception: %s",
self.__class__.__name__,
self.useColorSequence.__name__,
ex,
)
raise LightControlException(str(ex)).with_traceback(ex.__traceback__)
def useColorSequencePseudoRandom(
self,
sequenceLength: int = None,
backgroundColor: Pixel = None,
) -> None:
"""Sets the color sequence used in light functions to a random list of named colors.
Args:
sequenceLength: the number of random colors to use in the generated sequence
backgroundColor: the "off" color
Raises:
SystemExit: if exiting
KeyboardInterrupt: if user quits
LightControlException: if something bad happens
"""
try:
LOGGER.debug("\n%s.%s:", self.__class__.__name__, self.useColorSequencePseudoRandom.__name__)
_backgroundColor: NDArray[(3,), np.int32] = DEFAULT_BACKGROUND_COLOR.array
_sequenceLength: int = random.randint(self.realLEDCount // 20, self.realLEDCount // 10)
# either calculate a sequence length or use the passed value
if sequenceLength is not None:
_sequenceLength = int(sequenceLength)
# set background color
if backgroundColor is not None:
_backgroundColor = Pixel(backgroundColor).array
# assign the color sequence
self.backgroundColor = _backgroundColor
self.colorSequence = ConvertPixelArrayToNumpyArray(
[PixelColors.pseudoRandom() for i in range(_sequenceLength)]
)
except SystemExit:
raise
except KeyboardInterrupt:
raise
except Exception as ex:
LOGGER.exception(
"%s.%s Exception: %s",
self.__class__.__name__,
self.useColorSequencePseudoRandom.__name__,
ex,
)
raise LightControlException(str(ex)).with_traceback(ex.__traceback__)
def useColorSequenceRandom(
self,
sequenceLength: int = None,
backgroundColor: Pixel = None,
) -> None:
"""Sets the color sequence used in light functions to a random list of RGB values.
Args:
sequenceLength: the number of random colors to use in the generated sequence
backgroundColor: the "off" color
Raises:
SystemExit: if exiting
KeyboardInterrupt: if user quits
LightControlException: if something bad happens
"""
try:
LOGGER.debug("\n%s.%s:", self.__class__.__name__, self.useColorSequenceRandom.__name__)
_backgroundColor: NDArray[(3,), np.int32] = DEFAULT_BACKGROUND_COLOR.array
_sequenceLength: int = random.randint(self.realLEDCount // 20, self.realLEDCount // 10)
# set background color
if backgroundColor is not None:
_backgroundColor = Pixel(backgroundColor).array
# calculate sequence length or use argument
if sequenceLength is not None:
_sequenceLength = int(sequenceLength)
# create color sequence
self.backgroundColor = _backgroundColor
self.colorSequence = ConvertPixelArrayToNumpyArray(
[PixelColors.random() for i in range(_sequenceLength)]
)
except SystemExit:
raise
except KeyboardInterrupt:
raise
except Exception as ex:
LOGGER.exception(
"%s.%s Exception: %s",
self.__class__.__name__,
self.useColorSequenceRandom.__name__,
ex,
)
raise LightControlException(str(ex)).with_traceback(ex.__traceback__)
def useColorSequenceRepeating(
self,
colorSequence: List[Pixel] = None,
backgroundColor: Pixel = None,
) -> None:
"""Sets the color sequence used by light functions.
Repeats it across the entire light string. If the sequence will not
fill perfectly when repeated, the virtual LED string is extended until it fits.
Args:
colorSequence: list of colors in the pattern being shifted across the LED string
backgroundColor: the "off" color
Raises:
SystemExit: if exiting
KeyboardInterrupt: if user quits
LightControlException: if something bad happens
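Example (illustrative): with 100 real LEDs and a 3-color sequence, the
virtual LED array is extended to ceil(100 / 3) * 3 = 102 pixels so the
sequence repeats an even number of times.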
"""
try:
LOGGER.debug("\n%s.%s:", self.__class__.__name__, self.useColorSequenceRepeating.__name__)
_backgroundColor: NDArray[(3,), np.int32] = DEFAULT_BACKGROUND_COLOR.array
_colorSequence: NDArray[(Any, 3), np.int32] = DefaultColorSequence()
# use argument or default
if colorSequence is not None:
_colorSequence = [Pixel(p) for p in colorSequence]
# use argument or default
if backgroundColor is not None:
_backgroundColor = Pixel(backgroundColor).array
# calculate required virtual LED count to allow for even multiple of this sequence
_arrayLength: int = np.ceil(self.realLEDCount / len(_colorSequence)) * len(_colorSequence)
self.backgroundColor = _backgroundColor
# create color sequence
self.colorSequence = RepeatingColorSequenceArray(
arrayLength=_arrayLength, colorSequence=_colorSequence
)
except KeyboardInterrupt:
raise
except SystemExit:
raise
except Exception as ex:
LOGGER.exception(
"%s.%s Exception: %s",
self.__class__.__name__,
self.useColorSequenceRepeating.__name__,
ex,
)
raise LightControlException(str(ex)).with_traceback(ex.__traceback__)
def useColorTransition(
self,
colorSequence: List[Pixel] = None,
stepsPerTransition: int = None,
wrap: bool = None,
backgroundColor: Pixel = None,
) -> None:
"""Makes a smooth transition from one color to the next over the length specified.
Args:
colorSequence: list of colors to transition between
stepsPerTransition: how many pixels it takes to
transition from one color to the next
wrap: if true, the last color of the sequence will
transition to the first color as the final transition
backgroundColor: the "off" color
Raises:
SystemExit: if exiting
KeyboardInterrupt: if user quits
LightControlException: if something bad happens
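Example (illustrative): a 4-color sequence with stepsPerTransition=5
yields a gradient of 4 * 5 = 20 colors across the virtual LED array.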
"""
try:
LOGGER.debug("\n%s.%s:", self.__class__.__name__, self.useColorTransition.__name__)
_backgroundColor: NDArray[(3,), np.int32] = DEFAULT_BACKGROUND_COLOR.array
_colorSequence: NDArray[(Any, 3), np.int32] = DefaultColorSequence()
_stepsPerTransition: int = random.randint(3, 7)
_wrap: bool = self.getRandomBoolean()
# set color sequence
if colorSequence is not None:
_colorSequence = colorSequence
# set background color
if backgroundColor is not None:
_backgroundColor = Pixel(backgroundColor).array
if stepsPerTransition is not None:
_stepsPerTransition = int(stepsPerTransition)
if wrap is not None:
_wrap = bool(wrap)
self.backgroundColor = _backgroundColor
self.colorSequence = ColorTransitionArray(
arrayLength=len(_colorSequence) * int(_stepsPerTransition),
wrap=_wrap,
colorSequence=_colorSequence,
)
except SystemExit:
raise
except KeyboardInterrupt:
raise
except Exception as ex:
LOGGER.exception(
"%s.%s Exception: %s",
self.__class__.__name__,
self.useColorTransition.__name__,
ex,
)
raise LightControlException(str(ex)).with_traceback(ex.__traceback__)
def useColorTransitionRepeating(
self,
colorSequence: List[Pixel] = None,
stepsPerTransition: int = None,
wrap: bool = None,
backgroundColor: Pixel = None,
) -> None:
"""Makes a smooth transition from one color to the next over the length specified.
Repeats the sequence as necessary.
Args:
colorSequence: list of colors in the pattern being shifted across the LED string
stepsPerTransition: number of steps per transition
wrap: if true, the last color of the sequence transitions back to the first
backgroundColor: the "off" color
Raises:
SystemExit: if exiting
KeyboardInterrupt: if user quits
LightControlException: if something bad happens
"""
try:
LOGGER.debug("\n%s.%s:", self.__class__.__name__, self.useColorTransitionRepeating.__name__)
_backgroundColor: NDArray[(3,), np.int32] = DEFAULT_BACKGROUND_COLOR.array
_colorSequence: NDArray[(Any, 3), np.int32] = DefaultColorSequence()
_stepsPerTransition: int = random.randint(3, 7)
_wrap: bool = self.getRandomBoolean()
if colorSequence is not None:
_colorSequence = [Pixel(p) for p in colorSequence]
if stepsPerTransition is not None:
_stepsPerTransition = int(stepsPerTransition)
if wrap is not None:
_wrap = bool(wrap)
if backgroundColor is not None:
_backgroundColor = Pixel(backgroundColor).array
_tempColorSequence: NDArray[(Any, 3), np.int32] = ColorTransitionArray(
arrayLength=(len(_colorSequence) * _stepsPerTransition),
wrap=_wrap,
colorSequence=_colorSequence,
)
_arrayLength: int = np.ceil(self.realLEDCount / len(_tempColorSequence)) * len(_tempColorSequence)
self.backgroundColor = _backgroundColor
self.colorSequence = RepeatingColorSequenceArray(
arrayLength=_arrayLength, colorSequence=_tempColorSequence
)
except KeyboardInterrupt:
raise
except SystemExit:
raise
except Exception as ex:
LOGGER.exception(
"%s.%s Exception: %s",
self.__class__.__name__,
self.useColorTransitionRepeating.__name__,
ex,
)
raise LightControlException(str(ex)).with_traceback(ex.__traceback__)
def useColorRainbow(
self,
rainbowPixelCount: int = None,
backgroundColor: Pixel = None,
) -> None:
"""Cycle through the colors of the rainbow.
Args:
rainbowPixelCount: when creating the rainbow gradient, make the
transition through ROYGBIV take this many steps
backgroundColor: off color
Raises:
SystemExit: if exiting
KeyboardInterrupt: if user quits
LightControlException: if something bad happens
"""
try:
LOGGER.debug("\n%s.%s:", self.__class__.__name__, self.useColorRainbow.__name__)
_backgroundColor: NDArray[(3,), np.int32] = DEFAULT_BACKGROUND_COLOR.array
_rainbowPixelCount: int = random.randint(10, self.realLEDCount // 2)
if backgroundColor is not None:
_backgroundColor = Pixel(backgroundColor).array
if rainbowPixelCount is not None:
_rainbowPixelCount = int(rainbowPixelCount)
self.backgroundColor = _backgroundColor
self.colorSequence = np.array(RainbowArray(arrayLength=_rainbowPixelCount))
except SystemExit:
raise
except KeyboardInterrupt:
raise
except Exception as ex:
LOGGER.exception(
"%s.%s Exception: %s",
self.__class__.__name__,
self.useColorRainbow.__name__,
ex,
)
raise LightControlException(str(ex)).with_traceback(ex.__traceback__)
def useColorRainbowRepeating(
self,
rainbowPixelCount: int = None,
backgroundColor: Pixel = None,
) -> None:
"""Cycle through the colors of the rainbow repeatedly.
Args:
rainbowPixelCount: when creating the rainbow gradient, make the
transition through ROYGBIV take this many steps
backgroundColor: off color
Raises:
SystemExit: if exiting
KeyboardInterrupt: if user quits
LightControlException: if something bad happens
"""
try:
LOGGER.debug("\n%s.%s:", self.__class__.__name__, self.useColorRainbowRepeating.__name__)
_backgroundColor: NDArray[(3,), np.int32] = DEFAULT_BACKGROUND_COLOR.array
_rainbowPixelCount: int = random.randint(10, self.realLEDCount // 2)
if backgroundColor is not None:
_backgroundColor = Pixel(backgroundColor).array
if rainbowPixelCount is not None:
_rainbowPixelCount = int(rainbowPixelCount)
_arrayLength: int = np.ceil(self.realLEDCount / _rainbowPixelCount) * _rainbowPixelCount
self.backgroundColor = _backgroundColor
self.colorSequence = np.copy(
RepeatingRainbowArray(arrayLength=_arrayLength, segmentLength=_rainbowPixelCount)
)
except SystemExit:
raise
except KeyboardInterrupt:
raise
except Exception as ex:
LOGGER.exception(
"%s.%s Exception: %s",
self.__class__.__name__,
self.useColorRainbowRepeating.__name__,
ex,
)
raise LightControlException(str(ex)).with_traceback(ex.__traceback__)
def useFunctionNone(
self,
) -> None:
"""Use the "do nothing" function.
Raises:
SystemExit: if exiting
KeyboardInterrupt: if user quits
LightControlException: if something bad happens
"""
try:
LOGGER.debug("%s.%s:", self.__class__.__name__, self.useFunctionNone.__name__)
# create an object to put in the light data list so we dont just abort the run
nothing = LightFunction(LightFunction.functionNone, self.colorSequence)
self.privateLightFunctions.append(nothing)
except SystemExit:
raise
except KeyboardInterrupt:
raise
except Exception as ex:
LOGGER.exception(
"%s.%s Exception: %s",
self.__class__.__name__,
self.useFunctionNone.__name__,
ex,
)
raise LightControlException(str(ex)).with_traceback(ex.__traceback__)
def useFunctionSolidColorCycle(
self,
delayCount: int = None,
) -> None:
"""Set all LEDs to a single color at once, but cycle between entries in a list of colors.
Args:
delayCount: number of led updates between color updates
Raises:
SystemExit: if exiting
KeyboardInterrupt: if user quits
LightControlException: if something bad happens
"""
try:
LOGGER.debug("%s.%s:", self.__class__.__name__, self.useFunctionSolidColorCycle.__name__)
_delayCount: int = random.randint(50, 100)
if delayCount is not None:
_delayCount = int(delayCount)
# create the tracking object
cycle: LightFunction = LightFunction(LightFunction.functionSolidColorCycle, self.colorSequence)
# set refresh counter
cycle.delayCounter = _delayCount
# set refresh limit (after which this function will execute)
cycle.delayCountMax = _delayCount
# add this function to our function list
self.privateLightFunctions.append(cycle)
# clear LEDs, assign first color in sequence to all LEDs
self.virtualLEDArray *= 0
self.virtualLEDArray += self.colorSequence[0, :]
except SystemExit:
raise
except KeyboardInterrupt:
raise
except Exception as ex:
LOGGER.exception(
"%s.%s Exception: %s",
self.__class__.__name__,
self.useFunctionSolidColorCycle.__name__,
ex,
)
raise LightControlException(str(ex)).with_traceback(ex.__traceback__)
def useFunctionMarquee(
self,
shiftAmount: int = None,
delayCount: int = None,
initialDirection: int = None,
) -> None:
"""Shifts a color pattern across the LED string marquee style.
Args:
shiftAmount: the number of pixels the marquee shifts on each update
delayCount: number of refreshes to delay for each cycle
initialDirection: a positive or negative value for marquee start direction
Raises:
SystemExit: if exiting
KeyboardInterrupt: if user quits
LightControlException: if something bad happens
"""
try:
LOGGER.debug("%s.%s:", self.__class__.__name__, self.useFunctionMarquee.__name__)
_shiftAmount: int = random.randint(1, 2)
_delayCount: int = random.randint(0, 6)
_initialDirection: int = self.getRandomDirection()
if shiftAmount is not None:
_shiftAmount = int(shiftAmount)
if delayCount is not None:
_delayCount = int(delayCount)
if initialDirection is not None:
_initialDirection: int = 1 if (initialDirection >= 1) else -1
# turn off all LEDs every time so we can turn on new ones
off: LightFunction = LightFunction(LightFunction.functionOff, self.colorSequence)
# add this function to list
self.privateLightFunctions.append(off)
# create tracking object
marquee: LightFunction = LightFunction(LightFunction.functionMarquee, self.colorSequence)
# store the size of the color sequence being shifted back and forth
marquee.size = self.colorSequenceCount
# assign starting direction
marquee.direction = _initialDirection
# this is how much the LEDs will move by each time
marquee.step = _shiftAmount
# this is how many LED updates will be ignored before doing another LED shift
marquee.delayCountMax = _delayCount
# add this function to list
self.privateLightFunctions.append(marquee)
# this function just shifts the existing virtual LED buffer,
# so make sure the virtual LED buffer is initialized here
if self.colorSequenceCount >= self.virtualLEDCount - 10:
array = LightPatterns.SolidColorArray(
arrayLength=self.colorSequenceCount + 10, color=PixelColors.OFF
)
array[: self.colorSequenceCount] = self.colorSequence
self.setVirtualLEDArray(array)
else:
self.setVirtualLEDArray(self.colorSequence)
except KeyboardInterrupt:
raise
except SystemExit:
raise
except Exception as ex:
LOGGER.exception(
"%s.%s Exception: %s",
self.__class__.__name__,
self.useFunctionMarquee.__name__,
ex,
)
raise LightControlException(str(ex)).with_traceback(ex.__traceback__)
def useFunctionCylon(
self,
fadeAmount: int = None,
delayCount: int = None,
) -> None:
"""Shift a pixel across the LED string marquee style and then bounce back leaving a comet tail.
Args:
fadeAmount: how much each pixel fades per refresh
smaller numbers = larger tails on the cylon eye fade
delayCount: number of refreshes to skip between updates
Raises:
SystemExit: if exiting
KeyboardInterrupt: if user quits
LightControlException: if something bad happens
"""
try:
LOGGER.debug("%s.%s:", self.__class__.__name__, self.useFunctionCylon.__name__)
_fadeAmount: float = random.randint(5, 75) / 255.0
_delayCount: int = random.randint(1, 6)
if fadeAmount is not None:
_fadeAmount = float(fadeAmount)
# normalize fade: values in (0, 1) pass through, values in [1, 256) are treated as 8-bit and scaled down, anything else defaults to 0.1
if _fadeAmount > 0 and _fadeAmount < 1:
# do nothing
pass
elif _fadeAmount > 0 and _fadeAmount < 256:
_fadeAmount /= 255
if _fadeAmount < 0 or _fadeAmount > 1:
_fadeAmount = 0.1
if delayCount is not None:
_delayCount = int(delayCount)
# fade the whole LED strand
fade: LightFunction = LightFunction(LightFunction.functionFadeOff, self.colorSequence)
# by this amount
fade.fadeAmount = _fadeAmount
# add function to list
self.privateLightFunctions.append(fade)
# use cylon function
cylon: LightFunction = LightFunction(LightFunction.functionCylon, self.colorSequence)
# shift eye by this much for each update
cylon.size = self.colorSequenceCount
# adjust virtual LED buffer if necessary so that the cylon can actually move
if self.virtualLEDCount < cylon.size:
array = LightPatterns.SolidColorArray(arrayLength=cylon.size + 3, color=PixelColors.OFF)
array[: self.virtualLEDCount] = self.virtualLEDArray
self.setVirtualLEDArray(array)
# set start and next indices
cylon.index = self.virtualLEDCount - cylon.size - 3
cylon.indexNext = cylon.index
# set delay
cylon.delayCounter = _delayCount
cylon.delayCountMax = _delayCount
# add function to function list
self.privateLightFunctions.append(cylon)
except SystemExit:
raise
except KeyboardInterrupt:
raise
except Exception as ex:
LOGGER.exception(
"%s.%s Exception: %s",
self.__class__.__name__,
self.useFunctionCylon.__name__,
ex,
)
raise LightControlException(str(ex)).with_traceback(ex.__traceback__)
def useFunctionMerge(
self,
shiftAmount: int = None,
delayCount: int = None,
) -> None:
"""Reflect a color sequence and shift the reflections toward each other in the middle.
Args:
shiftAmount: amount the merge will shift in each update
delayCount: number of LED refreshes to skip between shifts
Raises:
SystemExit: if exiting
KeyboardInterrupt: if user quits
LightControlException: if something bad happens
"""
try:
LOGGER.debug("%s.%s:", self.__class__.__name__, self.useFunctionMerge.__name__)
_delayCount: int = random.randint(6, 12)
_shiftAmount: int = 1
if delayCount is not None:
_delayCount = int(delayCount)
if shiftAmount is not None:
_shiftAmount = int(shiftAmount)
# make sure doing a merge function would be visible
if self.colorSequenceCount >= self.realLEDCount:
# if the sequence is too long, cut it in half
self.colorSequence = self.colorSequence[: int(self.colorSequenceCount // 2)]
# ensure the color sequence has an even length for the reflection math below
if self.colorSequenceCount % 2 == 1:
if self.colorSequenceCount == 1:
self.colorSequence = np.concatenate((self.colorSequence, self.colorSequence))
else:
self.colorSequence = self.colorSequence[:-1]
# calculate modulo length
_arrayLength = np.ceil(self.realLEDCount / self.colorSequenceCount) * self.colorSequenceCount
# update LED buffer with any changes we had to make
self.setVirtualLEDArray(
ReflectArray(
arrayLength=_arrayLength,
colorSequence=self.colorSequence,
foldLength=self.colorSequenceCount,
)
)
# create tracking object
merge: LightFunction = LightFunction(LightFunction.functionMerge, self.colorSequence)
# set merge size
merge.size = self.colorSequenceCount
# set shift amount
merge.step = _shiftAmount
# set the number of LED refreshes to skip
merge.delayCountMax = _delayCount
# add function to list
self.privateLightFunctions.append(merge)
except KeyboardInterrupt:
raise
except SystemExit:
raise
except Exception as ex:
LOGGER.exception(
"%s.%s Exception: %s",
self.__class__.__name__,
self.useFunctionMerge.__name__,
ex,
)
raise LightControlException(str(ex)).with_traceback(ex.__traceback__)
def useFunctionAccelerate(
self,
delayCountMax: int = None,
stepCountMax: int = None,
fadeAmount: float = None,
cycleColors: bool = None,
) -> None:
"""Shifts a color pattern across the LED string accelerating as it goes.
Args:
delayCountMax: max delay between color updates
stepCountMax: speed limit
fadeAmount: speed of color fade
cycleColors: set true to cycle as the LED goes across
Raises:
SystemExit: if exiting
KeyboardInterrupt: if user quits
LightControlException: if something bad happens
"""
try:
LOGGER.debug("%s.%s:", self.__class__.__name__, self.useFunctionAccelerate.__name__)
_delayCountMax: int = random.randint(5, 10)
_stepCountMax: int = random.randint(4, 10)
_fadeAmount: float = random.randint(15, 35) / 255.0
_cycleColors: bool = self.getRandomBoolean()
if delayCountMax is not None:
_delayCountMax = int(delayCountMax)
if stepCountMax is not None:
_stepCountMax = int(stepCountMax)
if fadeAmount is not None:
_fadeAmount = float(fadeAmount)
# make sure fade amount is valid
if _fadeAmount > 0 and _fadeAmount < 1:
# do nothing
pass
elif _fadeAmount > 0 and _fadeAmount < 256:
_fadeAmount /= 255
if _fadeAmount < 0 or _fadeAmount > 1:
_fadeAmount = 0.1
if cycleColors is not None:
_cycleColors = bool(cycleColors)
# we want comet trails, so fade the buffer each time through
fade: LightFunction = LightFunction(LightFunction.functionFadeOff, self.colorSequence)
fade.fadeAmount = _fadeAmount
self.privateLightFunctions.append(fade)
# create tracking object
accelerate: LightFunction = LightFunction(LightFunction.functionAccelerate, self.colorSequence)
# this determines the maximum that the LED can jump in a single step as it speeds up
accelerate.stepCountMax = _stepCountMax
# set the number of updates to skip
accelerate.delayCountMax = _delayCountMax
# this determines the number of times the LED will speed up
accelerate.stateMax = accelerate.delayCountMax
# set color cycle setting
accelerate.colorCycle = _cycleColors
# randomize direction
accelerate.direction = self.getRandomDirection()
# randomize start index
accelerate.index = self.getRandomIndex()
# add to list
self.privateLightFunctions.append(accelerate)
except KeyboardInterrupt:
raise
except SystemExit:
raise
except Exception as ex:
LOGGER.exception(
"%s.%s Exception: %s",
self.__class__.__name__,
self.useFunctionAccelerate.__name__,
ex,
)
raise LightControlException(str(ex)).with_traceback(ex.__traceback__)
def useFunctionRandomChange(
self,
delayCount: int = None,
changeCount: int = None,
fadeStepCount: int = None,
fadeType: LEDFadeType = None,
) -> None:
"""Randomly changes pixels from one color to the next.
Args:
delayCount: refresh delay
changeCount: how many LEDs to have in the change queue at once
fadeStepCount: number of steps in the transition from one color to the next
fadeType: set to fade colors, or instant on/off
Raises:
SystemExit: if exiting
KeyboardInterrupt: if user quits
LightControlException: if something bad happens
"""
try:
LOGGER.debug("%s.%s:", self.__class__.__name__, self.useFunctionRandomChange.__name__)
_changeCount: int = random.randint(self.virtualLEDCount // 5, self.virtualLEDCount)
_fadeStepCount: int = random.randint(5, 20)
_delayCountMax: int = random.randint(30, 50)
fadeTypes: List[LEDFadeType] = list(LEDFadeType)
_fadeType: LEDFadeType = fadeTypes[random.randint(0, len(fadeTypes) - 1)]
if changeCount is not None:
_changeCount = int(changeCount)
if fadeStepCount is not None:
_fadeStepCount = int(fadeStepCount)
_fadeAmount: float = _fadeStepCount / 255.0
# make sure fade amount is valid
if _fadeAmount > 0 and _fadeAmount < 1:
# do nothing
pass
elif _fadeAmount > 0 and _fadeAmount < 256:
_fadeAmount /= 255
if _fadeAmount < 0 or _fadeAmount > 1:
_fadeAmount = 0.1
if delayCount is not None:
_delayCountMax = int(delayCount)
if fadeType is not None:
_fadeType = LEDFadeType(fadeType)
# make comet trails
if _fadeType == LEDFadeType.FADE_OFF:
fade: LightFunction = LightFunction(LightFunction.functionFadeOff, self.colorSequence)
fade.fadeAmount = _fadeAmount
self.privateLightFunctions.append(fade)
elif _fadeType == LEDFadeType.INSTANT_OFF:
off: LightFunction = LightFunction(LightFunction.functionOff, self.colorSequence)
self.privateLightFunctions.append(off)
else:
# do nothing
pass
# create a bunch of tracking objects
for index in self.getRandomIndices(int(_changeCount)):
if index < self.virtualLEDCount:
change: LightFunction = LightFunction(
LightFunction.functionRandomChange, self.colorSequence
)
# set the index from our random number
change.index = int(index)
# set the fade to off amount
change.fadeAmount = _fadeAmount
# set the color fade
change.colorFade = _fadeStepCount
# this is used to help calculate fade duration in the function
change.stepCountMax = _fadeStepCount
# copy the current color of this LED index
change.color = np.copy(self.virtualLEDArray[change.index])
# randomly set the color we are fading toward
if random.randint(0, 1) == 1:
change.colorNext = self.colorSequenceNext
else:
change.colorNext = change.color
# set the refresh delay
change.delayCountMax = _delayCountMax
# we want all the delays random, so dont start them all at zero
change.delayCounter = random.randint(0, change.delayCountMax)
# set true to fade, false to "instant on/off"
change.fadeType = _fadeType
# add function to list
self.privateLightFunctions.append(change)
except SystemExit:
raise
except KeyboardInterrupt:
raise
except Exception as ex:
LOGGER.exception(
"%s.%s Exception: %s",
self.__class__.__name__,
self.useFunctionRandomChange.__name__,
ex,
)
raise LightControlException(str(ex)).with_traceback(ex.__traceback__)
def useFunctionMeteors(
self,
fadeAmount: int = None,
maxSpeed: int = None,
explode: bool = True,
meteorCount: int = None,
collide: bool = None,
cycleColors: bool = None,
delayCount: int = None,
fadeType: LEDFadeType = None,
) -> None:
"""Creates several 'meteors' that will fly around.
Args:
fadeAmount: the amount by which meteors are faded
maxSpeed: the amount by which the meteor moves each refresh
explode: if True, the meteors will light up in an explosion when they collide
meteorCount: number of meteors
collide: set true to make them bounce off each other randomly
cycleColors: set true to make the meteors shift color as they move
delayCount: refresh delay
fadeType: set the type of fade to use using the enumeration
Raises:
SystemExit: if exiting
KeyboardInterrupt: if user quits
LightControlException: if something bad happens
"""
try:
LOGGER.debug("%s.%s:", self.__class__.__name__, self.useFunctionMeteors.__name__)
_fadeAmount: float = random.randint(20, 40) / 100.0
_explode: bool = self.getRandomBoolean()
_maxSpeed: int = random.randint(1, 3)
_delayCount: int = random.randint(1, 3)
_meteorCount: int = random.randint(2, 6)
_collide: bool = self.getRandomBoolean()
_cycleColors: bool = self.getRandomBoolean()
fadeTypes: List[LEDFadeType] = list(LEDFadeType)
_fadeType: LEDFadeType = fadeTypes[random.randint(0, len(fadeTypes) - 1)]
if self.colorSequenceCount >= 2 and self.colorSequenceCount <= 6:
_meteorCount = self.colorSequenceCount
if fadeAmount is not None:
_fadeAmount = float(fadeAmount)
# make sure fade amount is valid
if _fadeAmount > 0 and _fadeAmount < 1:
pass
elif _fadeAmount > 0 and _fadeAmount < 256:
_fadeAmount /= 255
if _fadeAmount < 0 or _fadeAmount > 1:
_fadeAmount = 0.1
if explode is not None:
_explode = bool(explode)
if maxSpeed is not None:
_maxSpeed = int(maxSpeed)
if delayCount is not None:
_delayCount = int(delayCount)
if meteorCount is not None:
_meteorCount = int(meteorCount)
if collide is not None:
_collide = bool(collide)
if cycleColors is not None:
_cycleColors = bool(cycleColors)
if fadeType is not None:
_fadeType = LEDFadeType(fadeType)
# make comet trails
if _fadeType == LEDFadeType.FADE_OFF:
fade: LightFunction = LightFunction(LightFunction.functionFadeOff, self.colorSequence)
fade.fadeAmount = _fadeAmount
self.privateLightFunctions.append(fade)
elif _fadeType == LEDFadeType.INSTANT_OFF:
off: LightFunction = LightFunction(LightFunction.functionOff, self.colorSequence)
self.privateLightFunctions.append(off)
else:
# do nothing
pass
for _ in range(_meteorCount):
meteor: LightFunction = LightFunction(LightFunction.functionMeteors, self.colorSequence)
# assign meteor color
meteor.color = self.colorSequenceNext
# initialize "previous" index, for math's sake later
meteor.indexPrevious = random.randint(0, self.virtualLEDCount - 1)
# set the maximum number of LEDs it can move in one step
meteor.stepSizeMax = _maxSpeed
# randomize the current step size within that limit
meteor.step = random.randint(1, max(2, meteor.stepSizeMax))
# randomly initialize the direction
meteor.direction = self.getRandomDirection()
# set the refresh delay
meteor.delayCountMax = _delayCount
# randomly assign starting index
meteor.index = (meteor.index + (meteor.step * meteor.direction)) % self.virtualLEDCount
# set boolean to cycle each meteor through the color sequence as it moves
meteor.colorCycle = _cycleColors
# assign the color sequence
meteor.colorSequence = np.copy(self.colorSequence)
# add function to list
self.privateLightFunctions.append(meteor)
# make sure there are at least two going to collide
if self.privateLightFunctions[0].direction * self.privateLightFunctions[1].direction > 0:
self.privateLightFunctions[1].direction *= -1
# this object calculates collisions between other objects based on index and previous/next index
if _collide is True:
collision = LightFunction(LightFunction.functionCollisionDetection, self.colorSequence)
collision.explode = _explode
self.privateLightFunctions.append(collision)
except SystemExit:
raise
except KeyboardInterrupt:
raise
except Exception as ex:
LOGGER.exception(
"%s.%s Exception: %s",
self.__class__.__name__,
self.useFunctionMeteors.__name__,
ex,
)
raise LightControlException(str(ex)).with_traceback(ex.__traceback__)
def useFunctionSprites(
self,
fadeSteps: int = None,
) -> None:
"""Meteors fade in and out in short bursts of random length and direction.
Args:
fadeSteps: number of steps used to fade each sprite in and out
Raises:
SystemExit: if exiting
KeyboardInterrupt: if user quits
LightControlException: if something bad happens
"""
try:
LOGGER.debug("%s.%s:", self.__class__.__name__, self.useFunctionSprites.__name__)
_fadeSteps: int = random.randint(1, 6)
if fadeSteps is not None:
_fadeSteps = int(fadeSteps)
_fadeAmount = np.ceil(255 / _fadeSteps)
# make sure fade amount is valid
if _fadeAmount > 0 and _fadeAmount < 1:
# do nothing
pass
elif _fadeAmount > 0 and _fadeAmount < 256:
_fadeAmount /= 255
if _fadeAmount < 0 or _fadeAmount > 1:
_fadeAmount = 0.1
for _ in range(max(min(self.colorSequenceCount, 10), 2)):
sprite: LightFunction = LightFunction(LightFunction.functionSprites, self.colorSequence)
# randomize index
sprite.index = random.randint(0, self.virtualLEDCount - 1)
# initialize previous index
sprite.indexPrevious = sprite.index
# randomize direction
sprite.direction = self.getRandomDirection()
# assign the target color
sprite.colorGoal = self.colorSequenceNext
# initialize sprite to the background color
sprite.color = DEFAULT_BACKGROUND_COLOR.array
# copy color sequence
sprite.colorSequence = self.colorSequence
# set next color
sprite.colorNext = PixelColors.OFF.array
# set fade step/amount
sprite.fadeSteps = _fadeSteps
sprite.fadeAmount = _fadeAmount
sprite.state = SpriteState.OFF.value
self.privateLightFunctions.append(sprite)
# set one sprite to "fading on"
self.privateLightFunctions[0].state = SpriteState.FADING_ON.value
# add LED fading for comet trails
fade = LightFunction(LightFunction.functionFadeOff, self.colorSequence)
fade.fadeAmount = _fadeAmount
self.privateLightFunctions.append(fade)
except SystemExit:
raise
except KeyboardInterrupt:
raise
except Exception as ex:
LOGGER.exception(
"%s.%s Exception: %s",
self.__class__.__name__,
self.useFunctionSprites.__name__,
ex,
)
raise LightControlException(str(ex)).with_traceback(ex.__traceback__)
def useFunctionRaindrops(
self,
maxSize: int = None,
raindropChance: float = None,
stepSize: int = None,
maxRaindrops: int = None,
fadeAmount: float = None,
):
"""Cause random "splashes" across the LED strand.
Args:
maxSize: max splash size
raindropChance: chance of raindrop
stepSize: splash speed
maxRaindrops: number of raindrops
fadeAmount: amount to fade LED each refresh
Raises:
SystemExit: if exiting
KeyboardInterrupt: if user quits
LightControlException: if something bad happens
"""
try:
LOGGER.debug("%s.%s:", self.__class__.__name__, self.useFunctionRaindrops.__name__)
_maxSize: int = random.randint(2, int(self.virtualLEDCount // 8))
_raindropChance: float = random.uniform(0.005, 0.1)
_stepSize: int = random.randint(2, 5)
_fadeAmount: float = random.uniform(0.25, 0.65)
_maxRaindrops: int = max(min(self.colorSequenceCount, 10), 2)
if maxSize is not None:
_maxSize = int(maxSize)
_fadeAmount = ((255 / _maxSize) / 255) * 2
if raindropChance is not None:
_raindropChance = float(raindropChance)
if stepSize is not None:
_stepSize = int(stepSize)
if _stepSize > 3:
_raindropChance /= 3.0
if fadeAmount is not None:
_fadeAmount = float(fadeAmount)
# make sure fade amount is valid
if _fadeAmount > 0 and _fadeAmount < 1:
# do nothing
pass
elif _fadeAmount > 0 and _fadeAmount < 256:
_fadeAmount /= 255
if _fadeAmount < 0 or _fadeAmount > 1:
_fadeAmount = 0.1
if maxRaindrops is not None:
_maxRaindrops = int(maxRaindrops)
for _ in range(_maxRaindrops):
raindrop: LightFunction = LightFunction(LightFunction.functionRaindrops, self.colorSequence)
# randomize start index
raindrop.index = random.randint(0, self.virtualLEDCount - 1)
# assign raindrop growth speed
raindrop.step = _stepSize
# max raindrop "splash"
raindrop.sizeMax = _maxSize
# randomize the number of growth steps for this splash
raindrop.stepCountMax = random.randint(2, raindrop.sizeMax)
# chance of raindrop
raindrop.activeChance = _raindropChance
# assign color
raindrop.color = self.colorSequenceNext
raindrop.colorSequence = self.colorSequence
raindrop.fadeAmount = _fadeAmount
# set raindrop to be inactive initially
raindrop.state = RaindropStates.OFF.value
self.privateLightFunctions.append(raindrop)
# set first raindrop active
self.privateLightFunctions[0].state = RaindropStates.SPLASH.value
# add fading
fade: LightFunction = LightFunction(LightFunction.functionFadeOff, self.colorSequence)
fade.fadeAmount = _fadeAmount
self.privateLightFunctions.append(fade)
except SystemExit:
raise
except KeyboardInterrupt:
raise
except Exception as ex:
LOGGER.exception(
"%s.%s Exception: %s",
self.__class__.__name__,
self.useFunctionRaindrops.__name__,
ex,
)
raise LightControlException(str(ex)).with_traceback(ex.__traceback__)
def useFunctionAlive(
self,
fadeAmount: float = None,
sizeMax: int = None,
stepCountMax: int = None,
stepSizeMax: int = None,
) -> None:
"""Use the function that uses a series of behaviors that move around in odd ways.
Args:
fadeAmount: amount of fade
sizeMax: max size of LED pattern
stepCountMax: max duration of effect
stepSizeMax: max speed
Raises:
SystemExit: if exiting
KeyboardInterrupt: if user quits
LightControlException: if something bad happens
"""
try:
LOGGER.debug("%s.%s:", self.__class__.__name__, self.useFunctionAlive.__name__)
_fadeAmount: float = random.uniform(0.20, 0.75)
_sizeMax: int = random.randint(self.virtualLEDCount // 6, self.virtualLEDCount // 3)
_stepCountMax: int = random.randint(self.virtualLEDCount // 10, self.virtualLEDCount)
_stepSizeMax: int = random.randint(6, 10)
if fadeAmount is not None:
_fadeAmount = float(fadeAmount)
# make sure fade amount is valid
if _fadeAmount > 0 and _fadeAmount < 1:
# do nothing
pass
elif _fadeAmount > 0 and _fadeAmount < 256:
_fadeAmount /= 255
if _fadeAmount < 0 or _fadeAmount > 1:
_fadeAmount = 0.1
if sizeMax is not None:
_sizeMax = int(sizeMax)
if stepCountMax is not None:
_stepCountMax = int(stepCountMax)
if stepSizeMax is not None:
_stepSizeMax = int(stepSizeMax)
for _ in range(random.randint(2, 5)):
thing: LightFunction = LightFunction(LightFunction.functionAlive, self.colorSequence)
# randomize start index
thing.index = self.getRandomIndex()
# randomize direction
thing.direction = self.getRandomDirection()
# copy color sequence
thing.colorSequence = self.colorSequence
# assign color
thing.color = thing.colorSequenceNext
# set max step count before possible state change
thing.stepCountMax = _stepCountMax
# set max step size in normal condition
thing.stepSizeMax = _stepSizeMax
# randomize speed
thing.step = random.randint(1, thing.stepSizeMax)
# set refresh speed
thing.delayCountMax = random.randint(6, 15)
# set initial size
thing.size = random.randint(1, int(_sizeMax // 2))
# set max size
thing.sizeMax = _sizeMax
# start the state at 1
thing.state = ThingMoves.METEOR.value
# calculate random next state immediately
thing.stepCounter = 1000
thing.delayCounter = 1000
self.privateLightFunctions.append(thing)
self.privateLightFunctions[0].active = True
# add a fade
fade = LightFunction(LightFunction.functionFadeOff, self.colorSequence)
fade.fadeAmount = _fadeAmount
self.privateLightFunctions.append(fade)
except SystemExit:
raise
except KeyboardInterrupt:
raise
except Exception as ex:
LOGGER.exception(
"%s.%s Exception: %s",
self.__class__.__name__,
self.useFunctionAlive.__name__,
ex,
)
raise LightControlException(str(ex)).with_traceback(ex.__traceback__)
def useOverlayTwinkle(
self,
twinkleChance: float = None,
colorSequence: NDArray[(Any, 3), np.int32] = None,
) -> None:
"""Randomly sets some lights to 'twinkleColor' temporarily.
Args:
twinkleChance: chance of a twinkle
colorSequence: the list of colors to be used when briefly flashing an LED
Raises:
SystemExit: if exiting
KeyboardInterrupt: if user quits
LightControlException: if something bad happens
"""
try:
LOGGER.debug("%s.%s:", self.__class__.__name__, self.useOverlayTwinkle.__name__)
_twinkleChance: float = random.uniform(0.991, 0.995)
_colorSequence = self.colorSequence.copy()
if twinkleChance is not None:
_twinkleChance = float(twinkleChance)
if colorSequence is not None:
_colorSequence = colorSequence
twinkle: LightFunction = LightFunction(LightFunction.overlayTwinkle, _colorSequence)
twinkle.random = _twinkleChance
self.privateLightFunctions.append(twinkle)
except SystemExit:
raise
except KeyboardInterrupt:
raise
except Exception as ex:
LOGGER.exception(
"%s.%s Exception: %s",
self.__class__.__name__,
self.useOverlayTwinkle.__name__,
ex,
)
raise LightControlException(str(ex)).with_traceback(ex.__traceback__)
def useOverlayBlink(
self,
blinkChance: float = None,
) -> None:
"""Use the overlay that causes all LEDs to light up the same color at once.
Args:
blinkChance: chance of a blink
Raises:
SystemExit: if exiting
KeyboardInterrupt: if user quits
LightControlException: if something bad happens
"""
try:
LOGGER.debug("%s.%s:", self.__class__.__name__, self.useOverlayBlink.__name__)
_blinkChance: float = random.uniform(0.991, 0.995)
if blinkChance is not None:
_blinkChance = float(blinkChance)
blink: LightFunction = LightFunction(LightFunction.overlayBlink, self.colorSequence)
blink.random = _blinkChance
blink.colorSequence = self.colorSequence
self.privateLightFunctions.append(blink)
except SystemExit:
raise
except KeyboardInterrupt:
raise
except Exception as ex:
LOGGER.exception(
"%s.%s Exception: %s",
self.__class__.__name__,
self.useOverlayBlink.__name__,
ex,
)
raise LightControlException(str(ex)).with_traceback(ex.__traceback__)
def demo(
self,
secondsPerMode: float = 0.5,
functionNames: List[str] = None,
colorNames: List[str] = None,
skipFunctions: List[str] = None,
skipColors: List[str] = None,
):
"""Run colors and functions semi-randomly.
Args:
secondsPerMode: seconds to run current function
functionNames: function names to run
colorNames: color pattern names to run
skipFunctions: function name substrings to omit (a function runs only if no substring matches)
skipColors: color pattern name substrings to omit (a pattern runs only if no substring matches)
Raises:
SystemExit: if exiting
KeyboardInterrupt: if user quits
LightControlException: if something bad happens
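Example (illustrative sketch; assumes a configured instance named lights):
lights.demo(secondsPerMode=20, colorNames=["rainbow"], skipFunctions=["none"])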
"""
try:
_secondsPerMode: float = 60
if secondsPerMode is not None:
_secondsPerMode = float(secondsPerMode)
self.secondsPerMode = _secondsPerMode
if functionNames is None:
functionNames = []
if colorNames is None:
colorNames = []
if skipFunctions is None:
skipFunctions = []
if skipColors is None:
skipColors = []
functions = self.getFunctionMethodsList()
colors = self.getColorMethodsList()
# get methods that match user's string
if len(functionNames) > 0:
matches = []
for name in functionNames:
matches.extend([f for f in functions if name.lower() in f.lower()])
functions = matches
# get methods that match user's string
if len(colorNames) > 0:
matches = []
for name in colorNames:
matches.extend([f for f in colors if name.lower() in f.lower()])
colors = matches
# remove functions that the user requested to skip
# (filter into a new list instead of removing while iterating)
if len(skipFunctions) > 0:
functions = [f for f in functions if not any(name.lower() in f.lower() for name in skipFunctions)]
# remove color patterns that the user requested to skip
if len(skipColors) > 0:
colors = [c for c in colors if not any(name.lower() in c.lower() for name in skipColors)]
if len(functions) == 0:
raise LightControlException("No functions selected in demo")
elif len(colors) == 0:
raise LightControlException("No colors selected in demo")
else:
while True:
try:
# make a temporary copy (so we can go through each one)
functionsCopy = functions.copy()
colorsCopy = colors.copy()
# loop while we still have a color and a function
while (len(functionsCopy) * len(colorsCopy)) > 0:
# get a new function if there is one
if len(functionsCopy) > 0:
function = functionsCopy[random.randint(0, len(functionsCopy) - 1)]
functionsCopy.remove(function)
# get a new color pattern if there is one
if len(colorsCopy) > 0:
color = colorsCopy[random.randint(0, len(colorsCopy) - 1)]
colorsCopy.remove(color)
# reset
self.reset()
# apply color
getattr(self, color)()
# configure function
getattr(self, function)()
# run the combination
self.run()
except SystemExit:
raise
except KeyboardInterrupt:
raise
except Exception as ex:
LOGGER.exception(
"%s.%s Exception: %s",
self.__class__.__name__,
self.demo.__name__,
ex,
)
except SystemExit:
raise
except KeyboardInterrupt:
raise
except Exception as ex:
LOGGER.exception(
"%s.%s Exception: %s",
self.__class__.__name__,
self.demo.__name__,
ex,
)
raise LightControlException(str(ex)).with_traceback(ex.__traceback__)
``` |
{
"source": "joefernandez/irrduino",
"score": 2
} |
#### File: irrduinoserver/handlers/loghandler.py
```python
from google.appengine.ext import webapp
from irrduinoserver import model
from irrduinoserver.utils import web as webutils
from irrduinoserver.utils import irrduino as irrduinoutils
from irrduinoserver.utils import ui as uiutils
class LogHandler(webapp.RequestHandler):
def get(self):
"""Give the user information about the zone runs.
This also supports ?format=JSON.
"""
template_params = {}
template_params["tabs"] = uiutils.generate_tabs("log")
template_params["zone_runs"] = model.get_recent_zone_runs(
num_zone_runs_to_show=16)
for zone_run in template_params["zone_runs"]:
zone_run.localized_date = uiutils.localize_date(zone_run.created_at)
if webutils.is_format_json(self):
template_params["zone_runs"] = map(
webutils.entity_to_dict, template_params["zone_runs"])
webutils.render_json_to_response(self, template_params)
else:
webutils.render_to_response(self, "log.html", template_params)
def post(self):
"""Accept data from IrrduinoController.
Store it in the datastore and just respond "OK".
"""
try:
zone = int(self.request.get("zone"))
if zone not in irrduinoutils.ZONES:
raise ValueError("Invalid zone: %s" % zone)
runtime = int(self.request.get("runtime"))
if runtime <= 0:
raise ValueError("runtime out of range: %s" % runtime)
except (ValueError, TypeError) as e:
webutils.error_response(self, msg="Invalid request: %r" % e)
else:
zone_run = model.ZoneRun(zone=zone, runtime_seconds=runtime)
zone_run.put()
self.response.out.write("OK")
``` |
{
"source": "Joeffison/coding_challenges",
"score": 4
} |
#### File: lessons/q002/odd_occurrences_in_array_v001.py
```python
def solution(array):
"""
Returns the only number occurring k times, given k is an odd number.
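Example (illustrative):
>>> solution([9, 3, 9, 3, 9, 7, 9])
7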
"""
# x XOR x = 0
# x XOR y = z, with bit 1 in the positions where only x or y has 1
# x XOR 0 = x
response = 0
for i in array:
# Intermediate values of response might not be in array,
# because of conflicting bits 1 in two neighbor elements,
# but it won't be a problem
# because at least one of the two elements is repeated in array
# (by the end, only the bits in the odd occurring element will be left)
response ^= i
return response
```
#### File: lessons/q004/tape_equilibrium_test.py
```python
import random
import unittest
import numpy as np
from challenges.codility.lessons.q004.tape_equilibrium_v001 import *
MIN_ELEMENT = -1000
MAX_ELEMENT = 1000
def get_random_list(n, min=MIN_ELEMENT, max=MAX_ELEMENT):
return list(np.random.random_integers(min, max, (1, n))[0])
def get_random_positive_list(n):
return get_random_list(n, min=1)
def get_random_negative_list(n):
return get_random_list(n, max=-1)
def brute_solution(array):
return min([abs(sum(array[:i]) - sum(array[i:])) for i in range(1, len(array))])
class TapeEquilibriumTestCase(unittest.TestCase):
# Correctness
def test_description_examples(self):
self.assertEqual(solution([3, 1, 2, 4, 3]), 1)
def test_double(self):
array = get_random_list(2)
self.assertEqual(solution(array), brute_solution(array))
def test_simple_positive(self):
array = get_random_positive_list(5)
self.assertEqual(solution(array), brute_solution(array))
def test_simple_negative(self):
array = get_random_negative_list(5)
self.assertEqual(solution(array), brute_solution(array))
def test_small_random(self):
array = get_random_negative_list(100)
self.assertEqual(solution(array), brute_solution(array))
def test_small_range(self):
array = get_random_negative_list(1000)
self.assertEqual(solution(array), brute_solution(array))
def test_small(self):
array = get_random_list(1000, -50, 50)
self.assertEqual(solution(array), brute_solution(array))
# Performance
def test_medium_random1(self):
array = get_random_list(10000, 0, 100)
self.assertEqual(solution(array), brute_solution(array))
def test_medium_random2(self):
array = get_random_list(10000, -1000, 50)
self.assertEqual(solution(array), brute_solution(array))
def test_large_ones(self):
array = get_random_list(100000, -1, 1)
self.assertEqual(solution(array), brute_solution(array))
def test_large_random(self):
array = get_random_list(100000)
self.assertEqual(solution(array), brute_solution(array))
def test_large_sequence(self):
array = list(range(100000))
self.assertEqual(solution(array), brute_solution(array))
def test_large_extreme(self):
array = get_random_list(100000-2) + [MIN_ELEMENT, MAX_ELEMENT]
random.shuffle(array)
self.assertEqual(solution(array), brute_solution(array))
if __name__ == '__main__':
unittest.main()
```
#### File: lessons/q005/frog_jump_v001.py
```python
import math
def solution(x, y, jump_size):
"""
Returns the minimum number of times to add jump_size to x
in order to reach y.
Solution in O(1) for "Frog Jump".
"""
return math.ceil((y - x) / jump_size)
```
#### File: lessons/q007/permutation_check_test.py
```python
import random
import unittest
from challenges.codility.lessons.q007.permutation_check_v001 import *
MIN_ELEMENT = 1
MAX_ELEMENT = 1000000000
class PermutationCheckTestCase(unittest.TestCase):
def test_description_examples(self):
self.assertEqual(1, solution([4, 1, 3, 2]))
self.assertEqual(0, solution([4, 1, 3]))
def test_example1(self):
self.assertEqual(1, solution([4, 1, 3, 2]))
def test_example2(self):
self.assertEqual(0, solution([4, 1, 3]))
# Correctness
def test_extreme_min_max(self):
self.assertEqual(1, solution([MIN_ELEMENT]))
self.assertEqual(0, solution([100]))
def test_single(self):
array = [1]
with self.subTest():
self.assertEqual(1, solution(array))
array = [2]
with self.subTest():
self.assertEqual(0, solution(array))
def test_double(self):
array = [2, 1]
with self.subTest():
self.assertEqual(1, solution(array))
array = [1, 2]
with self.subTest():
self.assertEqual(1, solution(array))
array = [1, 3]
with self.subTest():
self.assertEqual(0, solution(array))
array = [3, 1]
with self.subTest():
self.assertEqual(0, solution(array))
def test_anti_sum1(self):
# total sum is correct, but it is not a permutation, N <= 10
self.__test_anti_sum(10)
def test_small_permutation(self):
# permutation + one element occurs twice, N = ~100
self.__test_permutation_plus_duplicated_values(100, 1)
# Performance
def test_medium_permutation(self):
# permutation + few elements occur twice, N = ~10,000
self.__test_permutation_plus_duplicated_values(10000, 3)
def test_anti_sum2(self):
# total sum is correct, but it is not a permutation, N = ~100,000
self.__test_anti_sum(100000)
def test_large_not_permutation(self):
# permutation + one element occurs three times, N = ~100,000
self.__test_permutation_plus_duplicated_values(100000, 3, single_duplicated=True)
def test_large_range(self):
# sequence 1, 2, ..., N, N = ~100,000
n = 100000
self.assertEqual(1, solution(list(range(1, n + 1))))
def test_extreme_values(self):
# all the same values, N = ~100,000
n = 100000
for i in range(3):
duplicated = random.randint(MIN_ELEMENT, n)
with self.subTest(n=n, duplicated=duplicated):
self.assertEqual(0, solution([duplicated] * n))
def test_various_permutations(self):
# all sequences are permutations
n = 100000
for i in range(n - 4, n + 1):
array = list(range(1, i))
with self.subTest(n=i):
self.assertEqual(1, solution(array))
# Utils
def __test_permutation_plus_duplicated_values(self, n, n_duplications=1, n_tests=2, single_duplicated=False):
for i in range(n_tests):
array = list(range(1, n + 1))
duplicated = [random.choice(array)] * n_duplications if single_duplicated \
else [random.choice(array) for i in range(n_duplications)]
array += duplicated
random.shuffle(array)
with self.subTest(duplicated=duplicated):
self.assertEqual(0, solution(array))
def __test_anti_sum(self, n=10):
# total sum is correct, but it is not a permutation.
for i in range(n - 3, n + 1):
array = list(range(1, n + 1))
delta = random.randint(1, i)
array[array.index(i)] = i - delta
array[array.index(i-1)] = i + delta
with self.subTest(i=i, delta=delta):
self.assertEqual(0, solution(array))
if __name__ == '__main__':
unittest.main()
```
#### File: lessons/q009/missing_integer_v001.py
```python
def solution(array):
"""
Finds the smallest positive integer not in array.
The smallest possible answer is 1.
:param array: list of integers
:return: smallest positive integer not in array.
"""
# smallest answer so far
i = 1
# visited values
visited = []
for value in array:
visited.append(value)
# if i is equal to value,
# we have to increment it before we visit the next item in array.
if value == i:
# we have to increment i to a number that we did not visit before.
while True:
value += 1
if value not in visited:
break
i = value
return i
```
#### File: lessons/q009/missing_integer_v002.py
```python
def replace_negative(l, default_value=0):
"""
Replaces all negative values with default_value
:param l: Original list
:param default_value: The value to replace negatives values with. Default is 0.
:return: Number of values replaced
"""
n_replaced = 0
for i in range(len(l)):
if l[i] < 0:
l[i] = default_value
n_replaced += 1
return n_replaced
def solution(A):
"""
Finds the smallest positive integer not in A.
The smallest possible answer is 1.
:param A: list of integers
:return: smallest positive integer not in A.
"""
length = len(A)
# When there is no positive value in A, default answer is 1
    if replace_negative(A) == length:  # '==', not 'is': identity comparison of ints is unreliable
return 1
for value in A:
# We mark a value v as visited by setting the value indexed by v as negative
# We can ignore v, when it is out of the range,
# because it means that v > N, thus there is a 0 < x < v and x < N and x not in A.
        if (value != 0) and (abs(value) - 1 < length) and (A[abs(value)-1] >= 0):
            A[abs(value) - 1] = -A[abs(value) - 1] if A[abs(value) - 1] != 0 else -1
# The first index v with a positive value found means that
# v + 1 is not in A (otherwise we would have visited and marked it as negative)
for i in range(length):
if A[i] >= 0:
return i + 1
# when A contains all numbers between 1 and N, we must return N + 1
return length + 1
```
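A small worked example of the in-place marking scheme above (import path assumed from the repo's test convention):
```python
from challenges.codility.lessons.q009.missing_integer_v002 import solution

# 1, 2, 3, 4 and 6 get marked by negating A[value - 1]; index 4 stays
# non-negative, so 5 is the smallest missing positive
assert solution([1, 3, 6, 4, 1, 2]) == 5
assert solution([-1, -3]) == 1  # no positives at all
```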
#### File: lessons/q010/max_counters_test.py
```python
import random
import unittest
import numpy as np
import timeout_decorator
from challenges.codility.lessons.q010.max_counters_v002 import *
MIN_ELEMENT = 1
MAX_ELEMENT = 100000
class MaxCountersTestCase(unittest.TestCase):
def test_description_examples(self):
self.assertEqual([3, 2, 2, 4, 2], solution(5, [3, 4, 4, 6, 1, 4, 4]))
# Correctness
def test_extreme_small(self):
self.__test_all_max(10, 100)
def test_single(self):
array = [random.randint(1, 2) for i in range(40)]
self.assertEqual([array.count(1)], solution(1, array))
array = [random.randint(1, 2) for i in range(100)]
self.assertEqual([array.count(1)], solution(1, array))
def test_small_random1(self):
self.__test_random(10, 50, 6)
def test_small_random2(self):
self.__test_random(10, 50, 10)
# Performance
@timeout_decorator.timeout(0.5)
def test_medium_random1(self):
self.__test_random(10, 50, 50)
@timeout_decorator.timeout(0.5)
def test_medium_random2(self):
self.__test_random(10, 50, 500)
@timeout_decorator.timeout(0.5)
def test_large_random1(self):
self.__test_random(50, 100, 2120)
@timeout_decorator.timeout(0.91)
def test_large_random2(self):
self.__test_random(50, 100, 10000)
def test_extreme_large(self):
self.__test_extreme_large_p1()
self.__test_extreme_large_p2()
@timeout_decorator.timeout(0.93)
def __test_extreme_large_p1(self):
self.__test_all_max(50, 10000)
@timeout_decorator.timeout(0.93)
def __test_extreme_large_p2(self):
self.__test_all_max(50, 10000)
# Utils
@staticmethod
def __brute_solution(n, array):
counters = [0] * n
# max_count = 0
for i in range(len(array)):
if array[i] == n + 1:
max_count = max(counters)
counters = [max_count] * n
else:
counters[array[i] - 1] += 1
return counters
def __test_all_max(self, n, m):
with self.subTest(n=n, m=m):
self.assertEqual([0] * n, solution(n, [n + 1] * m))
def __test_random(self, n, m, n_max_counter):
        array = list(np.random.randint(1, n + 1, m))  # randint's high bound is exclusive; replaces the deprecated random_integers
array += [n + 1] * n_max_counter
random.shuffle(array)
with self.subTest(n=n, m=m + n_max_counter, n_max_counter=n_max_counter):
self.assertEqual(self.__brute_solution(n, array), solution(n, array))
if __name__ == '__main__':
unittest.main()
```
#### File: lessons/q012/passing_cars_test.py
```python
import random
import unittest
import numpy as np
from challenges.codility.lessons.q012.passing_cars_v001 import *
MAX_N = 100000
class PassingCarsTestCase(unittest.TestCase):
def test_description_examples(self):
self.assertEqual(5, solution([0, 1, 0, 1, 1]))
# Correctness
def test_single(self):
self.assertEqual(0, solution([0]))
self.assertEqual(0, solution([1]))
def test_double(self):
self.assertEqual(0, solution([0, 0]))
self.assertEqual(1, solution([0, 1]))
self.assertEqual(0, solution([1, 1]))
self.assertEqual(0, solution([1, 0]))
def test_small_simple(self):
self.assertEqual(3, solution([0, 1, 1, 1]))
def test_small_random1(self):
self.__test_random(100)
def test_small_random2(self):
self.__test_random(1000)
# Performance
def test_medium_random(self):
self.__test_random(MAX_N)
def test_large_random(self):
self.__test_random(MAX_N)
def test_large_big_answer(self):
self.__test_random(MAX_N)
self.__test_random(MAX_N)
def test_large_alternate(self):
array = [0, 1] * (MAX_N // 2)
self.assertEqual(self.__brute_solution(array), solution(array))
array = [1, 0] * (MAX_N // 2)
self.assertEqual(self.__brute_solution(array), solution(array))
def test_large_extreme(self):
self.assertEqual(0, solution([0] * MAX_N))
self.assertEqual(0, solution([1] * MAX_N))
self.assertEqual(-1, solution(([0] * (MAX_N // 2)) + ([1] * (MAX_N // 2))))
self.assertEqual(0, solution(([1] * (MAX_N // 2)) + ([0] * (MAX_N // 2))))
# Utils
@staticmethod
def __brute_solution(array):
result = 0
        for i in range(len(array) - 1):
if array[i] == 0:
result += sum(array[i + 1:])
if result > 1000000000:
return -1
return result
def __test_random(self, n):
        array = list(np.random.randint(0, 2, n))  # draws 0 or 1; replaces the deprecated random_integers
random.shuffle(array)
with self.subTest(n=n):
self.assertEqual(self.__brute_solution(array), solution(array))
if __name__ == '__main__':
unittest.main()
```
#### File: lessons/q013/genomic_range_query_v001.py
```python
from util.challenges.array_challenges import __count_total
def __class_based_prefix_sums(array, classes):
"""
Given N = len(array) and M = len(classes):
Imagine that array is split into M different lists of length N,
for each list l, l[i][k] == 1 iff array[k] == classes[i], 0 otherwise.
This method returns the prefix_sum for each list l explained above.
"""
n = len(array)
m = len(classes)
sums = [[0] * (n + 1) for i in range(m)]
for i in range(n):
for j in range(m):
if array[i] == classes[j]:
sums[j][i + 1] = sums[j][i] + 1
else:
sums[j][i + 1] = sums[j][i]
return sums
def __class_is_present(prefix_sums, class_index, start_pos, end_pos):
return __count_total(prefix_sums[class_index], start_pos, end_pos) > 0
def solution(s, start_pos, end_pos):
"""
Find the minimal nucleotide from a range of sequence DNA.
:param s: String consisting of the letters A, C, G and T,
which correspond to the types of successive nucleotides in the sequence
:param start_pos: array with the start indexes for the intervals to check
:param end_pos: array with the end indexes for the intervals to check
:return: a list with the minimal nucleotide for each interval defined by start_pos and end_pos
"""
highest_class = 'T'
highest_class_value = 4
# The array below must be in ascending order regarding the value assigned to the classes in the challenge description
# (not necessarily in alphabetic order)
other_classes = ['A', 'C', 'G']
other_classes_values = [1, 2, 3]
# We create a prefix_sum list for each class, so we can identify when a range has that specific class
prefix_sums = __class_based_prefix_sums(s, other_classes)
result = []
for i in range(len(start_pos)):
# We don't need to create a prefix_sum list for the class with highest value,
# because we can always use it as a fallback
current_result = highest_class_value
for j in range(len(other_classes)):
if __class_is_present(prefix_sums, j, start_pos[i], end_pos[i]):
current_result = other_classes_values[j]
break
result.append(current_result)
return result
```
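A usage sketch with Codility's example (assuming the repo's `__count_total` helper returns the inclusive-range count that the prefix-sum layout implies):
```python
from challenges.codility.lessons.q013.genomic_range_query_v001 import solution

# impact factors: A=1, C=2, G=3, T=4
assert solution('CAGCCTA', [2, 5, 0], [4, 5, 6]) == [2, 4, 1]
```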
#### File: lessons/q014/min_avg_two_slice_test.py
```python
import random
import unittest
import numpy as np
from challenges.codility.lessons.q014.min_avg_two_slice_v001 import *
MAX_N = 100000
MIN_ELEMENT = -10000
MAX_ELEMENT = 10000
class MinAvgTwoSliceTestCase(unittest.TestCase):
def test_description_examples(self):
self.assertEqual(1, solution([4, 2, 2, 5, 1, 5, 8]))
# Correctness
def test_double_quadruple(self):
self.assertEqual(0, solution([8, 2]))
self.assertEqual(0, solution([-1, 1, 2, 3]))
self.assertEqual(2, solution([2, 3, -1, 1]))
self.assertEqual(1, solution([7, 2, 4, 1]))
def test_simple1(self):
self.assertEqual(1, solution([9, 2, 4, 1]))
self.assertEqual(0, solution([2, 3, 1]))
def test_simple2(self):
self.assertEqual(0, solution([5, 9, 1]))
def test_small_random(self):
self.__test_random(100)
def test_medium_range(self):
array = list(range(0, 200, 4)) + list(range(400, 300, -2))
self.assertEqual(0, solution(array))
array = list(range(100, 300, 4)) + list(range(400, 300, -2))
self.assertEqual(0, solution(array))
array = list(range(0, 90, 3)) + list(range(650, 300, -5))
self.assertEqual(0, solution(array))
# Performance
def test_medium_random(self):
self.__test_random(700)
def test_large_ones(self):
self.__test_random(MAX_N, -1, 1)
self.__test_random(MAX_N, -1, 1)
def test_large_random(self):
self.__test_random(MAX_N)
def test_extreme_values(self):
self.assertEqual(0, solution([MAX_ELEMENT]*MAX_N))
self.assertEqual(0, solution([MIN_ELEMENT]*MAX_N))
array = [MIN_ELEMENT]*(MAX_N//2) + [MAX_ELEMENT]*(MAX_N//2)
random.shuffle(array)
self.assertEqual(self.__brute_solution(array), solution(array))
def test_large_sequence(self):
self.assertEqual(0, solution(list(range(MAX_N))))
# Utils
@staticmethod
def __brute_solution(array):
min_avg = (array[0] + array[1]) / 2.0
min_pos = 0
for i in range(len(array)-1):
for j in range(i + 2, len(array) + 1):
current_avg = sum(array[i:j])/(j-i)
if current_avg < min_avg:
min_avg = current_avg
min_pos = i
return min_pos
    def __test_random(self, n, lo=MIN_ELEMENT, hi=MAX_ELEMENT):  # lo/hi avoid shadowing the min/max builtins
        array = list(np.random.randint(lo, hi + 1, n))  # randint's high bound is exclusive; replaces the deprecated random_integers
with self.subTest(n=n):
self.assertEqual(self.__brute_solution(array), solution(array))
if __name__ == '__main__':
unittest.main()
```
#### File: lessons/q018/number_of_disc_intersections_v001.py
```python
from operator import itemgetter
def solution(array):
"""
Computes the number of intersections of closed intervals
Time Complexity: O(n*log(n)), given n = len(array),
because we sort the list of endpoints in O(2n*log(2n)) and iterate over it in O(2n):
O(2n*log(2n) + 2n) is a function of the class O(n*log(n)).
Space Complexity: O(n), because we create a list of endpoints and 5 other variables (including 'i') in O(2n + 5).
:param array:
list [a1, a2, ..., an], item ai represents a closed interval with center i and size 2*list[i], i.e,
represent the interval [x, y], given x = i - list[i] and y = i + list[i]
:return:
number of intersections if it does not exceed 10,000,000, and returns -1 otherwise
"""
LEFT = 0
RIGHT = 1
# creates a list with all the endpoints (left and right) as separate elements
endpoints = []
for i in range(len(array)):
endpoints.append({'value': i - array[i], 'side': LEFT})
endpoints.append({'value': i + array[i], 'side': RIGHT})
# sorts the endpoints, but making sure the left sides come first
endpoints.sort(key=itemgetter('value', 'side'))
response = 0
# represents the number of potential inner intersections
# (it starts with -1, so it can become 0 on the first left side)
inner_candidates = -1
for i in range(len(endpoints)):
# when we see a new interval opening, we increment the number of possible inner intersections
if endpoints[i]['side'] == LEFT:
inner_candidates += 1
# and if this number is greater than 0 (we saw at least one interval opening after the last non-ended interval)
# it means that we saw an interval starting inside of another
if inner_candidates > 0:
response += inner_candidates
if response > 10000000:
return -1
else:
inner_candidates -= 1
return response
```
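A usage sketch with Codility's example (import path assumed from the repo's test convention):
```python
from challenges.codility.lessons.q018.number_of_disc_intersections_v001 import solution

# discs centered at 0..5 with radii [1, 5, 2, 1, 4, 0] intersect 11 times
assert solution([1, 5, 2, 1, 4, 0]) == 11
assert solution([0, 0, 0]) == 0  # zero-radius discs at distinct centers never touch
```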
#### File: lessons/q023/dominator_v001.py
```python
from util.challenges.array_challenges import get_leader
def solution(array):
leader, indexes = get_leader(array)
if len(indexes) > (len(array) // 2):
return indexes[0]
else:
return -1
```
#### File: lessons/q024/equi_leader_v001.py
```python
from util.challenges.array_challenges import get_leader
def solution(array):
n = len(array)
leader, indexes = get_leader(array)
n_leader = len(indexes)
count = 0
for i in range(n_leader - 1):
for j in range(indexes[i], indexes[i+1]):
if (i + 1) > (j + 1)/2 and (n_leader - i - 1) > (n - j - 1)/2:
count += 1
return count
```
#### File: lessons/q027/max_double_slice_sum_v001.py
```python
def solution(array):
"""
Returns the maximal sum of a double slice.
Double Slice is a triplet (X, Y, Z), such that 0 ≤ X < Y < Z < N
and its sum is the sum of the elements between array[X+1] and array[Z-1] minus array[Y]
"""
n = len(array)
max_ending = [0] * n
max_starting = [0] * n
# finds the sums of the left and the right side of the double slice, respectively
for i in range(1, n - 1):
max_ending[i] = max(0, max_ending[i-1] + array[i])
max_starting[n - 1 - i] = max(0, max_starting[n - i] + array[n - 1 - i])
# combines the possible sums
result = 0
for i in range(1, n - 1):
result = max(result, max_ending[i-1] + max_starting[i+1])
return result
```
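A usage sketch with Codility's example (import path assumed from the repo's test convention):
```python
from challenges.codility.lessons.q027.max_double_slice_sum_v001 import solution

# best double slice is (0, 3, 6): 2 + 6 + 4 + 5 = 17
assert solution([3, 2, 6, -1, 4, 5, -1, 2]) == 17
assert solution([5, -7, 5]) == 0  # the only triplet (0, 1, 2) leaves an empty sum
```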
#### File: demo/q001/find_number_v001.py
```python
def findNumber(arr, k):
"""
Check if number is in a list.
:param arr: list to verify
:param k: element to look for
:return: YES, if k is in arr. NO, otherwise.
"""
return 'YES' if k in arr else 'NO'
"""
#!/bin/python3
import sys
import os
def findNumber(arr, k):
f = open(os.environ['OUTPUT_PATH'], 'w')
_arr_cnt = 0
_arr_cnt = int(input())
_arr_i = 0
_arr = []
while _arr_i < _arr_cnt:
_arr_item = int(input());
_arr.append(_arr_item)
_arr_i += 1
_k = int(input());
res = findNumber(_arr, _k)
f.write(res + "\n")
f.close()
"""
```
#### File: honeypot/q002/twins_v001.py
```python
def __twins(a, b):
even_a = a[::2]
odd_a = a[1::2]
even_a = sorted(even_a)
odd_a = sorted(odd_a)
even_b = b[::2]
odd_b = b[1::2]
even_b = sorted(even_b)
odd_b = sorted(odd_b)
return 'Yes' if (even_a == even_b) and (odd_a == odd_b) else 'No'
def twins(a, b):
return [__twins(a[i], b[i]) for i in range(len(a))]
"""
#!/bin/python3
import sys
import os
# Complete the function below.
def twins(a, b):
if __name__ == "__main__":
f = open(os.environ['OUTPUT_PATH'], 'w')
a_cnt = 0
a_cnt = int(input())
a_i = 0
a = []
while a_i < a_cnt:
try:
a_item = str(input())
except:
a_item = None
a.append(a_item)
a_i += 1
b_cnt = 0
b_cnt = int(input())
b_i = 0
b = []
while b_i < b_cnt:
try:
b_item = str(input())
except:
b_item = None
b.append(b_item)
b_i += 1
res = twins(a, b);
for res_cur in res:
f.write( str(res_cur) + "\n" )
f.close()
"""
```
#### File: spoj/ngm/ngm_test.py
```python
import unittest
from challenges.spoj.ngm.ngm_v001 import solution
class AGameWithNumbersTestCase(unittest.TestCase):
def test_description_examples(self):
self.assertEqual(solution(14), "1\n4")
def test_simple(self):
for i in range(1, 10):
self.assertEqual(solution(i * 10), "2")
self.assertEqual(solution(i * 10 + i), "1\n" + str(i))
self.assertEqual(solution(i), "1\n" + str(i))
if __name__ == '__main__':
unittest.main()
```
#### File: spoj/permut2/permut2_v001.py
```python
import fileinput
import sys
if sys.version_info[0] >= 3:
map = lambda func, l: [func(i) for i in l]
else:
range = xrange
def is_ambiguous(array, n):
for i in range(n):
if array[array[i]] != i:
return False
return True
# values are decremented by 1 on input to simplify the checks and halve the number of subtractions
def cast_input(s):
return int(s) - 1
if __name__ == '__main__':
f_in = fileinput.input()
while True:
n = f_in.readline()
if n[0] == '0':
break
else:
n = int(n)
if is_ambiguous(map(cast_input, f_in.readline().split()), n):
print('ambiguous')
else:
print('not ambiguous')
```
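To see the ambiguity check in isolation (import path assumed; `cast_input`'s decrement is applied inline):
```python
from challenges.spoj.permut2.permut2_v001 import is_ambiguous

# a permutation is ambiguous iff it equals its own inverse: a[a[i]] == i for all i
assert is_ambiguous([i - 1 for i in [1, 4, 3, 2]], 4) is True
assert is_ambiguous([i - 1 for i in [2, 3, 1]], 3) is False  # inverse is 3 1 2
```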
#### File: spoj/stamps/stamps_v002.py
```python
import fileinput
def read_integers_from_input():
for line in fileinput.input():
line = line.strip()
if line:
for number in line.split(' '):
yield int(number)
if __name__ == '__main__':
int_input = read_integers_from_input()
n_test_cases = next(int_input)
sum_result = 0
for i in range(1, n_test_cases + 1):
n_stamps, n_friends = next(int_input), next(int_input)
friends = []
j, sum_result = 0, 0
if n_stamps == 0 or n_friends == 0:
# discard input
while n_friends:
n_friends -= 1
next(int_input)
else:
for j in range(n_friends):
friends.append(next(int_input))
# as the stamp counts are ordered in descending order,
# j will be the minimum number of friends from whom Lucy can borrow stamps
friends.sort(reverse=True)
for j in range(1, n_friends + 1):
sum_result += friends[j - 1]
if sum_result >= n_stamps:
break
if sum_result < n_stamps:
print("Scenario #%d:\nimpossible\n" % i)
else:
print("Scenario #%d:\n%d\n" % (i, j))
```
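The greedy core of the loop above, lifted out for illustration (a standalone sketch, not part of the submission):
```python
def min_friends(n_stamps, stamp_counts):
    """Minimum number of friends whose (descending-sorted) stamps cover n_stamps."""
    total = 0
    for j, count in enumerate(sorted(stamp_counts, reverse=True), start=1):
        total += count
        if total >= n_stamps:
            return j
    return None  # impossible

assert min_friends(100, [70, 30, 10]) == 2
assert min_friends(100, [30, 30, 10]) is None
```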
#### File: job_applications/opus_one/power-calc.py
```python
import os
import sys
import math
import pandas as pd
# Allowable names for voltage, current, and power factor
V_NAMES = {'v', 'V', 'Volts', 'Voltage'}
I_NAMES = {'i', 'I', 'Amps', 'Amperes', 'Current'}
PF_NAMES = {'pf', 'PF', 'Power Factor'}
DICT_ORIENT = 'index'
CALCULATED_COLUMNS = ['p', 'q', 's']  # must match the (p, q, s) order returned by calc_power
DEFAULT_POWER_FACTOR = 0.9
def calc_power(volts, amps, pf):
"""
Returns tuple of (p, q, s) powers from the inputs.
"""
try:
s = volts * amps
p = s * pf
q = math.sqrt(s**2 - p**2)
return (p, q, s)
except (ValueError, TypeError):
return (None, None, None)
def combine_columns(allowed_columns):
"""
Combines Columns which were named with multiple names
:return: column with the appropriate values
"""
v_columns = [v for v in allowed_columns if v in df.columns]
v_columns.sort()
for i in range(1, len(v_columns)):
df[v_columns[0]] = df[v_columns[0]].fillna(df[v_columns[i]])
        df.drop(columns=v_columns[i], inplace=True)  # keyword form; the positional 'axis' argument was removed in pandas 2.0
return v_columns[0]
def __get_file_path(f):
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
return os.path.join(__location__, f)
# Run the program; expects a single argument: the name of a JSON file
if __name__ == "__main__":
# Firstly, we gather the data in the JSON file
f = __get_file_path(sys.argv[1])
df = pd.read_json(f, orient=DICT_ORIENT)
# print(df)
# Then reduce the columns to just one allowed column per *quantity* (measurement)
v = combine_columns(V_NAMES)
i = combine_columns(I_NAMES)
pf = combine_columns(PF_NAMES)
# print(df)
# Filling missing values for Power Factor to default
df[pf] = df[pf].fillna(DEFAULT_POWER_FACTOR)
# print(df)
# Create the columns for the *calculated quantities* (namely apparent power, real power and reactive power)
df[CALCULATED_COLUMNS] = df.apply(lambda x: pd.Series(calc_power(x[v], x[i], x[pf])), axis=1)
# print(df)
# and finally output the result
print(df[CALCULATED_COLUMNS].to_dict(DICT_ORIENT))
```
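To make the expected input concrete, a hypothetical example (file name and values are invented):
```python
# loads.json, keyed by row id as DICT_ORIENT = 'index' expects:
#   {"0": {"Volts": 120.0, "Amps": 10.0, "pf": 0.8},
#    "1": {"v": 240.0, "i": 5.0}}
#
# `python power-calc.py loads.json` merges the synonym columns ('Volts'/'v',
# 'Amps'/'i'), fills row 1's missing power factor with DEFAULT_POWER_FACTOR,
# and prints the real (p), reactive (q) and apparent (s) power per row.
```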
#### File: coding_challenges/util/array_utils.py
```python
import random
def remove_random(array, all_occurrences=False):
    """Pops and returns a random element; optionally removes its remaining duplicates too."""
    to_remove = array.pop(random.randint(0, len(array) - 1))  # lower bound 0 so index 0 is eligible
if all_occurrences:
while to_remove in array:
array.remove(to_remove)
return to_remove
``` |
{
"source": "Joeffison/InstagramUtility",
"score": 2
} |
#### File: instagram_utility/application_instagram_api/views.py
```python
from __future__ import unicode_literals
from django.http import HttpResponse, JsonResponse
from rest_framework.decorators import api_view
from rest_framework import status
from .facade import MyInstagramAPI
from .models import InstagramModel
from .serializers import InstagramSerializer
@api_view(['POST'])
def model_list(request):
if request.method == 'POST':
api = MyInstagramAPI(request.data['username'], request.data['password'])
return JsonResponse(data=api.get_current_user_profile(), safe=False)
return HttpResponse(status=status.HTTP_404_NOT_FOUND)
@api_view(['POST'])
def follow(request):
api = MyInstagramAPI(request.data['username'], request.data['password'])
api.follow(request.data['users'])
return JsonResponse(data=api.get_current_user_profile(), safe=False)
@api_view(['POST'])
def unfollow(request):
api = MyInstagramAPI(request.data['username'], request.data['password'])
api.unfollow(request.data['users'])
return JsonResponse(data=api.get_current_user_profile(), safe=False)
``` |
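A client-side sketch of how these endpoints might be called (the route is a placeholder; the project's urls.py is not shown):
```python
import requests

payload = {'username': 'my_user', 'password': 'my_pass', 'users': ['some_account']}
response = requests.post('http://localhost:8000/follow/', json=payload)  # placeholder URL
print(response.json())  # the refreshed profile from get_current_user_profile()
```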
{
"source": "joefg/SCAR",
"score": 3
} |
#### File: SCAR/SCAR/face_detect.py
```python
import cv2
import logging
import SCAR.Controls.control as control
class FaceDetect(control.Control):
"""
FaceDetect
Supposed to detect a face and draw a box around it.
"""
def apply_control(self):
# set our face cascade
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
# get locations of faces
faces = face_cascade.detectMultiScale(
cv2.cvtColor(self.base_image, cv2.COLOR_BGR2GRAY),
scaleFactor = 1.1,
minNeighbors = 5,
minSize = (30, 30),
)
        self.image_out = self.base_image.copy()  # copy so the rectangles below don't mutate the base image in place
# draw a rectangle around the faces
for (x, y, w, h) in faces:
cv2.rectangle(self.image_out, (x, y), (x + w, y + h), (0, 0, 255), 2)
logging.info("FaceDetect: {0} faces detected.".format(
len(faces)
))
return self.image_out
```
#### File: SCAR/SCAR/gauss_filter.py
```python
import logging
import numpy as np
import cv2
import SCAR.Controls.control as control
class GaussFilter(control.Control):
"""
GaussFilter
a simple Gaussian blur filter control
"""
def apply_control(self):
self.image_out = cv2.GaussianBlur(self.base_image, (25, 25), 0)
return self.image_out
```
#### File: SCAR/Harness/harness.py
```python
import logging
class Harness:
def __init__(self) -> None:
"""
Constructor.
"""
super().__init__()
self.controls = []
self.base_image = None
self.current_image = self.base_image
logging.info("Harness created.")
def __str__(self) -> str:
"""
ToString.
:return:
"""
return super().__str__() + "Harness."
def add_control(self, control):
"""
Method to add a control to the list.
:param control:
:return:
"""
if len(self.controls) == 0:
            # no controls yet: the first one reads straight from the base image
control.set_image(self.base_image)
self.controls.append(control)
logging.info("Added first control.")
else:
# set the current top image to be derived from the output of the control underneath it.
control.set_image(self.controls[-1].apply_control())
self.controls.append(control)
logging.info("Added control.")
# base case: the current top image is the top control's output
self.current_image = self.controls[-1].apply_control()
logging.info("Set current top image.")
def set_base_image(self, image):
"""
Method to set the base image.
:param image:
:return:
"""
# set base image
self.base_image = image
# set images in the filters to be derived from the base images of previous layers
if len(self.controls) > 0:
# set first image to be the base image
self.controls[0].set_image(self.base_image)
            # set the rest of the controls to use the output from previous controls
for i in range(1, len(self.controls)):
self.controls[i].set_image(self.controls[i-1].apply_control())
logging.info("Base images set for filters.")
self.current_image = self.get_current_image()
logging.info("Images updated.")
def get_current_image(self):
"""
Method to get the image post filtering.
:return:
"""
if len(self.controls) > 0:
return self.controls[-1].apply_control()
else:
return self.base_image
``` |
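A minimal usage sketch of the control stack (import paths inferred from the file layout above; 'input.png' is a placeholder, and Control is assumed to expose the set_image the Harness calls):
```python
import cv2
from SCAR.Harness.harness import Harness
from SCAR.SCAR.gauss_filter import GaussFilter

h = Harness()
h.set_base_image(cv2.imread('input.png'))  # base layer of the stack
h.add_control(GaussFilter())               # top of the stack is now the blurred image
cv2.imwrite('output.png', h.get_current_image())
```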
{
"source": "joefinlon/Finlon_et_al_2021_DFR",
"score": 3
} |
#### File: joefinlon/Finlon_et_al_2021_DFR/dfr_enhancement.py
```python
import numpy as np
from scipy.signal import find_peaks, peak_prominences, peak_widths
from skimage.measure import label
def find_regions(matched_object, dfr, method='prominences', min_dfr=None, min_prom=2., rel_height=0.4):
'''
Inputs:
matched_object: Dictionary created from matcher routine
dfr: Masked array of DFR values computed from matched_object
        method: Method for determining enhanced DFR regions/periods ('prominences')
        min_dfr: Minimum DFR to consider for ID scheme (not used for the 'prominences' method)
        min_prom: Minimum prominence needed to consider DFR peaks (float)
rel_height: Relative height at which the peak width is measured as a percentage of its prominence (float between 0 and 1)
'''
regions_object = {}
peaks = np.array([], dtype='int'); prominences = np.array([]); width_heights = np.array([])
durations_p3 = np.array([]); durations_er2 = np.array([])
peak_starts_p3 = np.array([], dtype='datetime64[ns]'); peak_ends_p3 = np.array([], dtype='datetime64[ns]')
peak_starts_er2 = np.array([], dtype='datetime64[ns]'); peak_ends_er2 = np.array([], dtype='datetime64[ns]')
peak_count = 0
    labels = label(~dfr.mask) # find contiguous regions/periods where valid (not masked) DFR values exist (peak ID is more robust this way)
for labelnum in range(1, len(np.unique(labels))+1):
peaks_temp, _ = find_peaks(dfr[labels==labelnum])
if len(peaks_temp)>0:
prominences_temp = peak_prominences(dfr[labels==labelnum], peaks_temp, wlen=None); prominences_temp = prominences_temp[0]
peaks_temp = peaks_temp[prominences_temp>=min_prom]; prominences_temp = prominences_temp[prominences_temp>=min_prom] # trim peaks and prominences
widths_temp = peak_widths(dfr[labels==labelnum], peaks_temp, rel_height=rel_height)
for peaknum in range(len(widths_temp[0])): # loop through each peak to get peak width start/end periods
peak_count += 1
width_heights = np.append(width_heights, widths_temp[1][peaknum])
peak_start_er2 = matched_object['matched']['time_rad']['data'][int(np.where(labels==labelnum)[0][0]+np.floor(widths_temp[2][peaknum]))]
peak_end_er2 = matched_object['matched']['time_rad']['data'][int(np.where(labels==labelnum)[0][0]+np.ceil(widths_temp[3][peaknum]))]
peak_start_p3 = matched_object['matched']['time_p3']['data'][int(np.where(labels==labelnum)[0][0]+np.floor(widths_temp[2][peaknum]))]
peak_end_p3 = matched_object['matched']['time_p3']['data'][int(np.where(labels==labelnum)[0][0]+np.ceil(widths_temp[3][peaknum]))]
if peak_end_er2<peak_start_er2: # fixes rare instance where peak end needs to be shortened (no matched data after this time)
peak_end_er2 = matched_object['matched']['time_rad']['data'][int(np.where(labels==labelnum)[0][0]+np.floor(widths_temp[3][peaknum]))]
peak_end_p3 = matched_object['matched']['time_p3']['data'][int(np.where(labels==labelnum)[0][0]+np.floor(widths_temp[3][peaknum]))]
durations_p3 = np.append(durations_p3, (peak_end_p3-peak_start_p3)/np.timedelta64(1,'s'))
durations_er2 = np.append(durations_er2, (peak_end_er2-peak_start_er2)/np.timedelta64(1,'s'))
print(' Peak #{} from {} - {} ({} sec)'.format(peak_count, peak_start_p3, peak_end_p3, durations_p3[-1]))
peak_starts_p3 = np.append(peak_starts_p3, peak_start_p3); peak_ends_p3 = np.append(peak_ends_p3, peak_end_p3)
peak_starts_er2 = np.append(peak_starts_er2, peak_start_er2); peak_ends_er2 = np.append(peak_ends_er2, peak_end_er2)
peaks = np.append(peaks, np.where(labels==labelnum)[0][0]+peaks_temp)
prominences = np.append(prominences, prominences_temp)
# Construct the object
regions_object['peak_start_p3'] = peak_starts_p3; regions_object['peak_end_p3'] = peak_ends_p3
regions_object['peak_start_er2'] = peak_starts_er2; regions_object['peak_end_er2'] = peak_ends_er2
regions_object['width_height'] = width_heights # height of the contour lines at which the widths where evaluated
regions_object['peak_index'] = peaks; regions_object['peak_value'] = dfr[peaks]; regions_object['peak_prominence'] = prominences
regions_object['duration_p3'] = durations_p3; regions_object['duration_er2'] = durations_er2
regions_object['stats'] = {}
regions_object['stats']['num_regions'] = peak_count
regions_object['stats']['mean_duration_p3'] = np.sum(durations_p3) / peak_count
regions_object['stats']['mean_duration_er2'] = np.sum(durations_er2) / peak_count
return regions_object
```
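The peak machinery above is easier to picture on synthetic data; here is a standalone sketch of the same scipy calls (thresholds mirror the defaults above):
```python
import numpy as np
from scipy.signal import find_peaks, peak_prominences, peak_widths

dfr = np.concatenate([np.zeros(20), 5. * np.hanning(15), np.zeros(20)])  # one synthetic DFR bump
peaks, _ = find_peaks(dfr)
prominences = peak_prominences(dfr, peaks)[0]
peaks = peaks[prominences >= 2.]                  # the min_prom screening
widths = peak_widths(dfr, peaks, rel_height=0.4)  # (widths, heights, left_ips, right_ips)
print(peaks, widths[2], widths[3])                # peak index and fractional start/end positions
```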
#### File: joefinlon/Finlon_et_al_2021_DFR/match.py
```python
import numpy as np
from pyproj import Proj
from scipy.spatial import cKDTree
def match(
er2obj, p3obj, radname, sphere_size, start_time, end_time, query_k=1,
outlier_method=None, return_indices=False):
'''
Get the matched radar data based on the P-3 lat, lon, alt.
Inputs:
er2_obj: ER-2 object obtained from the er2read() function
p3_obj: P-3 object obtained from the iwgread() and iwg_avg() functions
radname: Radar name ('CRS', 'HIWRAP')
sphere_size: Maximum distance [int in m] allowed in the kdTree search
start_time: Start time [str in YYYY-MM-DDTHH:MM:SS format] to consider in matching routine
end_time: End time [str in YYYY-MM-DDTHH:MM:SS format] to consider in matching routine
query_k: Number of gates (int) considered in the average (1 == use closest)
outlier_method: None (no outliers removed) or 'iqr', 'z', 'modz'
return_indices: True == returns the matched gates in 1d coords; False == does not
'''
# Load P-3 info and trim if needed
p3_time = p3obj['time']['data']
p3_lat = p3obj['Latitude']['data']
p3_lon = p3obj['Longitude']['data']
p3_alt = p3obj['GPS_Altitude']['data']
start_dt64 = np.datetime64(start_time)
end_dt64 = np.datetime64(end_time)
# Turn radar spatial data into 1-D arrays
er2_time = np.ravel(er2obj['time_gate'][:, :])
er2_x = np.ravel(er2obj['lon_gate'][:, :])
er2_y = np.ravel(er2obj['lat_gate'][:, :])
er2_alt = np.ravel(er2obj['alt_gate'][:, :])
# Turn radar data into 1-D arrays
if radname=='CRS':
radar_dbz = np.ma.ravel(er2obj['dbz_W'][:, :])
radar_ldr = np.ma.ravel(er2obj['ldr_W'][:, :])
radar_vel = np.ma.ravel(er2obj['vel_W'][:, :])
radar_width = np.ma.ravel(er2obj['width_W'][:, :])
elif radname=='HIWRAP':
radar_dbz = np.ma.ravel(er2obj['dbz_Ku'][:, :])
radar_ldr = np.ma.ravel(er2obj['ldr_Ku'][:, :])
radar_vel = np.ma.ravel(er2obj['vel_Ku'][:, :])
radar_width = np.ma.ravel(er2obj['width_Ku'][:, :])
radar2_dbz = np.ma.ravel(er2obj['dbz_Ka'][:, :])
radar2_ldr = np.ma.ravel(er2obj['ldr_Ka'][:, :])
radar2_vel = np.ma.ravel(er2obj['vel_Ka'][:, :])
radar2_width = np.ma.ravel(er2obj['width_Ka'][:, :])
    elif (radname=='EXRAD'): # TODO: accommodate nadir beam argument (also: extend this to EXRAD-scanning?)
radar_dbz = np.ma.ravel(er2obj['dbz_X'][:, :])
radar_vel = np.ma.ravel(er2obj['vel_X'][:, :])
radar_width = np.ma.ravel(er2obj['width_X'][:, :])
# Remove radar gates where dbZ is masked (may decide to do this differently later, esp. if other var values remain masked)
# Also remove radar gates outside of the P-3 flight times (to only consider matches with that flight segment)
if radname=='CRS':
if outlier_method=='w':
width_thresh = np.percentile(radar_width.compressed(), 5) # compute the 5th percentile to use as a threshold
remove_inds = np.logical_or.reduce((radar_dbz.mask, radar_width.data<width_thresh, er2_time<start_dt64, er2_time>end_dt64))
else:
remove_inds = np.logical_or.reduce((radar_dbz.mask, er2_time<start_dt64, er2_time>end_dt64))
radar_dbz = radar_dbz[~remove_inds]
radar_ldr = radar_ldr[~remove_inds]
radar_vel = radar_vel[~remove_inds]
radar_width = radar_width[~remove_inds]
elif radname=='HIWRAP':
# @joefinlon: See if the first logical argument in 'remove_inds' should be handled differently
# Currently requires both Ku- and Ka-band dbz to be masked in order to ignore radar gate
if outlier_method=='w':
width_thresh = np.percentile(radar_width.compressed(), 5) # compute the 5th percentile to use as a threshold
width2_thresh = np.percentile(radar2_width.compressed(), 5) # compute the 5th percentile to use as a threshold
remove_inds = np.logical_or.reduce((radar_dbz.mask+radar2_dbz.mask, radar_width.data<width_thresh, radar2_width.data<width2_thresh, er2_time<start_dt64, er2_time>end_dt64))
else:
remove_inds = np.logical_or.reduce((radar_dbz.mask+radar2_dbz.mask, er2_time<start_dt64, er2_time>end_dt64))
radar_dbz = radar_dbz[~remove_inds]
radar_ldr = radar_ldr[~remove_inds]
radar_vel = radar_vel[~remove_inds]
radar_width = radar_width[~remove_inds]
radar2_dbz = radar2_dbz[~remove_inds]
radar2_ldr = radar2_ldr[~remove_inds]
radar2_vel = radar2_vel[~remove_inds]
radar2_width = radar2_width[~remove_inds]
elif radname=='EXRAD':
if outlier_method=='w':
width_thresh = np.percentile(radar_width.compressed(), 5) # compute the 5th percentile to use as a threshold
remove_inds = np.logical_or.reduce((radar_dbz.mask, radar_width.data<width_thresh, er2_time<start_dt64, er2_time>end_dt64))
else:
remove_inds = np.logical_or.reduce((radar_dbz.mask, er2_time<start_dt64, er2_time>end_dt64))
radar_dbz = radar_dbz[~remove_inds]
#radar_ldr = radar_ldr[~remove_inds]
radar_vel = radar_vel[~remove_inds]
radar_width = radar_width[~remove_inds]
er2_time = er2_time[~remove_inds]
er2_x = er2_x[~remove_inds]
er2_y = er2_y[~remove_inds]
er2_alt = er2_alt[~remove_inds]
# Trim P-3 nav data with +/- 1 min buffer on either side of specified period (since P-3 legs differ from the ER-2)
start_dt64 = start_dt64 - np.timedelta64(1, 'm')
end_dt64 = end_dt64 + np.timedelta64(1, 'm')
time_inds = np.where((p3_time>=start_dt64) & (p3_time<=end_dt64))[0]
if ('time_midpoint' in p3obj.keys()) and (p3_time[time_inds[-1]]==end_dt64): # P-3 data averaged in N-sec intervals...need to remove the last ob in time_inds
time_inds = time_inds[:-1]
p3_time = p3_time[time_inds]
p3_lat = p3_lat[time_inds]
p3_lon = p3_lon[time_inds]
p3_alt = p3_alt[time_inds]
# This section may need to be populated to handle masked P-3 nav data (will assume everything is fine for now)
# Set reference point (currently Albany, NY)
lat_0 = 42.6526
lon_0 = -73.7562
# Define a map projection to calculate cartesian distances
p = Proj(proj='laea', zone=10, ellps='WGS84', lat_0=lat_0, lon_0=lon_0)
    # Use a projection to get cartesian distances between the datasets
er2_x2, er2_y2 = p(er2_x, er2_y)
p3_x2, p3_y2 = p(p3_lon, p3_lat)
# Set kdtree parameters
leafsize = 16
query_eps = 0
query_p = 2
query_distance_upper_bound = sphere_size
query_n_jobs = 1
K_d = sphere_size
# Perform the kdtree search
kdt = cKDTree(list(zip(er2_x2, er2_y2, er2_alt)), leafsize=leafsize)
prdistance, prind1d = kdt.query(list(zip(p3_x2, p3_y2, p3_alt)), k=query_k, eps=query_eps, p=query_p,
        distance_upper_bound=query_distance_upper_bound, workers=query_n_jobs)  # SciPy renamed 'n_jobs' to 'workers' in 1.6
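    # kdt.query returns, per P-3 fix, the distances to and flat indices of its
    # query_k nearest radar gates; gates beyond sphere_size come back with an
    # infinite distance and an index equal to the tree size, which is what the
    # bad_inds checks below screen for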
# Perform the matching routine
if query_k==1: # closest gate approach (more simple)
# Mask matched data that is outside of the defined bounds
bad_inds = np.where(prind1d == radar_dbz.shape[0])
if len(bad_inds[0]) > 0:
print('Nearest radar gate was outside distance upper bound...eliminating those instances')
#mask inds and distances that are outside the search area
prind1d[bad_inds] = np.ma.masked
prdistance[bad_inds] = np.ma.masked
# Trim radar data to only include valid matched values
dbz_matched = radar_dbz[prind1d]
vel_matched = radar_vel[prind1d]
width_matched = radar_width[prind1d]
dbz_matched = np.ma.masked_where(prind1d == 0, dbz_matched)
vel_matched = np.ma.masked_where(prind1d == 0, vel_matched)
width_matched = np.ma.masked_where(prind1d == 0, width_matched)
if radname=='CRS':
ldr_matched = radar_ldr[prind1d]
ldr_matched = np.ma.masked_where(prind1d == 0, ldr_matched)
elif radname=='HIWRAP':
ldr_matched = radar_ldr[prind1d]
dbz2_matched = radar2_dbz[prind1d]
vel2_matched = radar2_vel[prind1d]
width2_matched = radar2_width[prind1d]
ldr2_matched = radar2_ldr[prind1d]
ldr_matched = np.ma.masked_where(prind1d == 0, ldr_matched)
dbz2_matched = np.ma.masked_where(prind1d == 0, dbz2_matched)
vel2_matched = np.ma.masked_where(prind1d == 0, vel2_matched)
width2_matched = np.ma.masked_where(prind1d == 0, width2_matched)
ldr2_matched = np.ma.masked_where(prind1d == 0, ldr2_matched)
# Get the current P-3 lat,lon and alt to save in the matched dictionary - maybe add other P-3 vars to this later
time_p3_matched = p3_time
lat_p3_matched = p3_lat
lon_p3_matched = p3_lon
alt_p3_matched = p3_alt
# Compute the time difference between matched radar obs and the P-3
time_offset_matched = (er2_time[prind1d] - p3_time) / np.timedelta64(1, 's') # [s]
# Get the current ER-2 nav and radar data to save in the matched dictionary - maybe add other vars to this later
time_er2_matched = er2_time[prind1d]
lat_er2_matched = er2_y[prind1d]
lon_er2_matched = er2_x[prind1d]
alt_er2_matched = er2_alt[prind1d]
dist_er2_matched = prdistance
ind_er2_matched = prind1d # TODO: This will be useful var in Barnes-weighted mean for query_k>1
else: # do a Barnes weighted mean of the radar gates
# Mask matched data that is outside of the defined bounds
bad_inds = np.where(prind1d == radar_dbz.shape[0])
if len(bad_inds[0]) > 0 or len(bad_inds[1]) > 0:
print('Nearest radar gate was outside distance upper bound...eliminating those instances')
#mask inds and distances that are outside the search area
prind1d[bad_inds] = np.ma.masked
prdistance[bad_inds] = np.ma.masked
# Trim radar data to only include valid matched values
dbz_matched = radar_dbz[prind1d]
dbz_matched = np.ma.masked_where(prind1d == 0, dbz_matched)
# vel_matched = radar_vel[prind1d]
# vel_matched = np.ma.masked_where(prind1d == 0, vel_matched)
width_matched = radar_width[prind1d]
width_matched = np.ma.masked_where(prind1d == 0, width_matched)
if radname=='CRS':
ldr_matched = radar_ldr[prind1d]
ldr_matched = np.ma.masked_where(prind1d == 0, ldr_matched)
elif radname=='HIWRAP':
ldr_matched = radar_ldr[prind1d]
ldr_matched = np.ma.masked_where(prind1d == 0, ldr_matched)
dbz2_matched = radar2_dbz[prind1d]
dbz2_matched = np.ma.masked_where(prind1d == 0, dbz2_matched)
# vel2_matched = radar2_vel[prind1d]
# vel2_matched = np.ma.masked_where(prind1d == 0, vel2_matched)
width2_matched = radar2_width[prind1d]
width2_matched = np.ma.masked_where(prind1d == 0, width2_matched)
ldr2_matched = radar2_ldr[prind1d]
ldr2_matched = np.ma.masked_where(prind1d == 0, ldr2_matched)
# Eliminate observations that are outliers (e.g., skin paints) before averaging the data
# Follows Chase et al. (2018, JGR; https://github.com/dopplerchase/Chase_et_al_2018/blob/master/apr3tocit_tools.py)
# See http://colingorrie.github.io/outlier-detection.html for more info
# dbz
if outlier_method=='iqr':
IQR = np.array([])
for i in range(dbz_matched.shape[0]):
dbz_matched_sub = dbz_matched[i, :]
dbz_matched_sub = dbz_matched_sub[~dbz_matched_sub.mask] # remove masked matched values
if len(dbz_matched_sub)==0:
IQR = np.append(IQR, np.nan)
else: # mask gates where dbz > 1.5*IQR above 75th percentile
centiles = np.nanpercentile(dbz_matched_sub, [25, 75])
if isinstance(centiles, np.ndarray):
IQR = np.append(IQR, centiles[1] - centiles[0])
dbz_matched_sub = np.ma.masked_where(dbz_matched_sub > centiles[1]+1.5*IQR[-1], dbz_matched_sub)
dbz_matched[i, :] = dbz_matched_sub
IQR = np.ma.masked_invalid(IQR)
elif outlier_method=='ldr':
IQR = np.array([])
for i in range(dbz_matched.shape[0]):
dbz_matched_sub = dbz_matched[i, :]
ldr_matched_sub = ldr_matched[i, :]
'''
if len(~dbz_matched_sub.mask)!=len(ldr_matched_sub):
print(dbz_matched_sub)
print(dbz_matched_sub.mask)
print(ldr_matched_sub)
'''
#ldr_matched_sub = ldr_matched_sub[~dbz_matched_sub.mask] # remove masked matched values
#dbz_matched_sub = dbz_matched_sub[~dbz_matched_sub.mask] # remove masked matched values
if len(dbz_matched_sub)==0:
IQR = np.append(IQR, np.nan)
else:
#centiles = np.nanpercentile(dbz_matched_sub, [25, 75])
centiles = np.nanpercentile(dbz_matched_sub.compressed(), [25, 75])
if isinstance(centiles, np.ndarray):
IQR = np.append(IQR, centiles[1] - centiles[0])
if (centiles[1]-centiles[0])>5.: # to impose strict LDR criteria, need to ensure we're truly removing a skin paint
ldr_thresh = -20. if radname=='CRS' else -40. # use lower (more negative) LDR threshold for Ku-band
dbz_matched_sub = np.ma.masked_where(np.ma.masked_where(dbz_matched_sub.mask, ldr_matched_sub)>ldr_thresh, dbz_matched_sub)
dbz_matched[i, :] = dbz_matched_sub
IQR = np.ma.masked_invalid(IQR)
elif outlier_method=='modz':
IQR = np.array([])
for i in range(dbz_matched.shape[0]):
dbz_matched_sub = dbz_matched[i, :]
dbz_matched_sub = dbz_matched_sub[~dbz_matched_sub.mask] # remove masked matched values
if len(dbz_matched_sub)==0:
IQR = np.append(IQR, np.nan)
else:
centiles = np.nanpercentile(dbz_matched_sub, [25, 75])
if isinstance(centiles, np.ndarray):
IQR = np.append(IQR, centiles[1] - centiles[0])
zthresh = 3.5
mad = np.ma.median(np.abs(dbz_matched_sub - np.ma.median(dbz_matched_sub))) # median absolute difference
zscore = 0.6745 * (dbz_matched_sub - np.ma.median(dbz_matched_sub)) / mad # modified z-score
dbz_matched_sub = np.ma.masked_where(zscore>zthresh, dbz_matched_sub)
dbz_matched[i, :] = dbz_matched_sub
IQR = np.ma.masked_invalid(IQR)
elif outlier_method=='w': # spectrum width skin paint detection
#width_thresh = np.percentile(radar_width.compressed(), 5) # compute the 5th percentile to use as a threshold
#print(width_thresh)
IQR = np.array([])
for i in range(dbz_matched.shape[0]):
dbz_matched_sub = dbz_matched[i, :]
#width_matched_sub = width_matched[i, :]
#width_matched_sub = width_matched_sub[~dbz_matched_sub.mask] # remove masked matched values
#dbz_matched_sub = dbz_matched_sub[~dbz_matched_sub.mask] # remove masked matched values
if len(dbz_matched_sub)==0:
IQR = np.append(IQR, np.nan)
else:
#centiles = np.nanpercentile(dbz_matched_sub, [25, 75])
centiles = np.nanpercentile(dbz_matched_sub.compressed(), [25, 75])
if isinstance(centiles, np.ndarray):
IQR = np.append(IQR, centiles[1] - centiles[0])
#dbz_thresh = 25. # CAUTION: only tested on EXRAD
#dbz_matched_sub = np.ma.masked_where(np.ma.masked_where(dbz_matched_sub.mask, width_matched_sub)<width_thresh, dbz_matched_sub)
#dbz_matched_sub = np.ma.masked_where((dbz_matched_sub>=dbz_thresh) & (width_matched_sub<width_thresh), dbz_matched_sub)
#dbz_matched[i, :] = dbz_matched_sub
IQR = np.ma.masked_invalid(IQR)
# dbz2 (HIWRAP only)
if radname=='HIWRAP':
if outlier_method=='iqr':
IQR2 = np.array([])
for i in range(dbz2_matched.shape[0]):
dbz2_matched_sub = dbz2_matched[i, :]
dbz2_matched_sub = dbz2_matched_sub[~dbz2_matched_sub.mask] # remove masked matched values
if len(dbz2_matched_sub)==0:
IQR2 = np.append(IQR2, np.nan)
else: # mask gates where dbz > 1.5*IQR above 75th percentile
centiles = np.nanpercentile(dbz2_matched_sub, [25, 75])
if isinstance(centiles, np.ndarray):
IQR2 = np.append(IQR2, centiles[1] - centiles[0])
dbz2_matched_sub = np.ma.masked_where(dbz2_matched_sub > centiles[1]+1.5*IQR2[-1], dbz2_matched_sub)
dbz2_matched[i, :] = dbz2_matched_sub
IQR2 = np.ma.masked_invalid(IQR2)
elif outlier_method=='ldr':
IQR2 = np.array([])
for i in range(dbz2_matched.shape[0]):
dbz2_matched_sub = dbz2_matched[i, :]
ldr2_matched_sub = ldr2_matched[i, :]
#ldr2_matched_sub = ldr2_matched_sub[~dbz2_matched_sub.mask] # remove masked matched values
#dbz2_matched_sub = dbz2_matched_sub[~dbz2_matched_sub.mask] # remove masked matched values
if len(dbz2_matched_sub)==0:
IQR2 = np.append(IQR2, np.nan)
else:
#centiles = np.nanpercentile(dbz2_matched_sub, [25, 75])
centiles = np.nanpercentile(dbz2_matched_sub.compressed(), [25, 75])
if isinstance(centiles, np.ndarray):
IQR2 = np.append(IQR2, centiles[1] - centiles[0])
if (centiles[1]-centiles[0])>5.: # to impose strict LDR criteria, need to ensure we're truly removing a skin paint
ldr_thresh = -20. # for Ka-band
dbz2_matched_sub = np.ma.masked_where(np.ma.masked_where(dbz2_matched_sub.mask, ldr2_matched_sub)>ldr_thresh, dbz2_matched_sub)
dbz2_matched[i, :] = dbz2_matched_sub
IQR2 = np.ma.masked_invalid(IQR2)
elif outlier_method=='modz':
IQR2 = np.array([])
for i in range(dbz2_matched.shape[0]):
dbz2_matched_sub = dbz2_matched[i, :]
dbz2_matched_sub = dbz2_matched_sub[~dbz2_matched_sub.mask] # remove masked matched values
if len(dbz2_matched_sub)==0:
IQR2 = np.append(IQR2, np.nan)
else:
centiles = np.nanpercentile(dbz2_matched_sub, [25, 75])
if isinstance(centiles, np.ndarray):
IQR2 = np.append(IQR2, centiles[1] - centiles[0])
zthresh = 3.5
mad = np.ma.median(np.abs(dbz2_matched_sub - np.ma.median(dbz2_matched_sub))) # median absolute difference
zscore = 0.6745 * (dbz2_matched_sub - np.ma.median(dbz2_matched_sub)) / mad # modified z-score
dbz2_matched_sub = np.ma.masked_where(zscore>zthresh, dbz2_matched_sub)
dbz2_matched[i, :] = dbz2_matched_sub
IQR2 = np.ma.masked_invalid(IQR2)
elif outlier_method=='w': # spectrum width skin paint detection
#width2_thresh = np.percentile(radar2_width.compressed(), 5) # compute the 5th percentile to use as a threshold
#print(width2_thresh)
IQR2 = np.array([])
for i in range(dbz2_matched.shape[0]):
dbz2_matched_sub = dbz2_matched[i, :]
#width2_matched_sub = width2_matched[i, :]
#width2_matched_sub = width2_matched_sub[~dbz2_matched_sub.mask] # remove masked matched values
#dbz2_matched_sub = dbz2_matched_sub[~dbz2_matched_sub.mask] # remove masked matched values
if len(dbz2_matched_sub)==0:
IQR2 = np.append(IQR2, np.nan)
else:
#centiles = np.nanpercentile(dbz2_matched_sub, [25, 75])
centiles = np.nanpercentile(dbz2_matched_sub.compressed(), [25, 75])
if isinstance(centiles, np.ndarray):
IQR2 = np.append(IQR2, centiles[1] - centiles[0])
#dbz2_thresh = 25. # CAUTION: only tested on EXRAD
#dbz2_matched_sub = np.ma.masked_where(np.ma.masked_where(dbz2_matched_sub.mask, width2_matched_sub)<width2_thresh, dbz2_matched_sub)
#dbz2_matched[i, :] = dbz2_matched_sub
IQR2 = np.ma.masked_invalid(IQR2)
# Barnes-weighted mean and n-gate standard deviation from the mean
# dbz
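    # Gaussian (Barnes) distance weighting: each matched gate gets weight
    # w = exp(-d**2 / K_d**2), where d is its distance to the P-3 and K_d
    # (= sphere_size) sets the e-folding scale, so closer gates dominate.
    # Reflectivity is averaged in linear units (mm^6 m^-3), then converted back to dBZ.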
dbz_matched = np.ma.masked_where(np.isnan(dbz_matched), dbz_matched)
W_d_k = np.ma.array(np.exp(-1 * prdistance**2. / K_d**2.)) # obtain distance weights
W_d_k2 = np.ma.masked_where(np.ma.getmask(dbz_matched), W_d_k.copy()) # mask weights where dbz is masked
w1 = np.ma.sum(W_d_k2 * 10.**(dbz_matched/10.), axis=1) # weighted sum of linear reflectivity (mm^6 m^-3) per matched period
w2 = np.ma.sum(W_d_k2, axis=1) # sum of weights for each matched period (n-sec interval)
dbz_matched_temp = dbz_matched.copy()
dbz_matched = 10. * np.ma.log10(w1 / w2) # matched dbz will now be 1-D array instead of 2 (was nTimes x query_k)
dbz_stdev = np.ma.zeros(dbz_matched.shape[0])
for i in range(dbz_matched_temp.shape[0]):
square_diff = (dbz_matched_temp[i, :] - dbz_matched[i])**2. # squared differences between gates and weighted mean
ssd = np.nansum(square_diff) # sum of squared differences between gates and weighted mean
if np.isnan(ssd):
dbz_stdev[i] = np.nan
else:
num_goodvals = len(dbz_matched_temp[i, :]) - np.sum(np.isnan(square_diff))
dbz_stdev[i] = np.sqrt(ssd / num_goodvals)
dbz_stdev = np.ma.masked_invalid(dbz_stdev)
dbz_matched = np.ma.masked_where(dbz_stdev>5., dbz_matched) # found to be suspected skin paint artifact
# dbz2 (HIWRAP only)
if radname=='HIWRAP':
dbz2_matched = np.ma.masked_where(np.isnan(dbz2_matched), dbz2_matched)
W_d_k = np.ma.array(np.exp(-1* prdistance**2. / K_d**2.)) # obtain distance weights
W_d_k2 = np.ma.masked_where(np.ma.getmask(dbz2_matched), W_d_k.copy()) # mask weights where dbz is masked
w1 = np.ma.sum(W_d_k2 * 10.**(dbz2_matched/10.), axis=1) # weighted sum of linear reflectivity (mm^6 m^-3) per matched period
w2 = np.ma.sum(W_d_k2, axis=1) # sum of weights for each matched period (n-sec interval)
dbz2_matched_temp = dbz2_matched.copy()
dbz2_matched = 10. * np.ma.log10(w1 / w2) # matched dbz will now be 1-D array instead of 2 (was nTimes x query_k)
dbz2_stdev = np.ma.zeros(dbz2_matched.shape[0])
for i in range(dbz2_matched_temp.shape[0]):
square_diff = (dbz2_matched_temp[i, :] - dbz2_matched[i])**2. # squared differences between gates and weighted mean
ssd = np.nansum(square_diff) # sum of squared differences between gates and weighted mean
if np.isnan(ssd):
dbz2_stdev[i] = np.nan
else:
num_goodvals = len(dbz2_matched_temp[i, :]) - np.sum(np.isnan(square_diff))
dbz2_stdev[i] = np.sqrt(ssd / num_goodvals)
dbz2_stdev = np.ma.masked_invalid(dbz2_stdev)
dbz2_matched = np.ma.masked_where(dbz2_stdev>5., dbz2_matched) # found to be suspected skin paint artifact
# Get the current P-3 lat,lon and alt to save in the matched dictionary - maybe add other P-3 vars to this later
time_p3_matched = p3_time
lat_p3_matched = p3_lat
lon_p3_matched = p3_lon
alt_p3_matched = p3_alt
# Compute time difference, using same Barnes weighting technique
p3_time_tile = np.tile(np.reshape(p3_time, (len(p3_time), 1)), (1, query_k))
time_offset_tile = (er2_time[prind1d] - p3_time_tile) / np.timedelta64(1, 's') # [s]
W_d_k = np.ma.array(np.exp(-1 * prdistance**2. / K_d**2.))
W_d_k2 = np.ma.masked_where(np.ma.getmask(time_offset_tile), W_d_k.copy())
w1 = np.ma.sum(W_d_k2 * time_offset_tile, axis=1)
w2 = np.ma.sum(W_d_k2, axis=1)
time_offset_matched = w1 / w2
time_er2_matched = np.array([], dtype='datetime64[ns]')
# print(p3_time.shape, time_offset_matched.shape)
for i in range(len(time_offset_matched)):
# print(p3_time[i], time_offset_matched[i], p3_time[i]+np.timedelta64(int(time_offset_matched[i]), 's'))
time_er2_matched = np.append(time_er2_matched, p3_time[i] + np.timedelta64(int(time_offset_matched[i]), 's'))
# Compute distance between P-3 and ER-2 gates, using same Barnes weighting technique
W_d_k = np.ma.array(np.exp(-1 * prdistance**2. / K_d**2.))
W_d_k2 = np.ma.masked_where(np.ma.getmask(prdistance), W_d_k.copy())
w1 = np.ma.sum(W_d_k2 * prdistance, axis=1)
w2 = np.ma.sum(W_d_k2, axis=1)
dist_er2_matched = w1 / w2
# Compute ER-2 matched latitude, longitude, and altitude, using same Barnes weighting technique
W_d_k = np.ma.array(np.exp(-1 * prdistance**2. / K_d**2.))
W_d_k2 = np.ma.masked_where(np.ma.getmask(er2_y[prind1d]), W_d_k.copy())
w1 = np.ma.sum(W_d_k2 * er2_y[prind1d], axis=1)
w2 = np.ma.sum(W_d_k2, axis=1)
lat_er2_matched = w1 / w2
W_d_k2 = np.ma.masked_where(np.ma.getmask(er2_x[prind1d]), W_d_k.copy())
w1 = np.ma.sum(W_d_k2 * er2_x[prind1d], axis=1)
w2 = np.ma.sum(W_d_k2, axis=1)
lon_er2_matched = w1 / w2
W_d_k2 = np.ma.masked_where(np.ma.getmask(er2_alt[prind1d]), W_d_k.copy())
w1 = np.ma.sum(W_d_k2 * er2_alt[prind1d], axis=1)
w2 = np.ma.sum(W_d_k2, axis=1)
alt_er2_matched = w1 / w2
# Create the dictionaries
match_obj = {}
kdtree = {}
kdtree['prind1d'] = {}; kdtree['prdistance'] = {}; kdtree['query_k'] = {}
kdtree['prind1d']['data'] = prind1d
kdtree['prind1d']['info'] = 'Index in the raveled radar array (after removing masked dbz values) for the matched values'
kdtree['prdistance']['data'] = dist_er2_matched
kdtree['prdistance']['info'] = 'Cartesian distance between the P-3 and matched radar gate (Barnes average if query_k greater than 1) [m]'
kdtree['query_k']['data'] = query_k
kdtree['query_k']['info'] = 'Number of gates that were considered to be matched'
matched = {}
matched['time_p3'] = {}; matched['lat_p3'] = {}; matched['lon_p3'] = {}; matched['alt_p3'] = {}
matched['time_rad'] = {}; matched['lat_rad'] = {}; matched['lon_rad'] = {}; matched['alt_rad'] = {}
matched['dist'] = {}; matched['time_diff'] = {}
matched['time_p3']['data'] = time_p3_matched
matched['time_p3']['info'] = 'Time of the P-3 observation [numpy datetime64]'
matched['lat_p3']['data'] = lat_p3_matched
matched['lat_p3']['info'] = 'Latitude of the P-3 aircraft [deg]'
matched['lon_p3']['data'] = lon_p3_matched
matched['lon_p3']['info'] = 'Longitude of the P-3 aircraft [deg]'
matched['alt_p3']['data'] = alt_p3_matched
matched['alt_p3']['info'] = 'Altitude of the P-3 aircraft [m]'
matched['time_rad']['data'] = time_er2_matched
matched['time_rad']['info'] = 'Time of the matched radar observation [numpy datetime64]'
matched['lat_rad']['data'] = lat_er2_matched
matched['lat_rad']['info'] = 'Latitude of the center of the matched radar gates [deg]'
matched['lon_rad']['data'] = lon_er2_matched
matched['lon_rad']['info'] = 'Longitude of the center of the matched radar gates [deg]'
matched['alt_rad']['data'] = alt_er2_matched
matched['alt_rad']['info'] = 'Altitude of the center of the matched radar gates [m ASL]'
matched['dist']['data'] = dist_er2_matched
matched['dist']['info'] = 'Cartesian distance between the P-3 and matched radar gate (Barnes average if query_k greater than 1) [m]'
matched['time_diff']['data'] = time_offset_matched
matched['time_diff']['info'] = 'Time difference between the matched radar gate and the P-3 observation [s]'
if radname=='CRS': # Potentially add the other radar vars to the dictionary later
matched['dbz_W'] = {}
matched['dbz_W']['data'] = dbz_matched
matched['dbz_W']['info'] = 'CRS W-band equivalent reflectivity factor matched to the P-3 location [dBZ]'
if query_k>1:
if outlier_method is not None:
matched['dbz_W_IQR'] = {}
matched['dbz_W_IQR']['data'] = IQR
matched['dbz_W_IQR']['info'] = 'Interquartile range in reflectivity for n-closest gates, before noise filtering'
matched['dbz_W_std'] = {}
matched['dbz_W_std']['data'] = dbz_stdev
matched['dbz_W_std']['info'] = 'Standard deviation in reflectivity for n-closest gates from the Barnes-weighted mean'
elif radname=='HIWRAP': # Potentially add the other radar vars to the dictionary later
matched['dbz_Ku'] = {}; matched['dbz_Ka'] = {}
matched['dbz_Ku']['data'] = dbz_matched
matched['dbz_Ku']['info'] = 'HIWRAP Ku-band equivalent reflectivity factor matched to the P-3 location [dBZ]'
matched['dbz_Ka']['data'] = dbz2_matched
matched['dbz_Ka']['info'] = 'HIWRAP Ka-band equivalent reflectivity factor matched to the P-3 location [dBZ]'
if query_k>1:
if outlier_method is not None:
matched['dbz_Ku_IQR'] = {}
matched['dbz_Ku_IQR']['data'] = IQR
matched['dbz_Ku_IQR']['info'] = 'Interquartile range in reflectivity for n-closest gates, before noise filtering'
matched['dbz_Ku_std'] = {}
matched['dbz_Ku_std']['data'] = dbz_stdev
matched['dbz_Ku_std']['info'] = 'Standard deviation in reflectivity for n-closest gates from the Barnes-weighted mean'
if outlier_method is not None:
matched['dbz_Ka_IQR'] = {}
matched['dbz_Ka_IQR']['data'] = IQR2
matched['dbz_Ka_IQR']['info'] = 'Interquartile range in reflectivity for n-closest gates, before noise filtering'
matched['dbz_Ka_std'] = {}
matched['dbz_Ka_std']['data'] = dbz2_stdev
matched['dbz_Ka_std']['info'] = 'Standard deviation in reflectivity for n-closest gates from the Barnes-weighted mean'
elif radname=='EXRAD': # Potentially add the other radar vars to the dictionary later
matched['dbz_X'] = {}
matched['dbz_X']['data'] = dbz_matched
matched['dbz_X']['info'] = 'EXRAD nadir-beam X-band equivalent reflectivity factor matched to the P-3 location [dBZ]'
if query_k>1:
matched['dbz_X_IQR'] = {}
matched['dbz_X_IQR']['data'] = IQR
matched['dbz_X_IQR']['info'] = 'Interquartile range in reflectivity for n-closest gates, before noise filtering'
matched['dbz_X_std'] = {}
matched['dbz_X_std']['data'] = dbz_stdev
matched['dbz_X_std']['info'] = 'Standard deviation in reflectivity for n-closest gates from the Barnes-weighted mean'
    if return_indices:
        matched['prind1d'] = {}
        matched['prind1d']['data'] = prind1d
        matched['prind1d']['info'] = 'Index in the raveled radar array (after removing masked dbz values) for the matched values'
match_obj['kdtree'] = kdtree
match_obj['matched'] = matched
return match_obj
def match_nn(
er2obj, p3obj, Dm_liquid, Dm_solid, Nw, IWC, sphere_size, start_time, end_time,
query_k=1, outlier_method=None, return_indices=False):
'''
Get the matched neural network (NN) radar retrieval data based on the P-3 lat, lon, alt.
Since the NN retrieval can be computationally intensive, Dm_liquid, Dm_solid, Nw, and IWC need to be trimmed inputs.
Inputs:
er2_obj: ER-2 HIWRAP object obtained from the er2read() function
p3_obj: P-3 object obtained from the iwgread() and iwg_avg() functions
Dm_liquid: Retrieved Dm (liquid; mm) trimmed from start/end times
Dm_solid: Retrieved Dm (solid; mm) trimmed from start/end times
Nw: Retrieved Nw (m**-4) trimmed from start/end times
IWC: Retrieved IWC (g m**-3) trimmed from start/end times
sphere_size: Maximum distance [int in m] allowed in the kdTree search
start_time: Start time [str in YYYY-MM-DDTHH:MM:SS format] to consider in matching routine
end_time: End time [str in YYYY-MM-DDTHH:MM:SS format] to consider in matching routine
query_k: Number of gates (int) considered in the average (1 == use closest)
return_indices: True == returns the matched gates in 1d coords; False == does not
'''
# Load P-3 info and trim if needed
p3_time = p3obj['time']['data']
p3_lat = p3obj['Latitude']['data']
p3_lon = p3obj['Longitude']['data']
p3_alt = p3obj['GPS_Altitude']['data']
start_dt64 = np.datetime64(start_time)
end_dt64 = np.datetime64(end_time)
# Trim radar spatial data (to match NN retrieval data) and turn into 1-D arrays
time_inds = np.where((er2obj['time']>=np.datetime64(start_time)) & (er2obj['time']<=np.datetime64(end_time)))[0]
er2_time = np.ravel(er2obj['time_gate'][:, time_inds])
er2_x = np.ravel(er2obj['lon_gate'][:, time_inds])
er2_y = np.ravel(er2obj['lat_gate'][:, time_inds])
er2_alt = np.ravel(er2obj['alt_gate'][:, time_inds])
# Turn NN retrieval data into 1-D arrays
nn_dm_liq = np.ma.ravel(Dm_liquid[:, :])
nn_dm_sol = np.ma.ravel(Dm_solid[:, :])
nn_nw = np.ma.ravel(Nw[:, :])
nn_iwc = np.ma.ravel(IWC[:, :])
# Remove NN retrieval values/gates where data are masked
# Should be ==nn_*.mask if all vars were properly masked outside func
remove_inds = np.logical_or.reduce((nn_dm_liq.mask, nn_dm_sol.mask, nn_nw.mask, nn_iwc.mask))
nn_dm_liq = nn_dm_liq[~remove_inds]
nn_dm_sol = nn_dm_sol[~remove_inds]
nn_nw = nn_nw[~remove_inds]
nn_iwc = nn_iwc[~remove_inds]
er2_time = er2_time[~remove_inds]
er2_x = er2_x[~remove_inds]
er2_y = er2_y[~remove_inds]
er2_alt = er2_alt[~remove_inds]
# Trim P-3 nav data with +/- 1 min buffer on either side of specified period (since P-3 legs differ from the ER-2)
start_dt64 = start_dt64 - np.timedelta64(1, 'm')
end_dt64 = end_dt64 + np.timedelta64(1, 'm')
time_inds = np.where((p3_time>=start_dt64) & (p3_time<=end_dt64))[0]
if ('time_midpoint' in p3obj.keys()) and (p3_time[time_inds[-1]]==end_dt64): # P-3 data averaged in N-sec intervals...need to remove the last ob in time_inds
time_inds = time_inds[:-1]
p3_time = p3_time[time_inds]
p3_lat = p3_lat[time_inds]
p3_lon = p3_lon[time_inds]
p3_alt = p3_alt[time_inds]
# This section may need to be populated to handle masked P-3 nav data (will assume everything is fine for now)
# Set reference point (currently Albany, NY)
lat_0 = 42.6526
lon_0 = -73.7562
# Define a map projection to calculate cartesian distances
    p = Proj(proj='laea', ellps='WGS84', lat_0=lat_0, lon_0=lon_0)
    # Use the projection to get cartesian distances between the datasets
er2_x2, er2_y2 = p(er2_x, er2_y)
p3_x2, p3_y2 = p(p3_lon, p3_lat)
# Set kdtree parameters
leafsize = 16
query_eps = 0
query_p = 2
query_distance_upper_bound = sphere_size
query_n_jobs = 1
K_d = sphere_size
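    # K_d is reused below as the e-folding length of the Barnes weights,
    # w = exp(-d**2 / K_d**2), so the smoothing scale is tied to the search radius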
# Perform the kdtree search
kdt = cKDTree(list(zip(er2_x2, er2_y2, er2_alt)), leafsize=leafsize)
prdistance, prind1d = kdt.query(list(zip(p3_x2, p3_y2, p3_alt)), k=query_k, eps=query_eps, p=query_p,
distance_upper_bound=query_distance_upper_bound, n_jobs=query_n_jobs)
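    # cKDTree.query returns, for each P-3 point, the distance(s) and index(es) of the
    # k nearest gates; points with no neighbor inside sphere_size come back with an
    # infinite distance and index == n (the number of gates), which is screened below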
# Perform the matching routine
if query_k==1: # closest gate approach (more simple)
# Mask matched data that is outside of the defined bounds
bad_inds = np.where(prind1d == nn_dm_liq.shape[0])
if len(bad_inds[0]) > 0:
print('Nearest radar gate was outside distance upper bound...eliminating those instances')
#mask inds and distances that are outside the search area
prind1d[bad_inds] = np.ma.masked
prdistance[bad_inds] = np.ma.masked
# Trim NN retrieval data to only include valid matched values
dm_liq_matched = nn_dm_liq[prind1d]
dm_sol_matched = nn_dm_sol[prind1d]
nw_matched = nn_nw[prind1d]
iwc_matched = nn_iwc[prind1d]
dm_liq_matched = np.ma.masked_where(prind1d == 0, dm_liq_matched)
dm_sol_matched = np.ma.masked_where(prind1d == 0, dm_sol_matched)
nw_matched = np.ma.masked_where(prind1d == 0, nw_matched)
iwc_matched = np.ma.masked_where(prind1d == 0, iwc_matched)
# Get the current P-3 lat,lon and alt to save in the matched dictionary - maybe add other P-3 vars to this later
time_p3_matched = p3_time
lat_p3_matched = p3_lat
lon_p3_matched = p3_lon
alt_p3_matched = p3_alt
# Compute the time difference between matched radar obs and the P-3
time_offset_matched = (er2_time[prind1d] - p3_time) / np.timedelta64(1, 's') # [s]
# Get the current ER-2 nav and radar data to save in the matched dictionary - maybe add other vars to this later
time_er2_matched = er2_time[prind1d]
lat_er2_matched = er2_y[prind1d]
lon_er2_matched = er2_x[prind1d]
alt_er2_matched = er2_alt[prind1d]
dist_er2_matched = prdistance
ind_er2_matched = prind1d # TODO: This will be useful var in Barnes-weighted mean for query_k>1
else: # do a Barnes weighted mean of the NN retrieval gates
# Mask matched data that is outside of the defined bounds
bad_inds = np.where(prind1d == nn_dm_liq.shape[0])
if len(bad_inds[0]) > 0 or len(bad_inds[1]) > 0:
print('Nearest radar gate was outside distance upper bound...eliminating those instances')
#mask inds and distances that are outside the search area
prind1d[bad_inds] = np.ma.masked
prdistance[bad_inds] = np.ma.masked
# Trim NN retrieval data to only include valid matched values
dm_liq_matched = nn_dm_liq[prind1d]
dm_sol_matched = nn_dm_sol[prind1d]
nw_matched = nn_nw[prind1d]
iwc_matched = nn_iwc[prind1d]
dm_liq_matched = np.ma.masked_where(prind1d == 0, dm_liq_matched)
dm_sol_matched = np.ma.masked_where(prind1d == 0, dm_sol_matched)
nw_matched = np.ma.masked_where(prind1d == 0, nw_matched)
iwc_matched = np.ma.masked_where(prind1d == 0, iwc_matched)
# === Barnes-weighted mean and n-gate standard deviation from the mean ===
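        # Each variable below is averaged as sum(w_i * x_i) / sum(w_i) with
        # w_i = exp(-d_i**2 / K_d**2), i.e. closer gates contribute more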
# dm_liq
dm_liq_matched = np.ma.masked_where(np.isnan(dm_liq_matched), dm_liq_matched)
W_d_k = np.ma.array(np.exp(-1 * prdistance**2. / K_d**2.)) # obtain distance weights
W_d_k2 = np.ma.masked_where(np.ma.getmask(dm_liq_matched), W_d_k.copy()) # mask weights where dm is masked
w1 = np.ma.sum(W_d_k2 * dm_liq_matched, axis=1) # weighted sum of dm per matched period
w2 = np.ma.sum(W_d_k2, axis=1) # sum of weights for each matched period (n-sec interval)
dm_liq_matched_temp = dm_liq_matched.copy()
dm_liq_matched = w1 / w2 # matched dm will now be 1-D array instead of 2 (was nTimes x query_k)
dm_liq_stdev = np.ma.zeros(dm_liq_matched.shape[0])
for i in range(dm_liq_matched_temp.shape[0]):
square_diff = (dm_liq_matched_temp[i, :] - dm_liq_matched[i])**2. # squared differences between gates and weighted mean
ssd = np.nansum(square_diff) # sum of squared differences between gates and weighted mean
if np.isnan(ssd):
dm_liq_stdev[i] = np.nan
else:
num_goodvals = len(dm_liq_matched_temp[i, :]) - np.sum(np.isnan(square_diff))
dm_liq_stdev[i] = np.sqrt(ssd / num_goodvals)
dm_liq_stdev = np.ma.masked_invalid(dm_liq_stdev)
#dm_liq_matched = np.ma.masked_where(dm_liq_stdev>5., dm_liq_matched) # found to be suspected skin paint artifact
# dm_sol
dm_sol_matched = np.ma.masked_where(np.isnan(dm_sol_matched), dm_sol_matched)
W_d_k = np.ma.array(np.exp(-1 * prdistance**2. / K_d**2.)) # obtain distance weights
W_d_k2 = np.ma.masked_where(np.ma.getmask(dm_sol_matched), W_d_k.copy()) # mask weights where dm is masked
w1 = np.ma.sum(W_d_k2 * dm_sol_matched, axis=1) # weighted sum of dm per matched period
w2 = np.ma.sum(W_d_k2, axis=1) # sum of weights for each matched period (n-sec interval)
dm_sol_matched_temp = dm_sol_matched.copy()
dm_sol_matched = w1 / w2 # matched dm will now be 1-D array instead of 2 (was nTimes x query_k)
dm_sol_stdev = np.ma.zeros(dm_sol_matched.shape[0])
for i in range(dm_sol_matched_temp.shape[0]):
square_diff = (dm_sol_matched_temp[i, :] - dm_sol_matched[i])**2. # squared differences between gates and weighted mean
ssd = np.nansum(square_diff) # sum of squared differences between gates and weighted mean
if np.isnan(ssd):
dm_sol_stdev[i] = np.nan
else:
num_goodvals = len(dm_sol_matched_temp[i, :]) - np.sum(np.isnan(square_diff))
dm_sol_stdev[i] = np.sqrt(ssd / num_goodvals)
dm_sol_stdev = np.ma.masked_invalid(dm_sol_stdev)
#dm_sol_matched = np.ma.masked_where(dm_sol_stdev>5., dm_sol_matched) # found to be suspected skin paint artifact
# nw
nw_matched = np.ma.masked_where(np.isnan(nw_matched), nw_matched)
W_d_k = np.ma.array(np.exp(-1 * prdistance**2. / K_d**2.)) # obtain distance weights
        W_d_k2 = np.ma.masked_where(np.ma.getmask(nw_matched), W_d_k.copy()) # mask weights where nw is masked
w1 = np.ma.sum(W_d_k2 * nw_matched, axis=1) # weighted sum of nw per matched period
w2 = np.ma.sum(W_d_k2, axis=1) # sum of weights for each matched period (n-sec interval)
nw_matched_temp = nw_matched.copy()
nw_matched = w1 / w2 # matched nw will now be 1-D array instead of 2 (was nTimes x query_k)
nw_stdev = np.ma.zeros(nw_matched.shape[0])
for i in range(nw_matched_temp.shape[0]):
square_diff = (nw_matched_temp[i, :] - nw_matched[i])**2. # squared differences between gates and weighted mean
ssd = np.nansum(square_diff) # sum of squared differences between gates and weighted mean
if np.isnan(ssd):
nw_stdev[i] = np.nan
else:
num_goodvals = len(nw_matched_temp[i, :]) - np.sum(np.isnan(square_diff))
nw_stdev[i] = np.sqrt(ssd / num_goodvals)
nw_stdev = np.ma.masked_invalid(nw_stdev)
#nw_matched = np.ma.masked_where(nw_stdev>5., nw_matched) # found to be suspected skin paint artifact
# iwc
iwc_matched = np.ma.masked_where(np.isnan(iwc_matched), iwc_matched)
W_d_k = np.ma.array(np.exp(-1 * prdistance**2. / K_d**2.)) # obtain distance weights
        W_d_k2 = np.ma.masked_where(np.ma.getmask(iwc_matched), W_d_k.copy()) # mask weights where iwc is masked
        w1 = np.ma.sum(W_d_k2 * iwc_matched, axis=1) # weighted sum of iwc per matched period
        w2 = np.ma.sum(W_d_k2, axis=1) # sum of weights for each matched period (n-sec interval)
        iwc_matched_temp = iwc_matched.copy()
        iwc_matched = w1 / w2 # matched iwc will now be a 1-D array instead of 2-D (was nTimes x query_k)
iwc_stdev = np.ma.zeros(iwc_matched.shape[0])
for i in range(iwc_matched_temp.shape[0]):
square_diff = (iwc_matched_temp[i, :] - iwc_matched[i])**2. # squared differences between gates and weighted mean
ssd = np.nansum(square_diff) # sum of squared differences between gates and weighted mean
if np.isnan(ssd):
iwc_stdev[i] = np.nan
else:
num_goodvals = len(iwc_matched_temp[i, :]) - np.sum(np.isnan(square_diff))
iwc_stdev[i] = np.sqrt(ssd / num_goodvals)
iwc_stdev = np.ma.masked_invalid(iwc_stdev)
#iwc_matched = np.ma.masked_where(iwc_stdev>5., iwc_matched) # found to be suspected skin paint artifact
# Get the current P-3 lat,lon and alt to save in the matched dictionary - maybe add other P-3 vars to this later
time_p3_matched = p3_time
lat_p3_matched = p3_lat
lon_p3_matched = p3_lon
alt_p3_matched = p3_alt
# Compute time difference, using same Barnes weighting technique
p3_time_tile = np.tile(np.reshape(p3_time, (len(p3_time), 1)), (1, query_k))
time_offset_tile = (er2_time[prind1d] - p3_time_tile) / np.timedelta64(1, 's') # [s]
W_d_k = np.ma.array(np.exp(-1 * prdistance**2. / K_d**2.))
W_d_k2 = np.ma.masked_where(np.ma.getmask(time_offset_tile), W_d_k.copy())
w1 = np.ma.sum(W_d_k2 * time_offset_tile, axis=1)
w2 = np.ma.sum(W_d_k2, axis=1)
time_offset_matched = w1 / w2
time_er2_matched = np.array([], dtype='datetime64[ns]')
# print(p3_time.shape, time_offset_matched.shape)
for i in range(len(time_offset_matched)):
# print(p3_time[i], time_offset_matched[i], p3_time[i]+np.timedelta64(int(time_offset_matched[i]), 's'))
time_er2_matched = np.append(time_er2_matched, p3_time[i] + np.timedelta64(int(time_offset_matched[i]), 's'))
# Compute distance between P-3 and ER-2 gates, using same Barnes weighting technique
W_d_k = np.ma.array(np.exp(-1 * prdistance**2. / K_d**2.))
W_d_k2 = np.ma.masked_where(np.ma.getmask(prdistance), W_d_k.copy())
w1 = np.ma.sum(W_d_k2 * prdistance, axis=1)
w2 = np.ma.sum(W_d_k2, axis=1)
dist_er2_matched = w1 / w2
# Compute ER-2 matched latitude, longitude, and altitude, using same Barnes weighting technique
W_d_k = np.ma.array(np.exp(-1 * prdistance**2. / K_d**2.))
W_d_k2 = np.ma.masked_where(np.ma.getmask(er2_y[prind1d]), W_d_k.copy())
w1 = np.ma.sum(W_d_k2 * er2_y[prind1d], axis=1)
w2 = np.ma.sum(W_d_k2, axis=1)
lat_er2_matched = w1 / w2
W_d_k2 = np.ma.masked_where(np.ma.getmask(er2_x[prind1d]), W_d_k.copy())
w1 = np.ma.sum(W_d_k2 * er2_x[prind1d], axis=1)
w2 = np.ma.sum(W_d_k2, axis=1)
lon_er2_matched = w1 / w2
W_d_k2 = np.ma.masked_where(np.ma.getmask(er2_alt[prind1d]), W_d_k.copy())
w1 = np.ma.sum(W_d_k2 * er2_alt[prind1d], axis=1)
w2 = np.ma.sum(W_d_k2, axis=1)
alt_er2_matched = w1 / w2
# Create the dictionaries
match_obj = {}
kdtree = {}
kdtree['prind1d'] = {}; kdtree['prdistance'] = {}; kdtree['query_k'] = {}
kdtree['prind1d']['data'] = prind1d
kdtree['prind1d']['info'] = 'Index in the raveled radar array (after removing masked dbz values) for the matched values'
kdtree['prdistance']['data'] = dist_er2_matched
kdtree['prdistance']['info'] = 'Cartesian distance between the P-3 and matched radar gate (Barnes average if query_k greater than 1) [m]'
kdtree['query_k']['data'] = query_k
kdtree['query_k']['info'] = 'Number of gates that were considered to be matched'
matched = {}
matched['time_p3'] = {}; matched['lat_p3'] = {}; matched['lon_p3'] = {}; matched['alt_p3'] = {}
matched['time_rad'] = {}; matched['lat_rad'] = {}; matched['lon_rad'] = {}; matched['alt_rad'] = {}
matched['dm_liq'] = {}; matched['dm_sol'] = {}; matched['nw'] = {}; matched['iwc'] = {}
matched['dist'] = {}; matched['time_diff'] = {}
matched['time_p3']['data'] = time_p3_matched
matched['time_p3']['info'] = 'Time of the P-3 observation [numpy datetime64]'
matched['lat_p3']['data'] = lat_p3_matched
matched['lat_p3']['info'] = 'Latitude of the P-3 aircraft [deg]'
matched['lon_p3']['data'] = lon_p3_matched
matched['lon_p3']['info'] = 'Longitude of the P-3 aircraft [deg]'
matched['alt_p3']['data'] = alt_p3_matched
matched['alt_p3']['info'] = 'Altitude of the P-3 aircraft [m]'
matched['time_rad']['data'] = time_er2_matched
matched['time_rad']['info'] = 'Time of the matched radar observation [numpy datetime64]'
matched['lat_rad']['data'] = lat_er2_matched
matched['lat_rad']['info'] = 'Latitude of the center of the matched radar gates [deg]'
matched['lon_rad']['data'] = lon_er2_matched
matched['lon_rad']['info'] = 'Longitude of the center of the matched radar gates [deg]'
matched['alt_rad']['data'] = alt_er2_matched
matched['alt_rad']['info'] = 'Altitude of the center of the matched radar gates [m ASL]'
matched['dist']['data'] = dist_er2_matched
matched['dist']['info'] = 'Cartesian distance between the P-3 and matched radar gate (Barnes average if query_k greater than 1) [m]'
matched['time_diff']['data'] = time_offset_matched
matched['time_diff']['info'] = 'Time difference between the matched radar gate and the P-3 observation [s]'
matched['dm_liq']['data'] = dm_liq_matched
matched['dm_liq']['info'] = 'Retrieved liquid equivalent mass-weighted mean diameter [mm]'
matched['dm_sol']['data'] = dm_sol_matched
matched['dm_sol']['info'] = 'Retrieved solid/ice phase mass-weighted mean diameter [mm]'
matched['nw']['data'] = np.ma.log10(nw_matched)
matched['nw']['info'] = 'Retrieved liquid equivalent normalized intercept parameter [log10(m**-4)]'
matched['iwc']['data'] = iwc_matched
matched['iwc']['info'] = 'Retrieved ice water content [g m**-3]'
if query_k>1:
matched['dm_liq_stdev'] = {}; matched['dm_sol_stdev'] = {}; matched['nw_stdev'] = {}; matched['iwc_stdev'] = {}
matched['dm_liq_stdev']['data'] = dm_liq_stdev
matched['dm_liq_stdev']['info'] = 'Standard deviation in Dm_liquid for n-closest gates from the Barnes-weighted mean [mm]'
matched['dm_sol_stdev']['data'] = dm_sol_stdev
matched['dm_sol_stdev']['info'] = 'Standard deviation in Dm_solid for n-closest gates from the Barnes-weighted mean [mm]'
matched['nw_stdev']['data'] = nw_stdev
matched['nw_stdev']['info'] = 'Standard deviation in Nw for n-closest gates from the Barnes-weighted mean [m**-4]'
matched['iwc_stdev']['data'] = iwc_stdev
matched['iwc_stdev']['info'] = 'Standard deviation in IWC for n-closest gates from the Barnes-weighted mean [g m**-3]'
if return_indices:
matched['prind1d']['data'] = prind1d
matched['prind1d']['info'] = 'Index in the raveled radar array (after removing masked dbz values) for the matched values'
match_obj['kdtree'] = kdtree
match_obj['matched'] = matched
return match_obj
```
#### File: joefinlon/Finlon_et_al_2021_DFR/p3.py
```python
import xarray as xr
import numpy as np
import warnings
warnings.filterwarnings('ignore', category=np.VisibleDeprecationWarning)
warnings.filterwarnings('ignore', message='Mean of empty slice')
np.seterr(invalid='ignore')
from datetime import datetime, timedelta
from scipy.optimize import least_squares
try: # try importing the pytmatrix package
from forward import *
except ImportError:
    print(
        'WARNING: The pytmatrix-based forward module could not be imported; reflectivity computations in psdread() will be unavailable.'
    )
def psdread(
twodsfile, hvpsfile, datestr, size_cutoff=1., minD=0.15, maxD=30.,
qc=False, deadtime_thresh=0.6, verbose=True,
start_time=None, end_time=None, tres=5.,
compute_bulk=False, compute_fits=False, Z_interp=False,
matchedZ_W=None, matchedZ_Ka=None, matchedZ_Ku=None, matchedZ_X=None):
'''
Load the 2DS and HVPS PSDs processed by UIOOPS and create n-second combined PSDs with optional bulk properties.
Inputs:
twodsfile: Path to the 2DS data
hvpsfile: Path to the HVPS data
datestr: YYYYMMDD [str]
size_cutoff: Size [mm] for the 2DS-HVPS crossover
minD: Minimum size [mm] to consider in the combined PSD
maxD: Maximum size [mm] to consider in the combined PSD
qc: Boolean to optionally ignore 1-Hz data from averaging when probe dead time > deadtime_thresh
    deadtime_thresh: Dead time threshold [0–1] used to ignore 1-Hz data when qc is True
verbose: Boolean to optionally print all data-related warnings (e.g., high probe dead time)
start_time: Start time [YYYY-MM-DDTHH:MM:SS as str] to consider for the PSDs (optional)
end_time: End time [YYYY-MM-DDTHH:MM:SS as str] to consider for the PSDs (optional)
tres: Averaging interval [s]; tres=1. skips averaging routine
compute_bulk: Boolean to optionally compute bulk statistics such as N, IWC, Dmm, rho_e
compute_fits: Boolean to optionally compute gamma fit parameters N0, mu, lambda
Z_interp: Boolean to optionally simulate Z for additional degrees of riming from Leinonen & Szyrmer (2015; LS15)
matchedZ_Ka, ...: None (skips minimization) or masked array of matched Z values to perform LS15 m-D minimization
'''
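    # Hypothetical usage sketch (file names below are illustrative):
    #   p3psd = psdread('2DS_20200201.nc', 'HVPS_20200201.nc', '20200201',
    #                   start_time='2020-02-01T14:00:00',
    #                   end_time='2020-02-01T15:00:00', tres=5.,
    #                   compute_bulk=True)
    #   ND = p3psd['ND']  # (time x bin) PSD [cm**-4]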
p3psd = {}
if (twodsfile is None) and (hvpsfile is not None):
print('Only using the HVPS data for {}'.format(datestr))
size_cutoff = 0.4 # start HVPS PSD at 0.4 mm
elif (hvpsfile is None) and (twodsfile is not None):
print('Only using the 2DS data for {}'.format(datestr))
size_cutoff = 3.2 # end 2DS PSD at 3.2 mm
elif (twodsfile is None) and (hvpsfile is None):
print('No input files given...exiting')
exit()
# 2DS information
if twodsfile is not None:
ds1 = xr.open_dataset(twodsfile)
time_raw = ds1['time'].values # HHMMSS from flight start date or numpy.datetime64
if np.issubdtype(time_raw.dtype, np.datetime64): # numpy.datetime64
time = np.array(time_raw, dtype='datetime64[s]')
else: # native HHMMSS format (from UIOOPS SD file)
time_dt = [
datetime(int(datestr[0:4]), int(datestr[4:6]), int(datestr[6:]))
+ timedelta(
hours=int(str(int(time_raw[i])).zfill(6)[0:2]),
minutes=int(str(int(time_raw[i])).zfill(6)[2:4]),
seconds=int(str(int(time_raw[i])).zfill(6)[4:]))
                for i in range(len(time_raw))
]
time_str = [
datetime.strftime(time_dt[i], '%Y-%m-%dT%H:%M:%S')
for i in range(len(time_dt))
]
time = np.array(time_str, dtype='datetime64[s]')
bin_min_2ds = ds1['bin_min'].values # mm
bin_max_2ds = ds1['bin_max'].values
bin_inds = np.where((bin_min_2ds>=minD) & (bin_max_2ds<=size_cutoff))[0] # find bins within user-specified range
bin_min_2ds = bin_min_2ds[bin_inds]; bin_max_2ds = bin_max_2ds[bin_inds]
bin_width_2ds = ds1['bin_dD'].values[bin_inds] / 10. # cm
bin_mid_2ds = bin_min_2ds + (bin_width_2ds * 10.) / 2.
count_2ds = ds1['count'].values[:, bin_inds]
sv_2ds = ds1['sample_vol'].values[:, bin_inds] # cm^3
count_hab_2ds = ds1['habitsd'].values[:, bin_inds, :] * np.tile(np.reshape(sv_2ds, (sv_2ds.shape[0], sv_2ds.shape[1], 1)), (1, 1, 10)) * np.tile(
np.reshape(bin_width_2ds, (1, len(bin_width_2ds), 1)), (sv_2ds.shape[0], 1, 10))
ar_2ds = ds1['mean_area_ratio'].values[:, bin_inds] # mean area ratio (circular fit) per bin
asr_2ds = ds1['mean_aspect_ratio_ellipse'].values[:, bin_inds] # mean aspect ratio (elliptical fit) per bin
activetime_2ds = ds1['sum_IntArr'].values # s
if hvpsfile is None:
count = count_2ds; count_hab = count_hab_2ds; sv = sv_2ds; ar = ar_2ds; asr = asr_2ds; activetime_hvps = np.ones(count.shape[0])
bin_min = bin_min_2ds; bin_mid = bin_mid_2ds; bin_max = bin_max_2ds; bin_width = bin_width_2ds
# HVPS information
if hvpsfile is not None:
ds2 = xr.open_dataset(hvpsfile)
bin_min_hvps = ds2['bin_min'].values # mm
bin_max_hvps = ds2['bin_max'].values
bin_inds = np.where((bin_min_hvps>=size_cutoff) & (bin_max_hvps<=maxD))[0] # find bins within user-specified range
bin_min_hvps = bin_min_hvps[bin_inds]; bin_max_hvps = bin_max_hvps[bin_inds]
bin_width_hvps = ds2['bin_dD'].values[bin_inds] / 10. # cm
if size_cutoff==2.:
bin_min_hvps = np.insert(bin_min_hvps, 0, 2.); bin_max_hvps = np.insert(bin_max_hvps, 0, 2.2); bin_width_hvps = np.insert(bin_width_hvps, 0, 0.02)
bin_inds = np.insert(bin_inds, 0, bin_inds[0]-1)
bin_mid_hvps = bin_min_hvps + (bin_width_hvps * 10.) / 2.
count_hvps = ds2['count'].values[:, bin_inds]
sv_hvps = ds2['sample_vol'].values[:, bin_inds] # cm^3
count_hab_hvps = (ds2['habitsd'].values[:, bin_inds, :]) * np.tile(np.reshape(sv_hvps, (sv_hvps.shape[0], sv_hvps.shape[1], 1)), (1, 1, 10)) * np.tile(
np.reshape(bin_width_hvps, (1, len(bin_width_hvps), 1)), (sv_hvps.shape[0], 1, 10))
ar_hvps = ds2['mean_area_ratio'].values[:, bin_inds] # mean area ratio (circular fit) per bin
asr_hvps = ds2['mean_aspect_ratio_ellipse'].values[:, bin_inds] # mean aspect ratio (elliptical fit) per bin
activetime_hvps = ds2['sum_IntArr'].values # s
if size_cutoff==2.: # normalize counts in first bin (1.8-2.2 mm, now only for 2-2.2 mm)
count_hvps[:, 0] = count_hvps[:, 0] / 2.
count_hab_hvps[:, 0, :] = count_hab_hvps[:, 0, :] / 2.
if twodsfile is None:
time_hhmmss = ds2['time'].values # HHMMSS from flight start date
time_dt = [datetime(int(datestr[0:4]), int(datestr[4:6]), int(datestr[6:])) + timedelta(
hours=int(str(int(time_hhmmss[i])).zfill(6)[0:2]), minutes=int(str(int(time_hhmmss[i])).zfill(6)[2:4]),
seconds=int(str(int(time_hhmmss[i])).zfill(6)[4:])) for i in range(len(time_hhmmss))]
time_str = [datetime.strftime(time_dt[i], '%Y-%m-%dT%H:%M:%S') for i in range(len(time_dt))]
time = np.array(time_str, dtype='datetime64[s]')
count = count_hvps; count_hab = count_hab_hvps; sv = sv_hvps; ar = ar_hvps; asr = asr_hvps; activetime_2ds = np.ones(count.shape[0])
bin_min = bin_min_hvps; bin_mid = bin_mid_hvps; bin_max = bin_max_hvps; bin_width = bin_width_hvps
# Combine the datasets
if (twodsfile is not None) and (hvpsfile is not None):
count = np.concatenate((count_2ds, count_hvps), axis=1)
count_hab = np.concatenate((count_hab_2ds, count_hab_hvps), axis=1)
sv = np.concatenate((sv_2ds, sv_hvps), axis=1)
ar = np.concatenate((ar_2ds, ar_hvps), axis=1)
asr = np.concatenate((asr_2ds, asr_hvps), axis=1)
bin_min = np.concatenate((bin_min_2ds, bin_min_hvps))
bin_mid = np.concatenate((bin_mid_2ds, bin_mid_hvps))
bin_max = np.concatenate((bin_max_2ds, bin_max_hvps))
bin_width = np.concatenate((bin_width_2ds, bin_width_hvps))
# Average the data
if start_time is None:
start_dt64 = time[0]
else:
start_dt64 = np.datetime64(start_time)
if end_time is None:
end_dt64 = time[-1] if int(tres)>1 else time[-1]+np.timedelta64(1, 's')
else:
end_dt64 = np.datetime64(end_time) if int(tres)>1 else np.datetime64(end_time)+np.timedelta64(1, 's')
dur = (end_dt64 - start_dt64) / np.timedelta64(1, 's') # dataset duration to consider [s]
# Allocate arrays
count_aver = np.zeros((int(dur/tres), len(bin_mid)))
count_hab_aver = np.zeros((int(dur/tres), len(bin_mid), 8))
sv_aver = np.zeros((int(dur/tres), len(bin_mid)))
at_2ds_aver = np.ma.array(np.ones(int(dur/tres)), mask=False)
at_hvps_aver = np.ma.array(np.ones(int(dur/tres)), mask=False)
ND = np.zeros((int(dur/tres), len(bin_mid)))
ar_aver = np.zeros((int(dur/tres), len(bin_mid)))
asr_aver = np.zeros((int(dur/tres), len(bin_mid)))
time_subset = start_dt64 # allocate time array of N-sec interval obs
curr_time = start_dt64
i = 0
while curr_time+np.timedelta64(int(tres),'s')<=end_dt64:
if curr_time>start_dt64:
time_subset = np.append(time_subset, curr_time)
time_inds = np.where((time>=curr_time) & (time<curr_time+np.timedelta64(int(tres), 's')))[0]
if qc is True:
activetime_thresh = 1. - deadtime_thresh
time_inds = time_inds[(activetime_2ds[time_inds]>=activetime_thresh) & (activetime_hvps[time_inds]>=activetime_thresh)]
if len(time_inds)>0:
count_aver[i, :] = np.nansum(count[time_inds, :], axis=0)
count_hab_aver[i, :, 0] = np.nansum(count_hab[time_inds, :, 3], axis=0) # tiny
count_hab_aver[i, :, 1] = np.nansum(count_hab[time_inds, :, 0], axis=0) # spherical
count_hab_aver[i, :, 2] = np.nansum(count_hab[time_inds, :, 1:3], axis=(0, 2)) # oriented + linear
count_hab_aver[i, :, 3] = np.nansum(count_hab[time_inds, :, 4], axis=0) # hexagonal
count_hab_aver[i, :, 4] = np.nansum(count_hab[time_inds, :, 5], axis=0) # irregular
count_hab_aver[i, :, 5] = np.nansum(count_hab[time_inds, :, 6], axis=0) # graupel
count_hab_aver[i, :, 6] = np.nansum(count_hab[time_inds, :, 7], axis=0) # dendrite
count_hab_aver[i, :, 7] = np.nansum(count_hab[time_inds, :, 8], axis=0) # aggregate
ar_aver[i, :] = np.nanmean(ar[time_inds, :], axis=0) # binned mean of area ratio
asr_aver[i, :] = np.nanmean(asr[time_inds, :], axis=0) # binned mean of aspect ratio
sv_aver[i, :] = np.nansum(sv[time_inds, :], axis=0)
at_2ds_aver[i] = np.nansum(activetime_2ds[time_inds]) / len(time_inds)
at_hvps_aver[i] = np.nansum(activetime_hvps[time_inds]) / len(time_inds)
ND[i, :] = np.nanmean(count[time_inds, :]/sv[time_inds, :], axis=0) / bin_width # take N(D) for each sec, then average [cm**-4]
        else: # Mask data for the current period if dead time from either probe exceeds deadtime_thresh (active time < 1 - deadtime_thresh) for all 1-Hz times
if verbose is True:
print('All 1-Hz data for the {}-s period beginning {} has high dead time. Masking data.'.format(str(tres), np.datetime_as_string(curr_time)))
at_2ds_aver[i] = np.nansum(activetime_2ds[np.where((time>=curr_time) & (time<curr_time+np.timedelta64(int(tres), 's')))[0]]) / tres; at_2ds_aver.mask[i] = True
at_hvps_aver[i] = np.nansum(activetime_hvps[np.where((time>=curr_time) & (time<curr_time+np.timedelta64(int(tres), 's')))[0]]) / tres; at_hvps_aver.mask[i] = True
count_aver[i, :] = np.nan; count_hab_aver[i, :] = np.nan; sv_aver[i, :] = np.nan; ND[i, :] = np.nan; asr_aver[i, :] = np.nan
i += 1
curr_time += np.timedelta64(int(tres), 's')
#ND = np.ma.masked_invalid(count_aver / sv_aver / np.tile(bin_width[np.newaxis, :], (int(dur/tres), 1))) # cm^-4
# Mask arrays
count_aver = np.ma.masked_where(np.isnan(count_aver), count_aver)
count_hab_aver = np.ma.masked_where(np.isnan(count_hab_aver), count_hab_aver)
sv_aver = np.ma.masked_where(np.isnan(sv_aver), sv_aver)
ar_aver = np.ma.masked_invalid(ar_aver)
asr_aver = np.ma.masked_invalid(asr_aver)
ND[~np.isfinite(ND)] = 0.; ND = np.ma.masked_where(ND==0., ND)
# Create dictionary
p3psd['time'] = time_subset
p3psd['count'] = count_aver
p3psd['count_habit'] = count_hab_aver
p3psd['sv'] = sv_aver
p3psd['area_ratio'] = ar_aver
p3psd['aspect_ratio'] = asr_aver
p3psd['ND'] = ND
p3psd['bin_min'] = bin_min
p3psd['bin_mid'] = bin_mid
p3psd['bin_max'] = bin_max
p3psd['bin_width'] = bin_width
p3psd['active_time_2ds'] = at_2ds_aver
p3psd['active_time_hvps'] = at_hvps_aver
if compute_bulk is True:
# Compute Z for various degrees of riming and radar wavelengths
        # Based on work from Leinonen and Szyrmer 2015 (LS15)
# (https://agupubs.onlinelibrary.wiley.com/doi/pdf/10.1002/2015EA000102)
# Follows https://github.com/dopplerchase/Leinonen_Python_Forward_Model
# and uses forward.py and ess238-sup-0002-supinfo.tex in repo
Z = forward_Z() #initialize class
# get the PSD in the format to use in the routine (mks units)
Z.set_PSD(PSD=ND*10.**8, D=bin_mid/1000., dD=bin_width/100., Z_interp=Z_interp)
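        # unit conversions above: ND [cm**-4] * 1e8 -> [m**-4];
        # bin_mid [mm] / 1000 -> [m]; bin_width [cm] / 100 -> [m]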
Z.load_split_L15() # Load the leinonen output
Z.fit_sigmas(Z_interp) # Fit the backscatter cross-sections
Z.fit_rimefrac(Z_interp) # Fit the riming fractions
Z.calc_Z() # Calculate Z...outputs are Z.Z_x, Z.Z_ku, Z.Z_ka, Z.Z_w for the four radar wavelengths
        # Compute IWC and Dmm following Brown and Francis (1995), modified for a Dmax definition following Hogan et al. (2012)
[
N0_bf, N0_hy, mu_bf, mu_hy, lam_bf, lam_hy, iwc_bf, iwc_hy, iwc_hab,
asr_nw, asr_bf, asr_hy, asr_hab, dmm_bf, dmm_hy, dmm_hab, dm_bf, dm_hy,
dm_hab, rho_bf, rho_hy, rho_hab, rhoe_bf, rhoe_hy, rhoe_hab] = calc_bulk(
count_aver, count_hab_aver, sv_aver, asr_aver, bin_mid, bin_width)
# Add bulk variables to the dictionary
if Z_interp is True: # consider additional degrees of riming from LS15
p3psd['riming_mass_array'] = [
0., 0.05, 0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.75, 1., 2.]
else:
p3psd['riming_mass_array'] = [0., 0.1, 0.2, 0.5, 1., 2.]
p3psd['a_coeff_array'] = Z.a_coeff
p3psd['b_coeff_array'] = Z.b_coeff
p3psd['dbz_W'] = Z.Z_w
p3psd['dbz_Ka'] = Z.Z_ka
p3psd['dbz_Ku'] = Z.Z_ku
p3psd['dbz_X'] = Z.Z_x
p3psd['N0_bf'] = N0_bf
p3psd['N0_hy'] = N0_hy
p3psd['mu_bf'] = mu_bf
p3psd['mu_hy'] = mu_hy
p3psd['lambda_bf'] = lam_bf
p3psd['lambda_hy'] = lam_hy
p3psd['iwc_bf'] = iwc_bf
p3psd['iwc_hy'] = iwc_hy
p3psd['iwc_hab'] = iwc_hab
p3psd['mean_aspect_ratio'] = asr_nw
p3psd['mean_aspect_ratio_bf'] = asr_bf
p3psd['mean_aspect_ratio_hy'] = asr_hy
p3psd['mean_aspect_ratio_habit'] = asr_hab
p3psd['dmm_bf'] = dmm_bf
p3psd['dmm_hy'] = dmm_hy
p3psd['dmm_hab'] = dmm_hab
p3psd['dm_bf'] = dm_bf
p3psd['dm_hy'] = dm_hy
p3psd['dm_hab'] = dm_hab
p3psd['eff_density_bf'] = rhoe_bf
p3psd['eff_density_hy'] = rhoe_hy
p3psd['eff_density_hab'] = rhoe_hab
p3psd['density_bf'] = rho_bf
p3psd['density_hy'] = rho_hy
p3psd['density_hab'] = rho_hab
# Optionally constrain the matched Z at Ku- and Ka-band against PSDS to estimate bulk properties
if (
matchedZ_W is not None) or (matchedZ_Ka is not None) or (
matchedZ_Ku is not None) or (matchedZ_X is not None):
p3psd = calc_riming(
p3psd, Z, matchedZ_W, matchedZ_Ka, matchedZ_Ku, matchedZ_X,
compute_fits=compute_fits)
return p3psd
def calc_bulk(particle_count, habit_count, sample_vol, aspect_ratio, bin_mid, bin_width):
x0 = [1.e-1, -1., 5.] # initial guess for N0 [cm**-4], mu, lambda [cm**-1]
# allocate arrays
N0_bf = np.zeros(particle_count.shape[0])
N0_hy = np.zeros(particle_count.shape[0])
mu_bf = np.zeros(particle_count.shape[0])
mu_hy = np.zeros(particle_count.shape[0])
lam_bf = np.zeros(particle_count.shape[0])
lam_hy = np.zeros(particle_count.shape[0])
iwc_bf = np.zeros(particle_count.shape[0])
iwc_hy = np.zeros(particle_count.shape[0])
iwc_hab = np.zeros(particle_count.shape[0])
asr_nw = np.zeros(particle_count.shape[0])
asr_bf = np.zeros(particle_count.shape[0])
asr_hy = np.zeros(particle_count.shape[0])
asr_hab = np.zeros(particle_count.shape[0])
dmm_bf = np.zeros(particle_count.shape[0])
dmm_hy = np.zeros(particle_count.shape[0])
dmm_hab = np.zeros(particle_count.shape[0])
dm_bf = np.zeros(particle_count.shape[0])
dm_hy = np.zeros(particle_count.shape[0])
dm_hab = np.zeros(particle_count.shape[0])
rhoe_bf = np.zeros(particle_count.shape[0])
rhoe_hy = np.zeros(particle_count.shape[0])
rhoe_hab = np.zeros(particle_count.shape[0])
rho_bf = np.zeros((particle_count.shape[0], particle_count.shape[1]))
rho_hy = np.zeros((particle_count.shape[0], particle_count.shape[1]))
rho_hab = np.zeros((particle_count.shape[0], particle_count.shape[1]))
# compute particle habit mass outside loop for speed
a_coeff = np.array([1.96e-3, 1.96e-3, 1.666e-3, 7.39e-3, 1.96e-3, 4.9e-2, 5.16e-4, 1.96e-3])
a_tile = np.tile(np.reshape(a_coeff, (1, len(a_coeff))), (habit_count.shape[1], 1))
b_coeff = np.array([1.9, 1.9, 1.91, 2.45, 1.9, 2.8, 1.8, 1.9])
b_tile = np.tile(np.reshape(b_coeff, (1, len(b_coeff))), (habit_count.shape[1], 1))
D_tile = np.tile(np.reshape(bin_mid, (len(bin_mid), 1)), (1, habit_count.shape[2]))
mass_tile = a_tile * (D_tile/10.) ** b_tile
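    # mass_tile implements the habit-specific m-D relations m = a * (D/10)**b,
    # with D converted from mm to cm, giving mass in g per (bin, habit) pair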
for time_ind in range(particle_count.shape[0]):
if particle_count[time_ind, :].count()==particle_count.shape[1]: # time period is not masked...continue on
            Nt = 1000.*np.nansum(particle_count[time_ind, :]/sample_vol[time_ind, :]) # number concentration [L**-1]
# spherical volume from Chase et al. (2018) [cm**3 / cm**3]
vol = (np.pi / 6.) * np.sum(0.6 * ((bin_mid/10.)**3.) * particle_count[time_ind, :] / sample_vol[time_ind, :])
            # number-weighted mean aspect ratio
asr_nw[time_ind] = np.nansum(aspect_ratio[time_ind, :] * particle_count[time_ind, :]) / np.nansum(particle_count[time_ind, :])
# Brown & Francis products
mass_particle = (0.00294/1.5) * (bin_mid/10.)**1.9 # particle mass [g]
mass_bf = mass_particle * particle_count[time_ind, :] # g (binned)
cumMass_bf = np.nancumsum(mass_bf)
if cumMass_bf[-1]>0.:
iwc_bf[time_ind] = 10.**6 * np.nansum(mass_bf / sample_vol[time_ind, :]) # g m^-3
z_bf = 1.e12 * (0.174/0.93) * (6./np.pi/0.934)**2 * np.nansum(mass_particle**2*particle_count[time_ind, :]/sample_vol[time_ind, :]) # mm^6 m^-3
sol = least_squares(calc_chisquare, x0, method='lm',ftol=1e-9,xtol=1e-9, max_nfev=int(1e6),\
                    args=(Nt,iwc_bf[time_ind],z_bf,bin_mid,bin_width,0.00294/1.5,1.9)) # solve the gamma params using least squares minimization
N0_bf[time_ind] = sol.x[0]; mu_bf[time_ind] = sol.x[1]; lam_bf[time_ind] = sol.x[2]
asr_bf[time_ind] = np.sum(aspect_ratio[time_ind, :] * mass_bf / sample_vol[time_ind, :]) / np.sum(mass_bf / sample_vol[time_ind, :]) # mass-weighted aspect ratio
rhoe_bf[time_ind] = (iwc_bf[time_ind] / 10.**6) / vol # effective density from Chase et al. (2018) [g cm**-3]
rho_bf[time_ind, :] = (mass_bf / particle_count[time_ind, :]) / (np.pi / 6.) / (bin_mid/10.)**3. # rho(D) following Heymsfield et al. (2003) [g cm**-3]
dm_bf[time_ind] = 10. * np.sum((bin_mid/10.) * mass_bf / sample_vol[time_ind, :]) / np.sum(mass_bf / sample_vol[time_ind, :]) # mass-weighted mean D from Chase et al. (2020) [mm]
if cumMass_bf[0]>=0.5*cumMass_bf[-1]:
dmm_bf[time_ind] = bin_mid[0]
else:
dmm_bf[time_ind] = bin_mid[np.where(cumMass_bf>0.5*cumMass_bf[-1])[0][0]-1]
# Heymsfield (2010) products [https://doi.org/10.1175/2010JAS3507.1]
#mass_hy = (0.0061*(bin_mid/10.)**2.05) * particle_count[time_ind, :] # g (binned) H04 definition used in GPM NCAR files
mass_particle = 0.00528 * (bin_mid/10.)**2.1 # particle mass [g]
mass_hy = mass_particle * particle_count[time_ind, :] # g (binned)
cumMass_hy = np.nancumsum(mass_hy)
if cumMass_hy[-1]>0.:
iwc_hy[time_ind] = 10.**6 * np.nansum(mass_hy / sample_vol[time_ind, :]) # g m^-3
z_hy = 1.e12 * (0.174/0.93) * (6./np.pi/0.934)**2 * np.nansum(mass_particle**2*particle_count[time_ind, :]/sample_vol[time_ind, :]) # mm^6 m^-3
sol = least_squares(calc_chisquare, x0, method='lm',ftol=1e-9,xtol=1e-9, max_nfev=int(1e6),\
                    args=(Nt,iwc_hy[time_ind],z_hy,bin_mid,bin_width,0.00528,2.1)) # solve the gamma params using least squares minimization
N0_hy[time_ind] = sol.x[0]; mu_hy[time_ind] = sol.x[1]; lam_hy[time_ind] = sol.x[2]
asr_hy[time_ind] = np.sum(aspect_ratio[time_ind, :] * mass_hy / sample_vol[time_ind, :]) / np.sum(mass_hy / sample_vol[time_ind, :]) # mass-weighted aspect ratio
rhoe_hy[time_ind] = (iwc_hy[time_ind] / 10.**6) / vol # effective density from Chase et al. (2018) [g cm**-3]
rho_hy[time_ind, :] = (mass_hy / particle_count[time_ind, :]) / (np.pi / 6.) / (bin_mid/10.)**3. # rho(D) following Heymsfield et al. (2003) [g cm**-3]
dm_hy[time_ind] = 10. * np.sum((bin_mid/10.) * mass_hy / sample_vol[time_ind, :]) / np.sum(mass_hy / sample_vol[time_ind, :]) # mass-weighted mean D from Chase et al. (2020) [mm]
if cumMass_hy[0]>=0.5*cumMass_hy[-1]:
dmm_hy[time_ind] = bin_mid[0]
else:
dmm_hy[time_ind] = bin_mid[np.where(cumMass_hy>0.5*cumMass_hy[-1])[0][0]-1]
# Habit-specific products
mass_hab = np.sum(mass_tile * habit_count[time_ind, :, :], axis=1) # g (binned)
cumMass_hab = np.nancumsum(mass_hab)
if cumMass_hab[-1]>0.:
if cumMass_hab[0]>=0.5*cumMass_hab[-1]:
dmm_hab[time_ind] = bin_mid[0]
else:
dmm_hab[time_ind] = bin_mid[np.where(cumMass_hab>0.5*cumMass_hab[-1])[0][0]-1]
iwc_hab[time_ind] = 10.**6 * np.nansum(mass_hab / sample_vol[time_ind, :]) # g m^-3
asr_hab[time_ind] = np.sum(aspect_ratio[time_ind, :] * mass_hab / sample_vol[time_ind, :]) / np.sum(mass_hab / sample_vol[time_ind, :]) # mass-weighted aspect ratio
rhoe_hab[time_ind] = (iwc_hab[time_ind] / 10.**6) / vol # effective density from Chase et al. (2018) [g cm**-3]
rho_hab[time_ind, :] = (mass_hab / particle_count[time_ind, :]) / (np.pi / 6.) / (bin_mid/10.)**3. # rho(D) following Heymsfield et al. (2003) [g cm**-3]
dm_hab[time_ind] = 10. * np.sum((bin_mid/10.) * mass_hab / sample_vol[time_ind, :]) / np.sum(mass_hab / sample_vol[time_ind, :]) # mass-weighted mean D from Chase et al. (2020) [mm]
mu_bf = np.ma.masked_where(N0_bf==0., mu_bf)
mu_hy = np.ma.masked_where(N0_hy==0., mu_hy)
lam_bf = np.ma.masked_where(N0_bf==0., lam_bf)
lam_hy = np.ma.masked_where(N0_hy==0., lam_hy)
N0_bf = np.ma.masked_where(N0_bf==0., N0_bf)
N0_hy = np.ma.masked_where(N0_hy==0., N0_hy)
dmm_bf = np.ma.masked_where(dmm_bf==0., dmm_bf)
dmm_hy = np.ma.masked_where(dmm_hy==0., dmm_hy)
dmm_hab = np.ma.masked_where(dmm_hab==0., dmm_hab)
dm_bf = np.ma.masked_where(dm_bf==0., dm_bf)
dm_hy = np.ma.masked_where(dm_hy==0., dm_hy)
dm_hab = np.ma.masked_where(dm_hab==0., dm_hab)
asr_nw = np.ma.masked_where(np.ma.getmask(dmm_bf), asr_nw)
asr_bf = np.ma.masked_where(np.ma.getmask(dmm_bf), asr_bf)
asr_hy = np.ma.masked_where(np.ma.getmask(dmm_hy), asr_hy)
    asr_hab = np.ma.masked_where(np.ma.getmask(dmm_hab), asr_hab)
rhoe_bf = np.ma.masked_where(np.ma.getmask(dmm_bf), rhoe_bf)
rhoe_hy = np.ma.masked_where(np.ma.getmask(dmm_hy), rhoe_hy)
rhoe_hab = np.ma.masked_where(np.ma.getmask(dmm_hab), rhoe_hab)
iwc_bf = np.ma.masked_where(np.ma.getmask(dmm_bf), iwc_bf)
iwc_hy = np.ma.masked_where(np.ma.getmask(dmm_hy), iwc_hy)
iwc_hab = np.ma.masked_where(np.ma.getmask(dmm_hab), iwc_hab)
rho_bf = np.ma.masked_where(rho_bf==0., rho_bf)
rho_hy = np.ma.masked_where(rho_hy==0., rho_hy)
rho_hab = np.ma.masked_where(rho_hab==0., rho_hab)
return (N0_bf, N0_hy, mu_bf, mu_hy, lam_bf, lam_hy, iwc_bf, iwc_hy, iwc_hab, asr_nw, asr_bf, asr_hy, asr_hab, dmm_bf, dmm_hy, dmm_hab,\
dm_bf, dm_hy, dm_hab, rho_bf, rho_hy, rho_hab, rhoe_bf, rhoe_hy, rhoe_hab)
def calc_riming(p3psd, Z, matchedZ_W, matchedZ_Ka, matchedZ_Ku, matchedZ_X, compute_fits=False):
x0 = [1.e-1, -1., 5.] # initial guess for N0 [cm**-4], mu, lambda [cm**-1]
rmass = np.zeros(len(p3psd['time']))
rfrac = np.zeros(len(p3psd['time']))
a_coeff = np.zeros(len(p3psd['time']))
b_coeff = np.zeros(len(p3psd['time']))
Nw = np.zeros(len(p3psd['time']))
N0 = np.zeros(len(p3psd['time']))
mu = np.zeros(len(p3psd['time']))
lam = np.zeros(len(p3psd['time']))
iwc = np.zeros(len(p3psd['time']))
asr = np.zeros(len(p3psd['time']))
dm = np.zeros(len(p3psd['time']))
dmm = np.zeros(len(p3psd['time']))
rho_eff = np.zeros(len(p3psd['time']))
dfr_KuKa = np.zeros(len(p3psd['time']))
error = np.zeros((len(p3psd['time']), len(p3psd['riming_mass_array'])))
for i in range(len(p3psd['time'])):
# loop through the different possible riming masses
for j in range(len(p3psd['riming_mass_array'])):
if (matchedZ_W is not None) and (np.ma.is_masked(matchedZ_W[i]) is False) and (np.ma.is_masked(p3psd['dbz_W'][i, :]) is False):
error[i, j] = error[i, j] + np.abs(matchedZ_W[i] - p3psd['dbz_W'][i, j])
if (matchedZ_Ka is not None) and (np.ma.is_masked(matchedZ_Ka[i]) is False) and (np.ma.is_masked(p3psd['dbz_Ka'][i, :]) is False):
error[i, j] = error[i, j] + np.abs(matchedZ_Ka[i] - p3psd['dbz_Ka'][i, j])
if (matchedZ_Ku is not None) and (np.ma.is_masked(matchedZ_Ku[i]) is False) and (np.ma.is_masked(p3psd['dbz_Ku'][i, :]) is False):
error[i, j] = error[i, j] + np.abs(matchedZ_Ku[i] - p3psd['dbz_Ku'][i, j])
if (matchedZ_X is not None) and (np.ma.is_masked(matchedZ_X[i]) is False) and (np.ma.is_masked(p3psd['dbz_X'][i, :]) is False):
error[i, j] = error[i, j] + np.abs(matchedZ_X[i] - p3psd['dbz_X'][i, j])
if np.sum(error[i, :])>0.:
rmass[i] = p3psd['riming_mass_array'][np.argmin(error[i, :])]
a_coeff[i] = p3psd['a_coeff_array'][np.argmin(error[i, :])]
b_coeff[i] = p3psd['b_coeff_array'][np.argmin(error[i, :])]
if p3psd['count'][i, :].count()==p3psd['count'].shape[1]: # time period is not masked...continue on
Nt = 1000.*np.nansum(p3psd['count'][i, :]/p3psd['sv'][i, :]) # concentration [L**-1]
mass_particle = a_coeff[i] * (p3psd['bin_mid']/10.)**b_coeff[i] # particle mass [g]
mass = mass_particle * p3psd['count'][i, :] # g (binned)
cumMass = np.nancumsum(mass)
if cumMass[-1]>0.:
# Nw (follows Chase et al. 2021)
# [log10(m**-3 mm**-1)]
D_melt = ((6. * mass_particle) / (np.pi * 0.997))**(1./3.)
Nw[i] = np.log10((1e5) * (4.**4 / 6) * np.nansum(
D_melt**3 * p3psd['ND'][i, :] * p3psd['bin_width'])**5 / np.nansum(
D_melt**4 * p3psd['ND'][i, :] * p3psd['bin_width'])**4)
# IWC
iwc[i] = 10.**6 * np.nansum(mass / p3psd['sv'][i, :]) # g m^-3
# DFR
dfr_KuKa[i] = p3psd[
'dbz_Ku'][i, np.argmin(error[i, :])] - p3psd[
'dbz_Ka'][i, np.argmin(error[i, :])] # dB
# Optionally compute N0, mu, lambda
if compute_fits:
z = 10.**(p3psd['dbz_X'][i,np.argmin(error[i, :])]/10.) # mm^6 m^-3
                        # solve gamma params using least squares minimization
sol = least_squares(
calc_chisquare, x0, method='lm', ftol=1e-9, xtol=1e-9,
max_nfev=int(1e6), args=(
Nt, iwc[i], z, p3psd['bin_mid'], p3psd['bin_width'],
a_coeff[i], b_coeff[i], np.argmin(error[i, :])))
N0[i] = sol.x[0]; mu[i] = sol.x[1]; lam[i] = sol.x[2]
# Mass-weighted mean aspect ratio
asr[i] = np.sum(
p3psd['aspect_ratio'][i, :] * mass / p3psd['sv'][i, :]) / np.sum(
mass / p3psd['sv'][i, :])
                    # Bulk riming fraction (see Eqn 1 of Morrison and Grabowski
                    # [2010, https://doi.org/10.1175/2010JAS3250.1] for binned version)
rfrac[i] = np.sum(
np.squeeze(Z.rimefrac[0, :, np.argmin(error[i, :])])
* mass / p3psd['sv'][i, :]) / np.nansum(
mass / p3psd['sv'][i, :]) # SUM(rimed mass conc)/iwc
# Effective density (follows Chase et al. 2018)
vol = (np.pi / 6.) * np.sum(
0.6 * ((p3psd['bin_mid']/10.)**3.) * p3psd['count'][i, :]
/ p3psd['sv'][i, :]) # [cm**3 / cm**3]
rho_eff[i] = (iwc[i] / 10.**6) / vol # [g cm**-3]
# Mass-weighted mean diameter (follows Chase et al. 2020)
# M3/M2 if b==2, more generally M(b+1)/Mb
dm[i] = 10. * np.sum(
(p3psd['bin_mid']/10.) * mass / p3psd['sv'][i, :]) / np.sum(
mass / p3psd['sv'][i, :]) # [mm]
# Mass-weighted median diameter [mm]
if cumMass[0]>=0.5*cumMass[-1]:
dmm[i] = p3psd['bin_mid'][0]
else:
dmm[i] = p3psd[
'bin_mid'][np.where(cumMass>0.5*cumMass[-1])[0][0]-1]
p3psd['sclwp'] = np.ma.masked_where(np.sum(error, axis=1)==0., rmass)
p3psd['riming_frac'] = np.ma.masked_where(np.sum(error, axis=1)==0., rfrac)
p3psd['a_coeff'] = np.ma.masked_where(np.sum(error, axis=1)==0., a_coeff)
p3psd['b_coeff'] = np.ma.masked_where(np.sum(error, axis=1)==0., b_coeff)
if compute_fits:
p3psd['mu_ls'] = np.ma.masked_where(N0==0., mu)
p3psd['lambda_ls'] = np.ma.masked_where(N0==0., lam)
p3psd['N0_ls'] = np.ma.masked_where(N0==0., N0)
p3psd['Nw_ls'] = np.ma.masked_where(np.sum(error, axis=1)==0., Nw)
p3psd['iwc_ls'] = np.ma.masked_where(np.sum(error, axis=1)==0., iwc)
p3psd['mean_aspect_ratio_ls'] = np.ma.masked_where(np.sum(error, axis=1)==0., asr)
p3psd['dm_ls'] = np.ma.masked_where(np.sum(error, axis=1)==0., dm)
p3psd['dmm_ls'] = np.ma.masked_where(np.sum(error, axis=1)==0., dmm)
p3psd['eff_density_ls'] = np.ma.masked_where(np.sum(error, axis=1)==0., rho_eff)
p3psd['dfr_KuKa_ls'] = np.ma.masked_where(np.sum(error, axis=1)==0., dfr_KuKa)
return p3psd
def calc_chisquare(
x, Nt_obs, iwc_obs, z_obs, bin_mid, bin_width, a_coefficient, b_coefficient,
rime_ind=None, exponential=False):
'''
Compute gamma fit parameters for the PSD.
Follows McFarquhar et al. (2015) by finding N0-mu-lambda minimizing first
(Nt), third (mass), sixth (reflectivity) moments.
Inputs:
x: N0, mu, lambda to test on the minimization procedure
Nt_obs: Observed number concentration [L^-1]
iwc_obs: Observed IWC using an assumed m-D relation [g m**-3]
z_obs: Observed Z (following Hogan et al. 2012 definition) using assumed m-D relation [mm**6 m**-3]
bin_mid: Midpoints for the binned particle size [mm]
bin_width: Bin width for the binned particle size [cm]
    a_coefficient: Prefactor component to the assumed m-D relation [cm**-b]
    b_coefficient: Exponent component to the assumed m-D relation
rime_ind (optional, for LS products only): Riming category index to use for the reflectivity moment
exponential: Boolean, True if setting mu=0 for the fit (exponential form)
Outputs:
chi_square: Chi-square value for the provided N0-mu-lambda configuration
'''
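    # Each moment's residual below is normalized as ((obs - fit) / sqrt(obs * fit))**2,
    # so Nt, IWC, and Z contribute on comparable scales despite their different units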
Dmax = bin_mid / 10. # midpoint in cm
dD = bin_width # bin width in cm
mass_particle = a_coefficient * Dmax**b_coefficient # binned particle mass [g]
if exponential: # exponential form with mu=0
ND_fit = x[0] * np.exp(-x[2]*Dmax)
else: # traditional gamma function with variable mu
ND_fit = x[0] * Dmax**x[1] * np.exp(-x[2]*Dmax)
Nt_fit = 1000.*np.nansum(ND_fit*dD) # L**-1
iwc_fit = 10.**6 * np.nansum(mass_particle*ND_fit*dD) # g m**-3
if rime_ind is not None:
Z_fit = forward_Z() #initialize class
Z_fit.set_PSD(PSD=ND_fit[np.newaxis,:]*10.**8, D=Dmax/100., dD=dD/100., Z_interp=True) # get the PSD in the format to use in the routine (mks units)
Z_fit.load_split_L15() # Load the leinonen output
Z_fit.fit_sigmas(Z_interp=True) # Fit the backscatter cross-sections
Z_fit.calc_Z() # Calculate Z...outputs are Z.Z_x, Z.Z_ku, Z.Z_ka, Z.Z_w for the four radar wavelengths
z_fit = 10.**(Z_fit.Z_x[0, rime_ind] / 10.) # mm**6 m**-3
else:
z_fit = 1.e12 * (0.174/0.93) * (6./np.pi/0.934)**2 * np.nansum(mass_particle**2*ND_fit*dD) # mm**6 m**-3
csq_Nt = ((Nt_obs-Nt_fit) / np.sqrt(Nt_obs*Nt_fit))**2
csq_iwc = ((iwc_obs-iwc_fit) / np.sqrt(iwc_obs*iwc_fit))**2
csq_z = ((z_obs-z_fit) / np.sqrt(z_obs*z_fit))**2
chi_square = [csq_Nt, csq_iwc, csq_z]
return chi_square
``` |
{
"source": "joefiorini/qtile-extras",
"score": 2
} |
#### File: qtile-extras/docs/conf.py
```python
import os
import sys
from unittest.mock import MagicMock
class Mock(MagicMock):
    # xcbq does a dir() on objects, pulls stuff out of them and tries to sort
    # the result. MagicMock has a bunch of stuff that can't be sorted, so let's
    # lie about dir().
def __dir__(self):
return []
MOCK_MODULES = [
'libqtile.widget.wlan',
'stravalib',
'stravalib.model',
'units',
'qtile_extras.resources.stravadata.locations',
'libqtile._ffi_pango',
'libqtile.backend.x11._ffi_xcursors',
'libqtile.widget._pulse_audio',
'cairocffi',
'cairocffi.xcb',
'cairocffi.pixbuf',
'cffi',
'dateutil',
'dateutil.parser',
'dbus_next',
'dbus_next.aio',
'dbus_next.service',
'dbus_next.errors',
'dbus_next.constants',
'iwlib',
'keyring',
'mpd',
'psutil',
'trollius',
'xcffib',
'xcffib.randr',
'xcffib.render',
'xcffib.wrappers',
'xcffib.xfixes',
'xcffib.xinerama',
'xcffib.xproto',
'xdg.IconTheme',
]
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES if mod_name not in sys.modules)
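# With the mocks registered, imports of the modules listed above succeed during
# the docs build even when the real C extensions/libraries are absent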
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../'))
# -- Project information -----------------------------------------------------
project = 'qtile-extras'
copyright = '2021, elParaguayo'
author = 'elParaguayo'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.autosectionlabel',
'sphinx_qtile_extras'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# A workaround for the responsive tables always having annoying scrollbars.
def setup(app):
app.add_css_file("noscroll.css")
app.add_css_file("admonitions.css")
```
#### File: resources/footballscores/matchevent.py
```python
class MatchEvent(object):
TYPE_GOAL = "GOAL"
TYPE_RED_CARD = "RED"
TYPE_STATUS = "STATUS"
TYPE_NEW_MATCH = "NEW"
def __init__(self, event_type, match, home=None):
self.eventType = event_type
self.home = home
self.match = match
@property
def is_red(self):
return self.eventType == self.TYPE_RED_CARD
@property
def is_goal(self):
return self.eventType == self.TYPE_GOAL
@property
def is_status_change(self):
return self.eventType == self.TYPE_STATUS
@property
def is_new_match(self):
return self.eventType == self.TYPE_NEW_MATCH
@property
def is_live(self):
return self.match.is_live
@property
def is_fixture(self):
return self.match.is_fixture
@property
def is_finished(self):
return self.match.is_finished
@property
def scorer(self):
if self.is_goal:
if self.home:
return self.match.last_home_goal
else:
return self.match.last_away_goal
@property
def red_card(self):
if self.is_red:
if self.home:
return self.match.last_home_red_card
else:
return self.match.last_away_red_card
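# Hypothetical usage sketch (the `match` object is illustrative):
#   event = MatchEvent(MatchEvent.TYPE_GOAL, match, home=True)
#   if event.is_goal:
#       print(event.scorer)  # last_home_goal from the wrapped match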
```
#### File: qtile_extras/widget/snapcast.py
```python
import shlex
import subprocess
from pathlib import Path
import requests
from libqtile import bar
from libqtile.log_utils import logger
from libqtile.widget import base
from qtile_extras.images import ImgMask
SNAPCAST_ICON = Path(__file__).parent / ".." / "resources" / "snapcast-icons" / "snapcast.svg"
SERVER_STATUS = "Server.GetStatus"
class SnapCast(base._Widget):
"""
A widget to run a snapclient instance in the background.
This is a work in progress. The plan is to add the ability for the client
    to change groups from the widget.
"""
_experimental = True
orientations = base.ORIENTATION_HORIZONTAL
defaults = [
("client_name", None, "Client name (as recognised by server)."),
("server_address", "localhost", "Name or IP address of server."),
("snapclient", "/usr/bin/snapclient", "Path to snapclient"),
("options", "", "Options to be passed to snapclient."),
("icon_size", None, "Icon size. None = autofit."),
("padding", 2, "Padding around icon (and text)."),
(
"active_colour",
"ffffff",
"Colour when client is active and connected to server",
),
("inactive_colour", "999999", "Colour when client is inactive"),
("error_colour", "ffff00", "Colour when client has an error (check logs)"),
]
_screenshots = [("snapcast.png", "Snapclient active running in background")]
_dependencies = ["requests"]
def __init__(self, **config):
base._Widget.__init__(self, bar.CALCULATED, **config)
self.add_defaults(SnapCast.defaults)
self.add_callbacks(
{
"Button1": self.show_select,
"Button3": self.toggle_state,
"Button4": self.scroll_up,
"Button5": self.scroll_down,
}
)
self._id = 0
self._proc = None
self.img = None
self.client_id = None
self.current_group = {}
self.show_text = False
def _configure(self, qtile, bar):
base._Widget._configure(self, qtile, bar)
self._cmd = [self.snapclient]
if self.options:
self._cmd.extend(shlex.split(self.options))
self._load_icon()
self._url = f"http://{self.server_address}:1780/jsonrpc"
self.timeout_add(1, self._check_server)
def _load_icon(self):
self.img = ImgMask.from_path(SNAPCAST_ICON)
self.img.attach_drawer(self.drawer)
if self.icon_size is None:
size = self.bar.height - 1
else:
size = min(self.icon_size, self.bar.height - 1)
self.img.resize(size)
self.icon_size = self.img.width
    def _send_request(self, method, params=None):
self._id += 1
data = {"id": self._id, "jsonrpc": "2.0", "method": method}
if params:
data["params"] = params
r = requests.post(self._url, json=data)
if not r.status_code == 200:
logger.warning("Unable to connect to snapcast server.")
return {}
return r.json()
def _find_id(self, status):
self.client_id = None
self.current_group = {}
for group in status["result"]["server"]["groups"]:
for client in group.get("clients", list()):
if client["host"]["name"] == self.client_name:
self.client_id = client["id"]
self.current_group = {group["name"]: group["id"]}
def _check_server(self):
status = self._send_request(SERVER_STATUS)
if not status:
return
self._find_id(status)
self.streams = [x["id"] for x in status["result"]["server"]["streams"]]
@property
def status_colour(self):
if not self._proc:
return self.inactive_colour
if self.client_id:
return self.active_colour
return self.error_colour
def toggle_state(self):
if self._proc is None:
self._proc = subprocess.Popen(
self._cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL
)
else:
self._proc.terminate()
self._proc = None
self.draw()
def refresh(self):
future = self.qtile.run_in_executor(self._get_data)
future.add_done_callback(self._read_data)
def calculate_length(self):
if self.img is None:
return 0
return self.icon_size
def draw_highlight(self, top=False, colour="000000"):
self.drawer.set_source_rgb(colour)
y = 0 if top else self.bar.height - 2
# Draw the bar
self.drawer.fillrect(0, y, self.width, 2, 2)
def draw(self):
# Remove background
self.drawer.clear(self.background or self.bar.background)
offsety = (self.bar.height - self.img.height) // 2
self.img.draw(colour=self.status_colour, y=offsety)
self.drawer.draw(offsetx=self.offsetx, offsety=self.offsety, width=self.length)
def show_select(self):
pass
def scroll_up(self):
pass
def scroll_down(self):
pass
def finalize(self):
if self._proc:
self._proc.terminate()
base._Widget.finalize(self)
```
#### File: qtile_extras/widget/upower.py
```python
import asyncio
from dbus_next.aio import MessageBus
from dbus_next.constants import BusType
from libqtile import bar
from libqtile.log_utils import logger
from libqtile.widget import base
PROPS_IFACE = "org.freedesktop.DBus.Properties"
UPOWER_SERVICE = "org.freedesktop.UPower"
UPOWER_INTERFACE = "org.freedesktop.UPower"
UPOWER_PATH = "/org/freedesktop/UPower"
UPOWER_DEVICE = UPOWER_INTERFACE + ".Device"
UPOWER_BUS = BusType.SYSTEM
class UPowerWidget(base._Widget):
"""
A graphical widget to display laptop battery level.
The widget uses dbus to read the battery information from the UPower
interface.
The widget will display one icon for each battery found or users can
specify the name of the battery if they only wish to display one.
Clicking on the widget will display the battery level and the time to
empty/full.
All colours can be customised as well as low/critical percentage levels.
"""
orientations = base.ORIENTATION_HORIZONTAL
defaults = [
("font", "sans", "Default font"),
("fontsize", None, "Font size"),
("font_colour", "ffffff", "Font colour for information text"),
("battery_height", 10, "Height of battery icon"),
("battery_width", 20, "Size of battery icon"),
("battery_name", None, "Battery name. None = all batteries"),
("border_charge_colour", "8888ff", "Border colour when charging."),
("border_colour", "dbdbe0", "Border colour when discharging."),
("border_critical_colour", "cc0000", "Border colour when battery low."),
("fill_normal", "dbdbe0", "Fill when normal"),
("fill_low", "aa00aa", "Fill colour when battery low"),
("fill_critical", "cc0000", "Fill when critically low"),
("margin", 2, "Margin on sides of widget"),
("spacing", 5, "Space between batteries"),
("percentage_low", 0.20, "Low level threshold."),
("percentage_critical", 0.10, "Critical level threshold."),
(
"text_charging",
"({percentage:.0f}%) {ttf} until fully charged",
"Text to display when charging.",
),
(
"text_discharging",
"({percentage:.0f}%) {tte} until empty",
"Text to display when on battery.",
),
("text_displaytime", 5, "Time for text to remain before hiding"),
]
_screenshots = [
("battery_normal.png", "Normal"),
("battery_low.png", "Low"),
("battery_critical.png", "Critical"),
("battery_charging.png", "Charging"),
("battery_multiple.png", "Multiple batteries"),
("battery_textdisplay.gif", "Showing text"),
]
_dependencies = ["dbus-next"]
def __init__(self, **config):
base._Widget.__init__(self, bar.CALCULATED, **config)
self.add_defaults(UPowerWidget.defaults)
self.batteries = []
self.charging = False
# Initial variables to hide text
self.show_text = False
self.hide_timer = None
self.configured = False
self.add_callbacks({"Button1": self.toggle_text})
def _configure(self, qtile, bar):
base._Widget._configure(self, qtile, bar)
# Define colours
self.colours = [
(self.percentage_critical, self.fill_critical),
(self.percentage_low, self.fill_low),
(100, self.fill_normal),
]
self.status = [
(self.percentage_critical, "Critical"),
(self.percentage_low, "Low"),
(100, "Normal"),
]
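        # colours/status are looked up with next(...) in _update_battery_info:
        # the first entry whose threshold >= the battery fraction wins, so order matters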
self.borders = {True: self.border_charge_colour, False: self.border_colour}
async def _config_async(self):
await self._setup_dbus()
async def _setup_dbus(self):
# Set up connection to DBus
self.bus = await MessageBus(bus_type=UPOWER_BUS).connect()
introspection = await self.bus.introspect(UPOWER_SERVICE, UPOWER_PATH)
object = self.bus.get_proxy_object(UPOWER_SERVICE, UPOWER_PATH, introspection)
props = object.get_interface("org.freedesktop.DBus.Properties")
props.on_properties_changed(self.upower_change)
self.upower = object.get_interface(UPOWER_INTERFACE)
# Get battery details from DBus
self.batteries = await self.find_batteries()
# Is laptop charging?
self.charging = not await self.upower.get_on_battery()
self.configured = await self._update_battery_info()
def max_text_length(self):
# Generate text string based on status
if self.charging:
text = self.text_charging.format(percentage=100, ttf="99:99")
else:
text = self.text_discharging.format(percentage=100, tte="99:99")
# Calculate width of text
width, _ = self.drawer.max_layout_size([text], self.font, self.fontsize)
return width
def calculate_length(self):
# Start with zero width and we'll add to it
bar_length = 0
if not self.configured:
return 0
        # Compute the total icon width arithmetically when there is more than one battery
num_batteries = len(self.batteries)
if num_batteries:
# Icon widths
length = (
(self.margin * 2)
+ (self.spacing * (num_batteries - 1))
+ (self.battery_width * num_batteries)
)
bar_length += length
# Add text width if it's being displayed
if self.show_text:
bar_length += (self.max_text_length() + self.spacing) * num_batteries
return bar_length
async def find_batteries(self):
# Get all UPower devices that are named "battery"
batteries = await self.upower.call_enumerate_devices()
batteries = [b for b in batteries if "battery" in b]
if not batteries:
logger.warning("No batteries found. No icons will be displayed.")
return []
# Get DBus object for each battery
battery_devices = []
for battery in batteries:
bat = {}
introspection = await self.bus.introspect(UPOWER_SERVICE, battery)
battery_obj = self.bus.get_proxy_object(UPOWER_SERVICE, battery, introspection)
battery_dev = battery_obj.get_interface(UPOWER_DEVICE)
props = battery_obj.get_interface(PROPS_IFACE)
bat["device"] = battery_dev
bat["props"] = props
bat["name"] = await battery_dev.get_native_path()
battery_devices.append(bat)
# If user only wants named battery, get it here
if self.battery_name:
battery_devices = [b for b in battery_devices if b["name"] == self.battery_name]
if not battery_devices:
err = "No battery found matching {}.".format(self.battery_name)
logger.warning(err)
return []
# Listen for change signals on DBus
for battery in battery_devices:
battery["props"].on_properties_changed(self.battery_change)
await self._update_battery_info(False)
return battery_devices
def upower_change(self, interface, changed, invalidated):
# Update the charging status
asyncio.create_task(self._upower_change())
async def _upower_change(self):
self.charging = not await self.upower.get_on_battery()
asyncio.create_task(self._update_battery_info())
def battery_change(self, interface, changed, invalidated):
# The batteries are polled every 2 mins by DBus so let's just update
# when we get any signal
asyncio.create_task(self._update_battery_info())
async def _update_battery_info(self, draw=True):
for battery in self.batteries:
dev = battery["device"]
percentage = await dev.get_percentage()
battery["fraction"] = percentage / 100.0
battery["percentage"] = percentage
if self.charging:
ttf = await dev.get_time_to_full()
battery["ttf"] = self.secs_to_hm(ttf)
battery["tte"] = ""
else:
tte = await dev.get_time_to_empty()
battery["tte"] = self.secs_to_hm(tte)
battery["ttf"] = ""
battery["status"] = next(x[1] for x in self.status if battery["fraction"] <= x[0])
if draw:
self.qtile.call_soon(self.bar.draw)
return True
def draw(self):
if not self.configured:
return
# Remove background
self.drawer.clear(self.background or self.bar.background)
# Define an offset for widgets
offset = self.margin
# Work out top of battery
top_margin = (self.bar.height - self.battery_height) / 2
# Loop over each battery
for battery in self.batteries:
# Get battery energy level
percentage = battery["fraction"]
# Get the appropriate fill colour
            # This finds the first threshold in self.colours that is greater
            # than or equal to the current battery level and returns the
            # associated colour string
fill = next(x[1] for x in self.colours if percentage <= x[0])
# Choose border colour
if (percentage <= self.percentage_critical) and not self.charging:
border = self.border_critical_colour
else:
border = self.borders[self.charging]
# Draw the border
self.drawer._rounded_rect(
offset, top_margin, self.battery_width, self.battery_height, 1
)
self.drawer.set_source_rgb(border)
self.drawer.ctx.stroke()
# Work out size of bar inside icon
fill_width = 2 + (self.battery_width - 6) * percentage
# Draw the filling of the battery
self.drawer._rounded_rect(
offset + 2, top_margin + 2, fill_width, (self.battery_height - 4), 0
)
self.drawer.set_source_rgb(fill)
self.drawer.ctx.fill()
# Increase offset for next battery
offset = offset + self.spacing + self.battery_width
if self.show_text:
# Generate text based on status and format time-to-full or
# time-to-empty
if self.charging:
text = self.text_charging.format(**battery)
else:
text = self.text_discharging.format(**battery)
# Create a text box
layout = self.drawer.textlayout(
text, self.font_colour, self.font, self.fontsize, None, wrap=False
)
# We want to centre this vertically
y_offset = (self.bar.height - layout.height) / 2
# Set the layout as wide as the widget so text is centred
layout.width = self.max_text_length()
# Draw it
layout.draw(offset, y_offset)
# Increase the offset
offset += layout.width
# Redraw the bar
self.drawer.draw(offsetx=self.offset, offsety=self.offsety, width=self.length)
def secs_to_hm(self, secs):
# Basic maths to convert seconds to h:mm format
m, _ = divmod(secs, 60)
h, m = divmod(m, 60)
        # Make sure minutes are zero-padded in case of a single digit
return "{}:{:02d}".format(h, m)
def toggle_text(self):
if not self.show_text:
self.show_text = True
# Start a timer to hide the text
self.hide_timer = self.timeout_add(self.text_displaytime, self.hide)
else:
self.show_text = False
# Cancel the timer as no need for it if text is hidden already
if self.hide_timer:
self.hide_timer.cancel()
self.bar.draw()
def hide(self):
# Self-explanatory!
self.show_text = False
self.bar.draw()
def info(self):
info = base._Widget.info(self)
info["batteries"] = [
{k: v for k, v in x.items() if k not in ["device", "props"]} for x in self.batteries
]
info["charging"] = self.charging
info["levels"] = self.status
return info
```
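For context, here is a minimal sketch of how a widget like this is typically placed in a bar. The import path mirrors the test files below; the option values shown are illustrative assumptions, not defaults taken from the source.
```python
# Hypothetical qtile config snippet; option values are illustrative.
from libqtile import bar
from libqtile.config import Screen

from qtile_extras import widget

screens = [
    Screen(
        top=bar.Bar(
            [
                widget.UPowerWidget(
                    battery_name="BAT0",   # only show this battery (None = all)
                    percentage_low=0.25,   # raise the "low" warning threshold
                    text_displaytime=3,    # hide the click-through text sooner
                ),
            ],
            24,
        )
    )
]
```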
#### File: test/scripts/window.py
```python
import os
if os.environ.get("WAYLAND_DISPLAY"):
os.environ["GDK_BACKEND"] = "wayland"
else:
os.environ["GDK_BACKEND"] = "x11"
# Disable GTK ATK bridge, which appears to trigger errors with e.g. test_strut_handling
# https://wiki.gnome.org/Accessibility/Documentation/GNOME2/Mechanics
os.environ["NO_AT_BRIDGE"] = "1"
import sys
from pathlib import Path
import gi
gi.require_version("Gdk", "3.0")
gi.require_version("Gtk", "3.0")
from gi.repository import Gdk, Gtk
from dbus_next import Message, Variant
from dbus_next.glib import MessageBus
from dbus_next.constants import MessageType, PropertyAccess
from dbus_next.service import ServiceInterface, dbus_property, method, signal
icon_path = Path(__file__).parent / ".." / "resources" / "icons" / "menuitem.png"
class SNIMenu(ServiceInterface):
"""
Simplified DBusMenu interface.
Only exports methods, properties and signals required by
StatusNotifier widget.
"""
def __init__(self, window, kill, *args):
ServiceInterface.__init__(self, *args)
self.window = window
self.kill = kill
@signal()
def LayoutUpdated(self) -> "ui":
return [1, 0]
@method()
def AboutToShow(self, id: "i") -> "b":
return True
@method()
def GetLayout(self, parent_id: "i", recursion_depth: "i", properties: "as") -> "u(ia{sv}av)":
with open(icon_path.as_posix(), "rb") as icon:
raw = icon.read()
return [
1,
[
1,
{},
[
Variant(
"(ia{sv}av)",
[
0,
{
"enabled": Variant("b", True),
"visible": Variant("b", True),
"label": Variant("s", "Test Menu"),
"children-display": Variant("s", "submenu"),
"icon-data": Variant("ay", bytes(raw)),
},
[],
],
),
Variant(
"(ia{sv}av)",
[
1,
{
"enabled": Variant("b", True),
"visible": Variant("b", True),
"label": Variant("s", "Quit"),
"icon-data": Variant("s", icon_path.as_posix()),
},
[],
],
),
],
],
]
@method()
def Event(self, id: "i", event_id: "s", data: "v", timestamp: "u"):
if id == 1:
self.kill()
class SNItem(ServiceInterface):
"""
Simplified StatusNotifierItem interface.
Only exports methods, properties and signals required by
StatusNotifier widget.
"""
def __init__(self, window, *args):
ServiceInterface.__init__(self, *args)
self.window = window
self.fullscreen = False
@method()
def Activate(self, x: "i", y: "i"):
if self.fullscreen:
self.window.unfullscreen()
else:
self.window.fullscreen()
self.fullscreen = not self.fullscreen
@dbus_property(PropertyAccess.READ)
def IconName(self) -> "s":
return ""
@dbus_property(PropertyAccess.READ)
def IconPixmap(self) -> "a(iiay)":
return [[32, 32, bytes([100] * (32 * 32 * 4))]]
@dbus_property(PropertyAccess.READ)
def AttentionIconPixmap(self) -> "a(iiay)":
return []
@dbus_property(PropertyAccess.READ)
def OverlayIconPixmap(self) -> "a(iiay)":
return []
@dbus_property(PropertyAccess.READ)
def IsMenu(self) -> "b":
return False
@dbus_property(PropertyAccess.READ)
def Menu(self) -> "s":
return "/DBusMenu"
@signal()
def NewIcon(self):
pass
@signal()
def NewAttentionIcon(self):
pass
@signal()
def NewOverlayIcon(self):
pass
if __name__ == "__main__":
# GTK consumes the `--name <class>` args
if len(sys.argv) > 1:
title = sys.argv[1]
else:
title = "TestWindow"
if len(sys.argv) > 2:
window_type = sys.argv[2]
else:
window_type = "normal"
# Check if we want to export a StatusNotifierItem interface
sni = "export_sni_interface" in sys.argv
win = Gtk.Window(title=title)
win.connect("destroy", Gtk.main_quit)
win.connect("key-press-event", Gtk.main_quit)
win.set_default_size(100, 100)
if window_type == "notification":
if os.environ["GDK_BACKEND"] == "wayland":
try:
gi.require_version("GtkLayerShell", "0.1")
from gi.repository import GtkLayerShell
except ValueError:
sys.exit(1)
win.add(Gtk.Label(label="This is a test notification"))
GtkLayerShell.init_for_window(win)
else:
win.set_type_hint(Gdk.WindowTypeHint.NOTIFICATION)
elif window_type == "normal":
win.set_type_hint(Gdk.WindowTypeHint.NORMAL)
if sni:
bus = MessageBus().connect_sync()
item = SNItem(win, "org.kde.StatusNotifierItem")
menu = SNIMenu(win, Gtk.main_quit, "com.canonical.dbusmenu")
# Export interfaces on the bus
bus.export("/StatusNotifierItem", item)
bus.export("/DBusMenu", menu)
# Request the service name
bus.request_name_sync(f"test.qtile.window-{title.replace(' ','-')}")
msg = bus.call_sync(
Message(
message_type=MessageType.METHOD_CALL,
destination="org.freedesktop.StatusNotifierWatcher",
interface="org.freedesktop.StatusNotifierWatcher",
path="/StatusNotifierWatcher",
member="RegisterStatusNotifierItem",
signature="s",
body=[bus.unique_name],
)
)
win.show_all()
Gtk.main()
```
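The script above is driven entirely by positional arguments (window title, then window type) plus an optional `export_sni_interface` flag. A sketch of how a test harness might launch it; the script path is an assumption.
```python
# Hypothetical launcher for the helper script; the script path is assumed.
import subprocess

proc = subprocess.Popen(
    ["python", "test/scripts/window.py", "TestWindow", "normal", "export_sni_interface"]
)
# ... drive the window under test, then clean up ...
proc.terminate()
proc.wait()
```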
#### File: qtile-extras/test/test_bar.py
```python
import libqtile.config
import libqtile.confreader
import libqtile.layout
from qtile_extras import widget
from qtile_extras.bar import Bar
class GeomConf(libqtile.confreader.Config):
auto_fullscreen = False
keys = []
mouse = []
groups = [
libqtile.config.Group("a"),
libqtile.config.Group("b"),
libqtile.config.Group("c"),
libqtile.config.Group("d"),
]
layouts = [libqtile.layout.stack.Stack(num_stacks=1)]
floating_layout = libqtile.resources.default_config.floating_layout
screens = [
libqtile.config.Screen(
top=Bar([], 10),
bottom=Bar([], 10),
left=Bar([], 10),
right=Bar([], 10),
)
]
def test_bar_border_horizontal(manager_nospawn):
config = GeomConf
config.screens = [
libqtile.config.Screen(
top=Bar(
[widget.Spacer()],
12,
margin=5,
border_width=5,
),
bottom=Bar(
[widget.Spacer()],
12,
margin=5,
border_width=0,
),
)
]
manager_nospawn.start(config)
top_info = manager_nospawn.c.bar["top"].info
bottom_info = manager_nospawn.c.bar["bottom"].info
# Screen is 800px wide so:
# -top bar should have width of 800 - 5 - 5 - 5 - 5 = 780 (margin and border)
# -bottom bar should have width of 800 - 5 - 5 = 790 (margin and no border)
assert top_info()["width"] == 780
assert bottom_info()["width"] == 790
# Bar "height" should still be the value set in the config but "size" is
# adjusted for margin and border:
# -top bar should have size of 12 + 5 + 5 + 5 + 5 = 32 (margin and border)
    # -bottom bar should have size of 12 + 5 + 5 = 22 (margin and no border)
assert top_info()["height"] == 12
assert top_info()["size"] == 32
assert bottom_info()["height"] == 12
assert bottom_info()["size"] == 22
# Test widget offsets
# Where there is a border, widget should be offset by that amount
_, xoffset = manager_nospawn.c.bar["top"].eval("self.widgets[0].offsetx")
assert xoffset == "5"
_, yoffset = manager_nospawn.c.bar["top"].eval("self.widgets[0].offsety")
assert xoffset == "5"
# Where there is no border, this should be 0
_, xoffset = manager_nospawn.c.bar["bottom"].eval("self.widgets[0].offsetx")
assert xoffset == "0"
_, yoffset = manager_nospawn.c.bar["bottom"].eval("self.widgets[0].offsety")
assert xoffset == "0"
def test_bar_border_vertical(manager_nospawn):
config = GeomConf
config.screens = [
libqtile.config.Screen(
left=Bar(
[widget.Spacer()],
12,
margin=5,
border_width=5,
),
right=Bar(
[widget.Spacer()],
12,
margin=5,
border_width=0,
),
)
]
manager_nospawn.start(config)
left_info = manager_nospawn.c.bar["left"].info
right_info = manager_nospawn.c.bar["right"].info
# Screen is 600px tall so:
# -left bar should have height of 600 - 5 - 5 - 5 - 5 = 580 (margin and border)
# -right bar should have height of 600 - 5 - 5 = 590 (margin and no border)
assert left_info()["height"] == 580
assert right_info()["height"] == 590
# Bar "width" should still be the value set in the config but "size" is
# adjusted for margin and border:
# -left bar should have size of 12 + 5 + 5 + 5 + 5 = 32 (margin and border)
    # -right bar should have size of 12 + 5 + 5 = 22 (margin and no border)
assert left_info()["width"] == 12
assert left_info()["size"] == 32
assert right_info()["width"] == 12
assert right_info()["size"] == 22
# Test widget offsets
# Where there is a border, widget should be offset by that amount
_, xoffset = manager_nospawn.c.bar["left"].eval("self.widgets[0].offsetx")
assert xoffset == "5"
_, yoffset = manager_nospawn.c.bar["left"].eval("self.widgets[0].offsety")
assert xoffset == "5"
# Where there is no border, this should be 0
_, xoffset = manager_nospawn.c.bar["right"].eval("self.widgets[0].offsetx")
assert xoffset == "0"
_, yoffset = manager_nospawn.c.bar["right"].eval("self.widgets[0].offsety")
assert xoffset == "0"
```
#### File: test/widget/test_widget_init.py
```python
import logging
from importlib import reload
import pytest
from libqtile.log_utils import init_log
from libqtile.widget.import_error import ImportErrorWidget
import qtile_extras.widget
def bad_importer(*args, **kwargs):
raise ImportError()
def test_init_import_error(monkeypatch, caplog):
"""Check we get an ImportError widget with missing import?"""
init_log(logging.INFO, log_path=None, log_color=False)
monkeypatch.setattr("qtile_extras.widget.importlib.import_module", bad_importer)
widget = qtile_extras.widget.WiFiIcon()
assert isinstance(widget, ImportErrorWidget)
assert "Unmet dependencies" in caplog.text
def test_init_import_error_no_fallback(monkeypatch, caplog):
"""If there's no fallback, we get an ImportError"""
init_log(logging.INFO, log_path=None, log_color=False)
monkeypatch.setattr("qtile_extras.widget.importlib.import_module", bad_importer)
monkeypatch.setattr("libqtile.widget.import_error.make_error", None)
reload(qtile_extras.widget)
with pytest.raises(ImportError):
_ = qtile_extras.widget.WiFiIcon()
def test_init_widget_dir():
"""Check patched dir method"""
assert dir(qtile_extras.widget) == sorted(list(qtile_extras.widget.all_widgets.keys()))
``` |
{
"source": "joeflack4/joefuncs",
"score": 4
} |
#### File: algorithms/sorts/__init__.py
```python
def qsort(arr):
"""A QuickSort Algorithm.
Author:
zangw: http://stackoverflow.com/users/3011380/zangw
References:
http://stackoverflow.com/questions/18262306/quick-sort-with-python
Alternate Implementations:
# 1. Non-DRY
def qsort(arr):
if len(arr) <= 1:
return arr
return qsort([x for x in arr[1:] if x < arr[0]]) + [arr[0]] + qsort(
[x for x in arr[1:] if x >= arr[0]])
"""
from operator import gt, le
def compare(operator):
"""Comparison."""
return [x for x in arr[1:] if operator(x, arr[0])]
ops = {'>': gt, '<=': le}
    return arr if len(arr) <= 1 \
        else qsort(compare(ops['<='])) + [arr[0]] + qsort(compare(ops['>']))
```
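A quick usage sketch; the import path is an assumption based on the sibling modules in this repository.
```python
# Usage sketch for qsort; the import path is an assumption.
from joeutils.algorithms.sorts import qsort

assert qsort([5, 2, 9, 1, 5]) == [1, 2, 5, 5, 9]
assert qsort([]) == []
assert qsort(["b", "a", "c"]) == ["a", "b", "c"]
```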
#### File: data_manipulation/sets/__init__.py
```python
def non_empties(*args):
    """Get sets of non-empty elements, one set per iterable."""
    args = args[0] if len(args) == 1 and isinstance(args[0], list) else args
return [ne_set(i) for i in args]
def non_empty_pair(*args):
"""Get set of all non-empty elements in iterables."""
return ne_set(args[0]), ne_set(args[1])
def ne_set(x):
"""Get set of all non-empty elements in iterable."""
return set(i for i in x if i)
def de_list_pair(x, y=None):
"""De-list pair."""
if isinstance(x, list) and not y:
return x[0], x[1]
return x, y
def intersect(x, y=None):
"""Get sorted list of elements in common between two iterables."""
x, y = de_list_pair(x, y)
return sorted(list(set(x) & set(y)))
def ne_intersect(x, y):
"""Get sorted list of elements in common in two iterables, sans empties."""
return intersect(ne_set(x), ne_set(y))
def union(x, y=None):
"""Get sorted list of elements combined for two iterables."""
x, y = de_list_pair(x, y)
return sorted(list(set(x) | set(y)))
def ne_union(x, y):
"""Get sorted list of elements for two iterables, sans empties."""
return union(ne_set(x), ne_set(y))
def exclusive(x, y=None):
"""Get sorted list of uncommon elements in two iterables."""
x, y = de_list_pair(x, y)
return sorted(list(set(x) ^ set(y)))
```
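A short usage sketch of the set helpers; the import path is an assumption based on the file layout above.
```python
# Usage sketch for the set helpers; the import path is an assumption.
from joeutils.data_manipulation.sets import exclusive, ne_intersect, ne_union

assert ne_intersect(['a', '', 'b'], ['b', None, 'c']) == ['b']
assert ne_union(['a', ''], [None, 'b']) == ['a', 'b']
assert exclusive(['a', 'b'], ['b', 'c']) == ['a', 'c']
```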
#### File: data_manipulation/string/__init__.py
```python
def handle_permutations(existing_list, permutations_to_populate):
"""Handle permutations."""
temp_list = []
for perm in permutations_to_populate:
for item in existing_list:
temp_list.append('{}{}'.format(item, perm))
    return temp_list
def make_string_permutations(permutation_matrix):
"""Make string permutations."""
temp_list = ['']
for permutation_list in permutation_matrix:
temp_list = handle_permutations(
existing_list=temp_list,
permutations_to_populate=permutation_list)
return temp_list
def single_value_from_permutable_keys(source_dict, permutable_keys,
default_value=''):
"""Single value from permutable keys."""
example_condition = True
err_msg = 'Multiple permutable keys were found. Please use one.\n\n' \
'Source dictionary: {}\n' \
'Allowable permutable keys: {}' \
.format(source_dict, permutable_keys)
valid_keys_in_source_dict = 0
for key in source_dict:
if key in permutable_keys:
valid_keys_in_source_dict += 1
if valid_keys_in_source_dict == 0:
return ''
elif valid_keys_in_source_dict > 1:
raise Exception(err_msg)
else:
return ''.join(
source_dict[key]
if key in source_dict else '' for key in permutable_keys
) if example_condition else default_value
def example_string_permutations_use_case():
"""Example."""
example_string_permutations = (
('char', 'characteristic'),
('Grp', 'Group'),
('', 1, 2),
('Label', '.label')
)
example_dict = {}
example_arg_name_permutations = \
make_string_permutations(example_string_permutations)
example_chargrp_label_arg_names = example_arg_name_permutations
example_char_grp_label = single_value_from_permutable_keys(
source_dict=example_dict,
permutable_keys=example_chargrp_label_arg_names)
return example_char_grp_label
```
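To make the permutation helpers concrete, a small usage sketch; the import path is an assumption.
```python
# Usage sketch for the permutation helpers; the import path is an assumption.
from joeutils.data_manipulation.string import (
    make_string_permutations, single_value_from_permutable_keys)

perms = make_string_permutations([('char', 'characteristic'), ('Grp', 'Group')])
assert perms == ['charGrp', 'characteristicGrp', 'charGroup', 'characteristicGroup']

# Exactly one of the permuted key spellings may appear in the source dict.
value = single_value_from_permutable_keys(
    source_dict={'charGrp': 'demographics'}, permutable_keys=perms)
assert value == 'demographics'
```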
#### File: io/file_io/__init__.py
```python
def re_readable_read(file):
"""Read file and reset cursor/pointer to allow fast, simple re-read.
Side Effects:
Mutates file stream object passed as argument by moving cursor/pointer
from from position at start of function call and setting it to position
'0'. If file stream has not been read before calling this function,
there will be no effective change.
Returns:
str: Contents of read file.
"""
file_contents = file.read()
file.seek(0)
return file_contents
def open_and_read(file):
"""Alias: read_contents"""
read_contents(file)
def read_contents(file):
"""Open file and read it.
Returns:
str: File contents.
"""
# with open(file, 'r') as stream:
# return re_readable_read(stream)
# return re_readable_read(open(file, 'r'))
return open(file, 'r').read()
```
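A usage sketch showing the cursor-reset behaviour of `re_readable_read`; the file name is illustrative.
```python
# Usage sketch for re_readable_read; the file name is illustrative.
from joeutils.io.file_io import re_readable_read

with open('example.txt', 'r') as stream:
    first = re_readable_read(stream)   # reads and rewinds to position 0
    second = stream.read()             # re-reads the same contents
    assert first == second
```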
#### File: io/yaml_io/__init__.py
```python
from joeutils.io.file_io import read_contents
def yaml_load_clean(data):
"""Read YAML.
Handles dependencies.
Raises:
YAMLError
Returns:
dict: Data.
"""
    from yaml import safe_load, YAMLError
    try:
        return safe_load(read_contents(data))
except YAMLError:
raise YAMLError('YAMLError: An unexpected error occurred when '
'attempting to read supplied YAML.')
def yaml_dump_clean(data):
"""Dump YAML in highly readable format and preserving key order.
Handles dependencies.
# TODO: Upgrade to ruamel package to preserve order -
# https://stackoverflow.com/questions/31605131
# /dumping-a-dictionary-to-a-yaml-file-while-preserving-order
Returns:
str: YAML formatted string.
"""
import yaml
return yaml.dump(data=data, default_flow_style=False)
```
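A round-trip sketch for the YAML helpers; the file name is illustrative, and note that `yaml.dump` sorts keys alphabetically by default.
```python
# Usage sketch for the YAML helpers; the file name is illustrative.
from joeutils.io.yaml_io import yaml_dump_clean, yaml_load_clean

data = {'name': 'just-a-dash', 'modules': ['oms', 'crm']}
with open('settings.yaml', 'w') as f:
    f.write(yaml_dump_clean(data))
assert yaml_load_clean('settings.yaml') == data
```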
#### File: joeutils/testing/__init__.py
```python
from glob import glob
import os
import unittest
TEST_DIR = os.path.dirname(os.path.realpath(__file__)) + '/'
TEST_STATIC_DIR = TEST_DIR + 'static/'
class StaticIOTest(unittest.TestCase):
"""Base class for Standard input/output stuff package tests."""
@classmethod
def files_dir(cls):
"""Return name of test class."""
return TEST_STATIC_DIR + cls.__name__
def input_path(self):
"""Return path of input file folder for test class."""
return self.files_dir() + '/input/'
def output_path(self):
"""Return path of output file folder for test class."""
return self.files_dir() + '/output/'
def input_files(self):
"""Return paths of input files for test class."""
all_files = glob(self.input_path() + '*')
# With sans_temp_files, you can have Excel open while testing.
sans_temp_files = [x for x in all_files
if not x[len(self.input_path()):].startswith('~$')]
return sans_temp_files
def output_files(self):
"""Return paths of input files for test class."""
return glob(self.output_path() + '*')
def assert_success(self, func, **options):
"""Runs function and asserts success.
Args:
func (function): function to run.
options (kwargs): function options; unpacked keyword args
"""
if options:
func(**options)
else:
func()
expected = 'N files: ' + str(len(self.input_files()))
actual = 'N files: ' + str(len(self.output_files()))
self.assertEqual(expected, actual)
class ExampleTest(StaticIOTest):
"""Test class docstring"""
def mark_as_success(self):
"""Create a file to mark successful test.
Only use this if your test doesn't actually create a file in the
process.
"""
path = self.output_path() + 'success.txt'
try:
os.remove(path)
except FileNotFoundError:
pass
open(path, 'x').close()
def do_something(self):
"""Do something."""
for file in self.input_files():
print('Do something here with {}!'.format(file))
self.mark_as_success()
def test_non_strict_validation(self):
"""Test something."""
self.assert_success(func=self.do_something)
``` |
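A hypothetical concrete subclass, following the static-directory convention above (`static/<ClassName>/input` and `static/<ClassName>/output`); the import path is an assumption.
```python
# Hypothetical test case using the StaticIOTest conventions above.
import os

from joeutils.testing import StaticIOTest

class CopyFilesTest(StaticIOTest):
    """Copies every input file to the output folder, one output per input."""

    def copy_all(self):
        for path in self.input_files():
            dest = self.output_path() + os.path.basename(path)
            with open(path) as src, open(dest, 'w') as dst:
                dst.write(src.read())

    def test_copy(self):
        # assert_success passes when input and output file counts match.
        self.assert_success(func=self.copy_all)
```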
{
"source": "joeflack4/just-a-dash",
"score": 2
} |
#### File: just-a-dash/app/routes.py
```python
from flask import render_template, url_for, flash, redirect, request, Markup
from flask_login import login_required, login_user, logout_user, current_user
from app import app, db
# - Marketing Module Imports -- Move these to blueprint.
# Marketing Module imports - dependencies.
import requests
from collections import Counter
from bs4 import BeautifulSoup
import operator
import re
import nltk
# Marketing Module imports - from w/in app.
from .stop_words import stops
from .models import User, Customers, Personnel, Messages, Result, AppNotifications, OmsConfig
from .forms import LoginForm, RegisterForm, UserAddForm, UserUpdateForm, UserDeleteForm, CustomerAddForm, \
CustomerUpdateForm, CustomerDeleteForm, PersonnelAddForm, PersonnelUpdateForm, PersonelDeleteForm, \
Config_Names_and_Aesthetics, Config_Secret_Key, Config_Modules, OMS_Settings
from .modals import user_add_modal, user_update_modal, customer_add_modal, customer_update_modal, personnel_add_modal, \
personnel_update_modal, user_csv_upload_modal, customer_csv_upload_modal, \
personnel_csv_upload_modal, violations_csv_upload_modal
# from .services.telephony.contacts import CompanyContacts
from .services.telephony.sms import sms_response, sms_check_in_data
from .services.telephony.calls import call_response, call_check_in_data
from .includes import get_app_settings, get_oms_settings, make_string_list
from .includes import csv2json_conversion, Import_Data, validate_columns, validate_import, add_to_db, \
add_user, update_user, delete_user, check_permissions_to_update_user, check_permissions_to_assign_user_role, \
check_permissions_to_delete_user, update_names_and_aesthetics, update_secret_key, update_modules, add_customer, \
update_customer, delete_customer, add_personnel, update_personnel, delete_personnel, get_upload_columns, \
update_self, update_oms_settings
# from .includes import check_permissions_to_change_App_Naming_and_Aesthetics, check_permissions_to_change_App_Secret_Key, \
# check_permissions_to_change_App_Modules
from .route_decorators import app_basic_admin_required, app_super_admin_required, oms_basic_admin_required, \
oms_super_admin_required, crm_basic_admin_required, crm_super_admin_required, hrm_basic_admin_required, \
hrm_super_admin_required, ams_basic_admin_required, ams_super_admin_required, mms_basic_admin_required, \
mms_super_admin_required
##############
# - Variables
record_update_error = 'Attempted to update record, but form submission failed validation. Please ensure that all of the' \
' non-blank fields submitted pass validation.'
record_delete_error = 'Attempted to delete record, but an unexpected error occurred. Please contact the application' \
' administrator'
record_add_error = 'Attempted to add record, but form submission failed validation. Please ensure that all of the' \
' non-blank fields submitted pass validation.'
##############
# - App Core - Root Pathing
@app.route('/')
def root_path():
if current_user.is_authenticated():
return redirect(url_for('index'))
else:
return redirect(url_for('welcome'))
@app.route('/welcome')
def welcome():
logged_in = current_user.is_authenticated()
login_form = LoginForm(request.form)
register_form = RegisterForm(request.form)
return render_template('core_modules/welcome/index.html',
module_name=get_app_settings('App Name') + " Control Panel",
page_name="Welcome",
icon="fa fa-star-o",
module_abbreviation="Home",
app_config_settings=get_app_settings(),
messages=db.session.query(Messages),
notifications=db.session.query(AppNotifications),
login_form=login_form,
register_form=register_form,
current_user=current_user,
logged_in=logged_in)
@app.route('/index')
@login_required
def index():
logged_in = current_user.is_authenticated()
login_form = LoginForm(request.form)
return render_template('core_modules/dashboard/index.html',
module_name=get_app_settings('App Name') + " Control Panel",
page_name="Dashboard",
icon="fa fa-dashboard",
module_abbreviation="Home",
app_config_settings=get_app_settings(),
messages=db.session.query(Messages),
notifications=db.session.query(AppNotifications),
login_form=login_form,
current_user=current_user,
logged_in=logged_in)
@app.route('/home')
@login_required
def home():
return redirect(url_for('root_path'))
@app.route('/dashboard')
@login_required
def dashboard():
return redirect(url_for('root_path'))
################
# - App Core - Standard Routes
@app.route('/logout')
def logout():
logged_in = current_user.is_authenticated()
logout_user()
flash(u'Logged out. Thank you, come again!', 'success')
return redirect(url_for('welcome'))
@app.route('/login', methods=['GET', 'POST'])
def login():
logged_in = current_user.is_authenticated()
errors = []
login_form = LoginForm(request.form)
register_form = RegisterForm()
if request.method == 'POST':
if login_form.validate_on_submit():
user = User.query.filter_by(username=request.form['username']).first()
# - This one is for bcrypt. Right now I'm using PBKDF2 a la Werkeug Security.
# if user is not None and bcrypt.check_password_hash(user.password, request.form['password']):
if user is not None and user.check_password(request.form['password']):
login_user(user)
flash(u'Logged in. Welcome back!', 'success')
return redirect(url_for('index'))
else:
errors.append('Login failed. Please check that your credentials are correct, and try again.')
user = User.query.filter_by(username=request.form['username']).first()
for error in errors:
flash(error, 'danger')
else:
flash('Login failed. Please make sure to to fill out all fields before submitting.', 'danger')
return render_template('core_modules/login/index.html',
icon="fa fa-dashboard",
module_abbreviation="Home",
module_name="Just-a-Dash Control Panel",
page_name="Login",
app_config_settings=get_app_settings(),
messages=db.session.query(Messages),
notifications=db.session.query(AppNotifications),
login_form=login_form,
register_form=register_form,
current_user=current_user,
logged_in=logged_in)
@app.route('/register', methods=['GET', 'POST'])
def register():
logged_in = current_user.is_authenticated()
login_form = LoginForm(request.form)
register_form = RegisterForm()
if request.method == 'POST':
if register_form.validate_on_submit():
new_user = User(
username=register_form.username.data,
email=register_form.email.data,
password=register_form.password.data,
admin_role='None',
oms_role='None',
crm_role='None',
hrm_role='None',
ams_role='None',
mms_role='None')
db.session.add(new_user)
db.session.commit()
login_user(new_user)
flash('Registration complete! You have been logged in.', 'success')
flash(Markup('<strong>Info! -</strong> '), 'info')
return redirect(url_for('index'))
else:
flash('Registration failed, please try again.', 'warning')
flash('When registering, ensure the following conditions are all met. (1) Username is between 3-25 '
                  'characters, (2) E-mail is between 6-40 characters, (3) Password is between 6-25 characters, '
'(4) Password and confirm password are matching.', 'info')
return render_template('core_modules/register/index.html',
icon="fa fa-pencil-square-o",
module_abbreviation="Registration",
module_name="Registration",
page_name="New Submission",
app_config_settings=get_app_settings(),
messages=db.session.query(Messages),
notifications=db.session.query(AppNotifications),
login_form=login_form,
register_form=register_form,
current_user=current_user,
logged_in=logged_in)
@app.route('/upload', methods=['POST'])
def upload():
f = request.files['data_file'].read().decode('utf-8')
# - debugging
# file_contents = codecs.open(file_contents, "r", encoding='utf-8', errors='ignore')
# f = codecs.open(request.files['data_file'], "r", encoding='utf-8', errors='ignore')
# f = codecs.decode(request.files['data_file'], 'utf-8', 'ignore')
if not f:
flash("Error. File upload attempt detected, but no file found. Please contact the application administrator.",
'danger')
# To do: Get the conditional below to work, and remove the placeholder 'if True'.
# if type(f) == '.csv':
if True:
f = csv2json_conversion(f)
import_data = Import_Data(f)
data_context = request.form['form_submit']
valid_schema = validate_columns(import_data, data_context)
if valid_schema == True:
validated_data = validate_import(current_user, import_data, data_context)
if validated_data:
add_to_db(validated_data, data_context)
else:
flash('Error. Incorrect file type. The only file types accepted are: .csv', 'danger')
return redirect(request.referrer)
################
# - App Core - Personal Routes
@app.route('/account-settings', methods=['GET', 'POST'])
@login_required
def account_settings():
logged_in = current_user.is_authenticated()
login_form = LoginForm(request.form)
settings_form = UserUpdateForm(request.form)
generic_error = 'Error. Data appears to have been posted to the server, but could not determine type of form ' \
'submission. Please contact the application administrator.'
if request.method == 'POST':
if request.form['form_submit']:
if request.form['form_submit'] == 'AccountSettingsForm':
authority = False
if int(current_user.id) == int(request.form['user_id']):
authority = True
if authority == True:
if settings_form.validate_on_submit():
update_self(settings_form)
else:
flash(record_add_error, 'danger')
else:
flash(generic_error, 'danger')
else:
flash(generic_error, 'danger')
else:
flash(generic_error, 'danger')
# return redirect((url_for('account_settings')))
return render_template('core_modules/account_settings/index.html',
icon="fa fa-dashboard",
module_abbreviation="Account Settings",
module_name="Account Settings",
page_name="Account Settings Home",
app_config_settings=get_app_settings(),
messages=db.session.query(Messages),
notifications=db.session.query(AppNotifications),
settings_form=settings_form,
login_form=login_form,
current_user=current_user,
logged_in=logged_in)
@app.route('/profile')
@login_required
def profile():
logged_in = current_user.is_authenticated()
login_form = LoginForm(request.form)
return render_template('core_modules/profile/index.html',
icon="fa fa-dashboard",
module_abbreviation="Profile",
module_name="Profile",
page_name="Profile Home",
app_config_settings=get_app_settings(),
messages=db.session.query(Messages),
notifications=db.session.query(AppNotifications),
profile_form=UserUpdateForm(request.form),
login_form=login_form,
current_user=current_user,
logged_in=logged_in)
@app.route('/notifications')
@login_required
def notifications():
logged_in = current_user.is_authenticated()
login_form = LoginForm(request.form)
return render_template('core_modules/profile/notifications.html',
icon="fa fa-dashboard",
module_abbreviation="Profile",
module_name="Profile",
page_name="Notifications",
app_config_settings=get_app_settings(),
messages=db.session.query(Messages),
notifications=db.session.query(AppNotifications),
login_form=login_form,
current_user=current_user,
logged_in=logged_in)
@app.route('/tasks')
@login_required
def tasks():
logged_in = current_user.is_authenticated()
login_form = LoginForm(request.form)
return render_template('core_modules/profile/tasks.html',
icon="fa fa-dashboard",
module_abbreviation="Profile",
module_name="Profile",
page_name="Tasks",
app_config_settings=get_app_settings(),
messages=db.session.query(Messages),
notifications=db.session.query(AppNotifications),
login_form=login_form,
current_user=current_user,
logged_in=logged_in)
################
# - App Core - Administrative Routes
@app.route('/config', methods=['GET', 'POST'])
@app.route('/app-config', methods=['GET', 'POST'])
@app.route('/app-settings', methods=['GET', 'POST'])
@login_required
@app_super_admin_required
def app_settings():
logged_in = current_user.is_authenticated()
login_form = LoginForm(request.form)
names_and_aesthetics_form = Config_Names_and_Aesthetics(request.form)
secret_key_form = Config_Secret_Key(request.form)
modules_form = Config_Modules(request.form)
setting_values = {'App Name': get_app_settings('App Name'),
'App Icon': get_app_settings('App Icon'),
'App Title': get_app_settings('App Title'),
'App Short-Title': get_app_settings('App Short-Title'),
'Secret Key': get_app_settings('Secret Key'),
'Toggle Placeholders': get_app_settings('Toggle Placeholders')}
forms = {'Naming-and-Aesthetics-Form': names_and_aesthetics_form,
'Secret-Key-Form': secret_key_form,
'Modules-Form': modules_form}
# - Note: Will refactor to return 'authority = True' if the current_user is a super_admin. Right now the
# App Settings page is simply inaccessible to non-super_admins.
if request.method == 'POST':
if request.form['form_submit']:
if request.form['form_submit'] == 'Config_Names-and-Aesthetics-Form':
authority = True
if authority == True:
if names_and_aesthetics_form.validate_on_submit():
update_names_and_aesthetics(current_user, names_and_aesthetics_form)
else:
flash(record_update_error, 'danger')
elif request.form['form_submit'] == 'Config_Secret-Key-Form':
authority = True
if authority == True:
if secret_key_form.validate_on_submit():
update_secret_key(current_user, secret_key_form)
else:
flash(record_update_error, 'danger')
# - Note: This form is currently hidden until the feature addition is complete.
elif request.form['form_submit'] == 'Config_Modules-Form':
authority = True
if authority == True:
if modules_form.validate_on_submit():
update_modules(modules_form)
else:
flash(record_update_error, 'danger')
else:
flash('An error occurred while processing the submitted form. Please correct the errors in your form '
'submission. If you feel this message is in error, please contact the application administrator.',
'danger')
else:
flash('Error. Data appears to have been posted to the server, but could not determine type of form'
' submission. Please contact the application administrator.', 'danger')
return redirect((url_for('app_settings')))
return render_template('core_modules/app_settings/index.html',
icon="fa fa-dashboard",
module_abbreviation="App Settings",
module_name="App Settings",
page_name="App Settings Home",
app_config_settings=get_app_settings(),
setting_values=setting_values,
messages=db.session.query(Messages),
notifications=db.session.query(AppNotifications),
login_form=login_form,
current_user=current_user,
logged_in=logged_in,
forms=forms)
@app.route('/user-management', methods=['GET', 'POST'])
@login_required
@app_basic_admin_required
def user_management():
logged_in = current_user.is_authenticated()
login_form = LoginForm(request.form)
modals = {'UserAddModal': user_add_modal, 'UserUpdateModal': user_update_modal}
# To do: Need to fix this so that my forms are able to create fields dynamically based on database values.
# The code below doesn't seem to break app, but does not seem to have an effect.
add_form = UserAddForm(request.form)
update_form = UserUpdateForm(request.form)
delete_form = UserDeleteForm(request.form)
# db_populate_object = namedtuple('literal', 'name age')(**{'name': '<NAME>', 'age': 23})
# add_form.append_field("test", SelectField('test'))(obj=db_populate_object)
forms = {'User-Add-Form': add_form,
'User-Update-Form': update_form,
'User-Delete-Form': delete_form}
if request.method == 'POST':
if request.form['form_submit']:
if request.form['form_submit'] == 'User-Add-Form':
authority = check_permissions_to_assign_user_role(add_form, current_user)
if authority == True:
if add_form.validate_on_submit():
add_user(add_form)
else:
flash(record_add_error,'danger')
elif request.form['form_submit'] == 'User-Delete-Form':
superiority = check_permissions_to_delete_user(delete_form, current_user)
if superiority == False:
                    flash('Failed to delete user. Your administrative role must be higher than the target user\'s in order to delete.', 'danger')
elif superiority == True:
if delete_form.validate_on_submit():
delete_user(update_form)
else:
flash(record_delete_error, 'danger')
else:
flash('One or more errors occurred while attempting to determine user permissions. Please contact the application administrator.', 'danger')
elif request.form['form_submit'] == 'User-Update-Form':
authority = check_permissions_to_assign_user_role(update_form, current_user)
if authority == True:
role_superiorities = check_permissions_to_update_user(update_form, current_user)
if update_form.validate_on_submit():
update_user(update_form, role_superiorities)
else:
flash(record_update_error, 'danger')
else:
flash('An error occurred while processing the submitted form. Please correct the errors in your form '
'submission. If you feel this message is in error, please contact the application administrator.',
'danger')
else:
flash('Error. Data appears to have been posted to the server, but could not determine type of form '
'submission. Please contact the application administrator.', 'danger')
return redirect((url_for('user_management')))
return render_template('core_modules/app_settings/user_management.html',
icon="fa fa-dashboard",
module_abbreviation="App Settings",
module_name="App Settings",
page_name="User Management",
app_config_settings=get_app_settings(),
messages=db.session.query(Messages),
notifications=db.session.query(AppNotifications),
users=db.session.query(User),
login_form=login_form,
current_user=current_user,
logged_in=logged_in,
modals=modals,
forms=forms,
csv_upload_modal=user_csv_upload_modal,
upload_columns=get_upload_columns(User))
@app.route('/module-settings', methods=['GET', 'POST'])
@login_required
@app_super_admin_required
def module_settings():
logged_in = current_user.is_authenticated()
login_form = LoginForm(request.form)
oms_settings_form = OMS_Settings(request.form)
oms_setting_values = {'Twilio Account SID': get_oms_settings('Twilio Account SID'),
'Twilio Auth Token': get_oms_settings('Twilio Auth Token'),
'Twilio Phone Number': get_oms_settings('Twilio Phone Number'),
'Call Response MP3': get_oms_settings('Call Response MP3'),
'Call Response MP3 Toggle': get_oms_settings('Call Response MP3 Toggle'),
'Call Response Text-to-Speech': get_oms_settings('Call Response Text-to-Speech'),
'Call Response Text-to-Speech Toggle': get_oms_settings('Call Response Text-to-Speech Toggle'),
'Phone Number Visibility': get_oms_settings('Phone Number Visibility')}
forms = {'OMS-Settings-Form': oms_settings_form}
    # - Note: Will refactor to return 'authority = True' if the current_user is a super_admin. Right now this page
    # is simply inaccessible to non-super_admins.
if request.method == 'POST':
if request.form['form_submit']:
if request.form['form_submit'] == 'OMS-Settings-Form':
authority = True
if authority == True:
if oms_settings_form.validate_on_submit():
update_oms_settings(current_user, oms_settings_form)
else:
flash(record_update_error, 'danger')
else:
flash('An error occurred while processing the submitted form. Please correct the errors in your form '
'submission. If you feel this message is in error, please contact the application administrator.',
'danger')
else:
flash('Error. Data appears to have been posted to the server, but could not determine type of form'
' submission. Please contact the application administrator.', 'danger')
return redirect((url_for('module_settings')))
return render_template('core_modules/app_settings/module_settings.html',
icon="fa fa-dashboard",
module_abbreviation="App Settings",
module_name="App Settings",
page_name="Module Settings",
app_config_settings=get_app_settings(),
oms_setting_values=oms_setting_values,
messages=db.session.query(Messages),
notifications=db.session.query(AppNotifications),
login_form=login_form,
current_user=current_user,
logged_in=logged_in,
forms=forms)
@app.route('/gallery', methods=['GET', 'POST'])
@login_required
@app_basic_admin_required
def gallery():
logged_in = current_user.is_authenticated()
login_form = LoginForm(request.form)
shared_local_scripts = ('js/app.includes.js', 'js/components/gallery/galleryController.js')
return render_template('core_modules/app_settings/gallery.html',
icon="fa fa-picture-o",
module_abbreviation="App Settings",
module_name="App Settings",
page_name="Image Gallery",
app_config_settings=get_app_settings(),
messages=db.session.query(Messages),
notifications=db.session.query(AppNotifications),
login_form=login_form,
current_user=current_user,
logged_in=logged_in,
shared_local_scripts=shared_local_scripts)
############
# - Modules - OMS
@app.route('/oms-home')
@login_required
@oms_basic_admin_required
def oms_home():
logged_in = current_user.is_authenticated()
login_form = LoginForm(request.form)
return render_template('modules/operations/home.html',
icon="fa fa-dashboard",
module_abbreviation="OMS",
module_name="Operations Management",
page_name="OMS Home",
app_config_settings=get_app_settings(),
messages=db.session.query(Messages),
notifications=db.session.query(AppNotifications),
profile_form=UserUpdateForm(request.form),
login_form=login_form,
current_user=current_user,
logged_in=logged_in)
@app.route('/operations')
@login_required
@oms_basic_admin_required
def operations(*args):
logged_in = current_user.is_authenticated()
login_form = LoginForm(request.form)
render_settings = {'Phone Number Visibility': OmsConfig.query.filter_by(key='Phone Number Visibility').first().value.lower(),
'Twilio Phone Number': OmsConfig.query.filter_by(key='Twilio Phone Number').first().value}
try:
check_in_type = args[0]
    except IndexError:
check_in_type = None
try:
# Determine what kind of check-in is being executed.
if check_in_type == "sms_check_in":
check_in_entries = sms_check_in_data()
elif check_in_type == "call_check_in":
check_in_entries = call_check_in_data()
        elif check_in_type is None:
check_in_entries = call_check_in_data()
else:
check_in_entries = {".": {"timestamp": ".", "first_name": ".", "last_name": ".", "phone_number": "."}}
# Check for errors.
critical_settings = ('Twilio Phone Number', 'Twilio Auth Token', 'Twilio Account SID')
critical_settings_errors = []
for setting in critical_settings:
if OmsConfig.query.filter_by(key=setting).first().value == '':
critical_settings_errors.append(setting)
elif not OmsConfig.query.filter_by(key=setting).first().value:
critical_settings_errors.append(setting)
if critical_settings_errors != []:
error_message = Markup('One or more errors occurred related to check-in submodule settings. The following '
'setting(s) have not yet been configured, and may cause this submodule to behave '
'incorrectly: {}'.format(make_string_list(critical_settings_errors)) +
'. Please have the master user update module settings, then have the server '
'administrator reset the server to apply settings.')
flash(error_message, 'danger')
return render_template('modules/operations/index.html',
icon="fa fa-fort-awesome",
module_abbreviation="OMS",
module_name="Operations Management",
page_name="OMS Home",
app_config_settings=get_app_settings(),
check_in_entries=check_in_entries,
messages=db.session.query(Messages),
notifications=db.session.query(AppNotifications),
login_form=login_form,
current_user=current_user,
logged_in=logged_in,
render_settings=render_settings)
except:
flash('Attempted to load check-in submodule, but an error occurred. Module settings may not be configured '
'correctly. Please have the master user update module settings, then have the server administrator reset '
'the server to apply settings.', 'danger')
return redirect(url_for('root_path'))
@app.route('/checkin')
@app.route('/check-in')
@app.route('/callin')
@app.route('/call-in')
@login_required
@oms_basic_admin_required
def call_check_in():
return operations("call_check_in")
@app.route('/textin')
@app.route('/text-in')
@app.route('/text-checkin')
@app.route('/sms-checkin')
@login_required
@oms_basic_admin_required
def sms_check_in():
return operations("sms_check_in")
@app.route('/oms-settings')
@login_required
@oms_super_admin_required
def oms_settings():
logged_in = current_user.is_authenticated()
login_form = LoginForm(request.form)
oms_settings_form = OMS_Settings(request.form)
oms_setting_values = {'Twilio Account SID': get_oms_settings('Twilio Account SID'),
'Twilio Auth Token': get_oms_settings('Twilio Auth Token'),
'Twilio Phone Number': get_oms_settings('Twilio Phone Number'),
'Call Response MP3': get_oms_settings('Call Response MP3'),
'Call Response MP3 Toggle': get_oms_settings('Call Response MP3 Toggle'),
'Call Response Text-to-Speech': get_oms_settings('Call Response Text-to-Speech'),
'Call Response Text-to-Speech Toggle': get_oms_settings('Call Response Text-to-Speech Toggle'),
'Phone Number Visibility': get_oms_settings('Phone Number Visibility')}
forms = {'OMS-Settings-Form': oms_settings_form}
return render_template('modules/operations/settings.html',
icon="fa fa-dashboard",
module_abbreviation="OMS",
module_name="Operations Management",
page_name="OMS Settings",
app_config_settings=get_app_settings(),
oms_setting_values=oms_setting_values,
messages=db.session.query(Messages),
notifications=db.session.query(AppNotifications),
profile_form=UserUpdateForm(request.form),
login_form=login_form,
current_user=current_user,
logged_in=logged_in,
forms=forms)
# - OMS Services
@app.route('/sms')
@app.route('/sms_send')
@app.route('/sms_receive')
def sms():
return sms_response()
@app.route('/call', methods=['GET', 'POST'])
@app.route('/calls', methods=['GET', 'POST'])
@app.route('/call_send', methods=['GET', 'POST'])
@app.route('/call_receive', methods=['GET', 'POST'])
def call():
return call_response()
############
# - Modules - CRM
@app.route('/crm-home')
@login_required
@crm_basic_admin_required
def crm_home():
logged_in = current_user.is_authenticated()
login_form = LoginForm(request.form)
return render_template('modules/crm/home.html',
icon="fa fa-dashboard",
module_abbreviation="CRM",
module_name="Customer Relationship Management",
page_name="CRM Home",
app_config_settings=get_app_settings(),
messages=db.session.query(Messages),
notifications=db.session.query(AppNotifications),
profile_form=UserUpdateForm(request.form),
login_form=login_form,
current_user=current_user,
logged_in=logged_in)
@app.route('/crm', methods=['GET', 'POST'])
@login_required
@crm_basic_admin_required
# def crm(*kwargs):
def crm():
logged_in = current_user.is_authenticated()
login_form = LoginForm(request.form)
modals = {'CustomerAddModal': customer_add_modal, 'CustomerUpdateModal': customer_update_modal}
add_form = CustomerAddForm(request.form)
update_form = CustomerUpdateForm(request.form)
delete_form = CustomerDeleteForm(request.form)
forms = {'Customer-Add-Form': add_form,
'Customer-Update-Form': update_form,
'Customer-Delete-Form': delete_form}
shared_local_scripts = ['js/components/crm/crmController.js', ]
modular_local_styles = []
try:
customers = db.session.query(Customers)
# customers = CompanyContacts.get_customer_contacts()
    except Exception:
customers = {"-": {"timestamp": "-", "first_name": "-", "last_name": "-", "phone_number": "-"}}
if request.method == 'POST':
if request.form['form_submit']:
if request.form['form_submit'] == 'Customer-Add-Form':
# authority = check_permissions_to_add_customer(add_form, current_user)
authority = True
if authority == True:
if add_form.validate_on_submit():
add_customer(add_form)
else:
flash(record_add_error, 'danger')
elif request.form['form_submit'] == 'Customer-Delete-Form':
# authority = check_permissions_to_delete_customer(add_form, current_user)
authority = True
if authority == False:
flash('Failed to delete customer. You do not have sufficient permissions.', 'danger')
elif authority == True:
if delete_form.validate_on_submit():
delete_customer(update_form)
else:
flash(record_delete_error, 'danger')
else:
flash('One or more errors occurred while attempting to determine user permissions. Please contact '
'the application administrator.', 'danger')
elif request.form['form_submit'] == 'Customer-Update-Form':
# authority = check_permissions_to_assign_update_customer(update_form, current_user)
authority = True
if authority == True:
if update_form.validate_on_submit():
update_customer(update_form)
else:
flash(record_update_error, 'danger')
else:
flash('An error occurred while processing the submitted form. Please correct the errors in your form '
'submission. If you feel this message is in error, please contact the application administrator.',
'danger')
else:
flash('Error. Data appears to have been posted to the server, but could not determine type of form '
'submission. Please contact the application administrator.', 'danger')
return redirect((url_for('crm')))
if request.path == '/crm/development':
url = 'modules/crm/crm-development.html'
module_name = Markup('CRM Development')
# shared_local_scripts.append('js/components/crm/development/crm.dev.module.js')
# modular_local_styles.append('css/crm/development/crm.dev.css')
elif request.path == '/crm/development2':
url = 'modules/crm/crm-development2.html'
module_name = Markup('CRM Development #2')
shared_local_scripts.append('js/components/crm/development/crm.dev.module.js')
modular_local_styles.append('css/crm/development/crm.dev.css')
elif request.path == '/crm/development3':
url = 'modules/crm/crm-development3.html'
module_name = Markup('CRM Development #3')
shared_local_scripts.append('_dependencies/bower_components/angular-modal-service/angular-modal-service.min.js')
# shared_local_scripts.append('_dependencies/bower_components/angular-modal-service/angular-modal-service.js')
shared_local_scripts.append('js/components/crm/development3/crm.dev.modalController.js')
else:
url = 'modules/crm/crm-development.html'
module_name = 'Customer Relationship Management'
# - Note: Below is the more stable CRM template. Switch back to it if any issues.
# else:
# url ='modules/crm/index.html'
# module_name = 'Customer Relationship Management'
return render_template(url,
icon="ion-person-stalker",
module_abbreviation="CRM",
module_name=module_name,
page_name="CRM Home",
form_title="Customer",
app_config_settings=get_app_settings(),
customers=customers,
messages=db.session.query(Messages),
notifications=db.session.query(AppNotifications),
login_form=login_form,
current_user=current_user,
logged_in=logged_in,
modals=modals,
forms=forms,
csv_upload_modal=customer_csv_upload_modal,
upload_columns=get_upload_columns(Customers),
data_sections=Customers.data_sections,
data_tree=Customers.data_tree,
shared_local_scripts=shared_local_scripts,
modular_local_styles=modular_local_styles)
@app.route('/crm/development', methods=['GET', 'POST'])
@login_required
@crm_basic_admin_required
def crm_development():
return crm()
@app.route('/crm/development2', methods=['GET', 'POST'])
@login_required
@crm_basic_admin_required
def crm_development2():
return crm()
@app.route('/crm/development3', methods=['GET', 'POST'])
@login_required
@crm_basic_admin_required
def crm_development3():
return crm()
@app.route('/customers/contacts', methods=['GET', 'POST'])
@login_required
@crm_basic_admin_required
def customer_contacts():
return redirect(url_for('crm'))
@app.route('/customers/identifiers', methods=['GET', 'POST'])
@login_required
@crm_basic_admin_required
def customer_identifiers():
return redirect(url_for('crm'))
@app.route('/customers/services-and-authorizations', methods=['GET', 'POST'])
@login_required
@crm_basic_admin_required
def customer_services_and_authorizations():
return redirect(url_for('crm'))
@app.route('/customers/billing-info', methods=['GET', 'POST'])
@login_required
@crm_basic_admin_required
def customer_billing_info():
return redirect(url_for('crm'))
@app.route('/customers/case-notes', methods=['GET', 'POST'])
@login_required
@crm_basic_admin_required
def customer_case_notes():
return redirect(url_for('crm'))
@app.route('/customers/relationships', methods=['GET', 'POST'])
@login_required
@crm_basic_admin_required
def customer_relationships():
return redirect(url_for('crm'))
@app.route('/customers/other', methods=['GET', 'POST'])
@login_required
@crm_basic_admin_required
def customer_other():
return redirect(url_for('crm'))
@app.route('/crm-settings')
@login_required
@crm_super_admin_required
def crm_settings():
logged_in = current_user.is_authenticated()
login_form = LoginForm(request.form)
return render_template('modules/crm/settings.html',
icon="fa fa-dashboard",
module_abbreviation="CRM",
module_name="Customer Relationship Management",
page_name="CRM Settings",
app_config_settings=get_app_settings(),
messages=db.session.query(Messages),
notifications=db.session.query(AppNotifications),
profile_form=UserUpdateForm(request.form),
login_form=login_form,
current_user=current_user,
logged_in=logged_in)
############
# - Modules - HRM
@app.route('/hrm-home')
@login_required
@hrm_basic_admin_required
def hrm_home():
logged_in = current_user.is_authenticated()
login_form = LoginForm(request.form)
return render_template('modules/hrm/home.html',
icon="fa fa-dashboard",
module_abbreviation="HRM",
module_name="Human Resource Management",
page_name="HRM Home",
app_config_settings=get_app_settings(),
messages=db.session.query(Messages),
notifications=db.session.query(AppNotifications),
profile_form=UserUpdateForm(request.form),
login_form=login_form,
current_user=current_user,
logged_in=logged_in)
@app.route('/hr', methods=['GET', 'POST'])
@app.route('/hrm', methods=['GET', 'POST'])
@login_required
@hrm_basic_admin_required
def hrm():
logged_in = current_user.is_authenticated()
login_form = LoginForm(request.form)
modals = {'PersonnelAddModal': personnel_add_modal, 'PersonnelUpdateModal': personnel_update_modal}
add_form = PersonnelAddForm(request.form)
update_form = PersonnelUpdateForm(request.form)
delete_form = PersonelDeleteForm(request.form)
forms = {'Personnel-Add-Form': add_form,
'Personnel-Update-Form': update_form,
'Personnel-Delete-Form': delete_form}
try:
personnel = db.session.query(Personnel)
# personnel = CompanyContacts.get_contacts()
    except Exception:
personnel = {"-": {"timestamp": "-", "first_name": "-", "last_name": "-", "phone_number": "-"}}
if request.method == 'POST':
if request.form['form_submit']:
if request.form['form_submit'] == 'Personnel-Add-Form':
# authority = check_permissions_to_add_personnel(add_form, current_user)
authority = True
#DEBUGGING
# new_form = {}
# flash(type(add_form))
# flash(add_form)
# for key, val in add_form.data.items():
# if len(val) > 0:
# new_form[key] = val
# new_form = PersonnelAddForm(new_form)
# flash(new_form)
if authority == True:
#Debugging
if add_form.validate_on_submit():
add_personnel(add_form)
else:
flash(record_add_error, 'danger')
elif request.form['form_submit'] == 'Personnel-Delete-Form':
# authority = check_permissions_to_delete_personnel(add_form, current_user)
authority = True
if authority == False:
flash( 'Failed to delete personnel. You do not have sufficient permissions.', 'danger')
elif authority == True:
if delete_form.validate_on_submit():
                        delete_personnel(delete_form)
else:
flash(record_delete_error, 'danger')
else:
flash( 'One or more errors occurred while attempting to determine user permissions. '
'Please contact the application administrator.', 'danger')
elif request.form['form_submit'] == 'Personnel-Update-Form':
# authority = check_permissions_to_assign_update_personnel(update_form, current_user)
authority = True
if authority == True:
if update_form.validate_on_submit():
update_personnel(update_form)
else:
flash(record_update_error, 'danger')
else:
flash('An error occurred while processing the submitted form. Please correct the errors in your form '
'submission. If you feel this message is in error, please contact the application administrator.',
'danger')
else:
flash('Error. Data appears to have been posted to the server, but could not determine type of form '
'submission. Please contact the application administrator.', 'danger')
        return redirect(url_for('hrm'))
return render_template('modules/hrm/index.html',
icon="fa fa-users",
module_abbreviation="HRM",
module_name="Human Resource Management",
page_name="HRM Home",
form_title="Personnel",
app_config_settings=get_app_settings(),
personnel=personnel,
messages=db.session.query(Messages),
notifications=db.session.query(AppNotifications),
login_form=login_form,
current_user=current_user,
logged_in=logged_in,
modals=modals,
forms=forms,
csv_upload_modal=personnel_csv_upload_modal,
upload_columns=get_upload_columns(Personnel))
@app.route('/hrm-settings')
@login_required
@hrm_super_admin_required
def hrm_settings():
logged_in = current_user.is_authenticated()
login_form = LoginForm(request.form)
return render_template('modules/hrm/settings.html',
icon="fa fa-dashboard",
module_abbreviation="HRM",
module_name="Human Resources Management",
page_name="HRM Settings",
app_config_settings=get_app_settings(),
messages=db.session.query(Messages),
notifications=db.session.query(AppNotifications),
profile_form=UserUpdateForm(request.form),
login_form=login_form,
current_user=current_user,
logged_in=logged_in)
############
# - Modules - AMS
@app.route('/bms')
@app.route('/billing')
@app.route('/ams')
@app.route('/accounting')
@login_required
@ams_basic_admin_required
def accounting():
logged_in = current_user.is_authenticated()
login_form = LoginForm(request.form)
return render_template('modules/accounting/index.html',
icon="fa fa-bar-chart",
module_abbreviation="AMS",
module_name="Accounting Management",
page_name="AMS Home",
app_config_settings=get_app_settings(),
messages=db.session.query(Messages),
notifications=db.session.query(AppNotifications),
login_form=login_form,
current_user=current_user,
logged_in=logged_in)
@app.route('/ams-settings')
@login_required
@ams_super_admin_required
def ams_settings():
logged_in = current_user.is_authenticated()
login_form = LoginForm(request.form)
return render_template('modules/accounting/settings.html',
icon="fa fa-dashboard",
module_abbreviation="AMS",
module_name="Accounting Management",
page_name="AMS Settings",
app_config_settings=get_app_settings(),
messages=db.session.query(Messages),
notifications=db.session.query(AppNotifications),
profile_form=UserUpdateForm(request.form),
login_form=login_form,
current_user=current_user,
logged_in=logged_in)
############
# - Modules - MMS
@app.route('/mms', methods=['GET', 'POST'])
@app.route('/marketing', methods=['GET', 'POST'])
@login_required
@mms_basic_admin_required
def marketing():
logged_in = current_user.is_authenticated()
errors = []
results = {}
login_form = LoginForm(request.form)
if request.method == "POST":
try:
url = request.form['url']
            # Check whether the submitted URL already starts with 'http://'.
if url.find("http://") == 0:
# r = requests.get(url).text.encode("utf-8")
# r = requests.get(url).text
r = requests.get(url)
# print(r)
else:
url = "http://" + url
r = requests.get(url)
except:
errors.append('Unable to get URL. Please make sure it\'s valid and try again.')
return render_template('modules/marketing/index.html',
icon="fa fa-line-chart",
module_abbreviation="MMS",
module_name="Marketing Management",
page_name="MMS Home",
app_config_settings=get_app_settings(),
errors=errors,
messages=db.session.query(Messages),
notifications=db.session.query(AppNotifications),
login_form=login_form,
current_user=current_user,
logged_in=logged_in)
if r:
# text processing
            raw = BeautifulSoup(r.text, 'html.parser').get_text()
nltk.data.path.append('./nltk_data/') # set the path
tokens = nltk.word_tokenize(raw)
text = nltk.Text(tokens)
# remove punctuation, count raw words
nonPunct = re.compile('.*[A-Za-z].*')
raw_words = [w for w in text if nonPunct.match(w)]
raw_word_count = Counter(raw_words)
# stop words
no_stop_words = [w for w in raw_words if w.lower() not in stops]
no_stop_words_count = Counter(no_stop_words)
# save the results
results = sorted(
no_stop_words_count.items(),
key=operator.itemgetter(1),
reverse=True
)[0:10]
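            # results now holds the ten most frequent non-stop words as
            # (word, count) tuples, e.g. (illustrative): [('data', 42), ('flask', 17)]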
try:
result = Result(
url=url,
result_all=raw_word_count,
result_no_stop_words=no_stop_words_count)
db.session.add(result)
db.session.commit()
except:
errors.append("Unable to add item to database.")
return render_template('modules/marketing/index.html',
icon="fa fa-line-chart",
module_abbreviation="MMS",
module_name="Marketing Management",
page_name="MMS Home",
app_config_settings=get_app_settings(),
errors=errors,
results=results,
messages=db.session.query(Messages),
notifications=db.session.query(AppNotifications),
login_form=login_form,
current_user=current_user,
logged_in=logged_in)
@app.route('/mms-settings')
@login_required
@mms_super_admin_required
def mms_settings():
logged_in = current_user.is_authenticated()
login_form = LoginForm(request.form)
return render_template('modules/marketing/settings.html',
icon="fa fa-dashboard",
module_abbreviation="MMS",
module_name="Marketing Management",
page_name="MMS Settings",
app_config_settings=get_app_settings(),
messages=db.session.query(Messages),
notifications=db.session.query(AppNotifications),
profile_form=UserUpdateForm(request.form),
login_form=login_form,
current_user=current_user,
logged_in=logged_in)
@app.route('/cool-analytics')
# @login_required
def cool_analytics():
logged_in = current_user.is_authenticated()
login_form = LoginForm(request.form)
modular_cdn_scripts = ('https://cdnjs.cloudflare.com/ajax/libs/Chart.js/2.1.4/Chart.bundle.min.js', )
# shared_local_scripts = ('js/Chart.js/dist/chart.bundle.min.js', 'js/require.js/require.min.js')
modular_local_scripts = ('js/components/analytics/line-legend.js', )
testy = ('js/components/analytics/line-legend.js', '')
# modular_local_scripts = ('templates/modules/analytics/static/js/line-legend.js', 'static/js/line-legend.js',
# 'line-legend.js', 'templates/test.js', 'test.js')
return render_template('modules/analytics/index.html',
icon="fa fa-dashboard",
module_abbreviation="Dashboard",
module_name='Dashboard',
page_name="Cool Analytics",
app_config_settings=get_app_settings(),
# messages='',
# notifications='',
messages=db.session.query(Messages),
notifications=db.session.query(AppNotifications),
login_form=login_form,
current_user=current_user,
logged_in=logged_in,
modular_cdn_scripts=modular_cdn_scripts,
# modular_cdn_scripts=('https://cdnjs.cloudflare.com/ajax/libs/Chart.js/2.1.4/Chart.bundle.min.js', 'http://kjdfkljl.com'),
modular_local_scripts=modular_local_scripts,
testy=testy)
class Violations(db.Model):
__tablename__ = 'violations'
db_columns = {
'violation_id': {'required': True, 'validators': 'string', 'validator_parameters': {'max': 25}},
'inspection_id': {'required': False, 'validators': 'string', 'validator_parameters': {'max': 25}},
'violation_category': {'required': False, 'validators': 'string', 'validator_parameters': {'max': 25}},
'violation_date': {'required': False, 'validators': 'string', 'validator_parameters': {'max': 25}},
'violation_date_closed': {'required': False, 'validators': 'string', 'validator_parameters': {'max': 25}},
'violation_type': {'required': False, 'validators': 'string', 'validator_parameters': {'max': 25}},
}
violation_id = db.Column(db.Integer, primary_key=True)
inspection_id = db.Column(db.Integer, index=True)
violation_category = db.Column(db.String(100), index=True)
violation_date = db.Column(db.String(100), index=True)
violation_date_closed = db.Column(db.String(100), index=True)
violation_type = db.Column(db.String(100), index=True)
created_on = db.Column(db.DateTime, default=db.func.now(), index=True)
updated_on = db.Column(db.DateTime, default=db.func.now(), onupdate=db.func.now(), index=True)
def __init__(self, violation_id, inspection_id, violation_category, violation_date, violation_date_closed,
violation_type):
self.violation_id = violation_id
self.inspection_id = inspection_id
self.violation_category = violation_category
self.violation_date = violation_date
self.violation_date_closed = violation_date_closed
self.violation_type = violation_type
def __repr__(self):
return '<id: {}>'.format(self.violation_id)
# @app.template_filter('datetime')
# def format_datetime(value, format='medium'):
# # https://pythonhosted.org/Flask-Babel/
# import babel
# if format == 'full':
# format = "EEEE, d. MMMM y 'at' HH:mm"
# elif format == 'medium':
# format = "EE dd.MM.y HH:mm"
#
# return babel.dates.format_datetime(value, format)
@app.route('/violations')
# @login_required
def violations():
import datetime
logged_in = current_user.is_authenticated()
login_form = LoginForm(request.form)
# modular_cdn_scripts = ('https://cdnjs.cloudflare.com/ajax/libs/Chart.js/2.1.4/Chart.bundle.min.js', )
modular_cdn_scripts = ('//cdnjs.cloudflare.com/ajax/libs/Chart.js/2.1.4/Chart.bundle.min.js',)
# modular_local_scripts = ('js/components/analytics/violations-pie-chart.js', )
# modular_local_styles = ('css/analytics/violations.css', )
violations_data = db.session.query(Violations)
categories = []
category_data = {}
for row in violations_data:
if row.violation_category not in categories:
categories.append(row.violation_category)
for category in categories:
category_data[category] = {
'total_violations': 0,
'earliest_violation': '',
'latest_violation': ''
}
for category in categories:
for row in violations_data:
if row.violation_category == category:
# Get total violations.
category_data[category]['total_violations'] += 1
# Get earliest violation.
if category_data[category]['earliest_violation'] == '':
category_data[category]['earliest_violation'] = datetime.datetime.strptime(row.violation_date,
'%Y-%m-%d %H:%M:%S')
else:
if datetime.datetime.strptime(row.violation_date,
'%Y-%m-%d %H:%M:%S')< category_data[category]['earliest_violation']:
category_data[category]['earliest_violation'] = datetime.datetime.strptime(row.violation_date,
'%Y-%m-%d %H:%M:%S')
# Get latest violation.
if category_data[category]['latest_violation'] == '':
category_data[category]['latest_violation'] = datetime.datetime.strptime(row.violation_date,
'%Y-%m-%d %H:%M:%S')
else:
if datetime.datetime.strptime(row.violation_date,
'%Y-%m-%d %H:%M:%S') > category_data[category]['latest_violation']:
category_data[category]['latest_violation'] = datetime.datetime.strptime(row.violation_date,
'%Y-%m-%d %H:%M:%S')
from collections import OrderedDict
category_data = OrderedDict(sorted(category_data.items()))
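    # category_data is now keyed in alphabetical order, e.g. (illustrative):
    #   OrderedDict([('CONSTRUCTION', {...}), ('PLUMBING', {...})])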
return render_template('modules/analytics/violations.html',
icon="fa fa-dashboard",
module_abbreviation="Dashboard",
module_name='Dashboard',
page_name="Cool Building Violations Analytics",
app_config_settings=get_app_settings(),
# messages='',
# notifications='',
messages=db.session.query(Messages),
notifications=db.session.query(AppNotifications),
login_form=login_form,
current_user=current_user,
logged_in=logged_in,
modular_cdn_scripts=modular_cdn_scripts,
# modular_cdn_scripts=('https://cdnjs.cloudflare.com/ajax/libs/Chart.js/2.1.4/Chart.bundle.min.js', 'http://kjdfkljl.com'),
# modular_local_scripts=modular_local_scripts,
# modular_local_styles=modular_local_styles,
csv_upload_modal=violations_csv_upload_modal,
upload_columns=get_upload_columns(Violations),
violations_data=violations_data,
categories=categories,
category_data=category_data)
if __name__ == "__main__":
print("## Running Just-a-Dash routes.py directly. ##")
```
#### File: migrations/versions/0791be4065b7_.py
```python
revision = '0791be4065b7'
down_revision = '<KEY>7'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column('ams_role', sa.String(length=20), nullable=True))
op.add_column('user', sa.Column('crm_role', sa.String(length=20), nullable=True))
op.add_column('user', sa.Column('hrm_role', sa.String(length=20), nullable=True))
op.add_column('user', sa.Column('mms_role', sa.String(length=20), nullable=True))
op.add_column('user', sa.Column('oms_role', sa.String(length=20), nullable=True))
op.drop_column('user', 'group_roles')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column('group_roles', sa.VARCHAR(length=500), autoincrement=False, nullable=True))
op.drop_column('user', 'oms_role')
op.drop_column('user', 'mms_role')
op.drop_column('user', 'hrm_role')
op.drop_column('user', 'crm_role')
op.drop_column('user', 'ams_role')
### end Alembic commands ###
```
#### File: migrations/versions/b6b8280dfd06_.py
```python
revision = '<KEY>'
down_revision = 'b4a25<PASSWORD>'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_constraint('messages_user_id_fkey', 'messages', type_='foreignkey')
op.drop_column('messages', 'user_id')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('messages', sa.Column('user_id', sa.INTEGER(), autoincrement=False, nullable=True))
op.create_foreign_key('messages_user_id_fkey', 'messages', 'user', ['user_id'], ['id'])
### end Alembic commands ###
``` |
{
"source": "joeflack4/knowledge-graph-change-language",
"score": 2
} |
#### File: knowledge-graph-change-language/parser/kgcl_2_sparql.py
```python
import re
from kgcl import (
NodeRename,
NodeObsoletion,
NodeUnobsoletion,
NodeDeletion,
NodeMove,
NodeDeepening,
NodeShallowing,
EdgeCreation,
EdgeDeletion,
PredicateChange,
NodeCreation,
ClassCreation,
NewSynonym,
RemovedNodeFromSubset,
)
def is_label(input):
return re.match(r"\'[^ \s\'].*\'", input)
def is_id(input):
return re.match(r"<\S+>", input)
def convert(kgclInstance):
# label renaming
# TODO: case for "rename 'old' from 'id' to 'new'
if type(kgclInstance) is NodeRename:
if is_label(kgclInstance.old_value) and is_label(kgclInstance.new_value):
return rename(kgclInstance)
# TODO: error handling
# node obsoletion
# TODO: new model only allows to obsolete a node
if type(kgclInstance) is NodeObsoletion:
if is_label(kgclInstance.about_node):
return obsolete_by_label(kgclInstance)
if is_id(kgclInstance.about_node):
return obsolete_by_id(kgclInstance)
# TODO: error handling
# node obsoletion
if type(kgclInstance) is NodeUnobsoletion:
if is_id(kgclInstance.about_node):
return unobsolete(kgclInstance)
# TODO: error handling
# node deletion
if type(kgclInstance) is NodeDeletion:
if is_id(kgclInstance.about_node):
return delete_by_id(kgclInstance)
if is_label(kgclInstance.about_node):
return delete_by_label(kgclInstance)
# TODO: error handling
# node creation
if type(kgclInstance) is NodeCreation:
if is_id(kgclInstance.node_id) and is_label(kgclInstance.name):
return create_node(kgclInstance)
# class creation
if type(kgclInstance) is ClassCreation:
if is_id(kgclInstance.node_id):
return create_class(kgclInstance)
# node deepending
if type(kgclInstance) is NodeDeepening:
if (
is_id(kgclInstance.about_edge.subject)
and is_id(kgclInstance.old_value)
and is_id(kgclInstance.new_value)
):
return node_deepening(kgclInstance)
# node shallowing
if type(kgclInstance) is NodeShallowing:
if (
is_id(kgclInstance.about_edge.subject)
and is_id(kgclInstance.old_value)
and is_id(kgclInstance.new_value)
):
return node_shallowing(kgclInstance)
# edge creation
if type(kgclInstance) is EdgeCreation:
if (
is_id(kgclInstance.subject)
and is_id(kgclInstance.predicate)
and (is_id(kgclInstance.object) or is_label(kgclInstance.object))
):
return edge_creation(kgclInstance)
# edge deletion
if type(kgclInstance) is EdgeDeletion:
if (
is_id(kgclInstance.subject)
and is_id(kgclInstance.predicate)
and (is_id(kgclInstance.object) or is_label(kgclInstance.object))
):
return edge_deletion(kgclInstance)
# node move
if type(kgclInstance) is NodeMove:
if (
is_id(kgclInstance.about_edge.subject)
and is_id(kgclInstance.old_value)
and is_id(kgclInstance.new_value)
):
return node_move(kgclInstance)
if type(kgclInstance) is NewSynonym:
if is_id(kgclInstance.about_node) and is_label(kgclInstance.new_value):
return new_synonym(kgclInstance)
if type(kgclInstance) is PredicateChange:
if (
is_id(kgclInstance.about_edge.subject)
and is_id(kgclInstance.about_edge.object)
and is_id(kgclInstance.old_value)
and is_id(kgclInstance.new_value)
):
return change_predicate(kgclInstance)
if type(kgclInstance) is RemovedNodeFromSubset:
if is_id(kgclInstance.about_node) and is_id(kgclInstance.subset):
return remove_node_from_subset(kgclInstance)
def node_move(kgclInstance):
term_id = kgclInstance.about_edge.subject
old_value = kgclInstance.old_value
new_value = kgclInstance.new_value
prefix = "PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> "
prefix += "PREFIX owl: <http://www.w3.org/2002/07/owl#> "
prefix += "PREFIX xsd: <http://www.w3.org/2001/XMLSchema#> "
deleteQuery = term_id + " ?relation " + old_value + " . "
delete = "DELETE {" + deleteQuery + "}"
insertQuery = term_id + " ?relation " + new_value + " . "
insert = "INSERT {" + insertQuery + "}"
whereQuery = term_id + " ?relation " + old_value + " . "
where = "WHERE {" + whereQuery + "}"
updateQuery = prefix + " " + delete + " " + insert + " " + where
return updateQuery
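# Illustrative render of node_move (assumed IDs: subject <ex:A>,
# old parent <ex:B>, new parent <ex:C>; PREFIX block omitted):
#   DELETE {<ex:A> ?relation <ex:B> . }
#   INSERT {<ex:A> ?relation <ex:C> . }
#   WHERE {<ex:A> ?relation <ex:B> . }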
def remove_node_from_subset(kgclInstance):
about = kgclInstance.about_node
subset = kgclInstance.subset
prefix = "PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> "
prefix += "PREFIX obo: <http://purl.obolibrary.org/obo/> "
deleteQuery = about + " obo:inSubset " + subset + " . "
delete = "DELETE {" + deleteQuery + "}"
whereQuery = about + " obo:inSubset " + subset + " . "
where = "WHERE { " + whereQuery + " }"
updateQuery = prefix + " " + delete + " " + where
return updateQuery
def change_predicate(kgclInstance):
subject = kgclInstance.about_edge.subject
object = kgclInstance.about_edge.object
old_value = kgclInstance.old_value
new_value = kgclInstance.new_value
prefix = "PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> "
deleteQuery = subject + " " + old_value + " " + object + " . "
delete = "DELETE {" + deleteQuery + "}"
insertQuery = subject + " " + new_value + " " + object + " . "
insert = "INSERT {" + insertQuery + "}"
where = "WHERE {}"
updateQuery = prefix + " " + delete + " " + insert + " " + where
return updateQuery
def node_deepening(kgclInstance):
term_id = kgclInstance.about_edge.subject
old_value = kgclInstance.old_value
new_value = kgclInstance.new_value
prefix = "PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> "
prefix += "PREFIX owl: <http://www.w3.org/2002/07/owl#> "
prefix += "PREFIX xsd: <http://www.w3.org/2001/XMLSchema#> "
deleteQuery = term_id + " ?relation " + old_value + " . "
delete = "DELETE {" + deleteQuery + "}"
insertQuery = term_id + " ?relation " + new_value + " . "
insert = "INSERT {" + insertQuery + "}"
whereQuery = term_id + " ?relation " + old_value + " . "
whereQuery += new_value + " ?relation " + old_value + " . "
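    # The second WHERE triple requires the new parent to already sit under the
    # old parent via the same ?relation, which is what makes this a deepening
    # (node_shallowing below inverts that check).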
where = "WHERE {" + whereQuery + "}"
updateQuery = prefix + " " + delete + " " + insert + " " + where
return updateQuery
def node_shallowing(kgclInstance):
term_id = kgclInstance.about_edge.subject
old_value = kgclInstance.old_value
new_value = kgclInstance.new_value
prefix = "PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> "
prefix += "PREFIX owl: <http://www.w3.org/2002/07/owl#> "
prefix += "PREFIX xsd: <http://www.w3.org/2001/XMLSchema#> "
deleteQuery = term_id + " ?relation " + old_value + " . "
delete = "DELETE {" + deleteQuery + "}"
insertQuery = term_id + " ?relation " + new_value + " . "
insert = "INSERT {" + insertQuery + "}"
whereQuery = term_id + " ?relation " + old_value + " . "
whereQuery += old_value + " ?relation " + new_value + " . "
where = "WHERE {" + whereQuery + "}"
updateQuery = prefix + " " + delete + " " + insert + " " + where
return updateQuery
# TODO: handling of language tags
# look things up at https://www.ebi.ac.uk/ols/ontologies/iao
def unobsolete(kgclInstance):
about = kgclInstance.about_node
# http://wiki.geneontology.org/index.php/Restoring_an_Obsolete_Ontology_Term
# 1. remove 'obsolete' from label
# 2. remove 'OBSOLETE' from definition
# 3. update comment to "Note that this term was reinstated from obsolete"
    # TODO: do we remove the previous comment? (all comments?)
# 4. Remove any replaced_by and consider tags
# 5. Remove the owl:deprecated: true tag
prefix = "PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> "
prefix += "PREFIX owl: <http://www.w3.org/2002/07/owl#> "
prefix += "PREFIX xsd: <http://www.w3.org/2001/XMLSchema#> "
prefix += "PREFIX obo: <http://purl.obolibrary.org/obo/> "
prefix += "PREFIX oboInOwl: <http://www.geneontology.org/formats/oboInOwl#> "
deleteQuery = about + " rdfs:label ?label . "
deleteQuery += about + ' owl:deprecated "true"^^xsd:boolean . '
deleteQuery += about + " obo:IAO_0000115 ?unobsolete_definition . "
deleteQuery += about + " obo:IAO_0100001 ?replacedBy . "
deleteQuery += about + " oboInOwl:consider ?consider . "
delete = "DELETE {" + deleteQuery + "}"
insertQuery = about + " rdfs:label ?unobsolete_label . "
insertQuery += about + " obo:IAO_0000115 ?unobsolete_definition . "
    insertQuery += (
        about + ' rdfs:comment "Note that this term was reinstated from obsolete." . '
    )
insert = "INSERT {" + insertQuery + "}"
whereQuery = "{ " + about + " rdfs:label ?label . "
whereQuery += 'BIND(IF(STRSTARTS(?label, "obsolete "),'
whereQuery += 'SUBSTR(?label,10),?label) AS ?unobsolete_label ) } '
whereQuery += " UNION "
whereQuery += "{ " + about + " rdfs:label ?label . "
whereQuery += about + " obo:IAO_0000115 ?definition . "
whereQuery += 'BIND(IF(STRSTARTS(?definition, "OBSOLETE "),'
whereQuery += 'SUBSTR(?definition,10),?definition) AS ?unobsolete_definition ) } '
whereQuery += " UNION "
whereQuery += "{ " + about + " rdfs:label ?label . "
whereQuery += about + " obo:IAO_0100001 ?replacedBy . } "
whereQuery += " UNION "
whereQuery += "{ " + about + " rdfs:label ?label . "
whereQuery += about + " oboInOwl:consider ?consider . } "
where = "WHERE {" + whereQuery + "}"
updateQuery = prefix + " " + delete + " " + insert + " " + where
return updateQuery
def rename(kgclInstance):
oldValue = kgclInstance.old_value
newValue = kgclInstance.new_value
oldValue = oldValue.replace("'", "")
# initialise subject
if kgclInstance.about_node is None:
subject = "?entity"
else:
subject = kgclInstance.about_node
# this changes only the label of an entity
prefix = "PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>"
deleteQuery = subject + " rdfs:label ?label ."
delete = "DELETE {" + deleteQuery + "}"
insertQuery = subject + " rdfs:label " + newValue + " ."
insert = "INSERT {" + insertQuery + "}"
whereQuery = subject + " rdfs:label ?label . "
whereQuery += " BIND( LANG(?label) AS ?language) "
whereQuery += ' FILTER(STR(?label)="' + oldValue + '") '
where = "WHERE {" + whereQuery + "}"
updateQuery = prefix + " " + delete + " " + insert + " " + where
return updateQuery
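# Illustrative render of rename (assumed values: old 'foo', new 'bar',
# no about_node, so subject defaults to ?entity):
#   DELETE {?entity rdfs:label ?label .}
#   INSERT {?entity rdfs:label 'bar' .}
#   WHERE {?entity rdfs:label ?label . BIND(LANG(?label) AS ?language)
#          FILTER(STR(?label)="foo")}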
def delete_by_id(kgclInstance):
about = kgclInstance.about_node # this needs to be an ID - not a label
deleteQuery = (
"?s1 ?p1 " + about + " . "
) # this does not delete triples with blank nodes
deleteQuery += "?s2 " + about + " ?o1 . "
deleteQuery += "?s2 " + about + " ?o1 . "
deleteQuery += about + " ?p2 ?o2 . "
delete = "DELETE {" + deleteQuery + "}"
whereQuery = "{ ?s1 ?p1 " + about + " . } "
whereQuery += " UNION "
whereQuery = "{ ?s2 " + about + " ?o1 . } "
whereQuery += " UNION "
whereQuery += "{ " + about + " ?p2 ?o2 . } "
where = "WHERE {" + whereQuery + "}"
updateQuery = delete + " " + where
return updateQuery
def delete_by_label(kgclInstance):
about = kgclInstance.about_node
about = about.replace("'", "") # remove single quotes from label input
deleteQuery = "?s1 ?p1 ?label . "
deleteQuery += "?s1 ?p2 ?o1 . "
delete = "DELETE {" + deleteQuery + "}"
whereQuery = "?s1 ?p1 ?label . "
whereQuery += "?s1 ?p2 ?o1 . "
whereQuery += ' FILTER(STR(?label)="' + about + '") ' # ignore language tags
where = "WHERE {" + whereQuery + "}"
updateQuery = delete + " " + where
return updateQuery
def create_class(kgclInstance):
termId = kgclInstance.node_id
prefix = "PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> "
prefix += "PREFIX owl: <http://www.w3.org/2002/07/owl#> "
insertQuery = termId + " rdf:type owl:Class . "
insert = "INSERT {" + insertQuery + "}"
where = "WHERE {}"
updateQuery = prefix + " " + insert + " " + where
return updateQuery
def create_node(kgclInstance):
termId = kgclInstance.node_id
label = kgclInstance.name
prefix = "PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> "
insertQuery = termId + " rdfs:label " + label + " . "
insert = "INSERT {" + insertQuery + "}"
where = "WHERE {}"
updateQuery = prefix + " " + insert + " " + where
return updateQuery
def edge_creation(kgclInstance):
subject = kgclInstance.subject
predicate = kgclInstance.predicate
object = kgclInstance.object
prefix = "PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> "
prefix += "PREFIX owl: <http://www.w3.org/2002/07/owl#> "
insertQuery = subject + " " + predicate + " " + object + " . "
insert = "INSERT {" + insertQuery + "}"
where = "WHERE {}"
updateQuery = prefix + " " + insert + " " + where
return updateQuery
def edge_deletion(kgclInstance):
subject = kgclInstance.subject
predicate = kgclInstance.predicate
object = kgclInstance.object
prefix = "PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> "
prefix += "PREFIX owl: <http://www.w3.org/2002/07/owl#> "
deleteQuery = subject + " " + predicate + " " + object + " . "
delete = "DELETE {" + deleteQuery + "}"
where = "WHERE {" + deleteQuery + "}"
updateQuery = prefix + " " + delete + " " + where
return updateQuery
def obsolete_by_id(kgclInstance):
about = kgclInstance.about_node
replacement = kgclInstance.has_direct_replacement
prefix = "PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> "
prefix += "PREFIX owl: <http://www.w3.org/2002/07/owl#> "
prefix += "PREFIX xsd: <http://www.w3.org/2001/XMLSchema#> "
prefix += "PREFIX obo: <http://purl.obolibrary.org/obo/> "
deleteQuery = about + "rdfs:label ?label . "
deleteQuery += about + " rdfs:subClassOf ?superclass . "
deleteQuery += about + " owl:equivalentClass ?rhs . "
deleteQuery += "?lhs owl:equivalentClass " + about + " ."
delete = "DELETE {" + deleteQuery + "}"
insertQuery = "?entity rdfs:label ?tag . "
insertQuery += about + ' owl:deprecated "true"^^xsd:boolean . '
if kgclInstance.has_direct_replacement is not None:
insertQuery += about + " obo:IAO_0100001 " + replacement + " . "
insert = "INSERT {" + insertQuery + "}"
whereQuery = "{ " + about + " rdfs:subClassOf ?superclass . } "
whereQuery += " UNION "
whereQuery += "{ " + about + " owl:equivalentClass ?rhs . } "
whereQuery += " UNION "
whereQuery += "{ ?lhs owl:equivalentClass " + about + " . } "
whereQuery += " UNION "
whereQuery += "{ ?entity rdfs:label ?label . "
whereQuery += ' BIND(CONCAT("obsolete ", ?label) AS ?obsolete_label ) '
whereQuery += " BIND( LANG(?label) AS ?language) "
whereQuery += " BIND( STRLANG(?obsolete_label,?language) AS ?tag) } "
where = "WHERE {" + whereQuery + "}"
updateQuery = prefix + " " + delete + " " + insert + " " + where
return updateQuery
def obsolete_by_label(kgclInstance):
about = kgclInstance.about_node
prefix = "PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> "
prefix += "PREFIX owl: <http://www.w3.org/2002/07/owl#> "
prefix += "PREFIX xsd: <http://www.w3.org/2001/XMLSchema#> "
deleteQuery = "?entity rdfs:label ?label . "
deleteQuery += "?entity rdfs:subClassOf ?superclass . "
deleteQuery += "?entity owl:equivalentClass ?rhs . "
deleteQuery += "?lhs owl:equivalentClass ?entity . "
delete = "DELETE {" + deleteQuery + "}"
inner_label = about.replace("'", "")
obsolete_label = "obsolete " + inner_label
insertQuery = "?entity rdfs:label ?tag . "
insertQuery += '?entity owl:deprecated "true"^^xsd:boolean . '
insert = "INSERT {" + insertQuery + "}"
# TODO: handle the special case where only a label is present
# (in that case we need to query for a single triple)
whereQuery = "{ ?entity rdfs:label ?label . "
whereQuery += " ?entity rdfs:subClassOf ?superclass . "
whereQuery += " BIND( LANG(?label) AS ?language) "
whereQuery += ' BIND( STRLANG("' + obsolete_label + '",?language) AS ?tag) '
whereQuery += ' FILTER(STR(?label)="' + inner_label + '") } '
whereQuery += " UNION "
whereQuery += "{ ?entity rdfs:label ?label . "
whereQuery += " ?entity owl:equivalentClass ?rhs . "
whereQuery += " BIND( LANG(?label) AS ?language) "
whereQuery += ' BIND( STRLANG("' + obsolete_label + '",?language) AS ?tag) '
whereQuery += ' FILTER(STR(?label)="' + inner_label + '") } '
whereQuery += " UNION "
whereQuery += "{ ?entity rdfs:label ?label . "
whereQuery += " ?lhs owl:equivalentClass ?entity . "
whereQuery += " BIND( LANG(?label) AS ?language) "
whereQuery += ' BIND( STRLANG("' + obsolete_label + '",?language) AS ?tag) '
whereQuery += ' FILTER(STR(?label)="' + inner_label + '") } '
whereQuery += " UNION "
whereQuery += "{ ?entity rdfs:label ?label . "
whereQuery += " ?entity rdf:type ?type . "
whereQuery += " BIND( LANG(?label) AS ?language) "
whereQuery += ' BIND( STRLANG("' + obsolete_label + '",?language) AS ?tag) '
whereQuery += ' FILTER(STR(?label)="' + inner_label + '") } '
where = "WHERE {" + whereQuery + "}"
updateQuery = prefix + " " + delete + " " + insert + " " + where
return updateQuery
def new_synonym(kgclInstance):
about = kgclInstance.about_node
synonym = kgclInstance.new_value
prefix = "PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> "
prefix += "PREFIX owl: <http://www.w3.org/2002/07/owl#> "
prefix += "PREFIX xsd: <http://www.w3.org/2001/XMLSchema#> "
prefix += "PREFIX oboInOwl: <http://www.geneontology.org/formats/oboInOwl#> "
# TODO: check whether this way of creating synonyms is OK
# or whether we want to include qualifiers, e.g. broader, exact, related, narrower
insertQuery = about + " oboInOwl:Synonym " + synonym + " . "
insert = "INSERT {" + insertQuery + "}"
where = "WHERE {}"
updateQuery = prefix + " " + insert + " " + where
return updateQuery
``` |
{
"source": "joeflack4/linkml-runtime",
"score": 2
} |
#### File: linkml_runtime/loaders/json_loader.py
```python
import json
from typing import Union, TextIO, Optional, Dict, Type
from hbreader import FileInfo
from linkml_runtime.loaders.loader_root import Loader
from linkml_runtime.utils.yamlutils import YAMLRoot
class JSONLoader(Loader):
def load(self, source: Union[str, dict, TextIO], target_class: Type[YAMLRoot], *, base_dir: Optional[str] = None,
metadata: Optional[FileInfo] = None, **_) -> YAMLRoot:
def loader(data: Union[str, dict], _: FileInfo) -> Optional[Dict]:
data_as_dict = json.loads(data) if isinstance(data, str) else data
typ = data_as_dict.pop('@type', None)
if typ and typ != target_class.__name__:
# TODO: connect this up with the logging facility or warning?
print(f"Warning: input type mismatch. Expected: {target_class.__name__}, Actual: {typ}")
return self.json_clean(data_as_dict)
if not metadata:
metadata = FileInfo()
if base_dir and not metadata.base_path:
metadata.base_path = base_dir
return self.load_source(source, loader, target_class,
accept_header="application/ld+json, application/json, text/json", metadata=metadata)
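# Minimal usage sketch (assumes 'Person' is a LinkML-generated YAMLRoot
# subclass and person.json conforms to its schema):
#   person = JSONLoader().load('person.json', Person)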
```
#### File: linkml_runtime/utils/dataclass_extensions_376.py
```python
import sys
if sys.version_info < (3, 7, 0):
raise NotImplementedError("LinkML requires Python 3.7 or later to run")
elif sys.version_info >= (3, 7, 6):
from dataclasses import MISSING, _HAS_DEFAULT_FACTORY, _POST_INIT_NAME, _FIELD_INITVAR, _init_param, _field_init, _create_fn
def dataclasses_init_fn_with_kwargs(fields, frozen, has_post_init, self_name, globals):
# fields contains both real fields and InitVar pseudo-fields.
# Make sure we don't have fields without defaults following fields
# with defaults. This actually would be caught when exec-ing the
# function source code, but catching it here gives a better error
# message, and future-proofs us in case we build up the function
# using ast.
seen_default = False
for f in fields:
# Only consider fields in the __init__ call.
if f.init:
if not (f.default is MISSING and f.default_factory is MISSING):
seen_default = True
elif seen_default:
raise TypeError(f'non-default argument {f.name!r} '
'follows default argument')
locals = {f'_type_{f.name}': f.type for f in fields}
locals.update({
'MISSING': MISSING,
'_HAS_DEFAULT_FACTORY': _HAS_DEFAULT_FACTORY,
})
body_lines = []
for f in fields:
line = _field_init(f, frozen, locals, self_name)
# line is None means that this field doesn't require
# initialization (it's a pseudo-field). Just skip it.
if line:
body_lines.append(line)
# Does this class have a post-init function?
if has_post_init:
params_str = ','.join(f.name for f in fields
if f._field_type is _FIELD_INITVAR)
body_lines.append(f'{self_name}.{_POST_INIT_NAME}({params_str}{", " if params_str else ""} **kwargs)')
# If no body lines, use 'pass'.
if not body_lines:
body_lines = ['pass']
return _create_fn('__init__',
[self_name] + [_init_param(f) for f in fields if f.init] + ["**kwargs"],
body_lines,
locals=locals,
globals=globals,
return_type=None)
else:
from dataclasses import MISSING, _HAS_DEFAULT_FACTORY, _POST_INIT_NAME, _FIELD_INITVAR, _init_param, _field_init, \
_create_fn
def dataclasses_init_fn_with_kwargs(fields, frozen, has_post_init, self_name):
# fields contains both real fields and InitVar pseudo-fields.
# Make sure we don't have fields without defaults following fields
# with defaults. This actually would be caught when exec-ing the
# function source code, but catching it here gives a better error
# message, and future-proofs us in case we build up the function
# using ast.
seen_default = False
for f in fields:
# Only consider fields in the __init__ call.
if f.init:
if not (f.default is MISSING and f.default_factory is MISSING):
seen_default = True
elif seen_default:
raise TypeError(f'non-default argument {f.name!r} '
'follows default argument')
globals = {'MISSING': MISSING,
'_HAS_DEFAULT_FACTORY': _HAS_DEFAULT_FACTORY}
body_lines = []
for f in fields:
line = _field_init(f, frozen, globals, self_name)
# line is None means that this field doesn't require
# initialization (it's a pseudo-field). Just skip it.
if line:
body_lines.append(line)
# Does this class have a post-init function?
if has_post_init:
params_str = ','.join(f.name for f in fields
if f._field_type is _FIELD_INITVAR)
body_lines.append(f'{self_name}.{_POST_INIT_NAME}({params_str}{", " if params_str else ""} **kwargs)')
# If no body lines, use 'pass'.
if not body_lines:
body_lines = ['pass']
locals = {f'_type_{f.name}': f.type for f in fields}
return _create_fn('__init__',
[self_name] + [_init_param(f) for f in fields if f.init] + ["**kwargs"],
body_lines,
locals=locals,
globals=globals,
return_type=None)
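# Usage sketch: this function is meant to replace dataclasses' private
# _init_fn so that generated __init__ methods tolerate extra keyword
# arguments (see issue_368.py below for a real application):
#   import dataclasses
#   dataclasses._init_fn = dataclasses_init_fn_with_kwargs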
```
#### File: linkml-runtime/tests/__init__.py
```python
import logging
import configparser
# Global testing control variables
import os
from tests.support.test_environment import MismatchAction
# ---------------------------------------------------------------
# DO NOT change this file.
# To change the default test harness settings:
# > cd tests
# > cp test_config.ini.example test_config.ini
#
# Make your edits in test_config.ini. Note that it is in .gitignore and will not be submitted
# ----------------------------------------------------------------
config = configparser.ConfigParser()
config.read(os.path.join(os.path.dirname(__file__), 'test_config.ini'))
if 'test.settings' not in config.sections():
config['test.settings'] = {} # initialize a blank setting if file doesn't exist
test_settings = config['test.settings']
# Action on mismatch. One of 'Ignore', 'Report' or 'Fail'
# If 'Fail', the expected file will be saved in the appropriate temp directory
# NOTE: Before setting this back to Report or Ignore, you need to run cleartemp.sh in this directory
DEFAULT_MISMATCH_ACTION = eval(test_settings.get('DEFAULT_MISMATCH_ACTION', 'MismatchAction.Report'))
# Use local import map. If True, tests/input/local_import_map.json is used to create the test files. Note that this
# will result in local path names being recorded in jsonld files. This should always be set to False before generating
# the final output
USE_LOCAL_IMPORT_MAP = test_settings.getboolean('USE_LOCAL_IMPORT_MAP', False)
# There are lots of warnings emitted by the generators. Default logging level
DEFAULT_LOG_LEVEL = eval(test_settings.get('DEFAULT_LOG_LEVEL', 'logging.ERROR'))
DEFAULT_LOG_LEVEL_TEXT = test_settings.get('DEFAULT_LOG_LEVEL_TEXT', 'ERROR')
# Skip RDF comparison, as it takes a lot of time
SKIP_RDF_COMPARE = test_settings.getboolean('SKIP_RDF_COMPARE', False)
SKIP_RDF_COMPARE_REASON = test_settings.get('SKIP_RDF_COMPARE_REASON', 'tests/__init__.py RDF output not checked SKIP_RDF_COMPARE is True')
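# Example tests/test_config.ini (values are illustrative; the keys mirror the
# reads above):
#   [test.settings]
#   DEFAULT_MISMATCH_ACTION = MismatchAction.Ignore
#   USE_LOCAL_IMPORT_MAP = False
#   SKIP_RDF_COMPARE = True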
# Exception for use in script testing. Global to prevent redefinition
class CLIExitException(Exception):
def __init__(self, code: int) -> None:
self.code = code
super().__init__(self)
```
#### File: test_issues/input/issue_368.py
```python
import dataclasses
import sys
import re
from typing import Optional, List, Union, Dict, ClassVar, Any
from dataclasses import dataclass
from linkml_runtime.utils.slot import Slot
from linkml_runtime.utils.metamodelcore import empty_list, empty_dict, bnode
from linkml_runtime.utils.yamlutils import YAMLRoot, extended_str, extended_float, extended_int
from linkml_runtime.utils.dataclass_extensions_376 import dataclasses_init_fn_with_kwargs
from linkml_runtime.utils.formatutils import camelcase, underscore, sfx
from linkml_runtime.utils.enumerations import EnumDefinitionImpl
from rdflib import Namespace, URIRef
from linkml_runtime.utils.curienamespace import CurieNamespace
from . issue_368_imports import ParentClass, SampleEnum
metamodel_version = "1.7.0"
# Overwrite dataclasses _init_fn to add **kwargs in __init__
dataclasses._init_fn = dataclasses_init_fn_with_kwargs
# Namespaces
LINKML = CurieNamespace('linkml', 'https://w3id.org/linkml/')
DEFAULT_ = CurieNamespace('', 'https://microbiomedata/schema/')
# Types
# Class references
@dataclass
class SampleClass(ParentClass):
_inherited_slots: ClassVar[List[str]] = []
class_class_uri: ClassVar[URIRef] = URIRef("https://microbiomedata/schema/SampleClass")
class_class_curie: ClassVar[str] = None
class_name: ClassVar[str] = "SampleClass"
class_model_uri: ClassVar[URIRef] = URIRef("https://microbiomedata/schema/SampleClass")
slot_1: Optional[Union[str, "SampleEnum"]] = None
def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
if self.slot_1 is not None and not isinstance(self.slot_1, SampleEnum):
self.slot_1 = SampleEnum(self.slot_1)
super().__post_init__(**kwargs)
# Enumerations
# Slots
class slots:
pass
slots.slot_1 = Slot(uri=DEFAULT_.slot_1, name="slot_1", curie=DEFAULT_.curie('slot_1'),
model_uri=DEFAULT_.slot_1, domain=None, range=Optional[Union[str, "SampleEnum"]])
```
#### File: tests/test_issues/test_include_schema.py
```python
import unittest
from linkml_runtime.linkml_model.meta import SchemaDefinition
from linkml_runtime.loaders import yaml_loader
from tests.test_issues.environment import env
class IncludeSchemaTestCase(unittest.TestCase):
""" include_schema.yaml produces a Python exception on an uncaught error"""
# "Awaiting fix for issue #3"
def test_include_schema(self):
inp = yaml_loader.load(env.input_path('include_schema.yaml'), SchemaDefinition)
if __name__ == '__main__':
unittest.main()
```
#### File: tests/test_issues/test_issue_8.py
```python
import unittest
from typing import Type
from linkml_runtime.linkml_model import SchemaDefinition, SlotDefinition, ClassDefinition
from linkml_runtime.loaders import yaml_loader
from linkml_runtime.utils.yamlutils import YAMLRoot
from tests.test_issues.environment import env
def override(cls: Type[YAMLRoot]):
orig = cls.MissingRequiredField
def mrf(self, field_name: str) -> None:
if isinstance(self, SchemaDefinition) and field_name == "name" and self.id:
id_parts = self.id.replace('#', '/').rsplit('/')
self.name = id_parts[-1]
elif isinstance(self, SlotDefinition) and field_name == "name":
self.name = "id"
elif isinstance(self, ClassDefinition) and field_name == "name":
self.name = "core"
else:
orig(self, f"{type(self).__name__}.{field_name}")
cls.MissingRequiredField = mrf
return orig
msgs = set()
def override2():
def mrf(self, field_name: str) -> None:
msgs.add(f"{type(self).__name__}.{field_name} is not supplied")
orig = YAMLRoot.MissingRequiredField
YAMLRoot.MissingRequiredField = mrf
return orig
class TestErrorIntercept(unittest.TestCase):
def test_legitimate_error(self):
""" Test that legitimate errors are emitted correctly """
test_file = env.input_path('issue_8a.yaml')
with self.assertRaises(ValueError) as e:
yaml_loader.load(test_file, SchemaDefinition)
self.assertEqual('name must be supplied', str(e.exception), "ValueError should be raised")
orig = override(SchemaDefinition)
try:
with self.assertRaises(ValueError) as e:
yaml_loader.load(test_file, SchemaDefinition)
self.assertEqual('SchemaDefinition.name must be supplied', str(e.exception))
finally:
# SchemaDefinition.MissingRequiredField = orig
delattr(SchemaDefinition, "MissingRequiredField")
def test_missing_intercept(self):
test_file = env.input_path('issue_8.yaml')
with self.assertRaises(ValueError) as e:
yaml_loader.load(test_file, SchemaDefinition)
self.assertEqual('name must be supplied', str(e.exception), "ValueError should be raised")
try:
orig = override2()
yaml_loader.load(test_file, SchemaDefinition)
finally:
YAMLRoot.MissingRequiredField = orig
self.assertEqual({'ClassDefinition.name is not supplied',
'SlotDefinition.name is not supplied',
'SchemaDefinition.name is not supplied'}, msgs)
try:
origschd = override(SchemaDefinition)
origslotd = override(SlotDefinition)
origcd = override(ClassDefinition)
yaml_loader.load(test_file, SchemaDefinition)
finally:
delattr(SchemaDefinition, "MissingRequiredField")
delattr(SlotDefinition, "MissingRequiredField")
delattr(ClassDefinition, "MissingRequiredField")
# SchemaDefinition.MissingRequiredField = origschd
# SlotDefinition.MissingRequiredField = origslotd
# ClassDefinition.MissingRequiredField = origcd
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "joeflack4/ohbehave",
"score": 2
} |
#### File: ohbehave/spec/test_update_output_div.py
```python
from lib.app import update_output_div
def test_update_output_div():
    """Test that update_output_div echoes the user input."""
input_value = 'hello'
assert update_output_div(input_value) == '{}'.format(input_value)
``` |
{
"source": "joeflack4/pma-api-open-model",
"score": 2
} |
#### File: api/open_model_py/__main__.py
```python
from pmaapi.config import MODEL_FILE # For testing.
from pprint import PrettyPrinter # For testing.
from sys import stderr
from os.path import isfile
from copy import copy
from itertools import repeat as iter_repeat
# from sqlalchemy.ext.declarative import declarative_base
# from sqlalchemy import Column, DateTime, Boolean, Integer, String
# from sqlalchemy.exc import ProgrammingError, IntegrityError
# from sqlalchemy.sql.functions import func as sqlalchemy_func
# from flask_sqlalchemy import SQLAlchemy
from pmaapi.api.open_model_py.definitions.error \
import OpenModelException, UnsupportedFileTypeException, \
UnexpectedDataTypeException, InvalidSchemaException, \
UnimplementedFunctionalityException
from pmaapi.api.open_model_py.definitions.abstractions \
import read_contents, inverse_filter_dict, yaml_load_clean, \
yaml_dump_clean
from pmaapi.api.open_model_py.definitions.constants import MAPPINGS
# from pmaapi.__main__ import FLASK_APP
# from pmaapi.definitions.error import raise_database_exception
# db = SQLAlchemy(FLASK_APP)
# db.Base = declarative_base()
# now = sqlalchemy_func.now
# TODO - Remove all this stuff as it's all going to be auto-generated.
# - TODO: Relational mapping - http://tinyurl.com/yc2j7jkg
# - TODO: Use unicode instead of string?
# - TODO: Consider autoload to reflect table attributes from what is in DB.
# Class Generation ------------------------------------------------------------
# _dict = {}
# Dynamic instance attribute generation.
# class AllMyFields:
# """Dynamic Class."""
#
# def __init__(self, dictionary):
# """Init."""
# for k, v in dictionary.items():
# setattr(self, k, v)
#
#
# # Dynamic class generation.
# # For the tuple, can use 'object' maybe, or give it a class(s).
# my_class = type('MyClass', (object, ), {'hello_world': lambda: 'hello'})
# my_instance = my_class({'name': 'my name'})
# SqlAlchemy ------------------------------------------------------------------
# class BaseModel(db.Model): # TODO: Set in UTC.
# """Base Model."""
# __abstract__ = True
#
# created_on = Column(DateTime, default=now(), index=True)
# updated_on = Column(DateTime, default=now(), onupdate=now(), index=True)
# class Modules(BaseModel):
# class Modules(BaseModel):
# """Modules."""
# __tablename__ = 'modules'
#
# id = Column(Integer, primary_key=True)
# name = Column(String(80), unique=True, nullable=False, index=True)
# abbreviation = Column(String(20), unique=True, nullable=False,
# index=True)
# description = Column(String(500), nullable=False)
# active = Column(Boolean, nullable=False, index=True)
#
# def __init__(self, name=None, abbreviation=None, description=None,
# active=None):
# self.name = name
# self.abbreviation = abbreviation
# self.description = description
# self.active = active
#
# def __repr__(self):
# return '<module name: {}>'.format(self.id)
# --- Testing --- #
# def add_module(_db, data): # TODO: Make this a method of Module.
# """Add module."""
# try:
# mod = Modules(name=data['name'], abbreviation='', description='',
# active=True)
# _db.session.add(mod)
# _db.session.commit()
# except ProgrammingError as err:
# msg = str(
# err) + '\n\nAn error occurred and the DB session was rolled' \
# ' back. Please see stack trace for more information.'
# raise_database_exception(_db, msg)
# except IntegrityError as err:
# msg = str(
# err) + '\n\nAn error occurred and the DB session was rolled' \
# ' back. Please see stack trace for more information.'
# raise_database_exception(_db, msg)
# OpenModel -------------------------------------------------------------------
class OpenModel:
"""Open Model to SqlAlchemy"""
PLANNED_SUPPORTED_FILE_TYPES = ('csv', 'json', 'xml')
SUPPORTED_DATA_FORMATS = ('yaml',)
# from pathlib import PurePath # - Disabled: Not supported in Python 2.
SUPPORTED_DATA_TYPES = (str, bytes, int) # - Disabled: pathlib.PurePath
MODEL_ROOT_KEYS = \
('baller', 'config', 'info', 'models', 'abstractModels', 'relations')
BRAND_NAME = 'OpenModel'
def __init__(self, source_data=None):
"""Initialize.
Arguments:
source_data (dict): Source data in Python dictionary serialization.
"""
self.data, self.source_data, self.source, self.source_file_data, \
self.source_file_path, self.dict, self.yaml, self.sqlalchemy, \
self.open_model_version, self.config, self.info, self.models, \
self.abstract_models, self.relations, self.custom_fields\
= iter_repeat(None, 15)
if source_data:
self.load(source_data)
def __iter__(self):
"""Returns dict with data as dict set as value to key 'open_model'."""
yield 'open_model', self.dict
def __str__(self):
"""Returns stringified dictionary."""
return str(self.dict)
def get_data(self, data_format):
"""Get data in requested format.
Returns:
Data in the format requested.
"""
        return self.yaml if data_format == 'yaml' \
            else self.dict if data_format in ('dict', 'dictionary') \
            else self.sqlalchemy if data_format == 'sqlalchemy' \
            else self.yaml
def load(self, source_data):
"""Load source data.
The pathlib.PurePath class represents os.PathLike.
Raises:
UnexpectedDataTypeException
"""
err_msg = 'UnexpectedDataTypeException: Unexpected data type.'
if type(source_data) in OpenModel.SUPPORTED_DATA_TYPES:
if isfile(source_data):
self._load_file(file=source_data)
else:
raise UnexpectedDataTypeException(err_msg)
elif type(source_data) is dict:
self._load_serialized(data=source_data)
else:
raise UnexpectedDataTypeException(err_msg)
@staticmethod
def serialize_to_yaml(model):
"""Serialize Python dictionary to YAML string.
# TODO: Set in PrettyPrinter format.
Args:
model (dict): Python dictionary formatted model.
Returns:
str: YAML formatted model.
"""
return yaml_dump_clean(model)
@staticmethod
def serialize_to_sqlalchemy(model): # TODO: Last to create.
"""Serialize Python dictionary to a dictionary of SqlAlchemy objects.
Args:
model (dict): OpenModel format of model.
Returns:
dict: SqlAlchemy format of model.
"""
# from sqlalchemy.ext.declarative import declarative_base
# from sqlalchemy.exc import ProgrammingError, IntegrityError
# from sqlalchemy.sql.functions import func as sqlalchemy_func
# from flask_sqlalchemy import SQLAlchemy
# from sqlalchemy import Column, DateTime, Boolean, Integer, String
from sqlalchemy import Column
# from pmaapi.api.open_model.open_model_py.definitions.error \
# import OpenModelException, UnsupportedFileTypeException, \
# UnexpectedDataTypeException, InvalidSchemaException
# from pmaapi.api.open_model.open_model_py.definitions.abstractions \
# import read_contents, inverse_filter_dict, yaml_load_clean, \
# yaml_dump_clean
# from pmaapi.__main__ import FLASK_APP
# from pmaapi.definitions.error import raise_database_exception
def _det_sqlalchemy_col_type_from_openmodel(om_type):
"""Determine column data type. None values are currently not supported.
Args:
om_type (str): Type as displayed in OpenModel file.
Returns:
(class): The matching SqlAlchemy type class.
"""
return MAPPINGS['OPENMODEL_SQLALCHEMY']['DATA_TYPES'][om_type]
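        # Illustrative lookup (assumption; the real table lives in
        # definitions/constants.py):
        #   MAPPINGS['OPENMODEL_SQLALCHEMY']['DATA_TYPES']['string'] -> String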
def _det_col_type(openmodel_type):
"""Alias: _det_sqlalchemy_col_type_from_openmodel"""
return _det_sqlalchemy_col_type_from_openmodel(openmodel_type)
def _det_col_props(openmodel_props):
"""Determine column type properties."""
if openmodel_props:
if 'size_max' in openmodel_props:
return openmodel_props['size_max']
else:
raise UnimplementedFunctionalityException(
'UnimplementedFunctionalityException: One or more '
'field type properties defined in model specification '
'is not yet supported.')
def _det_col_type_and_props(openmodel_type_def):
"""Determine column type and type properties.
Args:
openmodel_type_def (dict): Type definition.
Returns:
class(params): SqlAlchemy type class with params.
"""
data_type = _det_col_type(openmodel_type_def['name'])
data_props = _det_col_props(openmodel_type_def['props'])
return data_type(data_props)
def _type(openmodel_type_def):
"""Alias: _det_col_type_and_props"""
return _det_col_type_and_props(openmodel_type_def)
def _to_sqlalchemy_classdef_dict(mdl_name, mdl_data):
"""Convert OpenModel model spec to SqlAlchemy.
Any parameter using passed to Column() which uses the function
evaluation 'set_and_true()' will return True if the specified key
is in the model, and its value is set to either TRUE, true, or True
, without quotes.
Args:
mdl_name (str): Name of model as defined in spec.
mdl_data (dict): Python dictionary representation of the model.
Returns:
dict: SqlAlchemy class definition as Python dictionary.
"""
mapping = MAPPINGS['OPENMODEL_SQLALCHEMY']['COLUMN_KEYS']
# noinspection PyCompatibility
return {
**{'__tablename__': mdl_name},
**{field: Column(
_type(fld_data['type']), # (1) Data Type
primary_key=fld_data['key'] == 'PK' or False, # (2) PK
**{kwd: fld_data.pop(kwd, None) for kwd in # (3) Kwargs
[mapping[key] for key in fld_data
if key in mapping]}
) for field, fld_data in mdl_data['fields'].items()}
}
def _render_classes(classes):
"""Render classes."""
# TODO: Use abstract models and create superclasses. Then pass in.
# the 2nd, tuple argument when creating the table classes.
# class BaseModel(db.Model): # TODO: Set in UTC.
# """Base Model."""
# __abstract__ = True
# created_on = Column(DateTime, default=now(), index=True)
# updated_on = Column(DateTime, default=now(), onupdate=now(),
# index=True)
# TODO Do this for: sqlalchemy_class_defs. Then test.
# for item in table_classes:
# table_class_def = type(
# 'ExampleTableClass' + str(i), (BaseModel,), item
# )
# # noinspection PyTypeChecker
# self.sqlalchemy.append(table_class_def)
# i += 1
return classes
# Testing
# db = SQLAlchemy(FLASK_APP)
# db.Base = declarative_base()
# Testing
# TODO: Handle data type value mapping, e.g. 'now'.
# now = sqlalchemy_func.now
sqlalchemy_base_representations = {
'abstract_classes': [], # TODO
'uninherited_classes':
[_to_sqlalchemy_classdef_dict(name, defn)
for name, defn in model['models'].items()]
}
# pp = PrettyPrinter(indent=2)
# pp.pprint(table['name'].index)
# Testing
# db2.create_all() # Magically knows that 'tables' has classes.
# db2.session.commit()
# Testing
return _render_classes(sqlalchemy_base_representations)
# TODO: Return a dictionary only. db.<whatever> can be done after.
def _load_file(self, file):
"""Loads file, and runs initialization in Python dictionary format.
Side Effects:
self.__init__: Initializes with source file data.
self.source_file_path, self.source_file_data: Set.
Raises:
UnexpectedException, UnsupportedFileTypeException
"""
file_ext = file.rpartition('.')[-1]
        exc1 = 'UnsupportedFileTypeException: Apologies, but format \'{}\''\
               ' is not yet supported.'.format(file_ext)
        exc2 = 'UnsupportedFileTypeException: Format \'{}\' is not ' \
               'supported.'.format(file_ext)
if file_ext in OpenModel.SUPPORTED_DATA_FORMATS:
data = None
if file_ext == 'yaml':
data = yaml_load_clean(file)
elif file_ext == 'json': # Planned
pass
elif file_ext == 'xml': # Planned
pass
elif file_ext == 'csv': # Planned
pass
self.__init__(source_data=copy(data))
self.source_file_path = str(copy(file))
self.source_file_data = read_contents(file)
elif file_ext in OpenModel.PLANNED_SUPPORTED_FILE_TYPES:
raise UnsupportedFileTypeException(exc1)
else:
raise UnsupportedFileTypeException(exc2)
def _set_meta_attributes(self, model):
"""Set primary model meta-attribute properties.
OpenModel spec specifies that primary model meta-attributes keys reside
in the root of a hierarchical model.
Side Effects:
self.open_model_version, self.config, self.info, self.models,
self.abstract_models, self.relations
Raises:
InvalidSchemaException
"""
try:
self.open_model_version, self.config, self.info, self.models, \
self.abstract_models, self.relations \
= model['baller'], model['config'], model['info'], \
model['models'], model['abstractModels'], model['relations']
except KeyError:
msg = 'InvalidSchemaException: An error occurred while attempting'\
' to read data model. Please checked that root keys conform'\
' to {} standard.'.format(OpenModel.BRAND_NAME)
raise InvalidSchemaException(msg)
def _set_custom_meta_attributes(self, model):
"""Set custom primary meta attributes.
OpenModel spec specifies that primary model meta-attributes keys reside
in the root of a hierarchical model. Custom attributes are any
attributes in the model root which are not specified by the spec.
Side Effects:
self.custom_fields
"""
self.custom_fields = {
'customFields': inverse_filter_dict(dictionary=model,
keys=OpenModel.MODEL_ROOT_KEYS)
}
def _set_custom_fields(self, model):
"""Alias: _set_custom_meta_attributes."""
self._set_custom_meta_attributes(model)
def _set_dict_format_attribute_aliases(self, data):
"""Set dictionary format instance attributes.
Set dictionary format instance attribute self.dict and other instance
attribute aliases for that attribute.
Side Effects:
self.source_data, self.source, self.data, self.dict
"""
self.source_data, self.source, self.data, self.dict, \
= iter_repeat(copy(data), 4)
def _load_serialized(self, data):
"""Loads seralized data into instance.
Side Effects:
(1) Sets dictionary format instance attribute self.dict and other
instance attribute aliases for that attribute, (2) Sets custom
specified by the data but not understood by the OpenModel spec, (3)
Serializes Python dictionary to YAML string, (4) Serializes Python
dictionary to a dictionary of SqlAlchemy objects.
Raises:
UnexpectedException, UnsupportedFileTypeException
"""
self._set_dict_format_attribute_aliases(copy(data)) # 1
self._set_custom_fields(copy(data)) # 2
self.yaml = self.serialize_to_yaml(copy(data)) # 3
self.sqlalchemy = self.serialize_to_sqlalchemy(copy(data)) # 4
if __name__ == '__main__': # Testing
# TODO: Implement CLI and use file path as follows.
# /Users/joeflack4/projects/pma-api/pmaapi/model/model.yaml
try:
# OpenModel Testing
mdl = OpenModel()
mdl.load(MODEL_FILE)
pp2 = PrettyPrinter(indent=0)
pp2.pprint(mdl.sqlalchemy)
# print(mdl.yaml)
# pp2.pprint(mdl.custom_fields)
# pp2.pprint(mdl.dict['models']['indicators'])
# SqlAlchemy Testing
# example = Modules()
# print(dir(example))
# print(example.created_on)
        # Class Generation Testing
# class_gen = AllMyFields({'a': 1, 'b': 2})
# print(class_gen.a)
except OpenModelException as exc:
print(exc, file=stderr)
# pass
```
#### File: pma-api-open-model/pmaapi/config.py
```python
import os
import string
import random
PACKAGE_ROOT = os.path.dirname(__file__)
MODEL_FILE = PACKAGE_ROOT + '/model/model.yaml'
class Config(object):
"""Flask app configuration super class."""
DEBUG = False
TESTING = False
CSRF_ENABLED = True
WTF_CSRF_ENABLED = True
SQLALCHEMY_TRACK_MODIFICATIONS = False
try:
SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL']
except KeyError:
SQLALCHEMY_DATABASE_URI = 'sqlite:///'+PACKAGE_ROOT+'/../pmaapi.db'
class ProductionConfig(Config):
"""Flask app production configuration."""
DEBUG = False
class StagingConfig(Config):
"""Flask app staging configuration."""
DEVELOPMENT = True
DEBUG = True
class DevelopmentConfig(Config):
"""Flask app development configuration."""
DEVELOPMENT = True
DEBUG = True
class TestConfig(Config):
"""Flask app test configuration."""
TESTING = True
DEBUG = True
WTF_CSRF_ENABLED = False
def sk_generator(size=24, chars=string.ascii_letters + string.digits):
"""Secret key generator."""
return ''.join(random.choice(chars) for _ in range(size))
```
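A minimal sketch of how these config classes might be selected in a Flask app factory. The factory name `make_app` and the env-name-to-class mapping are assumptions for illustration, not part of the repo:
```python
import os

from flask import Flask

from pmaapi.config import DevelopmentConfig, ProductionConfig, TestConfig

# Hypothetical mapping; env names here are illustrative.
CONFIG_BY_ENV = {
    'production': ProductionConfig,
    'development': DevelopmentConfig,
    'test': TestConfig,
}


def make_app() -> Flask:
    """Build an app configured via the ENV_NAME environment variable."""
    app = Flask(__name__)
    app.config.from_object(CONFIG_BY_ENV[os.getenv('ENV_NAME', 'development')])
    return app
```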
#### File: pma-api-open-model/test/test.py
```python
import doctest
import unittest
from argparse import ArgumentParser
import os
TEST_PACKAGES = ['pmaapi', 'test']
TEST_DIR = os.path.dirname(os.path.realpath(__file__)) + '/'
class ResourceTest(unittest.TestCase):
"""Unit tests for the Resource class."""
def test_init(self):
"""Test initialization.."""
pass
if __name__ == '__main__':
def get_args():
"""CLI for test runner."""
desc = 'Run tests for package.'
parser = ArgumentParser(description=desc)
doctests_only_help = 'Specifies whether to run doctests only, as ' \
'opposed to doctests with unittests. Default is' \
' False.'
parser.add_argument('-d', '--doctests-only', action='store_true',
help=doctests_only_help)
args = parser.parse_args()
return args
def get_test_modules(test_package):
"""Get files to test.
Args:
test_package (str): The package containing modules to test.
Returns:
list: List of all python modules in package.
"""
# TODO: Make dynamic. Maybe make TEST_PACKAGES a dict (mod name + path)
if test_package == 'pmaapi':
root_dir = TEST_DIR + "../" + "pmaapi"
elif test_package == 'test':
root_dir = TEST_DIR
else:
raise Exception('Test package not found.')
test_modules = []
for dummy, dummy, filenames in os.walk(root_dir):
for file in filenames:
if file.endswith('.py'):
file = file[:-3]
test_module = test_package + '.' + file
test_modules.append(test_module)
return test_modules
def get_test_suite(test_packages):
"""Get suite to test.
Returns:
TestSuite: Suite to test.
"""
suite = unittest.TestSuite()
for package in test_packages:
pkg_modules = get_test_modules(test_package=package)
for pkg_module in pkg_modules:
suite.addTest(doctest.DocTestSuite(pkg_module))
return suite
PARAMS = get_args()
TEST_SUITE = get_test_suite(TEST_PACKAGES)
unittest.TextTestRunner(verbosity=1).run(TEST_SUITE)
if PARAMS.doctests_only: # TODO: For dev testing needs. Refactor.
pass
# TEST_SUITE = get_test_suite()
# unittest.TextTestRunner(verbosity=1).run(TEST_SUITE)
else:
# unittest.main()
pass
``` |
{
"source": "joeflack4/pma-api",
"score": 2
} |
#### File: pma_api/manage/db_mgmt.py
```python
import csv
import glob
import logging
import ntpath
import os
import subprocess
from collections import OrderedDict
from copy import copy
from datetime import datetime
from typing import List, Dict, Union, Iterable
import boto3
import xlrd
from xlrd.sheet import Sheet
from xlrd.book import Book
import sqlalchemy
from flask import Flask, current_app
from flask_user import UserManager
from sqlalchemy import Table
# noinspection PyProtectedMember
from sqlalchemy.engine import Connection
from sqlalchemy.exc import OperationalError, IntegrityError, DatabaseError
from pma_api import create_app
from pma_api.config import DATA_DIR, BACKUPS_DIR, Config, \
IGNORE_SHEET_PREFIX, DATA_SHEET_PREFIX, AWS_S3_STORAGE_BUCKETNAME as \
BUCKET, S3_BACKUPS_DIR_PATH, S3_DATASETS_DIR_PATH, S3_UI_DATA_DIR_PATH, \
UI_DATA_DIR, DATASETS_DIR, API_DATASET_FILE_PREFIX as API_PREFIX, \
UI_DATASET_FILE_PREFIX as UI_PREFIX, HEROKU_INSTANCE_APP_NAME as APP_NAME,\
FILE_LIST_IGNORES, TEMP_DIR
from pma_api.error import PmaApiDbInteractionError, PmaApiException
from pma_api.models import db, Cache, Characteristic, CharacteristicGroup, \
Task, Country, Data, EnglishString, Geography, Indicator, ApiMetadata, \
Survey, Translation, Dataset, User
from pma_api.utils import most_common
from pma_api.manage.utils import log_process_stderr, run_proc, \
_get_bin_path_from_ref_config
# Sorted in the order in which they should be executed
ORDERED_METADATA_SHEET_MODEL_MAP = OrderedDict({ # str,db.Model
'geography': Geography,
'country': Country,
'survey': Survey,
'char_grp': CharacteristicGroup,
'char': Characteristic,
'indicator': Indicator
})
# For lookup
DATASET_WB_SHEET_MODEL_MAP = {
**ORDERED_METADATA_SHEET_MODEL_MAP,
**{'data': Data},
**{'translation': Translation}}
root_connection_info = {
'hostname': Config.DB_ROOT_HOST,
'port': Config.DB_ROOT_PORT,
'database': Config.DB_ROOT_NAME,
'username': Config.DB_ROOT_USER,
'password': Config.DB_ROOT_PASS}
db_connection_info = {
'hostname': Config.DB_HOST,
'port': Config.DB_PORT,
'database': Config.DB_NAME,
'username': Config.DB_USER,
'password': Config.DB_PASS}
connection_error = 'Was not able to connect to the database. Please '\
'check that it is running, and your database URL / credentials are ' \
'correct.\n\n' \
'Original error:\n' \
'{}'
caching_error = 'Warning: Error occurred while trying to cache data after ' \
'import. Is the server running?\n' \
'- Side effects: The first time any cache-relevant routes ' \
'(e.g. datalab/init) are loaded, they will load slower. ' \
'However, at that time, an attempt will be made to cache ' \
'again.\n' \
'- Original error:\n' \
'{}'
db_mgmt_err = 'An error occurred during db management procedure. This is ' \
'probably due to the database being currently accessed. The connection ' \
    'could be, for example, a db browsing client such as psql, pgadmin, ' \
    'etc. These or any other active connections must be closed before ' \
    'proceeding'\
'. If closing such clients still does not solve the issue, try shutting ' \
'down the server as well.'
db_not_exist_tell = 'database "{}" does not exist'\
.format(os.getenv('DB_NAME', 'pmaapi'))
env_access_err_tell = "'NoneType' object has no attribute 'drivername'"
env_access_err_msg = \
'An error occurred while interacting with the database. This can often ' \
'happen when db related environmental variables (e.g. DATABASE_URL) are ' \
'not set or cannot be accessed. Please check that they are set and ' \
'being loaded correctly.\n\n' \
'- Original error:\n{}'
def aws_s3(func):
"""AWS S3 Wrapper
This wrapper is not to be called directly, but should be used in the
following way:
@aws_s3
def my_function_that_uses_s3(...):
...
This wrapper provides the following functions:
- Offers guidance in the event of connection issues
- Prints out status update before calling function
- Suppresses buggy, unfixed resource warnings from boto3 S3 client
Args:
func (function): This will be the function wrapped, e.g.
'my_function_that_uses_s3' in the above example.
Returns:
function: The wrapped function
"""
msg = '\nAccess was denied when attempting to interact with AWS S3. ' \
'Please check the following: ' \
'\n1. That you have set the following environment variables: ' \
'AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY' \
'\n2. The credentials specified in the environment variables are ' \
'correct' \
'\n3. The variables are able to be accessed. If, for example, ' \
          'you are using an IDE or tester, make sure that it has access ' \
          'to the aforementioned environmental variables.'
from botocore.exceptions import ClientError
def wrap(*args, **kwargs):
"""Wrapped function"""
verbose = kwargs and 'verbose' in kwargs and kwargs['verbose']
if verbose:
print('Executing: ' + func.__name__)
wrapper_kwargs_removed = \
{k: v for k, v in kwargs.items() if k != 'silent'}
try:
from test import SuppressStdoutStderr
if verbose:
return func(*args, **wrapper_kwargs_removed)
else:
# TODO: suppression works when running tests in Pycharm,
# but running `make backup` from terminal hangs
# S3 has unfixed resource warnings
# with SuppressStdoutStderr():
# return func(*args, **wrapper_kwargs_removed)
return func(*args, **wrapper_kwargs_removed)
except ClientError as err:
if 'Access Denied' in str(err) or 'AccessDenied' in str(err):
raise PmaApiDbInteractionError(msg)
else:
raise err
return wrap
def get_data_file_by_glob(pattern):
"""Get file by glob.
Args:
pattern (str): A glob pattern.
Returns:
str: Path/to/first_file_found
Raises:
PmaApiException: If more was found than expected
"""
found: List = glob.glob(pattern)
if len(found) > 1:
raise PmaApiException('Expected only 1 file to be found, but '
'discovered the following: \n' + str(found))
return found[0] if found else ''
def get_api_data():
"""Get API data."""
pattern: str = API_PREFIX + '*.xlsx'
return get_data_file_by_glob(os.path.join(DATA_DIR, pattern))
def get_ui_data():
"""Get API data."""
pattern: str = UI_PREFIX + '*.xlsx'
return get_data_file_by_glob(os.path.join(DATA_DIR, pattern))
def make_shell_context():
"""Make shell context, for the ability to manipulate these db_models/tables
from the command line shell.
Returns:
dict: Context for application manager shell.
"""
return dict(
app=create_app(os.getenv('ENV_NAME', 'default')), db=db,
Country=Country, EnglishString=EnglishString, Translation=Translation,
Survey=Survey, Indicator=Indicator, Data=Data, Task=Task, User=User,
Characteristic=Characteristic, Cache=Cache, ApiMetadata=ApiMetadata,
CharacteristicGroup=CharacteristicGroup, Dataset=Dataset,
Geography=Geography)
def init_from_source(path, model):
"""Initialize DB table data from csv file.
Initialize table data from csv source data files associated with the
corresponding data model.
Args:
path (str): Path to csv data file.
model (class): SqlAlchemy model class.
"""
with open(path, newline='', encoding='utf-8') as csvfile:
csvreader = csv.DictReader(csvfile)
for row in csvreader:
record = model(**row)
db.session.add(record)
db.session.commit()
def remove_stata_undefined_token_from_wb(wb: xlrd.book.Book):
"""Remove all instances of Stata undefined token '.' from wb
Args:
wb (xlrd.book.Book): workbook object
Returns:
xlrd.book.Book: Formatted workbook
"""
numeric_types = (int, float, complex)
    # Use a tuple rather than a generator: membership is tested repeatedly
    # below, and a generator would be exhausted after the first full scan.
    field_types_to_format = tuple(x.__name__ for x in numeric_types)
book = copy(wb)
sheet_names = [x for x in book.sheet_names()
if not str(x).startswith(IGNORE_SHEET_PREFIX)]
data_sheet_names = [x for x in sheet_names
if str(x).startswith(DATA_SHEET_PREFIX)]
sheets = [book.sheet_by_name(x) for x in data_sheet_names]
# For de-bug purposes
# noinspection PyUnusedLocal
none_in_field_vals = False
# noinspection PyUnusedLocal
empty_in_field_vals = False
for sheet in sheets:
for i in range(sheet.ncols):
col = sheet.col(i)
# field_name = col[0].value # For de-bug
field_vals = [x.value for x in col[1:] if x.value]
sample_size = 50
sample = field_vals[:sample_size]
sample_types = [type(x).__name__ for x in sample]
if len(sample_types) > 0:
most_common_type = most_common(sample_types)
else:
most_common_type = None
field_data_type = most_common_type
# modify wb.sheet.col
if field_data_type in field_types_to_format:
for cell in col:
if cell.value == '.':
# TODO: Will this do the trick?
cell.value = None
# For de-bug purposes
if None in field_vals:
# noinspection PyUnusedLocal
none_in_field_vals = True
if '' in field_vals:
# noinspection PyUnusedLocal
empty_in_field_vals = True
return book
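# Illustrative effect of the function above: in a data sheet column sampled
# as mostly floats, e.g. [12.5, 13.1, '.', 14.0], the Stata missing-value
# token '.' is replaced with None so numeric columns are not polluted with
# stray strings on import.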
def commit_from_sheet(ws: Sheet, model: db.Model, **kwargs):
"""Initialize DB table data from XLRD Worksheet.
Initialize table data from source data associated with corresponding
data model.
Args:
ws (xlrd.sheet.Sheet): XLRD worksheet object.
model (class): SqlAlchemy model class.
"""
survey, indicator, characteristic = '', '', ''
if model == Data:
survey = kwargs['survey']
indicator = kwargs['indicator']
characteristic = kwargs['characteristic']
header = None
for i, row in enumerate(ws.get_rows()):
row = [r.value for r in row]
if i == 0:
header = row
else:
row_dict = {k: v for k, v in zip(header, row)}
if model == Data:
survey_code = row_dict.get('survey_code')
survey_id = survey.get(survey_code)
row_dict['survey_id'] = survey_id
indicator_code = row_dict.get('indicator_code')
indicator_id = indicator.get(indicator_code)
row_dict['indicator_id'] = indicator_id
char1_code = row_dict.get('char1_code')
char1_id = characteristic.get(char1_code)
row_dict['char1_id'] = char1_id
char2_code = row_dict.get('char2_code')
char2_id = characteristic.get(char2_code)
row_dict['char2_id'] = char2_id
try:
record = model(**row_dict)
except (DatabaseError, ValueError, AttributeError, KeyError,
IntegrityError, Exception) as err:
msg = 'Error when processing data import.\n' \
'- Worksheet name: {}\n' \
'- Row number: {}\n' \
'- Cell values: {}\n\n' \
'- Original Error:\n' + \
type(err).__name__ + ': ' + str(err)
msg = msg.format(ws.name, i + 1, row)
logging.error(msg)
raise PmaApiDbInteractionError(msg)
db.session.add(record)
# TODO: After adding FunctionalTask class, is this necessary?
# TODO 2019.04.08-jef: This is really not ideal. This exists here because
# every model creates new EnglishString records, and given how we
# currently create and generate unique codes, it appears we /may/ need to
# commit both the original record and the english string record. So,
# for such models, everything will have already been committed, hence
# why we currently run this additional 'commit_needed' step/check.
# sheet_rows: int = ws.nrows - 1
# db_rows: int = len(model.query.all())
# commit_needed: bool = db_rows < sheet_rows
# if commit_needed:
# db.session.commit()
def format_book(wb: Book) -> Book:
"""Format workbook by making edits to prevent edge case errors
Args:
wb (xlrd.book.Book): workbook object
Returns:
xlrd.book.Book: Formatted workbook
"""
book: Book = remove_stata_undefined_token_from_wb(wb)
return book
def register_administrative_metadata(wb_path):
"""Create metadata for Excel Workbook files imported into the DB.
Args:
wb_path (str) Path to Excel Workbook.
"""
record = ApiMetadata(wb_path)
db.session.add(record)
db.session.commit()
def drop_tables(tables: Iterable[Table] = None):
"""Drop database tables
Side effects
- Drops database tables
Args:
tables list(db.Model): Tables to drop
Raises:
OperationalError: If encounters such an error that is not 'database
does not exist'
"""
try:
if tables:
db.metadata.drop_all(db.engine, tables=tables)
else:
db.drop_all()
except OperationalError as err:
if db_not_exist_tell not in str(err):
raise err
create_db()
if tables:
db.metadata.drop_all(db.engine, tables=tables)
else:
db.drop_all()
def get_datasheet_names(wb: Book) -> List[str]:
"""Gets data sheet names from a workbook
Args:
wb (Book): Pre-loaded XLRD Workbook obj
Returns:
list(str): List of datasheet names
"""
data_sheets: List[xlrd.sheet] = \
[x for x in wb.sheets() if x.name.startswith('data')]
datasheet_names: List[str] = [x.name for x in data_sheets]
return datasheet_names
def is_db_empty(_app: Flask = current_app) -> bool:
"""Is database empty or not?
Empty is defined here as a DB that has been created, but has no tables. As
a proxy for this ideal way of telling if DB is empty, this function
currently just checks if there is any data in the 'data' table.
Args:
_app (Flask): Flask application for context
Returns:
bool: True if empty, else False
"""
with _app.app_context():
data_present: Data = Data.query.first()
empty: bool = not data_present
return empty
def new_backup_path(_os: str = '', _env: str = '', ext: str = 'dump') -> str:
"""Backup default path
Args:
_os (str): Operating system backup is being created on. Useful to add
this if backing up remotely. Otherwise, backup name will
reflect the OS of current system.
_env (str): Environment name where backup is being created. Useful to
add if backing up remotely. Otherwise, backup name will
reflect the environment of current system.
ext (str): File extension to use
Returns:
str: Default path of backup file at specific date and time
"""
import platform
filename_base = 'pma-api-backup'
datetime_str: \
str = str(datetime.now()).replace('/', '-').replace(':', '-')\
.replace(' ', '_')
if not _os:
op_sys: str = \
'MacOS' if platform.system() == 'Darwin' else platform.system()
else:
op_sys: str = _os
if not _env:
env: str = os.getenv('ENV_NAME', 'development')
else:
env: str = _env
filename: str = '_'.join(
[filename_base, op_sys, env, datetime_str]
) + '.' + ext
return os.path.join(BACKUPS_DIR, filename)
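# Example (illustrative) of a path generated by new_backup_path() on a Mac
# development machine:
#   <BACKUPS_DIR>/pma-api-backup_MacOS_development_2019-04-08_12-34-56.123456.dump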
def grant_full_permissions_to_file(path):
"""Grant access to file
Raises:
PmaApiException: If errors during process
"""
cmd: List[str] = 'chmod 600 {}'\
.format(path)\
.split(' ')
output: Dict = run_proc(cmd)
errors: str = output['stderr']
if errors:
raise PmaApiException(errors)
def update_pgpass(creds: str, path: str = os.path.expanduser('~/.pgpass')):
"""Update pgpass file with credentials
Side effects:
- Updates file
- Creates file if does not exist
Args:
creds (str): Url pattern string containing connection credentials
path (str): Path to pgpass file
"""
    cred_line: str = creds if creds.endswith('\n') else creds + '\n'
    contents: str = ''
    if os.path.exists(path):
        with open(path, 'r') as file:
            contents = file.read()
    exists: bool = creds in contents
    # If the file has contents without a trailing newline, prepend one so
    # that the new credential entry starts on its own line.
    if contents and not contents.endswith('\n'):
        cred_line = '\n' + cred_line
    if not exists:
        with open(path, 'a+') as file:
            file.write(cred_line)
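# Illustrative .pgpass entry produced from db_connection_info (credentials
# are made up):
#   localhost:5432:pmaapi:pmaapi_user:secret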
def backup_local_using_heroku_postgres(
path: str = new_backup_path(),
app_name: str = APP_NAME,
silent: bool = False) -> str:
"""Backup using Heroku PostgreSQL DB using Heroku CLI
Args:
path (str): Path of file to save
app_name (str): Name of app as recognized by Heroku
silent (bool): Don't print updates?
Side effects:
- Runs command: `heroku pg:backups:capture`
- Runs command: `heroku pg:backups:download`
- Downloads to file system
- Makes directory (if not exist)
Raises:
PmaApiDbInteractionError: If errors during process
Returns:
str: path to backup file saved
"""
target_dir = os.path.dirname(path) if path else BACKUPS_DIR
if not os.path.exists(target_dir):
os.mkdir(target_dir)
cmd_str_base: str = \
'heroku pg:backups:capture --app={app}'
cmd_str: str = cmd_str_base.format(app=app_name)
run_proc(cmd=cmd_str, raises=False, prints=not silent)
cmd_str_base2: str = \
'heroku pg:backups:download --app={app} --output={output}'
cmd_str2: str = cmd_str_base2.format(app=app_name, output=path)
run_proc(cmd_str2, raises=False, prints=not silent)
return path
def backup_using_pgdump(path: str = new_backup_path()) -> str:
"""Backup using pg_dump
Args:
path (str): Path of file to save
Side effects:
- Grants full permissions to .pgpass file
- Reads and writes to .pgpass file
- Runs pg_dump process, storing result to file system
Raises:
PmaApiDbInteractionError: If errors during process
Returns:
str: path to backup file saved
"""
pgpass_url_base = '{hostname}:{port}:{database}:{username}:{password}'
pgpass_url: str = pgpass_url_base.format(**db_connection_info)
pgpass_path = os.path.expanduser('~/.pgpass')
    # Write credentials first (creates the file if absent), then restrict
    # permissions as pg_dump requires of the .pgpass file.
    update_pgpass(path=pgpass_path, creds=pgpass_url)
    grant_full_permissions_to_file(pgpass_path)
cmd_base: str = \
'pg_dump --format=custom --host={hostname} --port={port} ' \
'--username={username} --dbname={database} --file {path}'
cmd: str = cmd_base.format(**db_connection_info, path=path)
output: Dict = run_proc(cmd)
errors: str = output['stderr']
if errors:
with open(os.path.expanduser('~/.pgpass'), 'r') as file:
pgpass_contents: str = file.read()
msg = '\n' + errors + \
'Offending command: ' + cmd + \
'Pgpass contents: ' + pgpass_contents
raise PmaApiDbInteractionError(msg)
return path
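# Example of the pg_dump command assembled above (illustrative values):
#   pg_dump --format=custom --host=localhost --port=5432 \
#       --username=pmaapi --dbname=pmaapi --file /path/to/backup.dump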
def backup_local(path: str = '', silent: bool = False) -> str:
"""Backup database locally
Args:
path (str): Path to save file
silent (bool): Don't print updates?
Side effects:
- Saves file at path
Raises:
PmaApiDbInteractionError: If DB exists and any errors during backup
Returns:
str: Path to backup file saved
"""
func = backup_local_using_heroku_postgres
target_dir = os.path.dirname(path) if path else BACKUPS_DIR
if not os.path.exists(target_dir):
os.mkdir(target_dir)
try:
if os.getenv('ENV_NAME') == 'development':
saved_path: str = backup_using_pgdump(path) if path \
else backup_using_pgdump()
else:
saved_path: str = func(path=path, silent=silent) if path \
else func(silent=silent)
return saved_path
except PmaApiDbInteractionError as err:
if db_not_exist_tell not in str(err):
raise err
@aws_s3
def store_file_on_s3(path: str, storage_dir: str = ''):
"""Given path to file on local file system, upload file to AWS S3
Prerequisites:
Environmental variable setup: https://boto3.amazonaws.com/v1/
documentation/api/latest/guide/quickstart.html#configuration
Side effects:
- Uploads to cloud
Args:
path (str): Path to local file
storage_dir (str): Subdirectory path where file should be stored
Returns:
str: File name of uploaded file
"""
local_backup_first = False if os.path.exists(path) else True
filename = ntpath.basename(path)
s3 = boto3.resource(
's3',
aws_access_key_id=os.getenv('AWS_ACCESS_KEY_ID'),
aws_secret_access_key=os.getenv('AWS_SECRET_ACCESS_KEY'))
if local_backup_first:
backup_local(path)
# Datasets only: This only applies to datasets, so might want refactor.
# noinspection PyBroadException
try:
metadata: Dict[str, str] = {}
d = Dataset(path)
metadata['dataset_display_name']: str = d.dataset_display_name
metadata['version_number']: str = str(d.version_number)
metadata['dataset_type']: str = d.dataset_type
except Exception:
metadata: Dict[str, str] = {}
# TODO Troubleshoot slow upload: https://github.com/boto/boto3/issues/409
# Until fixed, print these statements.
# Experiments (seconds): 62, 61, 104, 68, 59, 58, 0, 65
msg1 = 'Backing up to cloud: {}'.format(filename) + \
'\nThis normally takes seconds, but due to intermittent issues ' \
'with Amazon Web Services S3 file storage service, this has known '\
'to occasionally take between 1-2 minutes.'
msg2 = 'Backup of file complete. Seconds elapsed: {}'
with open(path, 'rb') as f:
filepath = storage_dir + filename
print(msg1)
t1 = datetime.now()
s3.Bucket(BUCKET).put_object(
Key=filepath,
Metadata=metadata,
Body=f)
t2 = datetime.now()
elapsed_seconds: int = int((t2 - t1).total_seconds())
print(msg2.format(elapsed_seconds))
if local_backup_first:
os.remove(path)
return filename
def backup_ui_data(path: str = get_ui_data()) -> str:
"""Given path to file on local file system, push file to AWS S3
Args:
path (str): Path to local file
Returns:
str: File name of uploaded file
"""
filename: str = store_file_on_s3(path=path,
storage_dir=S3_UI_DATA_DIR_PATH)
return filename
def backup_datasets(path: str = get_api_data()) -> str:
"""Given path to file on local file system, push file to AWS S3
Args:
path (str): Path to local file
Returns:
str: File name of uploaded file
"""
filename: str = store_file_on_s3(path=path,
storage_dir=S3_DATASETS_DIR_PATH)
return filename
def backup_source_files():
"""Backup ui data and datasets"""
backup_ui_data()
backup_datasets()
def backup_db_cloud(path_or_filename: str = '', silent: bool = False):
"""Backs up database to the cloud
If path_or_filename is a path, uploads from already stored backup at path.
Else if it is a path_or_filename, creates new backup and then uploads that.
Args:
path_or_filename (str): Either path to a backup file, or file name. If
file name, will restore from local backup if file exists in default
backups directory, else will restore from the cloud.
silent (bool): Don't print updates?
Side effects:
- backup_local()
- backup_to_s3()
"""
if not path_or_filename:
path = new_backup_path()
else:
        pth = os.path.split(path_or_filename)
        # No directory component means we were given a bare filename
        is_filename = not pth[0]
path = path_or_filename if not is_filename \
else os.path.join(BACKUPS_DIR, path_or_filename)
local_backup_first = False if os.path.exists(path) else True
if local_backup_first:
backup_local(path=path, silent=silent)
filename: str = \
store_file_on_s3(path=path, storage_dir=S3_BACKUPS_DIR_PATH)
if local_backup_first:
os.remove(path)
return filename
def backup_db(path: str = ''):
"""Backup database locally and to the cloud
Args:
path (str): Path to save file
Side effects:
- backup_local()
- backup_cloud()
Returns:
str: Path saved locally
"""
saved_path: str = backup_local(path)
backup_db_cloud(saved_path)
return saved_path
def s3_signed_url(url: str, sleep: int = 1) -> str:
"""From an unsigned AWS S3 object URL, generates and returns signed one.
Args:
url (str): Unsigned AWS S3 object URL
sleep (int): Amount of time, in seconds, to sleep after creating URL.
Useful for combining with another operation which will use generated
URL.
Returns:
str: Signed AWS S3 URL for object
"""
import time
bucket, key = url.replace('https://', '').split('.s3.amazonaws.com/')
s3 = boto3.client('s3')
signed_url: str = s3.generate_presigned_url(
ClientMethod='get_object',
ExpiresIn=7 * 24 * 60 * 60, # 7 days; maximum
Params={
'Bucket': bucket,
'Key': key
}
)
time.sleep(sleep)
return signed_url
def restore_using_heroku_postgres(
s3_url: str = '',
s3_url_is_signed: bool = False,
app_name: str = APP_NAME,
silent: bool = False,
ok_tells: tuple = ('Restoring... done',)):
"""Restore Heroku PostgreSQL DB using Heroku CLI
Args:
s3_url (str): AWS S3 unsigned object url. If signed, should pass
's3_url_is_signed' param as True.
s3_url_is_signed (bool): Is this a S3 signed url? If not, will attempt
to sign before doing restore.
app_name (str): Name of app as recognized by Heroku
silent (bool): Don't print updates?
ok_tells (tuple(str)): A list of strings to look for in the command
result output. If any given 'tell' strings are in the output, we will
consider the result to be ok. It is important to note that if using a
different version of the Heroku CLI, it is possible that the output
will appear to be different. If so, try to find another 'ok tell'
inside the output, and add it to the list.
Side effects:
- Signs url if needed
- Restores database
- Drops any tables and other database objects before recreating them
"""
signed_url: str = s3_url if s3_url_is_signed else s3_signed_url(s3_url)
cmd_str_base: str = \
'heroku pg:backups:restore "{s3_url}" DATABASE_URL ' \
'--confirm {app} --app {app}'
cmd_str: str = cmd_str_base.format(
s3_url=signed_url,
app=app_name)
output: Dict[str, str] = run_proc(
cmd=cmd_str,
prints=not silent,
raises=False)
possible_err = output['stderr']
apparent_success = not possible_err or \
any(x in possible_err for x in ok_tells)
if not apparent_success:
msg = '\n' + possible_err + \
'Offending command: ' + str(cmd_str)
raise PmaApiDbInteractionError(msg)
def restore_using_pgrestore(
path: str, attempt: int = 1, dropdb: bool = False,
silent: bool = False):
"""Restore postgres datagbase using pg_restore
Args:
path (str): Path of file to restore
attempt (int): Attempt number
dropdb (bool): Drop database in process?
silent (bool): Don't print updates?
Side effects:
- Restores database
- Drops database (if dropdb)
"""
system_bin_paths: List[str] = \
['pg_restore', '/usr/local/bin/pg_restore']
system_bin_path_registered: str = \
_get_bin_path_from_ref_config(bin_name='pg_restore', system=True)
if system_bin_path_registered not in system_bin_paths:
system_bin_paths.append(system_bin_path_registered)
project_bin_path: str = \
_get_bin_path_from_ref_config(bin_name='pg_restore', project=True)
pg_restore_paths: List[str] = []
if system_bin_paths:
pg_restore_paths += system_bin_paths
if project_bin_path:
pg_restore_paths.append(project_bin_path)
pg_restore_path: str = pg_restore_paths[attempt-1] if pg_restore_paths \
else ''
max_attempts: int = len(pg_restore_paths)
try:
cmd_base: str = '{pg_restore_path} --exit-on-error --create {drop}' \
'--dbname={database} --host={hostname} --port={port} ' \
'--username={username} {path}'
cmd_str: str = cmd_base.format(
**root_connection_info,
path=path,
pg_restore_path=pg_restore_path,
drop='--clean ' if dropdb else '')
cmd: List[str] = cmd_str.split(' ')
proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
if not silent:
try:
                for line in iter(proc.stdout.readline, ''):
                    # Already str (universal_newlines); don't print bytes repr
                    print(line, end='')
except AttributeError:
print(proc.stdout)
errors = proc.stderr.read()
if errors:
msg = '\n' + errors + \
'Offending command: ' + cmd_str
log_process_stderr(msg, err_msg=db_mgmt_err)
raise PmaApiDbInteractionError(msg)
proc.stderr.close()
proc.stdout.close()
proc.wait()
except FileNotFoundError as err:
if attempt < max_attempts:
restore_using_pgrestore(
path=path, dropdb=dropdb, silent=silent, attempt=attempt+1)
else:
raise err
# TODO 2019.04.08-jef: Remove because requires superuser; can't do on Heroku.
def superuser_dbms_connection(
connection_url: str = os.getenv('DBMS_SUPERUSER_URL')) -> Connection:
"""Connect to database management system as a super user
Returns:
sqlalchemy.engine.Connection: connection object
"""
from sqlalchemy import create_engine
engine_default = create_engine(connection_url)
conn: sqlalchemy.engine.Connection = engine_default.connect()
return conn
# TODO 2019.04.08-jef: Remove because requires superuser; can't do on Heroku.
# def view_db_connections() -> List[dict]:
# """View active connections to a db
#
# Returns:
# list(dict): List of active connections in the form of dictionaries
# containing information about connections
# """
# # noinspection PyProtectedMember
# from sqlalchemy.engine import ResultProxy
#
# try:
# db_name: str = current_app.config.get('DB_NAME', 'pmaapi')
# except RuntimeError as err:
# if 'Working outside of application context' not in str(err):
# raise err
# db_name: str = 'pmaapi'
# statement = "SELECT * FROM pg_stat_activity WHERE datname = '%s'" \
# % db_name
# conn: Connection = superuser_dbms_connection()
#
# conn.execute("COMMIT")
# result: ResultProxy = conn.execute(statement)
# conn.close()
#
# active_connections: List[dict] = []
# for row in result:
# conn_info = {}
# for key_val in row.items():
# conn_info = {**conn_info, **{key_val[0]: key_val[1]}}
# active_connections.append(conn_info)
#
# return active_connections
def create_db(name: str = 'pmaapi', with_schema: bool = True):
"""Create a brand new database
Side effects:
- Creates database
- Creates database tables and schema (if with_schema)
Args:
name (str): Name of database to create
with_schema (bool): Also create all tables and initialize them?
"""
db_name: str = current_app.config.get('DB_NAME', name)
conn: Connection = superuser_dbms_connection()
conn.execute("COMMIT")
conn.execute("CREATE DATABASE %s" % db_name)
conn.close()
if with_schema:
db.create_all()
@aws_s3
def download_file_from_s3(
filename: str, file_dir: str, dl_dir: str = TEMP_DIR) -> str:
"""Download a file from AWS S3
Args:
filename (str): Name of file to restore
file_dir (str): Path to dir where file is stored
dl_dir (str): Path to directory to download file
Returns:
str: Path to downloaded file
"""
from botocore.exceptions import ClientError
# create temp dir if doesn't exist
if not os.path.exists(TEMP_DIR):
os.mkdir(TEMP_DIR)
s3 = boto3.resource('s3')
download_from_path: str = os.path.join(file_dir, filename)
download_to_path: str = os.path.join(dl_dir, filename)
try:
s3.Bucket(BUCKET).download_file(download_from_path, download_to_path)
except ClientError as err:
msg = 'The file requested was not found on AWS S3.\n' \
if err.response['Error']['Code'] == '404' \
else 'An error occurred while trying to download from AWS S3.\n'
msg += '- File requested: ' + download_from_path
raise PmaApiDbInteractionError(msg)
return download_to_path
def dataset_version_to_name(version_number: int) -> str:
"""From dataset version number, get dataset name
Args:
version_number (int): Version number of dataset file
Raises:
FileNotFoundError: If dataset version not found on S3.
Returns:
str: Dataset name
"""
err = 'Dataset version {} not found.'.format(str(version_number))
filename: str = ''
datasets: List[Dict[str, str]] = list_cloud_datasets()
for d in datasets:
if int(d['version_number']) == version_number:
filename: str = d['name']
if not filename:
raise FileNotFoundError(err)
return filename
def download_dataset(version_number: int) -> str:
"""Download dataset file from AWS S3
Args:
version_number (int): Version number of dataset file to download
Returns:
str: Path to downloaded file
"""
filename: str = dataset_version_to_name(version_number)
downloaded_file_path: str = download_file_from_s3(
filename=filename,
file_dir=S3_DATASETS_DIR_PATH,
dl_dir=TEMP_DIR)
return downloaded_file_path
@aws_s3
def list_s3_objects(bucket_name: str = BUCKET) \
        -> List:
"""List objects on AWS S3
Args:
bucket_name (str): Name of bucket holding object storage
Returns:
list[boto3.resources.factory.s3.ObjectSummary]: List of S3 objects
"""
s3 = boto3.resource('s3')
objects = s3.Bucket(bucket_name).objects.all()
# result: List[boto3.resources.factory.s3.ObjectSummary]
result: List = [x for x in objects]
return result
def _format_datetime(dt: datetime) -> str:
"""Format datetime: YYYY-MM-DD #:##am/pm GMT
Args:
dt: Datetime object
Returns:
str: formatted datetime
"""
utc_tell = '+0000'
the_datetime_base: str = dt.strftime('%b %d, %Y %I:%M%p')
utc_timezone_offset: str = dt.strftime('%z')
formatted: str = the_datetime_base if utc_timezone_offset != utc_tell \
else the_datetime_base + ' GMT'
return formatted
def list_filtered_s3_files(
path: str, detailed: bool = True, include_e_tag: bool = True) \
-> Union[List[str], List[Dict[str, str]]]:
"""Gets list of S3 files w/ directories and path prefixes filtered out
Args:
path (str): Path to directory holding files
detailed (bool): Print more than just object/file name? E.g. default
metadata regarding upload date, custom metadata such as file version,
etc.
include_e_tag (bool): Include AWS S3 auto-generated unique e_tag
identifiers? If true, any object dictionaries returned will include
the first 6 characters of the e_tag under the key 'id'.
Returns:
list: Filenames
"""
path2 = path + '/' if not path.endswith('/') else path
path3 = path2[1:] if path2.startswith('/') else path2
# objects: List[boto3.resources.factory.s3.ObjectSummary]
objects: List = list_s3_objects(silent=True)
# filtered: List[boto3.resources.factory.s3.ObjectSummary]
filtered: List = [x for x in objects
if x.key.startswith(path3)
and x.key != path3]
if not detailed:
names_only: List[str] = [x.key for x in filtered]
formatted: List[str] = [os.path.basename(x) for x in names_only]
formatted.sort()
else:
formatted: List[Dict[str, str]] = []
basic_metadata: List[Dict[str, str]] = []
basic_metadata2: List[Dict[str, str]] = []
# Sort
# sorted: List[boto3.resources.factory.s3.ObjectSummary]
sorted_list: List = \
sorted(filtered, key=lambda x: x.last_modified, reverse=True)
# Get basic metadata ascertainable from filename
for x in sorted_list:
obj_dict: Dict[str, str] = {
'key': x.key,
'name': os.path.basename(x.key),
'owner': x.owner['DisplayName'],
'last_modified': _format_datetime(x.last_modified)}
if include_e_tag:
obj_dict['id']: str = \
x.e_tag.replace('"', '').replace("'", "")[0:6]
basic_metadata.append(obj_dict)
# Get metadata explicitly stored in S3 object
for x in basic_metadata:
# client: botocore.client.S3
client = boto3.client('s3')
obj_metadata_request: Dict = \
client.head_object(Bucket=BUCKET, Key=x['key'])
obj_metadata: Dict[str, str] = obj_metadata_request['Metadata']
x2 = {**copy(x), **obj_metadata}
basic_metadata2.append(x2)
# Remove no-longer necessary 'key' key
for x2 in basic_metadata2:
x3 = copy(x2)
x3.pop('key') # no longer need key used to lookup obj in s3
formatted.append(x3)
return formatted
def list_cloud_backups() -> [str]:
"""List available cloud backups
Returns:
list: backups
"""
files = list_filtered_s3_files(S3_BACKUPS_DIR_PATH)
return files
def list_cloud_ui_data() -> [str]:
"""List ui data spec files on AWS S3
Returns:
list: List of files
"""
files = list_filtered_s3_files(S3_UI_DATA_DIR_PATH)
return files
def list_cloud_datasets(detailed: bool = True) \
-> Union[List[str], List[Dict[str, str]]]:
"""List pma api dataset spec files on AWS S3
Args:
detailed (bool): 'detailed' param to pass down to
list_filtered_s3_files function.
Returns:
list: List of file names if not detailed, else list of objects
containing file names and metadata.
"""
# files: List[str] if not detailed else List[Dict[str, str]]
files: List = list_filtered_s3_files(
path=S3_DATASETS_DIR_PATH,
detailed=detailed)
return files
def list_local_files(path: str, name_contains: str = '') -> [str]:
"""List applicable files in directory
Args:
path (str): Path to a directory containing files
name_contains (str): Additional filter to discard any files that do not
contain this string
Returns:
list: files
"""
try:
all_files = os.listdir(path)
except FileNotFoundError:
msg = 'Path \'{}\' does not appear to exist.'.format(path)
raise PmaApiException(msg)
filenames = [x for x in all_files
if x not in FILE_LIST_IGNORES
and not os.path.isdir(os.path.join(path, x))]
filenames = [x for x in filenames if name_contains in x] if name_contains \
else filenames
return filenames
def list_local_datasets(path: str = DATASETS_DIR) -> [str]:
"""List available local datasets
Args:
path (str): Path to datasets directory
Returns:
list: datasets
"""
from_file_system: List[str] = \
list_local_files(path=path, name_contains='api_data')
# TODO: Remove this line after Dataset model removed
# from_db: [str] = [x.dataset_display_name for x in Dataset.query.all()]
from_db: [str] = []
filenames: [str] = list(set(from_file_system + from_db))
return filenames
def list_local_ui_data(path: str = UI_DATA_DIR) -> [str]:
"""List available local backups
Args:
path (str): Path to backups directory
Returns:
list: backups
"""
filenames = list_local_files(path=path, name_contains='ui_data')
return filenames
def list_local_backups(path: str = BACKUPS_DIR) -> [str]:
"""List available local backups
Args:
path (str): Path to backups directory
Returns:
list: backups
"""
filenames = list_local_files(path=path)
return filenames
def list_backups() -> {str: [str]}:
"""List available backups
Returns:
dict: available backups, of form...
            {'local': [...], 'cloud': [...]}
"""
return {
'local': list_local_backups(),
'cloud': list_cloud_backups()
}
def list_ui_data() -> {str: [str]}:
"""List available ui data spec files
Returns:
dict: available backups, of form...
            {'local': [...], 'cloud': [...]}
"""
return {
'local': list_local_ui_data(),
'cloud': list_cloud_ui_data()
}
def list_datasets(detailed: bool = True) \
-> Dict[str, List[Union[str, Dict[str, str]]]]:
"""List available api data spec files
Args:
detailed (bool): 'detailed' param to pass down to list_cloud_datasets()
Returns:
dict: available backups, of form...
            {'local': [...], 'cloud': [...]}
"""
return {
'local': list_local_datasets(),
'cloud': list_cloud_datasets(detailed=detailed)
}
def seed_users():
"""Creates users for fresh instance; currently just a superuser"""
create_superuser()
def create_superuser(
name: str = os.getenv('SUPERUSER_NAME', 'admin'),
pw: str = os.getenv('SUPERUSER_PW')):
"""Create default super user
The current iteration of PMA API only allows for one user, the super user.
During DB initialization, this function is run. If there are no existing,
users it will create the super user, else does nothing.
Side effects:
- Creates a new user in db with max privileges
Args:
name (str): Username
pw (str): Plain text for password
"""
users: List[User] = User.query.all()
if not users:
user_manager: UserManager = current_app.user_manager
# TODO: Am I getting an error here just because of UserMixin / no
# __init__ present in child class?
# noinspection PyArgumentList
user = User(
active=True,
username=name,
            password=user_manager.hash_password(pw),
first_name='PMA API',
last_name='Admin')
db.session.add(user)
db.session.commit()
def restore_db_cloud(filename: str, silent: bool = False):
"""Restore database
Args:
filename (str): Name of file to restore
silent (bool): Don't print updates?
Side effects:
Reverts database to state of backup file
"""
if os.getenv('ENV_NAME') == 'development':
path: str = download_file_from_s3(
filename=filename,
file_dir=S3_BACKUPS_DIR_PATH,
dl_dir=BACKUPS_DIR)
restore_db_local(path=path, silent=silent)
else:
# TODO: make same as test file
dl_path: str = os.path.join(BACKUPS_DIR, filename)
dl_url_base = 'https://{bucket}.s3.amazonaws.com/{key}'
dl_url = dl_url_base.format(bucket=BUCKET, key=dl_path)
restore_using_heroku_postgres(s3_url=dl_url, silent=silent)
def restore_db(path_or_filename: str):
"""Restore database
Args:
path_or_filename (str): Either path to a backup file, or file name. If
file name, will restore from local backup if file exists in default
backups directory, else will restore from the cloud.
Side effects:
Reverts database to state of backup file
"""
dirpath, filename = os.path.split(path_or_filename)
local_path = os.path.join(BACKUPS_DIR, filename) if not dirpath \
else path_or_filename
if os.path.exists(local_path):
restore_db_local(local_path)
else:
restore_db_cloud(filename)
def restore_db_local(path: str, silent: bool = False):
"""Restore database
Args:
path (str): Path to backup file
silent (bool): Don't print updates?
Side effects:
Reverts database to state of backup file
"""
err_msg = '\n\nAn error occurred while trying to restore db from file: {}'\
              '. In the process of db restoration, a last-minute backup of db'\
' was created: {}. If you are seeing this message, then this ' \
'last-minute backup should have already been restored. However' \
', if it appears that your db has been dropped, you may restore'\
' from this file manually.\n\n' \
'- Original error: \n{}'
emergency_backup = new_backup_path()
backup_local(emergency_backup)
if os.path.getsize(emergency_backup) == 0: # no db existed
os.remove(emergency_backup)
# noinspection PyBroadException
# drop_db(hard=True)
try:
restore_using_pgrestore(path=path, dropdb=True, silent=silent)
except Exception as err:
if os.path.exists(emergency_backup):
restore_using_pgrestore(path=emergency_backup, dropdb=True)
err_msg = err_msg.format(path, emergency_backup, str(err))
raise PmaApiDbInteractionError(err_msg)
else:
raise err
if os.path.exists(emergency_backup):
os.remove(emergency_backup)
@aws_s3
def delete_s3_file(file_path: str):
"""Delete a file from AWS S3
Args:
file_path (str): Path to file on S3
Side effects:
- deletes file
"""
import boto3
s3 = boto3.resource('s3')
s3.Object(BUCKET, file_path).delete()
def delete_backup(filename: str):
"""Delete backup file from storage
Args:
filename (str): Name of file
Side effects:
- deletes file
"""
file_path: str = os.path.join(S3_BACKUPS_DIR_PATH, filename)
delete_s3_file(file_path)
def delete_dataset(version_number: int):
"""Delete dataset from storage
Args:
version_number (int): Version of dataset to delete
Side effects:
- deletes file
"""
filename: str = dataset_version_to_name(version_number)
file_path: str = os.path.join(S3_DATASETS_DIR_PATH, filename)
delete_s3_file(file_path)
```
#### File: pma_api/manage/functional_subtask.py
```python
from typing import Callable, Dict
class FunctionalSubtask:
"""A single granule of a multistep task; runs a simple function.
Can be sync or async.
"""
def __init__(self, name: str, prints: str, pct_starts_at: float,
func: Callable = None, *args, **kwargs):
"""Initializer
name (str): Subtask name
prints (str): A string that the subtask will return to be printed out
pct_starts_at (float): The percent that subtask is expected to begin
within a larger group of subtasks.
func (Callable): A function to run
"""
self.name: str = name
self.prints: str = prints
self.pct_starts_at: float = pct_starts_at
self.func_ref: Callable = func
self.args: tuple = args
self.kwargs: Dict = kwargs
def func(self):
"""Runs function w/ arguments 'args' or keyword arguments 'kwargs'."""
self.func_ref(*self.args, **self.kwargs)
```
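A minimal usage sketch of `FunctionalSubtask`; the wrapped function and its argument are illustrative, not from the codebase:
```python
from pma_api.manage.functional_subtask import FunctionalSubtask


def greet(whom: str):
    """Toy function standing in for a real DB/S3 operation."""
    print('Hello, ' + whom)


# Positional args after 'func' are forwarded to the wrapped function.
subtask = FunctionalSubtask('greet', 'Greeting the user', 0.0, greet, 'world')
subtask.func()  # prints: Hello, world
```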
#### File: pma_api/manage/multistep_task.py
```python
from collections import OrderedDict
from copy import copy
from typing import List, Dict, Union, Generator
from pma_api.error import PmaApiException
from pma_api.manage.functional_subtask import FunctionalSubtask
class MultistepTask:
"""A synchronous multi-step task
Subtasks groups are of the form:
Dict[str, Dict[str, Union[str, int, Callable]]] = {
'subtask_1_name': {
'prints': 'Doing first thing',
'pct_starts_at': 0
},
'subtask_2_name': {
'prints': 'Doing second thing',
'pct_starts_at': 10
...
}
Or it could be of the form:
'subtask_1_name': <subtask obj>
    This class is meant to be used either by itself, in which case print
    statements are typically made to show task progress, or in tandem with a
    task queue such as Celery, where a callback is utilized to report progress.
With each call to `begin(<subtask>)`, progress is reported, utilizing each
sub-task objects' print statement and percentage. When creating a sub-task
to build up a sub-task dictionary as shown above, it is necessary to assign
semi-arbitrary percentages. These percentages will represent the task
authors' best guess at how long a task should take.
"""
start_status = 'PENDING'
def __init__(
self, silent: bool = False, name: str = '',
callback: Generator = None,
subtasks: Dict = None):
"""Tracks progress of task queue
If queue is empty, calls to TaskTracker methods will do nothing.
Args:
            subtasks: Queued dictionary of subtasks to run.
silent: Don't print updates?
callback: Callback function to use for every iteration
of the queue. This callback must take a single dictionary as its
parameter, with the following schema...
{'status': str, 'current': float}
...where the value of 'current' is a float with value between 0
and 1.
"""
self.subtasks = subtasks if subtasks else OrderedDict()
self.silent = silent
self.name = name
self.callback = callback
self.tot_sub_tasks = len(subtasks.keys()) if subtasks else 0
self.status = self.start_status
self.completion_ratio = float(0)
@staticmethod
def _calc_subtask_grp_pcts(
subtask_grp_list: List[Dict[str, Union[Dict, FunctionalSubtask]]],
start: float, stop: float) \
-> Dict:
"""Calculate percents that each subtask in group should start at.
Args:
subtask_grp_list: Collection of subtasks in form of a list.
start: Percent that the first subtask should start at.
stop: Percent that the *next* subtask should start at. The last
subtask in group will not start at this number, but before it.
Return
Collection of subtasks in form of a dictionary.
"""
subtask_grp_dict = {}
pct_each_consumes = (stop - start) / len(subtask_grp_list)
pct_each_begins = [start]
for i in range(len(subtask_grp_list) - 1):
pct_each_begins.append(pct_each_begins[-1] + pct_each_consumes)
is_functional_subtask: bool = isinstance(
list(subtask_grp_list[0].values())[0], FunctionalSubtask)
if not is_functional_subtask:
subtask_grp_list_calculated = []
for subtask in subtask_grp_list:
calculated_subtask = copy(subtask)
subtask_name: str = list(subtask.keys())[0]
pct_start: float = pct_each_begins.pop(0)
calculated_subtask[subtask_name]['pct_starts_at'] = pct_start
subtask_grp_list_calculated.append(calculated_subtask)
for subtask in subtask_grp_list_calculated:
for k, v in subtask.items():
subtask_grp_dict[k] = v
else:
for item in subtask_grp_list:
for subtask_name, subtask in item.items():
pct_start: float = pct_each_begins.pop(0)
subtask.pct_starts_at = pct_start
subtask_grp_dict[subtask_name] = subtask
return subtask_grp_dict
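    # Worked example (illustrative): a group of three subtasks with start=0.0
    # and stop=0.9 each consumes (0.9 - 0.0) / 3 = 0.3, so the subtasks are
    # assigned pct_starts_at values of 0.0, 0.3, and 0.6 respectively.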
def _report(
self,
silence_status: bool = False,
silence_percent: bool = False,
status: str = '',
completion_ratio: float = None):
"""Report progress
Side effects:
- Prints if not silent
- Sends update to callback if present
Args:
silence_status (bool): Silence status?
silence_percent (bool): Silence percent?
status (str): Message for reporting current status
completion_ratio (float): Current estimated completion percentage
within the entire task
"""
the_status: str = status if status else self.status
completion_pct: float = completion_ratio if completion_ratio \
else self.completion_ratio
if not the_status or completion_pct is None:
return
if not self.silent:
pct: str = str(int(completion_pct * 100)) + '%'
msg = ' '.join([
the_status if not silence_status else '',
'({})'.format(pct) if not silence_percent else ''])
print(msg)
if self.callback:
self.callback.send({
'name': self.name,
'status': the_status,
'current': completion_pct})
def _begin_subtask(self, subtask_name: str,
subtask_queue: OrderedDict = None):
"""Begin subtask. Prints/returns subtask message and percent
Side effects:
- self._report
- Runs subtask function if present
Args:
subtask_name: Name of subtask to report running. If absent,
prints that task has already begun.
subtask_queue: Ordered dictionary of subtasks to run
"""
subtask_queue: OrderedDict = subtask_queue if subtask_queue \
else self.subtasks
if not subtask_queue:
return
subtask: Union[Dict, FunctionalSubtask] = subtask_queue[subtask_name]
pct: float = subtask['pct_starts_at'] if isinstance(subtask, dict) \
else subtask.pct_starts_at
self.completion_ratio = float(pct if pct < 1 else pct / 100)
self.status: str = subtask['prints'] if isinstance(subtask, dict) \
else subtask.prints
self._report()
        if isinstance(subtask, dict):
            # 'func' is optional for dict-style subtasks; use .get() to avoid
            # a KeyError on subtasks that only report progress.
            if subtask.get('func'):
                subtask['func']()
else:
if hasattr(subtask, 'func'):
subtask.func()
def _begin_task(self):
"""Begin multistep task. Prints/returns task name.
Side effects:
- Sets instance attributes
- self._report
Raises:
PmaApiException: If task was called to begin more than once.
"""
        err = 'Task \'{}\' has already started, but a call was made to ' \
              'start it again. If the intent is to start a subtask, the ' \
              'subtask name should be passed when calling.'.format(self.name)
if self.status != self.start_status:
raise PmaApiException(err)
if not self.subtasks:
return
self.completion_ratio: float = float(0)
        self.status: str = \
            'Task start: {}'.format(self.name) if self.name else ''
self._report(silence_percent=True)
def begin(self, subtask_name: str = '',
subtask_queue: OrderedDict = None):
"""Register and report task or subtask begin
Side effects:
- self._begin_multistep_task
- self._begin_subtask
Args:
subtask_name: Name of subtask to report running. If absent,
prints that task has already begun.
subtask_queue: Ordered dictionary of subtasks to run
"""
if not subtask_name:
self._begin_task()
else:
self._begin_subtask(
subtask_name=subtask_name,
subtask_queue=subtask_queue)
def complete(self, seconds_elapsed: int = None):
"""Register and report all sub-tasks and task itself complete"""
if not self.subtasks:
return
self.completion_ratio: float = float(1)
if self.name and seconds_elapsed:
self.status = 'Task completed in {} seconds: {}'\
.format(str(seconds_elapsed), self.name)
elif self.name:
self.status = 'Task complete: {}'.format(self.name)
else:
self.status = ''
self._report()
```
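A minimal sketch of driving `MultistepTask` directly; the task and subtask names, print strings, and percentages are illustrative, and the no-op lambdas stand in for real work:
```python
from collections import OrderedDict

from pma_api.manage.functional_subtask import FunctionalSubtask
from pma_api.manage.multistep_task import MultistepTask

subtasks = OrderedDict([
    ('download', FunctionalSubtask(
        'download', 'Downloading dataset', 0.0, lambda: None)),
    ('import', FunctionalSubtask(
        'import', 'Importing records', 0.5, lambda: None)),
])
task = MultistepTask(name='reload', subtasks=subtasks)
task.begin()            # prints: Task start: reload
task.begin('download')  # prints: Downloading dataset (0%), runs its func
task.begin('import')    # prints: Importing records (50%)
task.complete()         # prints: Task complete: reload (100%)
```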
#### File: pma_api/models/string.py
```python
from pma_api.models import db
from pma_api.utils import next64
class EnglishString(db.Model):
"""EnglishString model."""
__tablename__ = 'english_string'
id = db.Column(db.Integer, primary_key=True)
code = db.Column(db.String, unique=True)
english = db.Column(db.String, nullable=False)
translations = db.relationship('Translation')
def to_string(self, lang=None):
"""Return string in specified language if supplied, else English.
Args:
lang (str): The language, if specified.
Returns:
str: The text.
"""
result = self.english
if lang is not None and lang.lower() != 'en':
lang = lang.lower()
gen = iter(t for t in self.translations if t.language_code == lang)
found = next(gen, None)
if found is not None:
result = found.translation
return result
def to_json(self):
"""Return dictionary ready to convert to JSON as response.
Contains URL for resource entity.
Returns:
dict: API response ready to be JSONified.
"""
json_obj = {
# 'url': url_for('api.get_text', code=self.code, _external=True),
'id': self.code,
'text': self.english,
'langCode': 'en'
}
return json_obj
@staticmethod
def insert_or_update(english, code):
"""Insert or update an English record.
Args:
english (str): The string in English to insert.
code (str): The code for the string.
Returns:
The EnglishString record inserted or updated.
"""
record = EnglishString.query.filter_by(code=code).first()
if record and record.english != english:
record.english = english
# TODO: Resolve - Committing causes slow, but remove causes error
db.session.add(record)
db.session.commit()
elif not record:
record = EnglishString.insert_unique(english, code)
return record
@staticmethod
def insert_unique(english, code=None):
"""Insert a unique record into the database.
Creates a code and combines with English text to as the parameters for
new record.
Args:
english (str): The string in English to insert.
code (str): The code for the string. None if it should be random.
Returns:
The new EnglishString record.
"""
# TODO: (jkp 2017-08-29) This is not necessary because next64 now
# returns unique. Needs: Nothing.
if code is None:
code = next64()
record = EnglishString(code=code, english=english)
db.session.add(record)
# TODO: Resolve - Committing causes slow, but remove causes error
db.session.commit()
return record
def datalab_init_json(self):
"""Datalab init json: EnglishString."""
this_dict = {
'en': self.english
}
for translation in self.translations:
this_dict[translation.language_code] = translation.translation
to_return = {
self.code: this_dict
}
return to_return
def __repr__(self):
"""Return a representation of this object."""
        if len(self.english) > 20:
            preview = '{}...'.format(self.english[:17])
        else:
            preview = self.english
return '<EnglishString {} "{}">'.format(self.code, preview)
class Translation(db.Model):
"""Translation model."""
__tablename__ = 'translation'
id = db.Column(db.Integer, primary_key=True)
english_id = db.Column(db.Integer, db.ForeignKey('english_string.id'))
language_code = db.Column(db.String, nullable=False)
translation = db.Column(db.String, nullable=False)
languages_info = {
'english': {
'code': 'en',
'label': 'English',
'active': True,
'string_records': 'english'
},
'french': {
'code': 'fr',
'label': 'French',
'active': True,
'string_records': 'To be implemented.'
}
}
def __init__(self, **kwargs):
"""Initialize instance of model.
Does a few things: (1) Gets english code if it is already supplied and
creates a record in EnglishString. This happens when inserting a
record for UI data. Otherwise, gets the english code and (2) Calls
super init.
"""
self.prune_ignored_fields(kwargs)
if kwargs.get('english_code'):
english = EnglishString.insert_or_update(
kwargs['english'], kwargs['english_code'].lower())
kwargs.pop('english_code')
else:
english = EnglishString.query.filter_by(english=kwargs['english'])\
.first()
try:
kwargs['english_id'] = english.id
except AttributeError:
new_record = EnglishString.insert_unique(kwargs['english'])
kwargs['english_id'] = new_record.id
kwargs.pop('english')
super(Translation, self).__init__(**kwargs)
@staticmethod
def prune_ignored_fields(kwargs):
"""Prune ignored fields.
Args:
kwargs (dict): Keyword arguments.
"""
from pma_api.models.api_base import prune_ignored_fields
prune_ignored_fields(kwargs)
@staticmethod
def languages():
"""Languages list."""
languages = {v['code']: v['label'] for _, v in
Translation.languages_info.items()}
return languages
def __repr__(self):
"""Return a representation of this object."""
        if len(self.translation) > 20:
            preview = '{}...'.format(self.translation[:17])
        else:
            preview = self.translation
return '<Translation ({}) "{}">'.format(self.language_code, preview)
```
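Illustrative shape of the `datalab_init_json()` payload for a record with one attached French translation; the code and strings are made up:
```python
# For EnglishString(code='abc123', english='Married women') with a French
# Translation record, datalab_init_json() yields a dict keyed by code:
example = {'abc123': {'en': 'Married women', 'fr': 'Femmes mariées'}}
```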
#### File: endpoints/api_1_0/dynamic.py
```python
import os
from typing import Union, List, Dict
from flask import request
from flask_sqlalchemy import Model
from pma_api.models import db
from pma_api.response import QuerySetApiResult
from pma_api.config import PROJECT_ROOT_PATH, \
SQLALCHEMY_MODEL_ATTR_QUERY_IGNORES as IGNORES
from pma_api.utils import get_db_models
from pma_api.routes.endpoints.api_1_0 import api
db_models: List[Model] = get_db_models(db)
# PyUnresolvedReferences: Doesn't recognize existing attr __tablename__
# noinspection PyUnresolvedReferences
resource_model_map = {
x.__tablename__: x for x in db_models
}
def models_to_dicts(models: List[Model], ignores: tuple = IGNORES) -> List[Dict]:
"""Converts list of SqlAlchemy Model objects to dictionaries
Args:
models (list(Model)): List of SqlAlchemy Model objects
ignores (tuple): Attributes to not include in dict
Returns:
        list: List of dictionaries
"""
dicts: List[Dict] = [
{
k: v
for k, v in x.__dict__.items() if k not in ignores
}
for x in models
]
return dicts
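# Illustrative usage sketch (hypothetical model name, not from the original
# source): given a 'country' model in resource_model_map, models_to_dicts
# drops the ignored attributes (e.g. SQLAlchemy's '_sa_instance_state',
# assuming it is in IGNORES) and returns plain dicts:
#
#     rows = resource_model_map['country'].query.all()
#     dicts = models_to_dicts(rows)  # -> [{'id': 1, 'label': ...}, ...]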
@api.route('/<resource>')
def dynamic_route(resource: str) -> Union[QuerySetApiResult, str]:
"""Dynamically resource-based routing
For any model resources that do not have explicit static routes created,
this route will attempt to return a standardized list of results for that
model.
Args:
resource(str): Resource requested in url of request
Returns:
QuerySetApiResult: Records queried for resource
str: Standard 404
    # TODO 1: Allow for public/non-public access settings. Pseudo code:
# access_ok = hasattr(model, 'access') and model['access']['api'] \
# and model['access']['api']['public']
# public_attrs = [x for x in model['access']['api']['attributes']
# if x['public']]
# # filter out key value pairs that are not public
# # return json
# TODO 5: Ideally I'd like to use a different approach, i.e. dynamically
# generate and register a list of routes at server start.
"""
    model = resource_model_map.get(resource)
if model is None:
# TODO 2: There's probably a better way to handle 404's in this case
msg_404 = 'Error 404: Page not found' + '<br/>'
resource_h1 = 'The resources available are limited to the following ' \
+ '<ul>'
    resources: str = '<li>' + \
        '</li><li>'.join(resource_model_map.keys()) + '</li></ul>'
msg = '<br/>'.join([msg_404, resource_h1, resources])
return msg
objects: List[Model] = model.query.all()
if not request.args:
        dict_objs: List[Dict] = models_to_dicts(objects)
        return QuerySetApiResult(record_list=dict_objs, return_format='json')
query_dir = os.path.join(PROJECT_ROOT_PATH, 'pma_api')
query_template_path = os.path.join(query_dir, 'python_query_template.py')
query_tempfile_path = os.path.join(query_dir, 'python_query_tempfile.py')
# TODO: review https://nedbatchelder.com/blog/201206/
# eval_really_is_dangerous.html
arg_str = ''
for k, v in request.args.items():
# TODO 3: Lots of other conversions. Consider as well using the literal
# url string rather than request.args
        v = 'True' if v == 'true' else 'False' if v == 'false' else v
        # NOTE: multiple query params are concatenated with no joining
        # operator, so this only yields a valid expression for a single param
        arg_str += '_.{} == {}'.format(k, v)
with open(query_template_path, 'r') as file:
txt = file.read()
# TODO 4: Use actual temp files with random names for concurrency
with open(query_tempfile_path, 'w') as file:
txt = txt.replace("'$'", arg_str)
file.write(txt)
# noinspection PyUnresolvedReferences
from pma_api.python_query_tempfile import interpolated_query
filtered_objs: List[Model] = interpolated_query(objects)
os.remove(query_tempfile_path)
    dict_objs: List[Dict] = models_to_dicts(filtered_objs)
response = QuerySetApiResult(record_list=dict_objs, return_format='json')
return response
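# Illustrative sketch of the (assumed) template mechanism, not from the
# original source: if python_query_template.py defines something like
#
#     def interpolated_query(objects):
#         return [_ for _ in objects if '$']
#
# then a request such as ?is_active=true rewrites the '$' placeholder to
# "_.is_active == True" in the temp file before it is imported above.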
```
#### File: pma-api/pma_api/task_utils.py
```python
import os
import time
from io import BytesIO
from typing import Dict, List, BinaryIO, Union
from celery import Celery
from celery.exceptions import NotRegistered
from celery.result import AsyncResult
from werkzeug.datastructures import FileStorage
from pma_api.config import TEMP_DIR, \
ACCEPTED_DATASET_EXTENSIONS as EXTENSIONS, S3_DATASETS_DIR_PATH, \
AWS_S3_STORAGE_BUCKETNAME as BUCKET, PACKAGE_DIR_NAME, CELERY_QUEUE
from pma_api.error import PmaApiException
from pma_api.manage.db_mgmt import download_dataset
from pma_api.routes.administration import ExistingDatasetError
from pma_api.utils import get_app_instance
app = get_app_instance()
def save_file_from_request(file: FileStorage, file_path: str):
"""Save file at a specific location.
Args:
file (FileStorage): File.
file_path (str): File name.
Raises:
PmaApiException: If file saved is 0 bytes
"""
    if not os.path.exists(os.path.dirname(file_path)):
        os.makedirs(os.path.dirname(file_path))
file.save(file_path)
file.close()
if os.path.getsize(file_path) == 0:
raise PmaApiException('File saved, but was 0 bytes.\n- Path: {}'
.format(file_path))
def save_file_from_bytesio(file: BytesIO, file_path: str, _attempt: int = 1):
"""Save file at a specific location.
Args:
file (BytesIO): File.
file_path (str): File name.
_attempt (int): Attempt number for trying to save file.
"""
max_attempts = 2
    try:
        with open(file_path, 'wb') as f:
            f.write(file.read())
    except FileNotFoundError:
        os.makedirs(os.path.dirname(file_path))
if _attempt < max_attempts:
save_file_from_bytesio(file=file, file_path=file_path,
_attempt=_attempt + 1)
def save_file_from_bytes(file_bytes: bytes, file_path: str,
_attempt: int = 1):
"""Save file_bytes at a specific location.
Args:
file_bytes (bytes): File bytes.
file_path (str): File name.
_attempt (int): Attempt number for trying to save file_bytes.
"""
max_attempts = 2
    try:
        with open(file_path, 'wb') as f:
            f.write(file_bytes)
    except FileNotFoundError:
        os.makedirs(os.path.dirname(file_path))
if _attempt < max_attempts:
save_file_from_bytes(file_bytes=file_bytes, file_path=file_path,
_attempt=_attempt + 1)
def load_local_dataset_from_db(dataset_id: str) -> BinaryIO:
"""Load a dataset that exists in local database
Side effects:
- download_dataset_from_db()
- Reads file
Args:
dataset_id (str): ID of dataset that should exist in db
Returns:
        BinaryIO: Open file handle in binary read mode
"""
file_path: str = download_dataset(int(dataset_id))
data: BinaryIO = open(file_path, 'rb')
return data
def upload_dataset(filename: str, file) -> str:
"""Upload file to data storage
Args:
filename (str): File name.
file: File.
Side effects:
- Stores file on AWS S3 using: store_file_on_s3
Raises:
ExistingDatasetError: If dataset already exists
Returns:
str: Url where file is stored
"""
from pma_api.manage.db_mgmt import store_file_on_s3, list_cloud_datasets
from pma_api.models import Dataset
# 1. Save file
default_ext = 'xlsx'
has_ext: bool = any(filename.endswith(x) for x in EXTENSIONS)
filename_with_ext: str = filename if has_ext \
else filename + '.' + default_ext
tempfile_path: str = os.path.join(TEMP_DIR, filename_with_ext)
save_file_from_request(file=file, file_path=tempfile_path)
# 2. Validate
this_dataset: Dataset = Dataset(tempfile_path)
this_version: int = this_dataset.version_number
uploaded_datasets: List[Dict[str, str]] = list_cloud_datasets()
uploaded_versions: List[int] = \
[int(x['version_number']) for x in uploaded_datasets]
already_exists: bool = this_version in uploaded_versions
if already_exists:
msg = 'ExistingDatasetError: Dataset version "{}" already exists.'\
.format(str(this_version))
if os.path.exists(tempfile_path):
os.remove(tempfile_path)
raise ExistingDatasetError(msg)
# 3. Upload file
filename: str = store_file_on_s3(
path=tempfile_path,
storage_dir=S3_DATASETS_DIR_PATH)
# 4. Closeout
if os.path.exists(tempfile_path):
os.remove(tempfile_path)
file_url = 'https://{bucket}.s3.amazonaws.com/{path}{object}'.format(
bucket=BUCKET,
path=S3_DATASETS_DIR_PATH,
object=filename)
return file_url
def _get_task_status_report(task_id: str) -> Dict[str, Union[str, int, float]]:
"""Get task status report as custom dictionary
Args:
task_id (str): Task id
Returns:
        Dict[str, Union[str, int, float]]: Custom task status report
"""
from pma_api.tasks import celery as celery_instance
err = 'Server: Unexpected error occurred while processing task.'
task: Union[AsyncResult, NotRegistered] = \
celery_instance.AsyncResult(task_id)
state: str = task.state
dynamic_report = {}
# noinspection PyTypeChecker
info: Union[Dict, NotRegistered, Exception] = task.info
info_available: bool = \
info is not None and not isinstance(info, NotRegistered)
if isinstance(info, Exception):
# TODO: [Errno 2] No such file or directory -- being reported here
# - why?
exc: Exception = info
static_report: Dict[str, Union[str, int]] = {
'id': task_id,
'url': '',
'state': state,
'status': str(exc),
'current': 0,
'total': 1,
            'result': state}  # TODO: remove all instances of 'result'?
elif state == 'FAILURE':
# to-do: I know I can receive tuple when fail, but not sure what type
# 'task' is in that case
# noinspection PyUnresolvedReferences
info2: tuple = info.args
status: str = '' if not info2 \
else info2[0] if isinstance(info2, tuple) and len(info2) == 1 \
else str(list(info2))
# For some reason, unknown failures can happen. When this happens,
# the module path is displayed, e.g.: 'pma_api.tasks.activate_dataset'
status: str = \
status if not status.startswith(PACKAGE_DIR_NAME) else err
static_report: Dict[str, Union[str, int]] = {
'id': task_id,
'url': '',
'state': state,
'status': status,
'current': 0,
'total': 1,
            'result': state}  # TODO: remove all instances of 'result'?
else:
# TODO: state and status == 'PENDING'. Why?
# pg_restore: [archiver (db)] Error while PROCESSING TOC:
# pg_restore: [archiver (db)] Error from TOC entry 2470; 1262 197874
# DATABASE pmaapi postgres
# pg_restore: [archiver (db)] could not execute query: ERROR:
# database "pmaapi" is being accessed by other users
# DETAIL: There are 2 other sessions using the database.
# Command was: DROP DATABASE pmaapi;
#
# Offending command: pg_restore --exit-on-error --create --clean
# --dbname=postgres --host=localhost --port=5432 --username=postgres
# /Users/joeflack4/projects/pma-api/data/db_backups/pma-api-backup_Mac
# OS_development_2019-04-04_13-04-54.568706.dump
status: str = \
info['status'] if info_available and 'status' in info else state
current: Union[int, float] = \
info['current'] if info_available and 'current' in info else 0
total: int = \
info['total'] if info_available and 'total' in info else 1
static_report: Dict[str, Union[str, int, float]] = {
'id': task_id,
'url': '',
'state': state,
'status': status,
'current': current,
'total': total}
# noinspection PyBroadException
try: # TO-DO 1
dynamic_report: Dict = info['args']
except Exception:
pass
report: Dict[str, Union[str, int, float]] = {
**static_report,
**dynamic_report}
return report
def get_task_status(
task_id: str, return_format: str = 'str', attempt: int = 1) -> \
Union[str, Dict[str, Union[str, int, float]]]:
"""Get task status from message broker through celery
TODO 2019.04.16-jef: Maybe the restarting of this shouldn't happen at this
level, but at the level of routing, or wherever the root source of the
request is coming from.
Args:
task_id (str): Task id
return_format (str): Can be 'str' or 'dict'. If 'str', will return
celery's default single-word task state. Else if 'dict', will return a
custom dictionary.
attempt (int): Attempt number
Raises:
        PmaApiException: If the 'return_format' arg passed is not valid.
    Returns:
        Union[str, Dict]: See arg 'return_format' for more info
"""
from pma_api.tasks import celery as celery_instance
max_attempts: int = 15
sleep_secs: int = 1
    err = 'Call to get task status did not request a valid format.\n' \
'- Format requested: {}\n' \
'- Valid formats: str, dict'.format(return_format)
if return_format == 'str':
task: Union[AsyncResult, NotRegistered] = \
celery_instance.AsyncResult(task_id)
try:
status: str = task.state
# 2019.04.16-jef: Not sure what caused BrokenPipeError; maybe was a
# server restart?
except BrokenPipeError as exc:
if attempt < max_attempts:
time.sleep(sleep_secs)
return get_task_status(
task_id=task_id,
return_format=return_format,
attempt=attempt+1)
else:
raise exc
elif return_format == 'dict':
status: Dict[str, Union[str, int, float]] = \
_get_task_status_report(task_id)
else:
raise PmaApiException(err)
return status
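# Illustrative usage sketch (hypothetical task id): get_task_status('abc123')
# returns celery's single-word state such as 'PENDING' or 'SUCCESS', while
# get_task_status('abc123', return_format='dict') returns the custom report
# built by _get_task_status_report().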
def validate_active_task_status(task_id: str) -> bool:
"""Validate task status
Args:
task_id (str): Celery task ID
Returns:
bool: True if task is actually active, else False.
"""
from pma_api.tasks import CELERY_COMPLETION_CODES
status_code: str = get_task_status(task_id)
    return status_code not in CELERY_COMPLETION_CODES
def progress_update_callback(task_obj: Celery, verbose: bool = False):
"""Progress update callback generator
Side effects:
- task_obj.update_state(): Updates task state.
- print(): if verbose
Args:
task_obj (Celery): Celery task object
verbose (bool): Print update yields?
"""
while True:
# 1. Receive update via progress_update_callback.send()
# noinspection PyUnusedLocal
update_obj: Dict[str, Union[str, float]]
update_obj = yield
# 2. Set some static variables
status: str = update_obj['status'] if 'status' in update_obj else ''
current: Union[float, int] = update_obj['current'] \
if 'current' in update_obj else 0
total: int = update_obj['total'] if 'total' in update_obj \
else 100 if current and current > 1 else 1
# 3. Create report
static_report: Dict[str, Union[str, float, int]] = {
'status': status,
'current': current,
'total': total}
dynamic_report: Dict = {
k: v
for k, v in update_obj.items()
if k not in static_report.keys()}
report: Dict = {**static_report, **{'args': dynamic_report}} \
if dynamic_report else static_report
# 4. Send report
if verbose:
percent: str = str(int(current * 100)) + '%'
print('{} ({})'.format(status, percent))
task_obj.update_state(state='PROGRESS', meta=report)
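# Illustrative usage sketch (assumed caller, not from the original source):
# the generator must be primed with next() before send() can deliver updates.
#
#     updater = progress_update_callback(task_obj=some_task, verbose=True)
#     next(updater)  # advance to the first yield
#     updater.send({'status': 'Loading', 'current': 0.5, 'total': 1})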
def start_task(
func: Celery, kwarg_dict: Dict = None, queue: str = CELERY_QUEUE) \
-> str:
"""Start a task, handling unexplaiend failures along the way
Args:
func (Celery): Celery task function to call to start task
        kwarg_dict (dict): Dictionary to pass to Celery.apply_async's
            kwargs parameter
queue (str): Name of the celery queue to use
Returns:
str: Task ID
"""
task: AsyncResult = func.apply_async(
kwargs=kwarg_dict if kwarg_dict else {},
queue=queue)
task_id: str = task.id
return task_id
```
#### File: pma-api/test/test_dataset.py
```python
# import datetime
# import os
# import unittest
#
# from pma_api import db, create_app
# from pma_api.db_models import Dataset
#
# from .config import TEST_STATIC_DIR
#
#
# # TODO: incomplete
# class TestDataset(unittest.TestCase):
# """Test that the dataset class works.
#
# To run this test directly, issue this command from the root directory:
# python -m test.test_dataset
# """
# file_name = 'api_data-2018.03.19-v29-SAS.xlsx'
#
# def setUp(self):
# """Set up: (1) Put Flask app in test mode, (2) Create temp DB."""
# # Continue from here next time
# # 1 set up the test
# import tempfile
# app = create_app()
# # TODO: Will this work?
# self.db_fd, app.config['DATABASE'] = tempfile.mkstemp()
# app.testing = True
# self.app = app.test_client()
# with app.app_context():
# # 2. new dataset object
# new_dataset = Dataset(
# file_path=TEST_STATIC_DIR + TestDataset.file_name)
# # 3. write to db
# db.session.add(new_dataset)
# db.session.commit()
#
# def test_dataset(self):
# """Create a new entry in 'dataset' table and read data."""
# # 4. read from the db
# dataset_from_db = Dataset.query\
# .filter_by(dataset_display_name=TestDataset.file_name).first()
#
# # 5. make assertions
# self.assertTrue(dataset_from_db.ID != '')
# self.assertTrue(dataset_from_db.data != '')
# self.assertTrue(dataset_from_db.dataset_display_name ==
# 'api_data-2018.03.19-v29-SAS.xlsx')
# self.assertTrue(type(dataset_from_db.upload_date) ==
# datetime.date.today())
# self.assertTrue(dataset_from_db.version_number == 'v29')
# self.assertTrue(dataset_from_db.dataset_type in
# ('data', 'metadata', 'full'))
# self.assertTrue(dataset_from_db.is_active_staging is False)
# self.assertTrue(dataset_from_db.is_active_production is False)
#
# def tearDown(self):
# """Tear down: (1) Close temp DB."""
# # 5: remove the stuff we wrote to the db
# os.close(self.db_fd)
# os.unlink(self.app.config['DATABASE'])
#
#
# # TODO: Use this example from tutorial for the above test
# class TestDB(unittest.TestCase):
# """Test database functionality.
#
# Tutorial: http://flask.pocoo.org/docs/0.12/testing/
# """
#
# def setUp(self):
# """Set up: (1) Put Flask app in test mode, (2) Create temp DB."""
# import tempfile
# from manage import initdb
# self.db_fd, app.config['DATABASE'] = tempfile.mkstemp()
# app.testing = True
# self.app = app.test_client()
# with app.app_context():
# initdb()
#
# def tearDown(self):
# """Tear down: (1) Close temp DB."""
# os.close(self.db_fd)
# os.unlink(app.config['DATABASE'])
#
# def test_empty_db(self):
# """Test empty database."""
# resp = self.app.get('/')
# assert b'No entries here so far' in resp.data
``` |
{
"source": "joeflack4/PMA-Survey-Hub",
"score": 3
} |
#### File: modules/ppp/app.py
```python
import os
from flask import Flask, send_from_directory
from flask import request
from flask import send_file
from .config import config
env_name = os.getenv('ENV', 'default')
app_config = config[env_name]
# def add_views(_app, namespace=''):
# """add views to application
# Args:
# _app: flask application
# namespace (String): additional url to put in front
# """
# from .routes import IndexView
# _app.add_url_rule(namespace + '/', view_func=IndexView.as_view('index'))
#
# @_app.route(namespace + '/favicon.ico')
# def favicon():
# """Renders favicon."""
# return send_from_directory(
# os.path.join(_app.root_path, 'static'),
# 'favicon.ico',
# mimetype='image/vnd.microsoft.icon')
#
# @_app.route(namespace + '/export', methods=['POST'])
# def export():
# """Takes POST form fields and send file which was already stored."""
# pdf_doc_file_path = request.form['pdf_doc_file_path']
# mime_type = request.form['mime_type']
# attachment_filename = request.form['attachment_filename']
# return send_file(pdf_doc_file_path,
# as_attachment=True,
# mimetype=mime_type,
# attachment_filename=attachment_filename)
def create_app(config_name=env_name):
"""create, configure and return a flask app"""
new_app = Flask(__name__)
new_app.config.from_object(config[config_name])
# add_views(new_app)
return new_app
app = create_app()
def run():
"""run"""
if env_name in ('development', 'default'):
app.run(host='127.0.0.1', port=8080, debug=True)
else:
app.run()
if __name__ == '__main__':
run()
``` |
{
"source": "joeflack4/sssom-py",
"score": 2
} |
#### File: sssom-py/tests/test_data.py
```python
import os
import yaml
cwd = os.path.abspath(os.path.dirname(__file__))
test_data_dir = os.path.join(cwd, "data")
test_out_dir = os.path.join(cwd, "tmp")
test_validate_dir = os.path.join(cwd, "validate_data")
schema_dir = os.path.join(cwd, "../schema")
TEST_CONFIG = os.path.join(cwd, "test_config.yaml")
DEFAULT_CONTEXT_PATH = os.path.join(schema_dir, "sssom.context.jsonld")
def get_test_file(filename):
return os.path.join(test_data_dir, filename)
def ensure_test_dir_exists():
if not os.path.exists(test_out_dir):
os.makedirs(test_out_dir)
def load_config():
with open(TEST_CONFIG) as file:
config = yaml.load(file, Loader=yaml.FullLoader)
return config
def get_all_test_cases():
test_cases = []
config = load_config()
for test in config["tests"]:
test_cases.append(SSSOMTestCase(test, config["queries"]))
return test_cases
def get_multiple_input_test_cases():
test_cases = []
config = load_config()
for test in config["tests"]:
if test["multiple_input"]:
test_cases.append(SSSOMTestCase(test, config["queries"]))
return test_cases
class SSSOMTestCase:
def __init__(self, config, queries):
self.filepath = get_test_file(config["filename"])
self.filename = config["filename"]
if "metadata_file" in config:
self.metadata_file = config["metadata_file"]
else:
self.metadata_file = None
self.graph_serialisation = "turtle"
self.ct_json_elements = config["ct_json_elements"]
self.ct_data_frame_rows = config["ct_data_frame_rows"]
if "inputformat" in config:
self.inputformat = config["inputformat"]
else:
self.inputformat = None
self.ct_graph_queries_owl = self._query_tuple(
config, "ct_graph_queries_owl", queries
)
self.ct_graph_queries_rdf = self._query_tuple(
config, "ct_graph_queries_rdf", queries
)
if "curie_map" in config:
self.curie_map = config["curie_map"]
else:
self.curie_map = None
def _query_tuple(self, config, tuple_id, queries_dict):
queries = []
for t in config[tuple_id]:
query = queries_dict[t]
queries.append((query, config[tuple_id][t]))
return queries
def get_out_file(self, extension):
return os.path.join(test_out_dir, f"{self.filename}.{extension}")
def get_validate_file(self, extension):
return os.path.join(test_validate_dir, f"{self.filename}.{extension}")
``` |
{
"source": "joeflack4/tccm-api",
"score": 2
} |
#### File: tccm_api/db/tccm_graph.py
```python
import sys
from typing import Union, List
from neo4j import Driver, GraphDatabase
from tccm_api.config import get_settings, Settings
from contextlib import contextmanager
from tccm_api.db.cypher_queries import *
from tccm_api.enums import ConceptReferenceKeyName, ConceptSystemKeyName, SearchModifier
from tccm_api.utils import *
class TccmGraph:
def __init__(self, settings: Settings = None):
if not settings:
settings = get_settings()
self.user = settings.neo4j_username
        self.password = settings.neo4j_password
self.uri = f"bolt://{settings.neo4j_host}:{settings.neo4j_bolt_port}"
self._driver: Union[Driver, None] = None
def connect(self):
if not self._driver:
self._driver = GraphDatabase.driver(self.uri, auth=(self.user, self.password))
def disconnect(self):
if self._driver:
self._driver.close()
@contextmanager
def create_session(self):
if not self._driver:
self.connect()
session = self._driver.session()
try:
yield session
finally:
session.close()
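    # Usage sketch, mirroring tests/db/neo4j/test_transaction.py below:
    #
    #     graph = TccmGraph()
    #     with graph.create_session() as session:
    #         records = session.read_transaction(some_read_tx_function)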
@staticmethod
def get_concept_references_by_value_tx(tx, key: ConceptReferenceKeyName, value: str, modifier: SearchModifier):
query = concept_reference_query_by_value(key, modifier)
result = tx.run(query, value=value)
return concept_references_from_results(result)
@staticmethod
def get_concept_references_by_values_and_concept_system_tx(tx, key: ConceptReferenceKeyName, values: List[str], concept_system: str):
query = concept_references_query_by_values_and_concept_system(key)
result = tx.run(query, values=values, concept_system=concept_system)
return concept_references_from_results(result)
@staticmethod
def get_concept_references_by_descendants_of_tx(tx, uri: str, depth: int = sys.maxsize):
query = concept_references_query_by_descendants_of(depth)
result = tx.run(query, uri=uri)
nodes = []
total = 0
for record in result:
n, nt, cs, total = record
node = dict(n.items())
if len(nt) > 0:
node['narrower_than'] = nt
if cs:
node['defined_in'] = cs
nodes.append(node)
return total, nodes
@staticmethod
    def get_code_set_by_id_tx(tx, code_set_id: str):
records = []
return records
@staticmethod
def get_concept_systems_by_value_tx(tx, key: ConceptSystemKeyName, value: str, modifier: SearchModifier):
records = []
query = concept_system_query_by_value(key, modifier)
result = tx.run(query, value=value)
for record in result:
records.append(record['n'])
return records
def get_concept_references_by_value(self, key: ConceptReferenceKeyName, value: str, modifier: SearchModifier):
with self._driver.session() as session:
return session.read_transaction(self.get_concept_references_by_value_tx, key, value, modifier)
def get_concept_references_by_values_and_concept_system(self, key: ConceptReferenceKeyName, values: List[str], concept_system: str):
with self._driver.session() as session:
return session.read_transaction(self.get_concept_references_by_values_and_concept_system_tx, key, values, concept_system)
def get_concept_references_by_descendants_of(self, uri: str, depth: int = sys.maxsize):
with self._driver.session() as session:
return session.read_transaction(self.get_concept_references_by_descendants_of_tx, uri, depth)
def get_code_set_by_id(self, code_set_id: str):
with self._driver.session() as session:
return session.read_transaction(self.get_code_set_by_id_tx, code_set_id)
def get_concept_systems_by_value(self, key: ConceptSystemKeyName, value: str, modifier: SearchModifier):
with self._driver.session() as session:
return session.read_transaction(self.get_concept_systems_by_value_tx, key, value, modifier)
```
#### File: tccm_api/routers/concept_reference.py
```python
from typing import Optional, Union, List
from fastapi import APIRouter, Response, Request, Depends, HTTPException
from urllib.parse import unquote
from pydantic.main import BaseModel
from tccm_api.db.tccm_graph import TccmGraph
from tccm_api.utils import curie_to_uri, build_jsonld_link_header
from tccm_api.enums import ConceptReferenceKeyName, SearchModifier
router = APIRouter(
prefix='/conceptreferences',
tags=['ConceptReferences'],
dependencies=[],
responses={404: {"description": "Not found"}},
)
class ConceptReference(BaseModel):
code: Optional[str]
defined_in: Optional[str]
uri: str
designation: Optional[str]
definition: Optional[str]
reference: Optional[str]
narrower_than: Optional[List[str]]
@router.get('', response_model=List[ConceptReference])
def get_concept_references(key: ConceptReferenceKeyName, value: str, modifier: SearchModifier, request: Request, response: Response):
graph: TccmGraph = request.app.state.graph
new_value = value
if key == ConceptReferenceKeyName.uri:
new_value = unquote(value)
elif key == ConceptReferenceKeyName.curie:
new_value = unquote(curie_to_uri(value))
records = graph.get_concept_references_by_value(key, new_value, modifier)
if not records:
raise HTTPException(status_code=404, detail=f"ConceptReference {key}={value}|{modifier} not found.")
response.headers['Link'] = build_jsonld_link_header(str(request.base_url) + request.scope.get("root_path"), 'termci_schema')
return records
@router.get('/{curie}', response_model=ConceptReference)
def get_concept_reference_by_id(curie: str, request: Request, response: Response):
graph: TccmGraph = request.app.state.graph
new_value = unquote(curie_to_uri(curie))
records = graph.get_concept_references_by_value(ConceptReferenceKeyName.curie, new_value, SearchModifier.equals)
if not records:
raise HTTPException(status_code=404, detail=f"ConceptReference curie={curie} not found.")
response.headers['Link'] = build_jsonld_link_header(str(request.base_url) + request.scope.get("root_path"), 'termci_schema')
return records[0]
```
#### File: tccm-api/tccm_loader/ncit.py
```python
from pathlib import Path
from io import BytesIO
from zipfile import ZipFile
from urllib.request import urlopen
from rdflib import Graph, Literal, RDFS, RDF, URIRef
from rdflib.namespace import DC, SKOS, SH
from tccm_model.tccm_model import *
from tccm_api.namespaces import NAMESPACES, NCIT
from utils import curie_to_uri
ROOT = Path(__file__).parent.parent
def get_ncit():
resp = urlopen("https://evs.nci.nih.gov/ftp1/NCI_Thesaurus/Thesaurus.FLAT.zip")
zipfile = ZipFile(BytesIO(resp.read()))
graph = Graph()
graph.namespace_manager.bind('skos', SKOS)
graph.namespace_manager.bind('sh', SH)
graph.namespace_manager.bind('dc', DC)
graph.namespace_manager.bind('ncit', NCIT)
cs_uri = URIRef("http://ncicb.nci.nih.gov/xml/owl/EVS/Thesaurus.owl")
graph.add((cs_uri, RDF.type, SKOS.ConceptScheme))
graph.add((cs_uri, DC.description, Literal(
"A vocabulary for clinical care, translational and basic research, and public information and administrative activities.")))
graph.add((cs_uri, RDFS.seeAlso, Literal("https://ncithesaurus.nci.nih.gov/ncitbrowser/")))
graph.add((cs_uri, SH.namespace, URIRef("http://ncicb.nci.nih.gov/xml/owl/EVS/Thesaurus.owl#")))
graph.add((cs_uri, SH.prefix, Literal("NCIT")))
for line in zipfile.open("Thesaurus.txt"):
tokens = line.decode("utf-8").split("\t")
uri = URIRef(tokens[1][1:-1])
graph.add((uri, RDF.type, SKOS.Concept))
graph.add((uri, SKOS.notation, Literal(tokens[0])))
graph.add((uri, SKOS.definition, Literal(tokens[4])))
graph.add((uri, SKOS.prefLabel, Literal(tokens[3].split("|")[0])))
if tokens[2]:
for code in tokens[2].split("|"):
code = code.strip()
sc_uri = URIRef(curie_to_uri(f"NCIT:{code}", NAMESPACES))
graph.add((uri, SKOS.broader, sc_uri))
see_also = f"https://ncit.nci.nih.gov/ncitbrowser/pages/concept_details.jsf?dictionary=NCI%20Thesaurus&code={tokens[0]}"
graph.add((uri, RDFS.seeAlso, Literal(see_also)))
graph.add((uri, SKOS.inScheme, cs_uri))
with open('ncit-termci.ttl', 'w') as file:
file.write(graph.serialize(format='turtle').decode('utf-8'))
if __name__ == '__main__':
get_ncit()
```
#### File: db/neo4j/test_transaction.py
```python
import logging
from neo4j.exceptions import ServiceUnavailable
from tccm_api.db.tccm_graph import TccmGraph
def get_concepts(tx):
query = "MATCH(n:Resource) RETURN n LIMIT 10"
result = tx.run(query)
try:
return [record for record in result]
# Capture any errors along with the query and data for traceability
except ServiceUnavailable as exception:
logging.error(f"{query} raised an error: \n {exception}")
raise
def test_check_graph(termci_graph: TccmGraph):
with termci_graph.create_session() as session:
records = session.read_transaction(get_concepts)
assert len(records) == 0
def test_sum():
assert 1 + 1 == 2
```
#### File: tccm-api/tests/test_utils.py
```python
from tccm_api.namespaces import NAMESPACES
from tccm_api.utils import curie_to_uri
def test_curie_to_uri_ncit():
curie = "NCIT:C7227"
uri = curie_to_uri(curie, NAMESPACES)
assert uri == "http://purl.obolibrary.org/obo/NCIT_C7227"
def test_curie_to_uri_icdo3m():
curie = "ICDO3M:800"
uri = curie_to_uri(curie, NAMESPACES)
assert uri == "https://ontologies-r.us/ontology/ICD-O-3-M/800"
def test_curie_to_uri_uri():
curie = "https://ontologies-r.us/ontology/ICD-O-3-M/800"
uri = curie_to_uri(curie, NAMESPACES)
assert uri == "https://ontologies-r.us/ontology/ICD-O-3-M/800"
``` |
{
"source": "joeflack4/xform-test-with-javarosa-history",
"score": 3
} |
#### File: xform-test-with-javarosa-history/test/test.py
```python
import os
import subprocess
import unittest
from distutils.version import LooseVersion
from glob import glob
from re import search
TEST_DIR = os.path.dirname(os.path.realpath(__file__)) + '/'
TEST_STATIC_DIR = TEST_DIR + 'static/'
PROJECT_ROOT_DIR = TEST_DIR + '../'
LIBS_DIR = PROJECT_ROOT_DIR + 'build/libs/'
COMMAND_HEAD = ['java', '-jar', LIBS_DIR + 'xform-test-0.2.0.jar']
def command_base_for_latest_jar(directory):
"""Get command list for highest versioned jar w/out options in dir.
Args:
directory (str): Path to directory containing jar files with semvar
version named files, e.g. `my-package-x.y.z.jar`.
Returns:
list: Command list for jar.
"""
return ['java', '-jar', latest_jar(directory)]
def latest_jar(directory):
"""Gets name of highest versioned jar in directory.
Args:
directory (str): Path to directory containing jar files with semvar
version named files, e.g. `my-package-x.y.z.jar`.
Returns:
str: Name of jar file.
"""
files = glob(directory + '*.jar')
path_version_map = {}
if len(files) < 1:
return ''
elif len(files) == 1:
return files[0]
else:
latest_version_num = ''
for file in files:
            version = search(r'[0-9]\.[0-9]\.[0-9]', file)[0]
path_version_map[version] = file
for k, v in path_version_map.items():
if latest_version_num == '':
latest_version_num = k
else:
if LooseVersion(k) > LooseVersion(latest_version_num):
latest_version_num = k
latest_version_file_path = path_version_map[latest_version_num]
return latest_version_file_path
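# Illustrative sketch (hypothetical filenames): with 'pkg-0.9.0.jar' and
# 'pkg-0.10.0.jar' in the directory, LooseVersion ordering selects
# 'pkg-0.10.0.jar', where a plain string comparison would pick '0.9.0'.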
class CliTest(unittest.TestCase):
"""Base class for running simple CLI tests."""
@classmethod
def files_dir(cls):
"""Return name of test class."""
return TEST_STATIC_DIR + cls.__name__
def input_path(self):
"""Return path of input file folder for test class."""
return self.files_dir() + '/input/'
def input_files(self):
"""Return paths of input files for test class."""
all_files = glob(self.input_path() + '*')
# With sans_temp_files, you can have Excel files open while testing.
sans_temp_files = [x for x in all_files
if not x[len(self.input_path()):].startswith('~$')]
return sans_temp_files
@staticmethod
def _dict_options_to_list(options):
"""Converts a dictionary of options to a list.
Args:
options (dict): Options in dictionary form, e.g. {
'OPTION_NAME': 'VALUE',
'OPTION_2_NAME': ...
}
Returns:
list: A single list of strings of all options of the form
['--OPTION_NAME', 'VALUE', '--OPTION_NAME', ...]
"""
new_options = []
for k, v in options.items():
new_options += ['--'+k, v]
return new_options
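    # Illustrative sketch (assumed values): {'foo': '1', 'bar': 'x'} becomes
    # ['--foo', '1', '--bar', 'x'], ready to append to a subprocess command.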
    def standard_cli(self, options=None):
"""Runs CLI.
Args:
options (list): A single list of strings of all options of the form
['--OPTION_NAME', 'VALUE', '--OPTION_NAME', ...]
Returns:
1. str: TODO
2. str: TODO
"""
        in_files = self.input_files()
        options = options if options is not None else []
print(COMMAND_HEAD)
command = \
command_base_for_latest_jar(LIBS_DIR) + in_files + options
subprocess.call(command)
# TODO
expected = True
actual = False
return expected, actual
    def standard_cli_test(self, options=None):
"""Checks CLI success.
Args:
options (dict): Options in dictionary form, e.g. {
'OPTION_NAME': 'VALUE',
'OPTION_2_NAME': ...
}
Side effects:
assertEqual()
"""
        options_list = CliTest._dict_options_to_list(options or {})
expected, actual = self.standard_cli(options_list)
self.assertEqual(expected, actual)
class MultipleFiles(CliTest):
"""Can run CLI on multiple files at once?"""
def test_cli(self):
"""Simple smoke test to see that CLI runs without error."""
self.standard_cli_test()
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "joefreeman/strawberry",
"score": 2
} |
#### File: tests/asgi/test_query.py
```python
from starlette.background import BackgroundTask
from starlette.testclient import TestClient
import strawberry
from strawberry.asgi import GraphQL as BaseGraphQL
from strawberry.types import ExecutionResult, Info
def test_simple_query(schema, test_client):
response = test_client.post("/", json={"query": "{ hello }"})
assert response.json() == {"data": {"hello": "Hello world"}}
def test_returns_errors(schema, test_client):
response = test_client.post("/", json={"query": "{ donut }"})
assert response.json() == {
"data": None,
"errors": [
{
"locations": [{"column": 3, "line": 1}],
"message": "Cannot query field 'donut' on type 'Query'.",
"path": None,
}
],
}
def test_can_pass_variables(schema, test_client):
response = test_client.post(
"/",
json={
"query": "query Hello($name: String!) { hello(name: $name) }",
"variables": {"name": "James"},
},
)
assert response.json() == {"data": {"hello": "Hello James"}}
def test_returns_errors_and_data(schema, test_client):
response = test_client.post("/", json={"query": "{ hello, alwaysFail }"})
assert response.status_code == 200
assert response.json() == {
"data": {"hello": "Hello world", "alwaysFail": None},
"errors": [
{
"locations": [{"column": 10, "line": 1}],
"message": "You are not authorized",
"path": ["alwaysFail"],
}
],
}
def test_root_value(schema, test_client):
response = test_client.post("/", json={"query": "{ rootName }"})
assert response.json() == {"data": {"rootName": "Query"}}
def test_context_response():
@strawberry.type
class Query:
@strawberry.field
def something(self, info: Info) -> str:
r = info.context["response"]
r.raw_headers.append((b"x-bar", b"bar"))
return "foo"
schema = strawberry.Schema(query=Query)
app = BaseGraphQL(schema)
test_client = TestClient(app)
response = test_client.post("/", json={"query": "{ something }"})
assert response.status_code == 200
assert response.json() == {"data": {"something": "foo"}}
assert response.headers.get("x-bar") == "bar"
def test_can_set_custom_status_code():
@strawberry.type
class Query:
@strawberry.field
def something(self, info: Info) -> str:
r = info.context["response"]
r.status_code = 418
return "foo"
schema = strawberry.Schema(query=Query)
app = BaseGraphQL(schema)
test_client = TestClient(app)
response = test_client.post("/", json={"query": "{ something }"})
assert response.status_code == 418
assert response.json() == {"data": {"something": "foo"}}
def test_can_set_background_task():
task_complete = False
def task():
nonlocal task_complete
task_complete = True
@strawberry.type
class Query:
@strawberry.field
def something(self, info: Info) -> str:
r = info.context["response"]
r.background = BackgroundTask(task)
return "foo"
schema = strawberry.Schema(query=Query)
app = BaseGraphQL(schema)
test_client = TestClient(app)
response = test_client.post("/", json={"query": "{ something }"})
assert response.json() == {"data": {"something": "foo"}}
assert task_complete
def test_custom_context():
class CustomGraphQL(BaseGraphQL):
async def get_context(self, request, response):
return {
"request": request,
"custom_context_value": "Hi!",
}
@strawberry.type
class Query:
@strawberry.field
def custom_context_value(self, info: Info) -> str:
return info.context["custom_context_value"]
schema = strawberry.Schema(query=Query)
app = CustomGraphQL(schema)
test_client = TestClient(app)
response = test_client.post("/", json={"query": "{ customContextValue }"})
assert response.status_code == 200
assert response.json() == {"data": {"customContextValue": "Hi!"}}
def test_custom_process_result():
class CustomGraphQL(BaseGraphQL):
async def process_result(self, request, result: ExecutionResult):
return {}
@strawberry.type
class Query:
@strawberry.field
def abc(self) -> str:
return "ABC"
schema = strawberry.Schema(query=Query)
app = CustomGraphQL(schema)
test_client = TestClient(app)
response = test_client.post("/", json={"query": "{ abc }"})
assert response.status_code == 200
assert response.json() == {}
```
#### File: pydantic/schema/test_basic.py
```python
import textwrap
from enum import Enum
from typing import List, Optional, Union
import pydantic
import strawberry
def test_basic_type():
class UserModel(pydantic.BaseModel):
age: int
password: Optional[str]
@strawberry.experimental.pydantic.type(UserModel, fields=["age", "password"])
class User:
pass
@strawberry.type
class Query:
@strawberry.field
def user(self) -> User:
return User(age=1, password="<PASSWORD>")
schema = strawberry.Schema(query=Query)
expected_schema = """
type Query {
user: User!
}
type User {
age: Int!
password: String
}
"""
assert str(schema) == textwrap.dedent(expected_schema).strip()
query = "{ user { age } }"
result = schema.execute_sync(query)
assert not result.errors
assert result.data["user"]["age"] == 1
def test_basic_alias_type():
class UserModel(pydantic.BaseModel):
age_: int = pydantic.Field(..., alias="age")
password: Optional[str]
@strawberry.experimental.pydantic.type(UserModel, fields=["age_", "password"])
class User:
pass
@strawberry.type
class Query:
@strawberry.field
def user(self) -> User:
return User(age=1, password="<PASSWORD>")
schema = strawberry.Schema(query=Query)
expected_schema = """
type Query {
user: User!
}
type User {
age: Int!
password: String
}
"""
assert str(schema) == textwrap.dedent(expected_schema).strip()
def test_basic_type_with_list():
class UserModel(pydantic.BaseModel):
age: int
friend_names: List[str]
@strawberry.experimental.pydantic.type(UserModel, fields=["age", "friend_names"])
class User:
pass
@strawberry.type
class Query:
@strawberry.field
def user(self) -> User:
return User(age=1, friend_names=["A", "B"])
schema = strawberry.Schema(query=Query)
query = "{ user { friendNames } }"
result = schema.execute_sync(query)
assert not result.errors
assert result.data["user"]["friendNames"] == ["A", "B"]
def test_basic_type_with_nested_model():
class Hobby(pydantic.BaseModel):
name: str
@strawberry.experimental.pydantic.type(Hobby, fields=["name"])
class HobbyType:
pass
class User(pydantic.BaseModel):
hobby: Hobby
@strawberry.experimental.pydantic.type(User, fields=["hobby"])
class UserType:
pass
@strawberry.type
class Query:
@strawberry.field
def user(self) -> UserType:
return UserType(hobby=HobbyType(name="Skii"))
schema = strawberry.Schema(query=Query)
query = "{ user { hobby { name } } }"
result = schema.execute_sync(query)
assert not result.errors
assert result.data["user"]["hobby"]["name"] == "Skii"
def test_basic_type_with_list_of_nested_model():
class Hobby(pydantic.BaseModel):
name: str
@strawberry.experimental.pydantic.type(Hobby, fields=["name"])
class HobbyType:
pass
class User(pydantic.BaseModel):
hobbies: List[Hobby]
@strawberry.experimental.pydantic.type(User, fields=["hobbies"])
class UserType:
pass
@strawberry.type
class Query:
@strawberry.field
def user(self) -> UserType:
return UserType(
hobbies=[
HobbyType(name="Skii"),
HobbyType(name="Cooking"),
]
)
schema = strawberry.Schema(query=Query)
query = "{ user { hobbies { name } } }"
result = schema.execute_sync(query)
assert not result.errors
assert result.data["user"]["hobbies"] == [
{"name": "Skii"},
{"name": "Cooking"},
]
def test_basic_type_with_extended_fields():
class UserModel(pydantic.BaseModel):
age: int
@strawberry.experimental.pydantic.type(UserModel, fields=["age"])
class User:
name: str
@strawberry.type
class Query:
@strawberry.field
def user(self) -> User:
return User(name="Marco", age=100)
schema = strawberry.Schema(query=Query)
expected_schema = """
type Query {
user: User!
}
type User {
age: Int!
name: String!
}
"""
assert str(schema) == textwrap.dedent(expected_schema).strip()
query = "{ user { name age } }"
result = schema.execute_sync(query)
assert not result.errors
assert result.data["user"]["name"] == "Marco"
assert result.data["user"]["age"] == 100
def test_type_with_custom_resolver():
class UserModel(pydantic.BaseModel):
age: int
def get_age_in_months(root):
return root.age * 12
@strawberry.experimental.pydantic.type(UserModel, fields=["age"])
class User:
age_in_months: int = strawberry.field(resolver=get_age_in_months)
@strawberry.type
class Query:
@strawberry.field
def user(self) -> User:
return User(age=20)
schema = strawberry.Schema(query=Query)
query = "{ user { age ageInMonths } }"
result = schema.execute_sync(query)
assert not result.errors
assert result.data["user"]["age"] == 20
assert result.data["user"]["ageInMonths"] == 240
def test_basic_type_with_union():
class BranchA(pydantic.BaseModel):
field_a: str
class BranchB(pydantic.BaseModel):
field_b: int
class User(pydantic.BaseModel):
union_field: Union[BranchA, BranchB]
@strawberry.experimental.pydantic.type(BranchA, fields=["field_a"])
class BranchAType:
pass
@strawberry.experimental.pydantic.type(BranchB, fields=["field_b"])
class BranchBType:
pass
@strawberry.experimental.pydantic.type(User, fields=["age", "union_field"])
class UserType:
pass
@strawberry.type
class Query:
@strawberry.field
def user(self) -> UserType:
return UserType(union_field=BranchBType(field_b=10))
schema = strawberry.Schema(query=Query)
query = "{ user { unionField { ... on BranchBType { fieldB } } } }"
result = schema.execute_sync(query)
assert not result.errors
assert result.data["user"]["unionField"]["fieldB"] == 10
def test_basic_type_with_enum():
@strawberry.enum
class UserKind(Enum):
user = 0
admin = 1
class User(pydantic.BaseModel):
age: int
kind: UserKind
@strawberry.experimental.pydantic.type(User, fields=["age", "kind"])
class UserType:
pass
@strawberry.type
class Query:
@strawberry.field
def user(self) -> UserType:
return UserType(age=10, kind=UserKind.admin)
schema = strawberry.Schema(query=Query)
query = "{ user { kind } }"
result = schema.execute_sync(query)
assert not result.errors
assert result.data["user"]["kind"] == "admin"
def test_basic_type_with_interface():
class Base(pydantic.BaseModel):
base_field: str
class BranchA(Base):
field_a: str
class BranchB(Base):
field_b: int
class User(pydantic.BaseModel):
interface_field: Base
@strawberry.experimental.pydantic.interface(Base, fields=["base_field"])
class BaseType:
pass
@strawberry.experimental.pydantic.type(BranchA, fields=["field_a"])
class BranchAType(BaseType):
pass
@strawberry.experimental.pydantic.type(BranchB, fields=["field_b"])
class BranchBType(BaseType):
pass
@strawberry.experimental.pydantic.type(User, fields=["age", "interface_field"])
class UserType:
pass
print(BranchAType._type_definition)
print(BaseType._type_definition)
@strawberry.type
class Query:
@strawberry.field
def user(self) -> UserType:
return UserType(interface_field=BranchBType(base_field="abc", field_b=10))
schema = strawberry.Schema(query=Query, types=[BranchAType, BranchBType])
query = "{ user { interfaceField { baseField, ... on BranchBType { fieldB } } } }"
result = schema.execute_sync(query)
assert not result.errors
assert result.data["user"]["interfaceField"]["baseField"] == "abc"
assert result.data["user"]["interfaceField"]["fieldB"] == 10
``` |
{
"source": "joefriedrich/pyVacationCalculator",
"score": 3
} |
#### File: joefriedrich/pyVacationCalculator/pyVacationCalculator.py
```python
import requests
import re
import xml.etree.ElementTree as ET
from requests_ntlm import HttpNtlmAuth
from datetime import datetime
def get_root_xml():
'''
Use requests to grab .xml file containing calendar data
returns root of xml (fromstring or the file version[test version])
'''
raw_website = requests.get(
"Sharepoint Website",
auth=HttpNtlmAuth('Domain\\Username','Password'))
raw_text = raw_website.text
return ET.fromstring(raw_text)
def generate_item_from_root_xml(root):
'''
Write!
'''
tags = ['{http://schemas.microsoft.com/ado/2007/08/dataservices}Title',
'{http://schemas.microsoft.com/ado/2007/08/dataservices}EventDate',
'{http://schemas.microsoft.com/ado/2007/08/dataservices}EndDate']
for element in root.iter():
if element.tag in tags:
yield element.text
def calculate_vacation_days(start_date, end_date):
'''
Takes start and end date of entry.
--Verifies that the entry doesn't span multiple years.
Returns number of days.
'''
    if start_date.year != end_date.year:
        print("=========ERROR! VACATION SPANS MULTIPLE YEARS======"
              "\nSTART: {} END: {}".format(start_date, end_date))
else:
total_days = end_date - start_date
return total_days.days + 1
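# Illustrative sketch (assumed dates): the count is inclusive of both ends,
# e.g. 2020-01-06 through 2020-01-08 is (2 days difference) + 1 = 3 days.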
def build_data_from_xml(xml):
'''
Write!
'''
data = []
names = []
years = []
for item in xml:
vacation_item = re.split(r' - ', item.upper())
if vacation_item[0] not in names:
names.append(vacation_item[0])
start_date_raw = next(xml_generator)
start_date = datetime.strptime(start_date_raw[:10], '%Y-%m-%d')
vacation_item.append(start_date)
end_date_raw = next(xml_generator)
end_date = datetime.strptime(end_date_raw[:10], '%Y-%m-%d')
vacation_item.append(end_date)
if end_date.year not in years:
years.append(end_date.year)
vacation_item.append(calculate_vacation_days(start_date, end_date))
data.append(vacation_item)
return data, names, years
def get_employee_from_user(names, years):
'''
Get employee name and employee year from user.
(Change: should verify year in Years and name in Names)
'''
while True:
employee_name = input("\nPlease enter the employee's name: ").upper()
if employee_name in names:
employee_year = int(input("Please enter the year to calculate: "))
if employee_year in years:
return employee_name, employee_year
print('Employee or year not in list, please try again.')
def get_employee_time_data(vacation_data, employee_name, employee_year):
'''
Takes list of entries(vacation_data), name(user input), year(user input).
Generator that returns the next item that matches employee name and year
'''
employee_time_data = []
for item in vacation_data:
if item[0] == employee_name:
if item[2].year == employee_year:
yield item
def output(time_type, time_data, employee_name):
'''
Write!
'''
print('--------' + time_type + ' TIME-------')
total_time = 0
for event in time_data:
total_time = total_time + event[4]
print(event[0] + ' was ' + event[1] + ' from ' + event[2].isoformat()[:10] +
' to ' + event[3].isoformat()[:10] + ' for ' + str(event[4]) + ' day(s).')
print('Total ' + time_type + ' time for ' + employee_name +
' is ' + str(total_time))
#========================Begin Main Program============================
print('Retrieving, parsing, and organizing data.')
root = get_root_xml()
xml_generator = generate_item_from_root_xml(root)
vacation_data, names, years = build_data_from_xml(xml_generator)
stop = False
while not stop:
employee_name, employee_year = get_employee_from_user(names, years)
employee_calendar_data = list(get_employee_time_data(vacation_data, employee_name, employee_year))
#replace these 2 snippits with a function
employee_sick_data = []
employee_vacation_data = []
employee_wfh_data = [] #wfh = work from home
for item in employee_calendar_data:
if item[1][0].isdigit():
item[4] = int(item[1][0]) / int(item[1][2]) #replace 1 with decimal
item[1] = item[1][4:] #removes number and space
if item[1] in ('SICK', 'OUT'):
employee_sick_data.append(item)
elif item[1] in ('VACATION', 'DAY'):
employee_vacation_data.append(item)
elif item[1] == 'WFH':
employee_wfh_data.append(item)
output('SICK', employee_sick_data, employee_name)
output('VACATION', employee_vacation_data, employee_name)
output('Work From Home', employee_wfh_data, employee_name)
pause = input('Press Enter to select another employee/year combination.')
``` |
{
"source": "joe-fromCocaMoe/nltk_dictionary",
"score": 3
} |
#### File: joe-fromCocaMoe/nltk_dictionary/tk_dictionary002b.py
```python
from nltk.corpus import wordnet as wn
from tkinter import (Frame,Tk,Canvas,Button,Label,Entry,
Text,Checkbutton,IntVar)
import logging, nltk
logging.basicConfig(level= logging.DEBUG)
#logging.disable(logging.CRITICAL)
help_str= """
Enter your correctly spelled word into the entry widget
and press submit.
Output color code:
blue= synset.name()
none= synset.definitions()
red= lemma_names()
purple= synset.examples()
"""
class MyDictionary(Frame):
def __init__(self, parent=None):
self.parent= parent
self.parent.title('NLTK Dictionary')
Frame.__init__(self, self.parent)
self.pack(expand='yes',fill='both')
self.canvas= Canvas(self)
self.canvas.config(width=900, height=850, bg='gray80')
self.canvas.pack(expand='yes', fill='both')
self.make_components()
def make_components(self):
font_1= ('times',16,'normal')
font_2= ('times',16,'bold')
self.label= Label(self.canvas, text='Word to submit',font=font_1)
self.label.place(x=60,y=100)
self.entry1= Entry(self.canvas, width=70,font=font_1,)
self.entry1.insert('end', 'mint')
self.entry1.focus()
self.entry1.place(x=200,y=100)
self.btn= Button(self.canvas, text= 'Submit',font=font_1,
command= self.find_def)
self.btn.place(x=775,y=165)
self.text= Text(self.canvas, relief='sunken',font=font_1,
wrap='word', )
self.text.tag_configure('n.',foreground='blue',font=font_2)
self.text.tag_configure('*.',foreground='red',font=font_2)
self.text.tag_configure('p.',foreground='purple',font=font_2)
self.text.tag_configure('hyp',foreground='green',font=font_2)
self.text.tag_configure('hpe',foreground='orange',font=font_2)
self.text.tag_configure('qq',foreground='dodgerblue',font=font_2)
self.text.place(x=30,y=200)
self.var_8= IntVar()
self.c_box8= Checkbutton(self.canvas, variable=self.var_8,
text= 'member holonyms', font=font_1)
self.c_box8.place(x=550,y=150)
self.var_7= IntVar()
self.c_box7= Checkbutton(self.canvas, variable=self.var_7,
text= 'entailments', font=font_1)
self.c_box7.place(x=550,y=175)
self.var_6= IntVar()
self.c_box6= Checkbutton(self.canvas, variable=self.var_6,
text= 'substance meronyms', font=font_1)
self.c_box6.place(x=350,y=175)
self.var_5= IntVar()
self.c_box5= Checkbutton(self.canvas, variable=self.var_5,
text= 'part meronyms', font=font_1)
self.c_box5.place(x=350,y=150)
self.var_4= IntVar()
self.c_box4= Checkbutton(self.canvas, variable=self.var_4,
text= 'hypernyms', font=font_1)
self.c_box4.place(x=350,y=125)
self.var_3= IntVar()
self.c_box3= Checkbutton(self.canvas, variable=self.var_3,
text= 'hyponyms', font=font_1)
self.c_box3.place(x=150,y=125)
self.var_1= IntVar()
self.c_box= Checkbutton(self.canvas, text='lemma name',
font=font_1, variable=self.var_1)
self.c_box.place(x=150,y=150)
self.var_2= IntVar()
self.c_box2= Checkbutton(self.canvas, text='def example',
font=font_1,variable=self.var_2)
self.c_box2.place(x=150,y=175)
self.btn.invoke()
def find_def(self):
logging.debug('looking for definition...')#be patient first lookup
word= self.entry1.get() #get the entry
defs= wn.synsets(word) #feed entry to dictionary
lem= self.var_1.get() #checkbutton info twice
ex_= self.var_2.get()
hym= self.var_3.get()
hyp= self.var_4.get()
pm= self.var_5.get()
sm= self.var_6.get()
ent= self.var_7.get()
hol= self.var_8.get()
self.text.delete('1.0','end')
for synset in defs:
name= synset.name() #output name
d_f= synset.definition() #output definition
self.text.insert('end',name,'n.')
self.text.insert('end','\n')
self.text.insert('end',d_f)
self.text.insert('end','\n')
l_n= synset.lemma_names()
exa= synset.examples()
h_y= synset.hyponyms()
h_m= synset.hypernyms()
p_m= synset.part_meronyms()
s_m= synset.substance_meronyms()
m_h= synset.member_holonyms()
ant= synset.entailments()
#a_m= wn.lemma(l_n).antonyms()
if lem: #output lemma name
self.text.insert('end', l_n, '*.')
self.text.insert('end','\n')
if ex_: # ouput example purple
self.text.insert('end', exa, 'p.')
self.text.insert('end','\n')
if hym:
self.text.insert('end', h_y, 'hyp')
self.text.insert('end', '\n')
if hyp:
self.text.insert('end', h_m, 'hpe')
self.text.insert('end', '\n')
if pm:
self.text.insert('end', p_m, 'qq')
self.text.insert('end', '\n')
if sm:
self.text.insert('end', s_m, '*.')
self.text.insert('end', '\n')
if ent:
self.text.insert('end', ant, 'p.')
self.text.insert('end', '\n')
if hol:
self.text.insert('end', m_h, 'hyp')
self.text.insert('end', '\n')
if __name__ == '__main__':
root= Tk()
MyDictionary(root)
root.mainloop()
``` |
{
"source": "joe-fromCocaMoe/zener",
"score": 2
} |
#### File: joe-fromCocaMoe/zener/tk_zener_test.py
```python
from tkinter import Canvas,Frame,Button,Label,Tk,IntVar
from random import shuffle, random, choice
from tkinter.messagebox import *
from tkinter.simpledialog import askstring
import logging, sys
from time import time,sleep
from PIL.ImageTk import PhotoImage, Image
from functools import partial
logging.basicConfig(level= logging.DEBUG)
#logging.disable(logging.CRITICAL)
class Zener_Test(Frame):
def __init__(self, parent=None):
self.parent= parent
Frame.__init__(self, self.parent)
self.pack(expand='yes', fill='both')
self.canvas= Canvas(self)
self.canvas.config(width= 1000, height= 880, bg='skyblue')
self.canvas.pack(expand='yes', fill='both')
self.btn= Button(self.canvas, text='start', command= self.get_entry)
self.btn.place(x=800,y=60)
self.bind_all('<Key>', self.key)
self.zener_deck_base= ['Yellow Circle','Red Plus','Blue Waves',
'Black Square','Green Star']
self.zener_pics= ['Yellow_Circle.png','Red_Plus.png',
'Blue_Waves.png','Black_Square.png',
'Green_Star.png']
self.photo_path= 'zener/'
self.image_ref= []
self.image_ref2= []
self.zener_buttons= []
self.my_font=('arial',26,'bold')
self.circle_count= IntVar(value=0)
self.circle_lbl= Label(self.canvas, textvariable=self.circle_count,
font= self.my_font, bg='skyblue')
self.circle_lbl.place(x=100,y=240)
self.plus_count= IntVar(value=0)
self.plus_lbl= Label(self.canvas, textvariable=self.plus_count,
font=self.my_font, bg='skyblue')
self.plus_lbl.place(x=230,y=240)
self.wave_count= IntVar(value=0)
self.wave_lbl= Label(self.canvas, textvariable=self.wave_count,
font=self.my_font, bg='skyblue')
self.wave_lbl.place(x=370,y=240)
self.square_count= IntVar(value=0)
self.square_lbl= Label(self.canvas, textvariable=self.square_count,
font=self.my_font, bg='skyblue')
self.square_lbl.place(x=500,y=240)
self.star_count= IntVar(value=0)
self.star_lbl= Label(self.canvas, textvariable=self.star_count,
font=self.my_font, bg='skyblue')
self.star_lbl.place(x=630,y=240)
img= Image.open('zener/blank_card.png')
image_= PhotoImage(img)
self.guess_card= Label(self.canvas, image=image_,
width=360,height=600,bg='skyblue')
self.guess_card.place(x=300,y=280)
self.image_ref2.append(image_)
self.card_count= IntVar(value=25)
self.cards_left= Label(self.canvas, textvariable=self.card_count,
font=self.my_font,bg='skyblue')
self.cards_left.place(x=140,y=600)
self.deck= self.zener_deck_base * 5
self.current_card= 0
self.win= 0
self.loss= 0
self.outcome_answer= {}
self.g_count=0
def make_deck(self):
shuffle(self.deck)
front= self.deck[:12]
back= self.deck[12:]
shuffle(front)
shuffle(back)
final_deck= front + back
return final_deck
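    # Note on the shuffle above: the full deck is shuffled once, then the
    # 12-card front and 13-card back halves are shuffled separately and
    # recombined for an extra pass of mixing.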
def key(self, event):
self.g_count +=1
message= 'count:{0} key:{1} num:{2} state:{3}'.format(self.g_count,
event.keysym,event.keysym_num,
event.state)
logging.debug(message)
def make_card_buttons(self):
x2= 50
y2= 40
img_dir= self.photo_path
for pic in self.zener_pics:
img_obj= Image.open( img_dir + pic)
image= PhotoImage(img_obj)
self.zener_buttons.append(Button(self.canvas, image= image,
width=120,height=180,bg='skyblue',
command= partial(self.check_guess, pic)))
self.zener_buttons[-1].place(x=x2,y=y2)
x2+=130
self.image_ref.append(image)
def compare_card(self,btn_str, card_):
if btn_str == card_:
self.win +=1
self.current_card +=1
val_= self.card_count.get()
val_ -=1
self.card_count.set(val_)
win= {val_:('yes',card_,None)}
self.outcome_answer.update(win)
else:
self.loss +=1
self.current_card +=1
val_= self.card_count.get()
val_ -=1
self.card_count.set(val_)
loss= {val_:('no',card_,btn_str)}
self.outcome_answer.update(loss)
if val_ == 0:
self.guess_card.place_forget()
self.cards_left.place_forget()
txt_win_loss= 'Correct: {} Wrong: {}'.format(self.win, self.loss)
percentage= 'Percentage correct: {:.2%}'.format(self.win/25)
self.canvas.create_text(400,600,text=txt_win_loss,
font=self.my_font, tag='end_game')
self.canvas.create_text(400,670,text=percentage,
font=self.my_font,tag='end_game')
self.print_result()
def print_result(self):
print("key y/n actual your guess")
for key in self.outcome_answer:
val= self.outcome_answer[key]
if val[0] == 'yes':
print(key,val[0],val[1])
else:
print(key,val[0], val[1],val[2])
def check_guess(self, picname):
pn= picname[:-4]  # drop the '.png' suffix (str.strip removes a character set, not a suffix)
p_n= pn.replace('_',' ')
guess_card= self.working_deck[self.current_card]
if pn == 'Yellow_Circle':
val=self.circle_count.get()
val +=1
self.circle_count.set(val)
if val == 5:
self.zener_buttons[0].config(state='disabled')
self.compare_card(p_n,guess_card)
elif pn == 'Red_Plus':
val= self.plus_count.get()
val +=1
self.plus_count.set(val)
if val == 5:
self.zener_buttons[1].config(state='disabled')
self.compare_card(p_n,guess_card)
elif pn == 'Blue_Waves':
val= self.wave_count.get()
val +=1
self.wave_count.set(val)
if val == 5:
self.zener_buttons[2].config(state='disabled')
self.compare_card(p_n,guess_card)
elif pn == 'Black_Square':
val= self.square_count.get()
val +=1
self.square_count.set(val)
if val == 5:
self.zener_buttons[3].config(state='disabled')
self.compare_card(p_n,guess_card)
elif pn == 'Green_Star':
val= self.star_count.get()
val +=1
self.star_count.set(val)
if val == 5:
self.zener_buttons[4].config(state='disabled')
self.compare_card(p_n,guess_card)
def get_entry(self):
self.btn.config(state='disabled')
self.working_deck= self.make_deck()
self.make_card_buttons()
if __name__ == '__main__':
root= Tk()
Zener_Test(root)
root.mainloop()
``` |
{
"source": "JoeFrost2884/it-4320-project3a",
"score": 4
} |
#### File: it-4320-project3a/flask_wtforms_tutorial/charts.py
```python
import requests
from datetime import datetime
from datetime import date
import pygal
import json
#api docs https://www.alphavantage.co/documentation/
def getData(time_series,symbol,chart_type,start_date,end_date):
api_key = "<KEY>"
apistring = "https://www.alphavantage.co/query?function="
if (time_series == "1"):
time_series = "TIME_SERIES_INTRADAY"
time = "Time Series (30min)"
elif (time_series == "2"):
time_series = "TIME_SERIES_DAILY"
time = "Time Series (Daily)"
elif (time_series == "3"):
time_series = "TIME_SERIES_WEEKLY"
time = "Weekly Time Series"
elif (time_series == "4"):
time_series = "TIME_SERIES_MONTHLY"
time = "Monthly Time Series"
apistring = (apistring + (time_series + "&symbol=" + symbol))
if (time_series == "TIME_SERIES_INTRADAY"):
apistring += "&interval=30min"
apistring = apistring + ("&apikey=" + api_key)
data = requests.get(apistring).json()
# variables for data transfer to lists.
x = 0
newdata = {}
datedata = []
newdate = []
opendata = []
highdata =[]
lowdata = []
closeddata = []
try:
for key, value in data[time].items():
datedata = list(data[time].keys())
x+=1
holder = {x : value}
newdata.update(holder)
except KeyError:
err = "error"
return err
for i in range(0, len(datedata)):
if(datedata[i] >= start_date and datedata[i] <= end_date):
newdate.append(datedata[i])
opendata.append(newdata[i + 1]['1. open'])
highdata.append(newdata[i + 1]['2. high'])
lowdata.append(newdata[i + 1]['3. low'])
closeddata.append(newdata[i + 1]['4. close'])
# method to convert data in list to float for chart data.
def convert(data):
for i in range(0, len(data)):
data[i] = float(data[i])
return data
# if statement to choose a line or bar chart and display onto default browser.
if (chart_type == '1'):
bar_chart = pygal.Bar(x_label_rotation = 70)
bar_chart.title = ('Stock Data for ' + symbol + ": " + start_date + ' to ' + end_date)
newdate.reverse()
bar_chart.x_labels = newdate
opendata.reverse()
bar_chart.add('Open', convert(opendata))
highdata.reverse()
bar_chart.add('High', convert(highdata))
lowdata.reverse()
bar_chart.add('Low', convert(lowdata))
closeddata.reverse()
bar_chart.add('Close', convert(closeddata))
return bar_chart.render_data_uri()
if (chart_type == '2'):
line_chart = pygal.Line(x_label_rotation = 70)
line_chart.title = ('Stock Data for ' + symbol + ": " + start_date + ' to ' + end_date)
newdate.reverse()
line_chart.x_labels = newdate
opendata.reverse()
line_chart.add('Open', convert(opendata))
highdata.reverse()
line_chart.add('High', convert(highdata))
lowdata.reverse()
line_chart.add('Low', convert(lowdata))
closeddata.reverse()
line_chart.add('Close', convert(closeddata))
return line_chart.render_data_uri()
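# Illustrative call (hypothetical values): daily prices for IBM rendered as a bar chart,
# returned as a data URI suitable for embedding in a template:
# chart_uri = getData("2", "IBM", "1", "2021-01-01", "2021-03-01")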
``` |
{
"source": "joe-fuentes/FitnessPal-by-AddamCoddington",
"score": 3
} |
#### File: FitnessPal-by-AddamCoddington/myfitnesspal/day.py
```python
from myfitnesspal.base import MFPBase
class Day(MFPBase):
def __init__(self, date, meals=None, goals=None, notes=None,
water=None, complete=False):
self._date = date
self._meals = meals
self._goals = goals
self._notes = notes
self._water = water
self._totals = None
self._complete = complete
def __getitem__(self, value):
for meal in self._meals:
if meal.name.lower() == value.lower():
return meal
raise KeyError("No meal named '%s' exists for this date" % value)
def keys(self):
keys = []
for meal in self.meals:
keys.append(meal.name)
return keys
@property
def meals(self):
return self._meals
@property
def complete(self):
return self._complete
@property
def entries(self):
for meal in self._meals:
for entry in meal.entries:
yield entry
@property
def totals(self):
if self._totals is None:
self._compute_totals()
return self._totals
@property
def goals(self):
return self._goals
@property
def date(self):
return self._date
@property
def notes(self):
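# note: _notes (and _water below) are presumably lazy callables supplied
# by the caller, hence the call syntax in these properties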
return self._notes()
@property
def water(self):
return self._water()
def get_as_dict(self):
return dict(
(m.name, m.get_as_list(), ) for m in self.meals
)
def _compute_totals(self):
totals = {}
for entry in self.entries:
for k, v in entry.nutrition_information.items():
if k not in totals:
totals[k] = v
else:
totals[k] += v
self._totals = totals
def __unicode__(self):
return u'%s %s' % (
self.date.strftime('%x'),
self.totals,
)
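# Illustrative usage (assumes a Day assembled by the myfitnesspal client):
# day['lunch'] # -> the Meal named 'lunch'
# day.keys() # -> list of meal names
# day.totals # -> nutrition totals summed over all entries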
``` |
{
"source": "joefutrelle/habcam-image-service",
"score": 3
} |
#### File: habcam-image-service/imgsrv/utils.py
```python
import re
from werkzeug.routing import BaseConverter
def parse_params(path):
"""Parse a path fragment and convert to a list of tuples.
Slashes separate alternating keys and values.
For example /a/3/b/5 -> [ ['a', '3'], ['b', '5'] ]."""
parts = re.split('/',path)
keys = parts[:-1:2]
values= parts[1::2]
return zip(keys,values)
class ParamsConverter(BaseConverter):
def __init__(self, url_map):
super(ParamsConverter, self).__init__(url_map)
self.regex = r'(([^/]+/[^/]+/)*[^/]+/[^/]+)'
def to_python(self, value):
return parse_params(value)
def to_url(self, value):
return value
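# Illustrative wiring (assumes a Flask/werkzeug app object; names are hypothetical):
# app.url_map.converters['params'] = ParamsConverter
# a rule like '/view/<params:p>' then passes e.g. [('a', '3'), ('b', '5')] to the view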
``` |
{
"source": "joefutrelle/ifcb-dashboard",
"score": 2
} |
#### File: alembic_dashboard/versions/16bb7a50388b_lat_lon_depth_tags_comments.py
```python
revision = '16bb7a50388b'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
eval("upgrade_%s" % engine_name)()
def downgrade(engine_name):
eval("downgrade_%s" % engine_name)()
def upgrade_dashboard():
### commands auto generated by Alembic - please adjust! ###
op.create_table('bin_comments',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('bin_id', sa.Integer(), nullable=True),
sa.Column('ts', sa.DateTime(timezone=True), nullable=True),
sa.Column('user_name', sa.String(), nullable=True),
sa.Column('comment', sa.String(), nullable=True),
sa.ForeignKeyConstraint(['bin_id'], ['bins.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index('ix_bin_comments_comment', 'bin_comments', ['comment'], unique=False)
op.create_table('bin_tags',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('bin_id', sa.Integer(), nullable=True),
sa.Column('tag', sa.String(), nullable=True),
sa.ForeignKeyConstraint(['bin_id'], ['bins.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('bin_id', 'tag')
)
op.create_index('ix_bin_tags_tag', 'bin_tags', ['tag'], unique=False)
op.add_column(u'bins', sa.Column('depth', sa.Numeric(), nullable=True))
op.add_column(u'bins', sa.Column('lat', sa.Numeric(), nullable=True))
op.add_column(u'bins', sa.Column('lon', sa.Numeric(), nullable=True))
op.create_index('ix_bins_depth', 'bins', ['depth'], unique=False)
op.create_index('ix_bins_lat', 'bins', ['lat'], unique=False)
op.create_index('ix_bins_lon', 'bins', ['lon'], unique=False)
### end Alembic commands ###
def downgrade_dashboard():
### commands auto generated by Alembic - please adjust! ###
op.drop_index('ix_bins_lon', 'bins')
op.drop_index('ix_bins_lat', 'bins')
op.drop_index('ix_bins_depth', 'bins')
op.drop_column(u'bins', 'lon')
op.drop_column(u'bins', 'lat')
op.drop_column(u'bins', 'depth')
op.drop_index('ix_bin_tags_tag', 'bin_tags')
op.drop_table('bin_tags')
op.drop_index('ix_bin_comments_comment', 'bin_comments')
op.drop_table('bin_comments')
### end Alembic commands ###
``` |
{
"source": "joefutrelle/ladder",
"score": 2
} |
#### File: joefutrelle/ladder/__init__.py
```python
import sys
import operator
import os
from urllib2 import urlopen
from StringIO import StringIO
from lxml import etree
import re
from glob import iglob
import fileinput
import csv
import json
import logging
import traceback
from ladder.utils import coalesce, memoize, search_path
from ladder.structs import structs
from ladder.jsonquery import jsonquery
class UnboundVariable(Exception):
pass
@memoize
def compile_regex(pattern):
return re.compile(pattern)
# foo.bar corresponds to an XPath of /namespace[@name='foo']/rule[@name='bar']
def find_names(e):
def descend(e,namespace=[]):
if e.tag=='namespace':
sub_ns = namespace + [e.get('name')]
for se in e:
for name, name_e in descend(se,namespace=sub_ns):
yield name, name_e
elif e.tag=='rule':
yield namespace + [e.get('name')], e
return dict(('.'.join(n),ne) for n, ne in descend(e))
LDR_INTERP_PATTERN = re.compile(r'([^\$]*)(\$\{([a-zA-Z0-9_.]+)\})')
LDR_WS_SEP_REGEX = r'\s+'
LDR_WS_SEP_PATTERN = re.compile(LDR_WS_SEP_REGEX)
# supports time-like regexes e.g., IFCB9_yyyy_YYY_HHMMSS
def timestamp2regex(pattern):
# FIXME handle unfortunate formats such as
# - non-zero-padded numbers
# - full and abbreviated month names
pattern = re.sub(r'(([0-9])\2*)',r'(?P<n\2>[0-9]+)',pattern) # fixed-length number eg 111 88
pattern = re.sub(r's+','(?P<sss>[0-9]+)',pattern) # milliseconds
pattern = re.sub(r'yyyy','(?P<yyyy>[0-9]{4})',pattern) # four-digit year
pattern = re.sub(r'mm','(?P<mm>0[1-9]|1[0-2])',pattern) # two-digit month
pattern = re.sub(r'dd','(?P<dd>0[1-9]|[1-2][0-9]|3[0-1])',pattern) # two-digit day of month
pattern = re.sub(r'YYY','(?P<YYY>[0-3][0-9][0-9])',pattern) # three-digit day of year
pattern = re.sub(r'HH','(?P<HH>[0-1][0-9]|2[0-3])',pattern) # two-digit hour
pattern = re.sub(r'MM','(?P<MM>[0-5][0-9])',pattern) # two-digit minute
pattern = re.sub(r'SS','(?P<SS>[0-5][0-9])',pattern) # two-digit second
pattern = re.sub(r'#','[0-9]+',pattern) # any string of digits (non-capturing)
pattern = re.sub(r'i','[a-zA-Z][a-zA-Z0-9_]*',pattern) # an identifier (e.g., jpg2000) (non-capturing)
pattern = re.sub(r'\.ext',r'(?:.(?P<ext>[a-zA-Z][a-zA-Z0-9_]*))',pattern) # a file extension
pattern = re.sub(r'\.',r'\.',pattern) # a literal '.'
pattern = re.sub(r'\\.','.',pattern) # a regex '.'
pattern = re.sub(r'any','.*',pattern) # a regex .*
return pattern
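# Worked example (illustrative): timestamp2regex('yyyy-mm-dd') produces a pattern
# such that re.match(pattern, '2014-07-01').groupdict() gives
# {'yyyy': '2014', 'mm': '07', 'dd': '01'}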
def flatten(dictlike, include=None, exclude=None):
# an appropriate copy operation. this WILL NOT COPY internal dicts or lists
result = dict(dictlike.items())
if include is not None:
for k in result.keys():
if k not in include:
del result[k]
if exclude is not None:
for k in result.keys():
if k in exclude:
del result[k]
return result
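# e.g. flatten({'a': 1, 'b': 2}, include=['a']) -> {'a': 1}
# flatten({'a': 1, 'b': 2}, exclude=['a']) -> {'b': 2}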
# substitute patterns like ${varname} for their values given
# scope = values for the names (dict-like)
# e.g., interpolate('${x}_${blaz}',{'x':'7','bork':'z','blaz':'quux'}) -> '7_quux'
#import jinja2
def interpolate(template,scope,fail_fast=True):
if not '$' in coalesce(template,''):
return template
s = StringIO()
end = 0
for m in re.finditer(LDR_INTERP_PATTERN,template):
end = m.end()
(plain, expr, key) = m.groups()
s.write(plain)
try:
s.write(scope[key])
except KeyError:
if fail_fast:
raise UnboundVariable(key)
s.write(template[end:])
interpolated = s.getvalue()
# interpolated = jinja2.Environment().from_string(interpolated).render(**scope.flatten())
return interpolated
## interpolate a template using Jinja2
#def interpolate(template,scope):
# return jinja2.Environment().from_string(template).render(**scope.flatten())
class ScopedExpr(object):
def __init__(self,elt,bindings={}):
self.elt = elt
self.bindings = bindings
def get(self,attr_name):
template = self.elt.get(attr_name)
if template is None:
return None
return interpolate(template, self.bindings)
def get_list(self,attr_name,delim=None):
delim = coalesce(delim, LDR_WS_SEP_PATTERN)
templates = re.split(delim, self.elt.get(attr_name))
return map(lambda t: interpolate(t,self.bindings), templates)
@property
def tag(self):
return self.elt.tag
def findall(self,tagname):
return self.elt.findall(tagname)
@property
def text(self):
return interpolate(self.elt.text, self.bindings)
@property
def raw_text(self):
return self.elt.text
def __iter__(self):
return self.elt.__iter__()
def __repr__(self):
return '<%s/>' % self.tag
def eval_test(value,op,test_value):
op_fn = getattr(operator,op)
try:
return op_fn(float(value),float(test_value))
except ValueError:
return op_fn(value,test_value)
return False
# utility to parse "vars" argument
def parse_vars_arg(expr,attr='vars'):
var_name_list = expr.get(attr)
if var_name_list:
return [var for var in re.split(LDR_WS_SEP_PATTERN,var_name_list) if var != '']
return None
# utility to parse single var argument, which always defaults to '_'
def parse_var_arg(expr,attr='var'):
return coalesce(expr.get(attr),'_')
# utility to parse a source file or url and get a stream
def parse_source_arg(expr):
url = expr.get('url')
file_path = coalesce(expr.get('file'),'-')
return url, file_path
def open_source_arg(url=None, file_arg=None):
if url is not None:
return urlopen(url)
elif file_arg is not None:
return open(file_arg)
else:
raise ValueError
# filter out (and optionally count) distinct solutions. if vars is specified,
# retain only those vars prior to testing for uniqueness.
# if expr is specified parse the 'distinct' argument from it
# to get the var list.
# if neither is specified, allow all solutions
def with_distinct(solution_generator,distinct=None):
distinct_solutions = set()
for raw_solution in solution_generator:
solution = flatten(raw_solution,distinct)
f_solution = frozenset(solution.items())
if f_solution not in distinct_solutions:
distinct_solutions.add(f_solution)
yield solution
# count is used to specify variable to hold the 1-based distinct/nondistinct
# solution count.
# nth is used to select a specific solution by solution number and ignore the rest
def with_count(solution_generator,count=None,nth=None):
c = 1
for s in solution_generator:
if count is not None:
s[count] = c
if nth is not None:
if c==nth:
yield s
return
else:
yield s
c += 1
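# e.g. with_count(iter([{'a': 1}, {'a': 2}]), count='n') yields
# {'a': 1, 'n': 1} then {'a': 2, 'n': 2}; nth=2 would yield only the second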
# apply aliasing to a solution generator.
# if expr is specified parse the "rename" and "as" arguments from
# it to get the aliases dict.
# if neither is specified allow all solutions
def with_aliases(solution_generator,aliases={}):
for raw_solution in solution_generator:
s = {}
for k,v in raw_solution.items():
try:
if k in aliases: s[aliases[k]] = v
else: s[k] = v
except KeyError:
raise KeyError('unbound variable %s' % k)
yield s
# apply block-level include/excludes
def with_inc_exc(solution_generator,include=None,exclude=None):
for raw_solution in solution_generator:
s = flatten(raw_solution,include,exclude)
yield s
# apply block-level modifications such as distinct, rename, include/exclude, count, and nth
def with_block(S,expr,bindings={}):
return with_post_block(with_pre_block(S,expr,bindings),expr,bindings)
# apply block-level modifications *before* the inner block produces solutions
def with_pre_block(S,expr,bindings={}):
bindings = flatten(bindings)
# include/exclude
include = parse_vars_arg(expr,'include')
exclude = parse_vars_arg(expr,'exclude')
if include is not None or exclude is not None:
S = with_inc_exc(S,include,exclude)
# rename/as
rename = parse_vars_arg(expr,'rename')
rename_as = parse_vars_arg(expr,'as')
try:
aliases = dict((o,n) for o,n in zip(rename,rename_as))
S = with_aliases(S,aliases)
except TypeError:
pass
# now yield from the stack of solution generators
for s in S:
yield s
# apply block-level modifiers *after* the inner block produces solutions
def with_post_block(S,expr,bindings={}):
bindings = flatten(bindings)
# distinct
distinct = parse_vars_arg(expr,'distinct')
if distinct is not None:
S = with_distinct(S,distinct)
# count
count = expr.get('count')
nth = expr.get('nth')
if nth is not None:
nth = int(nth)
if count is not None or nth is not None:
S = with_count(S,count,nth)
# now yield from the stack of solution generators
for s in S:
yield s
# evaluate a block of expressions using recursive descent to generate and filter
# solutions a la Prolog
def evaluate_block(exprs,bindings={},global_namespace={}):
# utility to parse arguments to match and split
def parse_match_args(expr,bindings,default_pattern='.*'):
if expr.get('value'):
value = expr.get('value')
else:
var_arg = parse_var_arg(expr)
try:
value = str(bindings[var_arg])
except KeyError: # the caller is attempting to match an unbound variable
raise UnboundVariable(var_arg)
timestamp = expr.get('timestamp')
if timestamp is not None:
pattern = timestamp2regex(timestamp)
else:
pattern = coalesce(expr.get('pattern'),default_pattern)
return pattern, value
# utility block evaluation function using this expression's bindings and global namespace
def local_block(exprs,inner_bindings={}):
bb = dict(bindings.items()) # an appropriate copy operation
bb.update(inner_bindings)
return evaluate_block(exprs,bb,global_namespace)
# utility recurrence expression establishes an inner scope and evaluates
# the remaining expressions (which will yield solutions to the head expression)
# usage: for s in rest(expr,bindings): yield s
def rest(expr,inner_bindings={}):
# this is where we have an opportunity to discard variables before recurring
# based on this expression
discard = set()
if expr.get('retain'):
discard = set(bindings.keys()).difference(set(parse_vars_arg(expr,'retain')))
for ss in local_block(exprs[1:],inner_bindings):
yield flatten(ss,exclude=discard)
# utility recurrence expression for unnamed block
# accepts either a solution generator which produces inner bindings for the inner block,
# or simply a single set of bindings (the outer bindings)
def inner_block(expr,inner_bindings={},solution_generator=None):
if solution_generator is None:
S = with_block(local_block(list(expr),inner_bindings),expr,bindings)
else:
# wrap the solution generator in with_block
def SS():
for s in with_pre_block(solution_generator,expr,bindings):
for ss in local_block(list(expr),s):
yield ss
S = with_post_block(SS(),expr,bindings)
# now recur
for s in S:
for ss in rest(expr,s):
yield ss
# terminal case; we have arrived at the end of the block with a solution, so yield it
if len(exprs)==0:
yield flatten(bindings)
return
# handle the first expression
# wrap in interpolation wrapper that interpolates all arguments
expr = ScopedExpr(exprs[0], bindings)
# The miss expression indicates no match has been found.
# So refuse to recur, will not yield any solutions
if expr.tag=='miss':
return
# The hit expression means a match has been found.
# So yield the current set of bindings.
# <hit/>
# it is also an implicit block supporting block-level modifiers,
# and generates a hit for every solution of that inner block
elif expr.tag=='hit':
for s in with_block(local_block(list(expr)),expr,bindings):
yield s
for ss in rest(expr,s):
yield ss
# Invoke means descend, once, into a named rule, evaluating it as a block,
# with the current bindings in scope, and recur for each of its solutions.
# options include filtering the input variables, including
# all block level operations e.g., distinct and rename/as
# <invoke rule="{name}" [using="{var1} {var2}"]/>
elif expr.tag=='invoke':
rule_name = expr.get('rule')
using = parse_vars_arg(expr,'using')
args = flatten(bindings,using)
S = invoke(rule_name,args,global_namespace)
for s in inner_block(expr,bindings,S):
yield s
# The var expression sets variables to interpolated values
# <var name="{name}">{value}</var>
# or
# <var name="{name}">
# <val>{value1}</val>
# <val>{value2}</val>
# </var>
elif expr.tag=='var':
var_name = parse_var_arg(expr,'name')
sub_val_exprs = expr.findall('val')
try:
if len(sub_val_exprs) == 0:
var_val = expr.text
for s in rest(expr,{var_name:var_val}):
yield s
else:
for sub_val_expr in sub_val_exprs:
var_val = ScopedExpr(sub_val_expr, bindings).text
for s in rest(expr,{var_name:var_val}):
yield s
except UnboundVariable, uv:
logging.warn('var %s: unbound variable in template "%s": %s' % (var_name, expr.raw_text, uv))
return # miss
# The vars expression is the plural of var, for multiple assignment
# with any regex as a delimiter between variable values.
# <vars names="{name1} {name2} [delim="{delim}"]>{value1}{delim}{value2}</vars>
# or
# <vars names="{name1} {name2} [delim="{delim}"]>
# <vals>{value1}{delim}{value2}</vals>
# <vals>{value1}{delim}{value2}</vals>
# </vars>
elif expr.tag=='vars':
try:
var_names = re.split(LDR_WS_SEP_PATTERN,expr.get('names'))
sub_val_exprs = expr.findall('vals')
delim = coalesce(expr.get('delim'),LDR_WS_SEP_PATTERN)
if len(sub_val_exprs) == 0:
var_vals = map(lambda t: interpolate(t,bindings), re.split(delim,expr.raw_text))
for s in rest(expr,dict(zip(var_names,var_vals))):
yield s
else:
for sub_val_expr in sub_val_exprs:
sub_val_expr = ScopedExpr(sub_val_expr, bindings)
var_vals = map(lambda t: interpolate(t,bindings), re.split(delim,sub_val_expr.raw_text))
for s in rest(expr,dict(zip(var_names,var_vals))):
yield s
except UnboundVariable, uv:
logging.warn('vars: unbound variable %s' % uv)
return # miss
# all is a conjunction. it is like an unnamed namespace block
# and will yield any solution that exists after all exprs are evaluated
# in sequence
# <all>
# {expr1}
# {expr2}
# ...
# {exprn}
# </all>
elif expr.tag=='all':
for s in inner_block(expr):
yield s
# any is a disjunction. it will yield all solutions of each expr
# <any>
# {expr1}
# {expr2}
# ...
# {exprn}
# </any>
# first is like any except it only yields the solutions of the first clause that produces any
# <first>
# {expr1}
# {expr2}
# ...
# {exprn}
# </first>
elif expr.tag in ('any','first'):
for sub_expr in list(expr): # iterate over subexpressions
done = False
for s in local_block([sub_expr]): # treat each one as a block
for ss in rest(expr,s): # and recur for each of its solutions
done = True
yield ss
if done and expr.tag=='first': # if all we want is the first subexpr
return # then stop
# none is negation. if the enclosed block generates any solutions,
# this will generate a miss rather than a hit. otherwise it will recur.
# <none>
# {expr1}
# {expr2}
# ...
# {exprn}
# </none>
elif expr.tag=='none':
for s in inner_block(expr):
return # miss
# if we fell through, there were no solutions
for s in rest(expr):
yield s
# log interpolates its text and prints it. useful for debugging
# <log>{template}</log>
elif expr.tag=='log':
print expr.text
for s in rest(expr):
yield s
# match generates solutions for every regex match
# <match [pattern="{regex}"|timestamp="{date pattern}"] [value="{template}"|var="{variable to match}"] [groups="{name1} {name2}"] [optional="true/false"/>
# if "value" is specified, the template is interpolated and then matched against,
# if "var" is specified, the variable's value is looked up and then matched.
# var="foo" is equivalent to value="${foo}"
# if pattern is not specified the default pattern is ".*"
# match also acts as an implicit, unnamed block supporting block-level modifiers.
elif expr.tag=='match':
optional = expr.get('optional')
optional = optional is not None and optional in ['true', 'True', 'yes', 'Yes']
m = False
try:
pattern, value = parse_match_args(expr,bindings,'.*')
group_name_list, group_names = expr.get('groups'), []
if group_name_list:
group_names = re.split(LDR_WS_SEP_PATTERN,group_name_list)
p = compile_regex(pattern)
m = p.match(value)
except UnboundVariable, uv:
if not optional:
logging.warn('match: unbound variable %s' % uv)
return # miss
if m:
groups = m.groups()
named_ixs = p.groupindex.values()
groups_minus_named = [n for n in range(len(groups)) if n+1 not in named_ixs]
inner_bindings = {}
# print 'pattern = %s' % pattern
# print 'groups = %s' % (groups,)
# print 'group names = %s' % group_names
# print 'named_ixs = %s' % named_ixs
# print 'gmn = %s' % groups_minus_named
# print 'groupindex = %s' % p.groupindex
# print 'groupdict = %s' % m.groupdict()
# bind user-specified groups to group names
for name,n in zip(group_names, groups_minus_named):
try:
if groups[n] is not None:
inner_bindings[name] = groups[n]
except IndexError:
pass # innocuous
# bind pattern-specified groups to group names
for name,group in m.groupdict().items():
if group is not None:
inner_bindings[name] = group
# now invoke the (usually empty) inner unnamed block and recur
for s in inner_block(expr,inner_bindings):
yield s
elif optional:
for s in rest(expr):
yield s
else:
return # miss
# test performs equality and inequality tests over strings and numbers
# <test [var={var}|value={template}] [eq|gt|lt|ge|le|ne]={template}/>
# and is also an implicit block.
elif expr.tag=='test':
try:
var = expr.get('var')
value = expr.get('value')
if value is None:
value = bindings[var]
except KeyError:
logging.warn('test: unbound variable %s' % var)
return # miss
except UnboundVariable, uv:
logging.warn('test: unbound variable %s' % uv)
return # miss
op = coalesce(*[a for a in ['eq','gt','lt','ge','le','ne'] if expr.get(a) is not None])
tv = expr.get(op)
if eval_test(value,op,tv): # hit
for s in inner_block(expr):
yield s
else:
return # miss
# split iterates over the result of splitting a value by a regex, assigning it repeatedly
# to the variable specified by group. like match it recurs and is also an implicit block
# <split [var="{var}"|value="{template}"] [pattern="{pattern}"] [group="{name}|vars="{name1} {name}"]/>
# if pattern is not specified the default pattern is " *"
# alternatively one can use split to do multiple assignment, as in this example
# <split value="foo bar baz" vars="a b c"/>
# which will set a=foo, b=bar, c=baz
elif expr.tag=='split':
try:
pattern, value = parse_match_args(expr,bindings,default_pattern=LDR_WS_SEP_REGEX)
except UnboundVariable, uv:
logging.warn('split: unbound variable %s' % uv)
return # miss
var_names = parse_vars_arg(expr)
group = expr.get('group')
if group:
def S():
for val in re.split(pattern,value):
yield {group: val}
# now invoke the (usually empty) inner unnamed block and recur
for s in inner_block(expr,solution_generator=S()):
yield s
elif var_names:
inner_bindings = dict((n,v) for n,v in zip(var_names, re.split(pattern,value)))
# now invoke the (usually empty) inner unnamed block and recur
for s in inner_block(expr,inner_bindings):
yield s
# path checks for the existence of files in the local filesystem and yields a hit if it does
# <path match="{template}" [var="{name}"]/>
# it is also an implicit anonymous block.
# if template expands to a nonexistent filename it will be attempted as a glob, which will then
# produce solutions binding to the named var for each glob match
elif expr.tag=='path':
try:
match_expr = coalesce(expr.get('match'),'')
except UnboundVariable, uv:
logging.warn('path: unbound variable %s' % uv)
return # pass
if os.path.exists(match_expr) and os.path.isfile(match_expr):
inner_bindings = {parse_var_arg(expr): match_expr}
# hit; recur on inner block
for s in inner_block(expr,inner_bindings):
yield s
else:
def S():
for glob_hit in sorted(list(iglob(match_expr))):
yield {parse_var_arg(expr): glob_hit}
for s in inner_block(expr,solution_generator=S()):
yield s
# read produces each line of a specified source as solution bound to the given var.
# if no var is specified each line is bound to a variable named '_'
# <lines [file="{filename}|url="{url}"] [var="{name}"]/>
# and is also an implicit block. if no file is specified stdin is read
elif expr.tag=='lines':
var_name = parse_var_arg(expr)
url, file_path = parse_source_arg(expr)
if url is not None:
iterable = urlopen(url)
else:
iterable = fileinput.input(file_path)
def S():
for raw_line in iterable:
yield {var_name: raw_line.rstrip()}
for s in inner_block(expr,solution_generator=S()):
yield s
# csv reads CSV data from a source to bind selected variables.
# <csv [file="{filename}|url="{url}"] [vars="{name1} {name2}"]/>
# if no vars are specified the CSV data must have a header row
# and those headers will be used as variable names
elif expr.tag=='csv':
vars = parse_vars_arg(expr)
url, file_path = parse_source_arg(expr)
stream = open_source_arg(url, file_path)
reader = csv.DictReader(stream,vars)
def S():
for s in reader:
yield flatten(s,vars)
for ss in inner_block(expr,solution_generator=S()):
yield ss
# <json var={name} [select={query}] [file={pathname}|url={url}|from={name}]/>
elif expr.tag=='json':
url, file_path = parse_source_arg(expr)
select = expr.get('select')
from_arg = expr.get('from')
var = expr.get('var') # important: don't use parse_var_arg so not to default to _
if from_arg is not None:
parsed = bindings[from_arg]
else:
parsed = json.load(open_source_arg(url, file_path))
if select is None and var is not None:
for ss in inner_block(expr,{var:parsed}):
yield ss
else:
def S():
for result in jsonquery(parsed, select):
if var is None:
yield result
else:
yield {var: result}
for ss in inner_block(expr,solution_generator=S()):
yield ss
# all other tags are no-ops, but because this a block will recur
# to subsequent expressions
# FIXME change this behavior
else:
for s in rest(expr):
yield s
# invoke a named rule
def invoke(name,bindings={},global_namespace={}):
try:
expr = global_namespace[name]
except KeyError:
logging.warn('invoke: no such rule %s' % name)
return
if expr.tag == 'rule':
# enforce required variables
uses = parse_vars_arg(expr,'uses')
if uses is not None:
for u in uses:
if u not in bindings:
logging.warn('invoke: missing variable in uses: %s' % u)
return
# generate block-level solutions (pre-filter)
raw_block = evaluate_block(list(expr),bindings,global_namespace=global_namespace)
# now filter the solutions with block-level modifiers
for solution in with_block(raw_block,expr,bindings):
yield solution
else:
logging.warn('invoke: %s is not a rule' % name)
def parse(*ldr_streams):
namespace = {}
for ldr_stream in ldr_streams:
try:
xml = etree.parse(ldr_stream).getroot()
except etree.XMLSyntaxError:
raise
except:
xml = etree.fromstring(ldr_stream)
# first, strip comments
for c in xml.xpath('//comment()'):
p = c.getparent()
p.remove(c)
namespace.update(find_names(xml).items())
return namespace
class Resolver(object):
def __init__(self,*files):
self.namespace = parse(*files)
self._add_positional_functions()
def invoke(self,_name,**bindings):
for s in invoke(_name,bindings,self.namespace):
yield s
def as_function(self,name):
def _fn(**bindings):
return self.invoke(name,**bindings)
return _fn
def as_positional_function(self,name):
e = self.namespace[name]
uses = coalesce(parse_vars_arg(e,'uses'),[]) # FIXME encapsulation violation
def _fn(*args,**bindings):
kw = dict(zip(uses,args))
kw.update(bindings)
return self.invoke(name,**kw)
return _fn
def _add_positional_functions(self):
"""decorate this object so that if you call R.foo.bar.baz
with positional arguments it will invoke 'foo.bar.baz' eg
foo.bar.baz that is using x and y, that you can invoke it
r.foo.bar.baz(x,y)"""
obj = self
for name in sorted(self.namespace,key=lambda k: len(k)):
level = obj
parts = re.split(r'\.',name)
for part in parts[:-1]:
if not getattr(level, part, None):
setattr(level, part, lambda _: None)
level = getattr(level, part)
setattr(level, parts[-1], self.as_positional_function(name))
# utilities for finding resolvers on the Python path
@memoize()
def locate_resolver(relative_path):
return search_path(relative_path)
def get_resolver(*relative_paths):
locations = [locate_resolver(p) for p in relative_paths]
return Resolver(*locations)
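# Illustrative usage (hypothetical rule file defining a rule named 'foo.bar'):
# r = Resolver('rules.xml')
# for solution in r.invoke('foo.bar', x='7'):
# print solution # Python 2 print statement, matching the rest of this module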
``` |
{
"source": "joefutrelle/pyalgomidi",
"score": 2
} |
#### File: joefutrelle/pyalgomidi/brownian_controllers.py
```python
from itertools import chain
from time import time, sleep
from random import random, gauss, shuffle
PORT=2
import rtmidi as rt
def gen_all_off(now):
return ((now, tuple(chain(*((0x90, n, 0) for n in range(127))))),)
def next(prev):
prev += gauss(0,3)
return max(0,min(prev,127))
value = [[random() * 127 for n in range(16)] for controller in range(64)]
def emitter():
source = rt.MidiOut()
avail_ports = source.get_ports()
for i,p in zip(range(len(avail_ports)),avail_ports):
print (i,p)
if avail_ports:
source.open_port(PORT)
else:
source.open_virtual_port('my virt port')
sleep(4)
channels = list(range(1)) # lists, since shuffle mutates its argument in place
controllers = list(range(12))
while True:
shuffle(channels)
shuffle(controllers)
for channel in channels:
for controller in controllers:
value[controller][channel] = next(value[controller][channel])
source.send_message((0xB0 | channel, controller, int(value[controller][channel])))
sleep(0.15)
if __name__=='__main__':
emitter()
``` |
{
"source": "joefutrelle/pyifcb",
"score": 3
} |
#### File: ifcb/data/files.py
```python
import os
from functools import lru_cache
import pandas as pd
from .identifiers import Pid
from .adc import AdcFile, AdcFragment
from .hdr import parse_hdr_file
from .roi import RoiFile
from .utils import BaseDictlike, CaseInsensitiveDict
from .bins import BaseBin
DEFAULT_BLACKLIST = ['skip','beads']
DEFAULT_WHITELIST = ['data']
class Fileset(object):
"""
Represents a set of three raw data files
"""
def __init__(self, basepath):
"""
:param basepath: the base path of the files (no extension)
"""
self.basepath = basepath
@property
def adc_path(self):
"""
The path of the ``.adc`` file.
"""
return self.basepath + '.adc'
@property
def hdr_path(self):
"""
The path of the ``.hdr`` file.
"""
return self.basepath + '.hdr'
@property
def roi_path(self):
"""
The path of the ``.roi`` file.
"""
return self.basepath + '.roi'
@property
@lru_cache()
def pid(self):
"""
A ``Pid`` representing the bin PID
"""
return Pid(os.path.basename(self.basepath))
@property
def lid(self):
"""
The bin's LID
"""
return self.pid.bin_lid
def exists(self):
"""
Checks for existence of all three raw data files.
:returns bool: whether or not all files exist
"""
if not os.path.exists(self.adc_path):
return False
if not os.path.exists(self.hdr_path):
return False
if not os.path.exists(self.roi_path):
return False
return True
# metrics
def getsizes(self):
"""
Get the sizes of the files.
:returns dict: sizes of files with keys
'hdr', 'adc', and 'roi'
"""
hdr_size = os.path.getsize(self.hdr_path)
adc_size = os.path.getsize(self.adc_path)
roi_size = os.path.getsize(self.roi_path)
return {
'hdr': hdr_size,
'adc': adc_size,
'roi': roi_size
}
def getsize(self):
"""
Get the total size of all three files.
:returns int: the total size of all three files
"""
return sum(self.getsizes().values())
def as_bin(self):
"""
:returns: a Bin view of this fileset.
"""
return FilesetBin(self)
def __repr__(self):
return '<IFCB Fileset %s>' % self.basepath
def __str__(self):
return self.basepath
# bin interface to Fileset
class FilesetBin(BaseBin):
"""
Bin interface to Fileset.
Context manager support opens and closes the ``.roi`` file for image
access.
"""
def __init__(self, fileset):
"""
:param fileset: the ``Fileset`` to represent
"""
self.fileset = fileset
self.adc_file = AdcFile(fileset.adc_path)
self.roi_file = RoiFile(self.adc_file, fileset.roi_path)
# oo interface to fileset
@property
@lru_cache()
def hdr_attributes(self):
"""
A ``dict`` representing the headers
"""
return parse_hdr_file(self.fileset.hdr_path)
@property
def timestamp(self):
"""
The bin's timestamp (as a ``datetime``)
"""
return self.pid.timestamp
def to_hdf(self, hdf_file, group=None, replace=True, archive=False):
"""
Convert the fileset to HDF.
:param hdf_file: the root HDF file pathname or
object (``h5py.File`` or ``h5py.Group``) in which to write all raw data
:param group: a path below the sub-group
to use
:param replace: whether to replace any existing data
at that location in the HDF file
:param archive: whether to include the full text of the .hdr
and .roi files
"""
from .hdf import filesetbin2hdf
filesetbin2hdf(self, hdf_file, group=group, replace=replace, archive=archive)
# bin interface
@property
def pid(self):
"""
The bin's PID
"""
return self.fileset.pid
@property
def schema(self):
"""
The bin's schema
"""
return self.adc_file.schema
@property
def images(self):
"""
The images
"""
return self.roi_file
@property
def headers(self):
"""
The header dict
"""
return self.hdr_attributes
def header(self, key):
ci_dict = CaseInsensitiveDict(self.hdr_attributes)
return ci_dict[key]
@property
def adc(self):
"""
The bin's ADC data as a ``pandas.DataFrame``
"""
return self.adc_file.csv
# context manager implementation
def isopen(self):
"""
Is the ``.roi`` file open?
"""
return self.roi_file.isopen()
def close(self):
"""
Close the ``.roi`` file, if it is open.
"""
if self.isopen():
self.roi_file.close()
def __enter__(self):
if not self.isopen():
self.roi_file._open()
return self
def __exit__(self, *args):
self.close()
# support for single image reading
def as_single(self, target):
"""Return a new FilesetBin that only provides access to
a single target. If called immediately upon construction
(before accessing any data) this will avoid parsing the
entire ADC file. Otherwise it will raise ValueError."""
if self.isopen():
raise ValueError('as_single must be called before opening FilesetBin')
return FilesetFragmentBin(self.fileset, target)
def __repr__(self):
return '<FilesetBin %s>' % self
def __str__(self):
return self.fileset.__str__()
# special fileset bin subclass for reading one image fast
class FilesetFragmentBin(FilesetBin):
def __init__(self, fileset, target):
self.fileset = fileset
self.adc_file = AdcFragment(fileset.adc_path, target, target+2)
self.roi_file = RoiFile(self.adc_file, fileset.roi_path)
# listing and finding raw filesets and associated bin objects
def validate_path(filepath, blacklist=DEFAULT_BLACKLIST, whitelist=DEFAULT_WHITELIST):
"""
Validate an IFCB raw data file path.
A well-formed raw data file path relative to some root
only contains path components that are
not blacklisted and either
either whitelisted or part of the file's basename (without
extension).
:param filepath: the pathname of the file
:param blacklist: directory names to ignore
:param whitelist: directory names to include, even if they
do not match the path's basename
:returns bool: if the pathname is valid
"""
if not set(blacklist).isdisjoint(set(whitelist)):
raise ValueError('whitelist and blacklist must be disjoint')
dirname, basename = os.path.split(filepath)
lid, ext = os.path.splitext(basename)
components = dirname.split(os.sep)
for c in components:
if c in blacklist:
return False
if c not in whitelist and c not in lid:
return False
return True
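# e.g. validate_path('data/D20150101/D20150101T000000_IFCB101.adc') -> True
# ('data' is whitelisted and 'D20150101' appears in the basename's lid)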
def list_filesets(dirpath, blacklist=DEFAULT_BLACKLIST, whitelist=DEFAULT_WHITELIST, sort=True, validate=True):
"""
Iterate over entire directory tree and yield a Fileset
object for each .adc/.hdr/.roi fileset found. Warning: for
large directories, this is slow.
:param blacklist: list of directory names to ignore
:param whitelist: list of directory names to include, even if they
do not match a file's basename
:param sort: whether to sort output (sorts by alpha)
:param validate: whether to validate each path
"""
if not set(blacklist).isdisjoint(set(whitelist)):
raise ValueError('whitelist and blacklist must be disjoint')
for dp, dirnames, filenames in os.walk(dirpath):
# prune blacklisted directories in place so os.walk skips them
# (removing items while iterating over the same list skips entries)
dirnames[:] = [d for d in dirnames if d not in blacklist]
if sort:
dirnames.sort(reverse=True)
filenames.sort(reverse=True)
for f in filenames:
basename, extension = f[:-4], f[-3:]
if extension == 'adc' and basename+'.hdr' in filenames and basename+'.roi' in filenames:
if validate:
reldir = dp[len(dirpath)+1:]
if not validate_path(os.path.join(reldir,basename), whitelist=whitelist, blacklist=blacklist):
continue
yield dp, basename
def list_data_dirs(dirpath, blacklist=DEFAULT_BLACKLIST, sort=True, prune=True):
"""
Yield the paths of any descendant directories that contain at least
one ``.adc`` file.
:param blacklist: list of directory names to ignore
:param sort: whether to sort output (sorts by alpha)
:param prune: whether, given a dir with an ``.adc`` file in it, to skip
subdirectories
"""
dirlist = os.listdir(dirpath)
if sort:
dirlist.sort()
for name in dirlist:
if name[-3:] == 'adc':
yield dirpath
if prune:
return
for name in dirlist:
if name not in blacklist:
child = os.path.join(dirpath,name)
if os.path.isdir(child):
yield from list_data_dirs(child, sort=sort, prune=prune)
def find_fileset(dirpath, lid, whitelist=['data'], blacklist=['skip','beads']):
"""
Find a fileset anywhere below the given directory path
given the bin's lid. This assumes that the file's path
is valid.
:returns Fileset: the ``Fileset``, or ``None`` if it is not found.
"""
dirlist = os.listdir(dirpath)
for name in dirlist:
if name == lid + '.adc':
basepath = os.path.join(dirpath,lid)
return Fileset(basepath)
elif name in whitelist or name in lid:
# is the name whitelisted or contains part of the lid?
fs = find_fileset(os.path.join(dirpath,name), lid, whitelist=whitelist, blacklist=blacklist)
if fs is not None:
return fs
# not found
return None
class DataDirectory(object):
"""
Represents a directory containing IFCB raw data.
Provides a dict-like interface allowing access to FilesetBins by LID.
"""
def __init__(self, path='.', whitelist=DEFAULT_WHITELIST, blacklist=DEFAULT_BLACKLIST, filter=lambda x: True):
"""
:param path: the path of the data directory
:param whitelist: a list of directory names to allow
:param blacklist: a list of directory names to disallow
"""
self.path = path
self.whitelist = whitelist
self.blacklist = blacklist
self.filter = filter
def list_filesets(self):
"""
Yield all filesets.
"""
for dirpath, basename in list_filesets(self.path, whitelist=self.whitelist, blacklist=self.blacklist):
basepath = os.path.join(dirpath, basename)
fs = Fileset(basepath)
if self.filter(fs):
yield fs
def find_fileset(self, lid):
"""
Locate a fileset by LID. Returns None if it is not found.
:param lid: the LID to search for
:type lid: str
:returns Fileset: the fileset, or None if not found
"""
fs = find_fileset(self.path, lid, whitelist=self.whitelist, blacklist=self.blacklist)
if fs is None:
return None
elif self.filter(fs):
return fs
def __iter__(self):
# yield from list_filesets called with no keyword args
for fs in self.list_filesets():
yield FilesetBin(fs)
def has_key(self, lid):
# fast contains method that avoids iteration
return self.find_fileset(lid) is not None
def __getitem__(self, lid):
fs = self.find_fileset(lid)
if fs is None:
raise KeyError('No fileset for %s found at or under %s' % (lid, self.path))
return FilesetBin(fs)
def __len__(self):
"""warning: for large datasets, this is very slow"""
return sum(1 for _ in self)
# subdirectories
def list_descendants(self, **kw):
"""
Find all 'leaf' data directories and yield ``DataDirectory``
objects for each one. Note that this enforces blacklisting
but not whitelisting (no fileset path validation is done).
Accepts ``list_data_dirs`` keywords, except ``blacklist`` which
takes on the value given in the constructor.
"""
for dd in list_data_dirs(self.path, blacklist=self.blacklist, **kw):
yield DataDirectory(dd)
def __repr__(self):
return '<DataDirectory %s>' % self.path
def __str__(self):
return self.path
# filters for DataDirectory
def time_filter(start='1970-01-01', end='3000-01-01'):
start = pd.to_datetime(start, utc=True)
end = pd.to_datetime(end, utc=True)
def inner(fs):
ts = fs.pid.timestamp
return ts >= start and ts < end
return inner
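# Illustrative usage (hypothetical directory path):
# dd = DataDirectory('/data/ifcb', filter=time_filter(start='2017-01-01'))
# for b in dd: # b is a FilesetBin
# with b: # opens the .roi file for image access
# print(b.pid)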
``` |
{
"source": "JoeGakumo21/Instagram",
"score": 2
} |
#### File: Instagram/myinstagram/views.py
```python
from django.http.response import HttpResponseRedirect
from django.shortcuts import render,redirect
from .models import Post,Comment,Like
from .forms import CreateUserForm,PostForm,CommentForm
from django.contrib.auth.forms import UserCreationForm
from django.utils import timezone
from datetime import datetime
from django.contrib import messages
from django.contrib.auth import authenticate,login,logout
def post(request):
posts = Post.objects.all().filter(created_date__lte = timezone.now()).order_by('-created_date')
user = request.user
return render(request,'all-in-one/post.html',{'posts':posts,'user':user})
# login code goes here
def loginPage(request):
# form= UserCreationForm()
if request.method == "POST":
username=request.POST.get('username')
password=<PASSWORD>('password')
user=authenticate(request,username=username, password=password )
if user is not None:
login(request, user)
return redirect('post')
else:
messages.warning(request,'Username Or Password is incorrect')
context={}
return render(request,'all-in-one/login.html', context)
def logoutUser(request):
logout(request)
return redirect('login')
def registerPage(request):
form= CreateUserForm()
if request.method=='POST':
form=CreateUserForm(request.POST)
if form.is_valid():
form.save()
holderofaccount=form.cleaned_data.get('username')
messages.success(request,'Account was created for ' + holderofaccount)
return redirect('login')
context={'form':form}
return render(request,'all-in-one/register.html', context)
def create_post(request):
current_user = request.user
if request.method == "POST":
form = PostForm(request.POST,request.FILES)
if form.is_valid():
post = form.save(commit= False)
post.author = current_user
post.save()
return redirect('post')
else:
form = PostForm()
return render(request,'all-in-one/newpost.html',{'form':form})
#likes goes here
def like_post(request):
user = request.user
if request.method == 'POST':
post_id = request.POST.get('post_id')
joepost = Post.objects.get(id= post_id)
if user in joepost.liked.all():
joepost.liked.remove(user)
else:
joepost.liked.add(user)
like, created = Like.objects.get_or_create(user=user, post_id = post_id)
if not created:
if like.value == 'Like':
like.value = 'Unlike'
else:
like.value = 'Like'
like.save()
return redirect('post')
# comments
def add_comment(request,pk):
post = Post.objects.get(pk = pk)
form = CommentForm(request.POST,instance=post)
if request.method == "POST":
if form.is_valid():
name = request.user.username
body = form.cleaned_data['body']
comment_content = Comment(post=post,name=name ,body=body,date=datetime.now())
comment_content.save()
return redirect('post')
else:
print('form is invalid')
else:
form = CommentForm()
context = {
'form':form
}
return render(request,'all-in-one/comments.html',context)
def search_results(request):
if 'author' in request.GET and request.GET["author"]:
search_term = request.GET.get("author")
searched_articles = Post.search_category(search_term)
message = f"{search_term}"
return render(request, 'search.html',{"message":message,"categories": searched_articles})
else:
message = "You haven't searched for any term"
return render(request, 'search.html',{"message":message})
``` |
{
"source": "JoeGakumo21/Newspaper-App-Flask",
"score": 3
} |
#### File: Newspaper-App-Flask/app/news.py
```python
# class Allarticles:
# '''
# method to get all the articles
# '''
# def __init__(self,author,title,description,url,urlToImage,publisherAt,content):
# '''
# method to display the objects
# '''
# self.author=author
# self.title=title
# self.description=description
# self.url=url
# self.urlToImage=urlToImage
# self.publisherAt=publisherAt
# self.content=content
```
#### File: app/tests/news_test.py
```python
import unittest
from app.model import News
# News =news.News
class NewsTest(unittest.TestCase):
'''
method to test the news class
'''
def setUp(self):
'''
setup that runs before every test method in the class
'''
self.new_news=News("12,MacRumors,Deals: Get the Lowest Price on the 2021 32GB Apple TV 4K, Available for $169 ($10 Off),https://www.macrumors.com/2021/09/08/dutch-bank-de-volksbank-apple-pay-support/,https://images.macrumors.com/t/tzJhmO9DcRnY3jtlKgvHUk2kZfQ=/2500x/https://images.macrumors.com/article-new/2021/04/apple-tv-4k-design-clue.jpg,2021-09-08T17:29:02Z,Today we're tracking the best discount seen to date on the 2021 Apple TV 4K, which provides up to $10 in savings on the 32GB and 64GB models. Starting with the 32GB Apple TV 4K, you can get this mode… [+838 chars]",)
def test_instance(self):
'''
test to check for the instances when created
'''
self.assertTrue(isinstance(self.new_news,News))
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "JoeGakumo21/Password-Locker",
"score": 4
} |
#### File: Password-Locker/Users/user.py
```python
from .userDetails import read_from_file, write_to_file
data_file = "users.csv"
class User:
'''
A user class initialization
'''
users= []
def __init__(self,username, password) :
"""
Args:
Username(string)
password(string)
"""
self.username=username
self.password=password
#method to save user details
def save_user(self):
'''
saving Users
'''
write_to_file(data_file, self)
User.users.append(self)
#method to display username in string
def __repr__(self) -> str:
return f"{self.username}"
@classmethod
def show_user(cls):
return cls.users
@classmethod
def find_by_username(cls,username):
'''
method to find the user using username
'''
for user in cls.users:
if user.username==username:
return user
@classmethod
def find_user(cls):
'''
method to find user in the list
'''
return cls.users
@classmethod
def delete_user(cls,username):
'''
method to delete an existing user
'''
for user in cls.users:
if user.username==username:
cls.users.remove(user)
return True
return False
def del_user(self):
'''
method to delete a single user
'''
User.users.remove(self)
return True
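# Illustrative usage:
# user = User('joe', 'pw1234')
# user.save_user()
# User.find_by_username('joe') # -> the saved User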
``` |
{
"source": "JoeGakumo21/updateNeighbours",
"score": 3
} |
#### File: updateNeighbours/main/test.py
```python
from django.test import TestCase
from .models import NeighbourHood, Business
# Create your tests here.
class NeighbourhoodTestClass(TestCase):
# Set up method
def setUp(self):
self.james= NeighbourHood(first_name = 'James', last_name ='Muriuki', email ='<EMAIL>')
def test_instance(self):
self.assertTrue(isinstance(self.james,NeighbourHood))
def test_save_method(self):
self.james.save_post()
editors = NeighbourHood.objects.all()
self.assertTrue(len(editors) > 0)
class NewsDetailTestClass(TestCase):
def setUp(self):
# Creating a new neighbourhood and saving it
self.james= NeighbourHood(first_name = 'James', last_name ='Muriuki', email ='<EMAIL>')
self.james.save_post()
# adding a bussiness and saving it
self.new_tag = Business(name = 'testing')
self.new_tag.save()
self.new_article= Business(title = 'Test Article',post = 'This is a random test Post',editor = self.james)
self.new_article.save()
self.new_article.author.add(self.new_tag)
def tearDown(self):
# remove the objects created in setUp
NeighbourHood.objects.all().delete()
Business.objects.all().delete()
``` |
{
"source": "joegasewicz/api-tools",
"score": 2
} |
#### File: api-tools/api_tools/response.py
```python
class Response:
"""
Handles the request headers
"""
def send(self, request, response):
request.sendall(response)
class ResponseMixin:
response_tool = Response()
```
#### File: api-tools/api_tools/router.py
```python
class NoRoutesFoundError(Exception):
pass
class Routes:
routes = []
def add_routes(self, routes):
self.routes = routes
def handle(self, request):
if not self.routes:
raise NoRoutesFoundError("No Routes found!")
for method, route, handler in self.routes:
# check handler is a function or throw
# dispatch to the first route whose HTTP method matches the request
if method == request.method:
handler_data = handler(request)
return handler_data
class RouterMixin:
router_tool = Routes()
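# Illustrative usage (assumes a request object exposing a `method` attribute):
# routes = Routes()
# routes.add_routes([('GET', '/health', lambda req: b'OK')])
# body = routes.handle(request)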
``` |
{
"source": "joegasewicz/pymail-io",
"score": 2
} |
#### File: pymail-io/pymail_io/pymail_io.py
```python
from abc import ABC, abstractmethod
from typing import Dict, Any, Awaitable, Union, List
from pytask_io import PyTaskIO
from pymail_io.email import Email
from pymail_io.callables import unit_of_work_callable, send_email_with_async
class AbstractPyMailIO(ABC):
@abstractmethod
def send_email(self, *, subject: str, body: str, receiver_email: Union[str, List[str]]) -> Dict[str, Any]:
pass
class PyMailIO:
"""
:kwargs:
:key password: <PASSWORD>.
:key receiver_email: This can be either a string or a list of email addresses.
:key sender_email: The senders email address.
:key db: The Redis store database name.
:key workers: The number of workers created to run tasks in the queue.
:key email_host: The email server host.
:key email_port: The email server SSL or TLS port.
"""
#: PyMailIO is a Python library built on CPython's asyncio library.
#: The entree to asyncio is via `PyTaskIO <https://github.com/joegasewicz/pytask-io>`_ which is
#: an asynchronous task queue library that runs an event loop in a background thread.
#:
#: Setting up the library for debugging. Example::
#:
#: export PYTASKIO_DEBUG=1
#:
#:
#: The senders email password.
password: str
#: The senders email address.
sender_email: str
#: The email server host.
email_host: str
email: Email
#: Accesses the `PyTaskIO <https://github.com/joegasewicz/pytask-io>`_ task queue library
queue: PyTaskIO = None
_SMPT_SSL_PORT = 465
_START_TLS_PORT = 587
def __init__(self, *args, **kwargs):
self.password = kwargs.get("password")
self.receiver_email = kwargs.get("receiver_email")
self.sender_email = kwargs.get("sender_email")
self.db = kwargs.get("db")
self.workers = kwargs.get("workers")
self.email_host = kwargs.get("email_host")
self.email_port = kwargs.get("email_port") or self._SMPT_SSL_PORT
def send_email_sync(self, subject: str, body: str):
"""
:param subject:
:param body:
:return:
"""
return self.email.send_sync_email(unit_of_work_callable, [subject, body])
async def send_email_async(self, subject: str, body: str) -> Awaitable[Dict[str, Any]]:
"""
:param subject:
:param body:
:return:
"""
return await self.email.send_async_email(send_email_with_async, [subject, body])
def add_email_to_queue(self, subject: str, body: str, receiver_email: Union[str, List[str]]):
"""
:param subject:
:param body:
:return:
"""
return self.email.add_email_to_task_queue(receiver_email, unit_of_work_callable, [subject, body])
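# Illustrative sketch (hypothetical values; this assumes `self.email` is wired up
# by a concrete subclass before these methods are called):
# mail = PyMailIO(sender_email='me@example.com', password='...',
# email_host='smtp.example.com', workers=1, db=0)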
``` |
{
"source": "joegasewicz/slack-messages",
"score": 3
} |
#### File: slack-messages/slack_messages/slack.py
```python
from typing import Dict
import requests
class ErrorFetchingChannels(Exception):
pass
class ErrorPostingMessage(Exception):
pass
class SlackMessages:
auth_headers: Dict
post_message_url = "https://slack.com/api/chat.postMessage"
conversations_list_url = "https://slack.com/api/conversations.list"
def __init__(self, token: str):
self.token = token
self.auth_headers = {"Authorization": f"Bearer {self.token}"}
def send_message(self, *, channel_name: str, message: str) -> Dict:
channel_id = self._get_channel_id(channel_name)
r = requests.post(
self.post_message_url,
headers={
"Content-Type": "application/json",
**self.auth_headers,
},
json={
"channel": channel_id,
"text": message,
}
)
data = r.json()
if not data["ok"]:
raise ErrorPostingMessage(f"There was an error posting slack message: {data['error']}")
return data
def _get_channel_id(self, channel_name: str) -> str:
r = requests.get(
self.conversations_list_url,
headers=self.auth_headers,
)
channels = r.json()["channels"]
for c in channels:
if c["name"] == channel_name:
return c["id"]
raise ErrorFetchingChannels("Error: Could not get a list of channels from your slack workspace.")
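# Illustrative usage (hypothetical bot token and channel):
# client = SlackMessages('xoxb-...')
# client.send_message(channel_name='general', message='Build passed')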
``` |
{
"source": "joegaspari/Misinformation_tiktok",
"score": 3
} |
#### File: Misinformation_tiktok/Code/main.py
```python
from TikTokApi import TikTokApi
import csv
# open the file in the write mode
f = open('C:/Users/Patri/Documents/test.csv', 'w', encoding='utf-8')
#attributes
fieldnames = ['index', 'id', 'nickname', 'desc','commentCount','playCount', 'shareCount']
# create the csv writer
writer = csv.DictWriter(f, fieldnames = fieldnames)
writer.writeheader()
i = 0
#s_v_web_id for access
veritfyFp = "verify_kvztrcrc_t2PQlzpn_sWrc_4JDY_9e4x_u45vZqqb2MhC"
def results(tiktoks, i):
for tiktok in tiktoks:
# write a row to the csv file
writer.writerow({'index' : str(i), 'id': str(tiktok['id']), 'nickname': tiktok['author']['nickname'], 'desc': tiktok['desc'] ,'commentCount':str(tiktok['stats']['commentCount']),'playCount' : str(tiktok['stats']['playCount']), 'shareCount': str(tiktok['stats']['shareCount'])})
i = i + 1
def hashtag():
api = TikTokApi.get_instance(custom_verifyFp=veritfyFp, use_test_endpoints = True)
#how many tiktoks get returned per hashtag
count = 200
#hashtags to collect; loop through the list instead of repeating the call per tag
hashtags = [
"Antivaxxer",
"governmentconspiracy",
"antivaxx",
"vaccine",
"coronavirus"
]
for n, tag in enumerate(hashtags):
#select which hashtag gets returned
tiktoks = api.by_hashtag(tag, count=count)
#Write results, offsetting the index by the tags already written
results(tiktoks, i + count * n)
hashtag()
f.close()
``` |
{
"source": "joegeisz/pylith",
"score": 2
} |
#### File: applications/utilities/powerlaw_gendb.py
```python
import numpy
import math
from pyre.applications.Script import Script as Application
class PowerLawApp(Application):
"""
Python application to compute power-law parameters used by PyLith,
given input spatial databases describing the temperature and the
laboratory-derived properties for the various materials. The output is
another spatial database containing the power-law parameters for PyLith.
"""
## \b Properties
## @li \b reference_value Indicates whether reference stress or
## reference strain rate is provided as input.
## @li \b reference_stress Value for reference stress.
## @li \b reference_strain_rate Value for reference strain rate.
##
## \b Facilities
## @li \b db_exponent Spatial db for power-law exponent, n.
## @li \b db_activation_energy Spatial db for activation energy, Q.
## @li \b db_temperature Spatial db for temperature, T.
## @li \b db_powerlaw_coefficient Spatial db for power-law coefficient, Ae.
## @li \b geometry Geometry for output database.
## @li \b iohandler Object for writing database.
import pyre.inventory
from pyre.units.pressure import MPa
from pyre.units.time import s
refSelection = pyre.inventory.str("reference_value",
default="strain_rate",
validator=pyre.inventory.choice(['stress',
'strain_rate']))
refSelection.meta['tip'] = "Indicates whether reference stress or " \
"reference strain rate is provided as input."
refStress = pyre.inventory.dimensional("reference_stress", default=1.0*MPa)
refStress.meta['tip'] = "Reference stress value."
refStrainRate = pyre.inventory.dimensional("reference_strain_rate",
default=1.0e-6/s)
refStrainRate.meta['tip'] = "Reference strain rate value."
from spatialdata.spatialdb.SimpleDB import SimpleDB
dbExponent = pyre.inventory.facility("db_exponent",
family="spatial_database",
factory=SimpleDB)
dbExponent.meta['tip'] = "Spatial db for power-law exponent, n."
dbActivationE = pyre.inventory.facility("db_activation_energy",
family="spatial_database",
factory=SimpleDB)
dbActivationE.meta['tip'] = "Spatial db for activation energy, Q."
dbTemperature = pyre.inventory.facility("db_temperature",
family="spatial_database",
factory=SimpleDB)
dbTemperature.meta['tip'] = "Spatial db for temperature, T."
dbAe = pyre.inventory.facility("db_powerlaw_coefficient",
family="spatial_database",
factory=SimpleDB)
dbAe.meta['tip'] = "Spatial db for power-law coefficient, Ae."
from spatialdata.spatialdb.generator.Geometry import Geometry
geometry = pyre.inventory.facility("geometry", family="geometry",
factory=Geometry)
geometry.meta['tip'] = "Geometry for output database."
from spatialdata.spatialdb.SimpleIOAscii import SimpleIOAscii
iohandler = pyre.inventory.facility("iohandler", family="simpledb_io",
factory=SimpleIOAscii)
iohandler.meta['tip'] = "Object for writing database."
# PUBLIC METHODS /////////////////////////////////////////////////////
def __init__(self, name="powerlaw_gendb"):
Application.__init__(self, name)
return
def main(self, *args, **kwds):
"""
Application driver.
"""
# Get output points
self._info.log("Reading geometry.")
self.geometry.read()
points = self.geometry.vertices
coordsys = self.geometry.coordsys
(npoints, spaceDim) = points.shape
refStrainRate = numpy.zeros( (npoints,), dtype=numpy.float64)
refStress = numpy.zeros( (npoints,), dtype=numpy.float64)
# Query databases to get inputs at output points
self._info.log("Querying for parameters at output points.")
n = self._queryDB(self.dbExponent, "power-law-exponent", points, coordsys)
Q = self._queryDB(self.dbActivationE, "activation-energy", points, coordsys)
logAe = self._queryDB(self.dbAe, "log-flow-constant", points, coordsys)
scaleAe = self._queryDB(self.dbAe, "flow-constant-scale", points, coordsys)
T = self._queryDB(self.dbTemperature, "temperature", points, coordsys)
# Compute power-law parameters
self._info.log("Computing parameters at output points.")
from pyre.handbook.constants.fundamental import R
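# Power-law flow: strain_rate = At * stress^n, with
# Ae = 10^(logAe - scaleAe*n) and At = 3^((n+1)/2)/2 * Ae * exp(-Q/(R*T)).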
Ae = 10**(logAe - scaleAe * n)
At = 3**(0.5*(n+1))/2.0 * Ae * numpy.exp(-Q/(R.value*T))
if self.refSelection == "stress":
refStress[:] = self.refStress.value
refStrainRate = self.refStress.value**n / At
elif self.refSelection == "strain_rate":
refStrainRate[:] = self.refStrainRate.value
refStress = (self.refStrainRate.value / At)**(1.0/n)
else:
raise ValueError("Invalid value (%s) for reference value." % \
self.refSelection)
refStressInfo = {'name': "reference-stress",
'units': "Pa",
'data': refStress.flatten()}
refStrainRateInfo = {'name': "reference-strain-rate",
'units': "1/s",
'data': refStrainRate.flatten()}
exponentInfo = {'name': "power-law-exponent",
'units': "none",
'data': n.flatten()}
# Write database
self._info.log("Writing database.")
data = {'points': points,
'coordsys': coordsys,
'data_dim': self.geometry.dataDim,
'values': [refStressInfo, refStrainRateInfo, exponentInfo]}
self.iohandler.write(data)
return
def _queryDB(self, db, valueName, points, cs):
"""
Query spatial database
"""
(npoints, spaceDim) = points.shape
data = numpy.zeros( (npoints,1), dtype=numpy.float64)
err = numpy.zeros( (npoints,), dtype=numpy.int32)
db.open()
db.queryVals([valueName])
db.multiquery(data, err, points, cs)
db.close()
errSum = numpy.sum(err)
if errSum > 0:
msg = "Query for %s failed at %d points.\n" \
"Coordinates of points:\n" % (valueName, errSum)
msg += "%s" % points[err,:]
raise ValueError(msg)
return data
# ----------------------------------------------------------------------
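# Example invocation (a sketch): Pyre applications read properties from the
# command line and .cfg files, so a run might look like
#   powerlaw_gendb.py --reference_value=strain_rate --reference_strain_rate=1.0e-6/s
# The exact syntax depends on the local Pyre installation.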
if __name__ == '__main__':
app = PowerLawApp()
app.run()
# End of file
```
#### File: pylith/bc/ZeroDispDB.py
```python
from spatialdata.spatialdb.UniformDB import UniformDB
# ZeroDispDB class
class ZeroDispDB(UniformDB):
"""
Python object for spatial database with uniform zero displacements
for degrees of freedom.
Factory: spatial_database
"""
# INVENTORY //////////////////////////////////////////////////////////
class Inventory(UniformDB.Inventory):
"""
Python object for managing ZeroDispDB facilities and properties.
"""
## @class Inventory
## Python object for managing ZeroDispDB facilities and properties.
##
## \b Properties
## @li None
##
## \b Facilities
## @li none
import pyre.inventory
from pyre.units.length import m
values = ["displacement-x", "displacement-y", "displacement-z"]
data = [0.0*m, 0.0*m, 0.0*m]
label = pyre.inventory.str("label", default="Zero displacement BC.")
# PUBLIC METHODS /////////////////////////////////////////////////////
def __init__(self, name="zerodispdb"):
"""
Constructor.
"""
UniformDB.__init__(self, name)
return
# PRIVATE METHODS ////////////////////////////////////////////////////
def _configure(self):
"""
Set members based on inventory.
"""
UniformDB._configure(self)
return
# FACTORIES ////////////////////////////////////////////////////////////
def spatial_database():
"""
Factory associated with ZeroDispDB.
"""
return ZeroDispDB()
# End of file
```
#### File: pylith/meshio/DataWriter.py
```python
from pylith.utils.PetscComponent import PetscComponent
# DataWriter class
class DataWriter(PetscComponent):
"""
Python abstract base class for writing finite-element data.
"""
# INVENTORY //////////////////////////////////////////////////////////
# None
# PUBLIC METHODS /////////////////////////////////////////////////////
def __init__(self, name="datawriter"):
"""
Constructor.
"""
PetscComponent.__init__(self, name, facility="datawriter")
return
def verifyConfiguration(self):
"""
Verify compatibility of configuration.
"""
return
def initialize(self, normalizer, filename):
"""
Initialize writer.
"""
import os
relpath = os.path.dirname(filename)
if len(relpath) > 0 and not os.path.exists(relpath):
# Only create directory on proc 0
from pylith.mpi.Communicator import mpi_comm_world
comm = mpi_comm_world()
if not comm.rank:
os.makedirs(relpath)
return
# End of file
```
#### File: pylith/meshio/OutputSolnPoints.py
```python
from OutputManager import OutputManager
from meshio import OutputSolnPoints as ModuleOutputSolnPoints
# Validator for filename
def validateFilename(value):
"""
Validate filename with list of points.
"""
if 0 == len(value):
raise ValueError("Filename for list of points not specified.")
return value
# OutputSolnPoints class
class OutputSolnPoints(OutputManager, ModuleOutputSolnPoints):
"""
Python object for managing output of finite-element solution
information at a list of discrete points.
@class Inventory
Python object for managing OutputSolnPoints facilities and properties.
\b Properties
@li \b vertex_data_fields Names of vertex data fields to output.
\b Facilities
@li \b reader Reader for list of points.
Factory: output_manager
"""
# INVENTORY //////////////////////////////////////////////////////////
import pyre.inventory
vertexDataFields = pyre.inventory.list("vertex_data_fields", default=["displacement"])
vertexDataFields.meta['tip'] = "Names of vertex data fields to output."
from PointsList import PointsList
reader = pyre.inventory.facility("reader", factory=PointsList, family="points_list")
reader.meta['tip'] = "Reader for points list."
# PUBLIC METHODS /////////////////////////////////////////////////////
def __init__(self, name="outputsolnpoints"):
"""
Constructor.
"""
OutputManager.__init__(self, name)
self.availableFields = \
{'vertex':
{'info': [],
'data': ["displacement", "velocity"]},
'cell':
{'info': [],
'data': []}}
return
def preinitialize(self):
"""
Do minimal initialization.
"""
OutputManager.preinitialize(self, dataProvider=self)
return
def initialize(self, mesh, normalizer):
"""
Initialize output manager.
"""
logEvent = "%sinit" % self._loggingPrefix
self._eventLogger.eventBegin(logEvent)
OutputManager.initialize(self, normalizer)
# Read points
stations, points = self.reader.read()
# Convert to mesh coordinate system
from spatialdata.geocoords.Converter import convert
convert(points, mesh.coordsys(), self.coordsys)
ModuleOutputSolnPoints.setupInterpolator(self, mesh, points, stations, normalizer)
self.mesh = ModuleOutputSolnPoints.pointsMesh(self)
self._eventLogger.eventEnd(logEvent)
return
def getDataMesh(self):
"""
Get mesh associated with data fields.
"""
return (self.mesh, None, None)
def getVertexField(self, name, fields):
"""
Get vertex field.
"""
field = None
fieldType = None
if name == "displacement":
field = fields.get("disp(t)")
elif name == "velocity":
field = fields.get("velocity(t)")
else:
raise ValueError, "Vertex field '%s' not available." % name
return field
# PRIVATE METHODS ////////////////////////////////////////////////////
def _configure(self):
"""
Set members based using inventory.
"""
try:
OutputManager._configure(self)
except ValueError, err:
aliases = ", ".join(self.aliases)
raise ValueError("Error while configuring output over points "
"(%s):\n%s" % (aliases, err.message))
return
def _createModuleObj(self):
"""
Create handle to C++ object.
"""
ModuleOutputSolnPoints.__init__(self)
return
def _open(self, mesh, nsteps, label, labelId):
"""
Call C++ open();
"""
if label != None and labelId != None:
ModuleOutputSolnPoints.open(self, mesh, nsteps, label, labelId)
else:
ModuleOutputSolnPoints.open(self, mesh, nsteps)
ModuleOutputSolnPoints.writePointNames(self)
return
# FACTORIES ////////////////////////////////////////////////////////////
def output_manager():
"""
Factory associated with OutputManager.
"""
return OutputSolnPoints()
# End of file
```
#### File: pylith/meshio/OutputSolnSubset.py
```python
from OutputManager import OutputManager
from meshio import OutputSolnSubset as ModuleOutputSolnSubset
# Validator for label
def validateLabel(value):
"""
Validate label for group/nodeset/pset.
"""
if 0 == len(value):
raise ValueError("Label for group/nodeset/pset in mesh not specified.")
return value
# OutputSolnSubset class
class OutputSolnSubset(OutputManager, ModuleOutputSolnSubset):
"""
Python object for managing output of finite-element solution
information over a subdomain.
@class Inventory
Python object for managing OutputSolnSubset facilities and properties.
\b Properties
@li \b vertex_data_fields Names of vertex data fields to output.
@li \b label Name identifier for subdomain.
\b Facilities
@li \b writer Writer for data.
Factory: output_manager
"""
# INVENTORY //////////////////////////////////////////////////////////
import pyre.inventory
vertexDataFields = pyre.inventory.list("vertex_data_fields",
default=["displacement"])
vertexDataFields.meta['tip'] = "Names of vertex data fields to output."
label = pyre.inventory.str("label", default="", validator=validateLabel)
label.meta['tip'] = "Label identifier for subdomain."
# PUBLIC METHODS /////////////////////////////////////////////////////
def __init__(self, name="outputsolnsubset"):
"""
Constructor.
"""
OutputManager.__init__(self, name)
self.availableFields = \
{'vertex': \
{'info': [],
'data': ["displacement","velocity"]},
'cell': \
{'info': [],
'data': []}}
return
def preinitialize(self):
"""
Do minimal initialization.
"""
OutputManager.preinitialize(self, dataProvider=self)
return
def verifyConfiguration(self, mesh):
"""
Verify compatibility of configuration.
"""
OutputManager.verifyConfiguration(self, mesh)
ModuleOutputSolnSubset.verifyConfiguration(self, mesh)
return
def initialize(self, mesh, normalizer):
"""
Initialize output manager.
"""
logEvent = "%sinit" % self._loggingPrefix
self._eventLogger.eventBegin(logEvent)
self.submesh = self.subdomainMesh(mesh)
OutputManager.initialize(self, normalizer)
self._eventLogger.eventEnd(logEvent)
return
def getDataMesh(self):
"""
Get mesh associated with data fields.
"""
return (self.submesh, None, None)
def getVertexField(self, name, fields):
"""
Get vertex field.
"""
# :TODO: Clean this up for multiple fields
buffer = None
if name == "displacement":
field = fields.get("disp(t)")
if not fields.hasField("buffer (vector)"):
fields.add("buffer (vector)", "buffer")
buffer = fields.get("buffer (vector)")
buffer.copySubfield(field, "displacement")
elif name == "velocity":
field = fields.get("velocity(t)")
if not fields.hasField("buffer (vector)"):
fields.add("buffer (vector)", "buffer")
buffer = fields.get("buffer (vector)")
buffer.copySubfield(field, "displacement")
buffer.label(field.label()) # :KLUDGE: Fix for multiple fields
buffer.scale(field.scale()) # :KLUDGE: Fix for multiple fields
else:
raise ValueError, "Vertex field '%s' not available." % name
buffer.dimensionalizeOkay(True)
return buffer
# PRIVATE METHODS ////////////////////////////////////////////////////
def _configure(self):
"""
Set members based using inventory.
"""
try:
OutputManager._configure(self)
ModuleOutputSolnSubset.label(self, self.label)
except ValueError, err:
aliases = ", ".join(self.aliases)
raise ValueError("Error while configuring output over boundary "
"(%s):\n%s" % (aliases, err.message))
return
def _createModuleObj(self):
"""
Create handle to C++ object.
"""
ModuleOutputSolnSubset.__init__(self)
return
# FACTORIES ////////////////////////////////////////////////////////////
def output_manager():
"""
Factory associated with OutputManager.
"""
return OutputSolnSubset()
# End of file
```
#### File: pylith/meshio/SingleOutput.py
```python
from pylith.utils.PetscComponent import PetscComponent
# SingleOutput class
class SingleOutput(PetscComponent):
"""
Python container with one output manager.
Factory: object_bin
"""
# INVENTORY //////////////////////////////////////////////////////////
class Inventory(PetscComponent.Inventory):
"""
Python object for managing SingleOutput facilities and properties.
"""
## @class Inventory
## Python object for managing SingleOutput facilities and properties.
##
## \b Properties
## @li None
##
## \b Facilities
## @li \b output Output manager
import pyre.inventory
from OutputSoln import OutputSoln
output = pyre.inventory.facility("output", family="output_manager",
factory=OutputSoln)
output.meta['tip'] = "Output manager."
# PUBLIC METHODS /////////////////////////////////////////////////////
def __init__(self, name="singleoutput"):
"""
Constructor.
"""
PetscComponent.__init__(self, name, facility="output")
return
# End of file
```
#### File: pylith/perf/VertexGroup.py
```python
from Memory import Memory
class VertexGroup(Memory):
"""
Mesh object for holding vertex group memory and performance information.
"""
def __init__(self, label = '', numVertices = 0, numMeshVertices = 0):
"""
Constructor.
"""
self.label = label
self.nvertices = numVertices
self.nMeshVertices = numMeshVertices
return
def tabulate(self, memDict):
"""
Tabulate memory use.
"""
# Here we have data + atlas (could use uniform) + bc (use Section)
if not self.label in memDict:
memDict[self.label] = 0
memDict[self.label] += (self.sizeInt * self.nvertices) + (2 * self.sizeInt * self.nMeshVertices) + (2 * self.sizeInt * self.nMeshVertices)
return
if __name__ == '__main__':
d = {}
VertexGroup('rock', 35).tabulate(d)
print 'Memory:',d
# End of file
```
#### File: pylith/topology/MeshRefiner.py
```python
from pylith.utils.PetscComponent import PetscComponent
# MeshRefiner class
class MeshRefiner(PetscComponent):
"""
Python manager for refining mesh in parallel.
Factory: mesh_refiner
"""
# PUBLIC METHODS /////////////////////////////////////////////////////
def __init__(self, name="refiner"):
"""
Constructor.
"""
PetscComponent.__init__(self, name, facility="refiner")
return
def refine(self, mesh):
"""
Refine mesh.
"""
self._setupLogging()
logEvent = "%srefine" % self._loggingPrefix
self._eventLogger.eventBegin(logEvent)
self._eventLogger.eventEnd(logEvent)
return mesh
# PRIVATE METHODS ////////////////////////////////////////////////////
def _configure(self):
"""
Set members based using inventory.
"""
PetscComponent._configure(self)
return
def _setupLogging(self):
"""
Setup event logging.
"""
self._loggingPrefix = "Refin "
from pylith.utils.EventLogger import EventLogger
logger = EventLogger()
logger.className("FE Refinement")
logger.initialize()
events = ["refine"]
for event in events:
logger.registerEvent("%s%s" % (self._loggingPrefix, event))
self._eventLogger = logger
return
# FACTORIES ////////////////////////////////////////////////////////////
def mesh_refiner():
"""
Factory associated with MeshRefiner.
"""
return MeshRefiner()
# End of file
```
#### File: pylith/utils/CollectVersionInfo.py
```python
from pyre.components.Component import Component
import pylith.utils.utils as utils
import platform
import sys
# CollectVersionInfo class
class CollectVersionInfo(Component):
"""
Python CollectVersionInfo object to collect version information for PyLith
and its dependencies.
"""
# PUBLIC METHODS /////////////////////////////////////////////////////
def __init__(self):
"""
Constructor.
"""
Component.__init__(self, name="collectversioninfo", facility="collectversioninfo")
return
@classmethod
def asString(cls):
info = cls._collect()
s = "Platform:\n" \
" Hostname: %(hostname)s\n" \
" Operating system: %(os)s\n" \
" Kernel: %(kernel)s\n" \
" Version: %(version)s\n" \
" Machine: %(machine)s\n" \
" Processor: %(processor)s\n" \
% info["platform"]
version = info["version"]
# PyLith
s += "\nPyLith\n"
if version["pylith"]["isRelease"]:
s += " Release v%(version)s\n" % version["pylith"]
else:
s += " Configured on %(gitDate)s, GIT branch: %(gitBranch)s, revision: %(gitRevision)s, hash: %(gitHash)s\n" % version["pylith"]
# PETSc
s += "\nPETSc\n"
if version["petsc"]["isRelease"]:
s += " Release v%(version)s\n" % version["petsc"]
else:
s += " Configured on %(gitDate)s, GIT branch: %(gitBranch)s, revision: %(gitRevision)s\n" % version["petsc"]
# Spatialdata
s += "\nSpatialdata\n"
if version["spatialdata"]["isRelease"]:
s += " Release v%(version)s\n" % version["spatialdata"]
else:
s += " Configured on %(gitDate)s, GIT branch: %(gitBranch)s, revision: %(gitRevision)s, hash: %(gitHash)s\n" % version["spatialdata"]
# MPI
s += "\nMPI standard: %(standard)s, implementation: %(implementation)s, version: %(version)s\n" % version["mpi"]
# HDF5
s += "HDF5 version: %(version)s\n" % version["hdf5"]
# NetCDF
s += "NetCDF version: %(version)s\n" % version["netcdf"]
# Proj
s += "Proj version: %(version)s\n" % version["proj"]
# Python
s += "\nPython\n" \
" v%(version)s of %(implementation)s compiled with %(compiler)s\n" % version["python"]
for (pname, pver) in version["python"]["modules"].items():
s += " %s: v%s from %s\n" % (pname, pver["version"], pver["location"])
return s
@classmethod
def asDict(cls):
info = cls._collect()
return info
# PRIVATE METHODS ////////////////////////////////////////////////////
@classmethod
def _collect(cls):
"""
Collect version information.
"""
info = {
"platform": cls._collectPlatform(),
"version": cls._collectVersion(),
}
return info
@classmethod
def _collectPlatform(cls):
(os, hostname, kernel, version, machine, processor) = platform.uname()
info = {
"hostname": hostname,
"os": os,
"kernel": kernel,
"version": version,
"machine": machine,
"processor": processor,
}
return info
@classmethod
def _collectVersion(cls):
info = {
"pylith": cls._collectVersionPyLith(),
"python": cls._collectVersionPython(),
"petsc": cls._collectVersionPetsc(),
"mpi": cls._collectVersionMPI(),
"hdf5": cls._collectVersionHDF5(),
"netcdf": cls._collectVersionNetCDF(),
"spatialdata": cls._collectVersionSpatialdata(),
"proj": cls._collectVersionProj(),
}
return info
@staticmethod
def _collectVersionPyLith():
v = utils.PylithVersion()
if v.isRelease():
info = {
"isRelease": True,
"version": v.version()
}
else:
info = {
"isRelease": False,
"gitDate": v.gitDate(),
"gitBranch": v.gitBranch(),
"gitRevision": v.gitRevision(),
"gitHash": v.gitHash(),
}
return info
@staticmethod
def _collectVersionSpatialdata():
import spatialdata.utils.utils as utils
v = utils.SpatialdataVersion()
if v.isRelease():
info = {
"isRelease": True,
"version": v.version()
}
else:
info = {
"isRelease": False,
"gitDate": v.gitDate(),
"gitBranch": v.gitBranch(),
"gitRevision": v.gitRevision(),
"gitHash": v.gitHash(),
}
return info
@staticmethod
def _collectVersionPetsc():
v = utils.PetscVersion()
if v.isRelease():
info = {
"isRelease": True,
"version": v.version()
}
else:
info = {
"isRelease": False,
"gitDate": v.gitDate(),
"gitBranch": v.gitBranch(),
"gitRevision": v.gitRevision(),
}
info["petscDir"] = v.petscDir()
info["petscArch"] = v.petscArch()
return info
@classmethod
def _collectVersionPython(cls):
info = {
"version": platform.python_version(),
"implementation": platform.python_implementation(),
"compiler": platform.python_compiler(),
"modules": {},
}
pkgs = ("numpy", "spatialdata", "FIAT", "h5py", "netCDF4", "pyre")
for pkg in pkgs:
ver, loc = cls._getPackageVersion(pkg)
info["modules"][pkg] = {
"version": ver,
"location": loc,
}
return info
@staticmethod
def _collectVersionMPI():
v = utils.DependenciesVersion()
info = {
"standard": v.mpiStandard(),
"implementation": v.mpiImplementation(),
"version": v.mpiVersion(),
}
return info
@staticmethod
def _collectVersionHDF5():
v = utils.DependenciesVersion()
info = {
"version": v.hdf5Version(),
}
return info
@staticmethod
def _collectVersionNetCDF():
v = utils.DependenciesVersion()
info = {
"version": v.netcdfVersion(),
}
return info
@staticmethod
def _collectVersionProj():
import spatialdata.utils.utils as utils
v = utils.SpatialdataVersion()
info = {
"version": v.projVersion(),
}
return info
@staticmethod
def _getPackageVersion(name):
import os
m = None
location = None
version = None
try:
m = __import__(name)
location = os.path.split(m.__file__)[0]
version = m.__version__
except ImportError:
version = "not found"
location = "--"
except AttributeError:
if version is None:
version = "unknown"
if location is None:
location = "unknown"
return (version, location)
# End of file
```
#### File: pylith/utils/PetscComponent.py
```python
from pyre.components.Component import Component
# PetscComponent class
class PetscComponent(Component):
"""
Python PetscComponent object for aid in deallocating data structures
before calling PetscFinalize().
"""
# PUBLIC METHODS /////////////////////////////////////////////////////
def __init__(self, name, facility):
"""
Constructor.
"""
Component.__init__(self, name, facility)
return
def compilePerformanceLog(self, parentLogger):
"""
Compile performance and memory information.
"""
if hasattr(self, 'perfLogger'):
if not parentLogger is None:
parentLogger.join(self.perfLogger)
for component in self.components():
if isinstance(component, PetscComponent):
component.compilePerformanceLog(parentLogger)
# Facility arrays are not PetscComponents but have components().
elif hasattr(component, "components"):
for subcomponent in component.components():
if isinstance(subcomponent, PetscComponent):
subcomponent.compilePerformanceLog(parentLogger)
return
def cleanup(self):
"""
Deallocate data structures.
"""
for component in self.components():
if isinstance(component, PetscComponent):
component.cleanup()
# Facility arrays are not PetscComponents but have components().
elif hasattr(component, "components"):
for subcomponent in component.components():
if isinstance(subcomponent, PetscComponent):
subcomponent.cleanup()
self._cleanup()
return
# PRIVATE METHODS ////////////////////////////////////////////////////
def _cleanup(self):
"""
Deallocate locally managed data structures.
"""
if "deallocate" in dir(self):
self.deallocate()
return
# End of file
```
#### File: 2d/maxwell_analytic/plot_stressyy.py
```python
def plot(sim):
import h5py
import pylab
import numpy
filename = "output/%s-statevars.h5" % sim
h5 = h5py.File(filename, "r")
stress = h5['cell_fields/stress'][:,:,1]
t = h5['time'][:].ravel()
h5.close()
filename = "output/%s-statevars_info.h5" % sim
h5 = h5py.File(filename, "r")
mat_mu = h5['cell_fields/mu'][0,0,0]
mat_lambda = h5['cell_fields/lambda'][0,0,0]
mat_density = h5['cell_fields/density'][0,0,0]
mat_tm = h5['cell_fields/maxwell_time'][0,0,0]
h5.close()
K = mat_lambda + 2.0/3.0*mat_mu
G = mat_mu
viscosity = mat_tm * mat_mu
theta = viscosity / G
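# Analytic Maxwell viscoelastic solution: stress_yy relaxes from its elastic
# value toward -10 MPa with characteristic time tau = (3K+4G)*theta/(3K),
# where theta = viscosity/G is the Maxwell time.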
analytic = -10e+6*(1.0-6*G/(3*K+4*G)*numpy.exp(-3*K*t/((3*K+4*G)*theta)))
pylab.plot(t, analytic[:], 'k-', t, stress[:,0], 'r--')
pylab.show()
return
# ======================================================================
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--sim", action="store", dest="sim", required=True)
args = parser.parse_args()
plot(args.sim)
```
#### File: 2d/finitestrain/TestQuad4.py
```python
import unittest
import numpy
from pylith.tests import has_h5py
class TestQuad4(unittest.TestCase):
"""
Generic finite-strain tests for problems using 2-D mesh.
"""
def setUp(self):
"""
Setup for tests.
"""
self.mesh = {'ncells': 50,
'ncorners': 4,
'nvertices': 66,
'spaceDim': 2,
'tensorSize': 3}
if has_h5py():
self.checkResults = True
else:
self.checkResults = False
return
def test_elastic_info(self):
"""
Check elastic info.
"""
if not self.checkResults:
return
ncells= self.mesh['ncells']
filename = "output/%s-statevars_info.h5" % self.outputRoot
from rigidrotate_soln import p_mu,p_lambda,p_density
propMu = p_mu*numpy.ones( (1, ncells, 1), dtype=numpy.float64)
propLambda = p_lambda*numpy.ones( (1, ncells, 1), dtype=numpy.float64)
propDensity = p_density*numpy.ones( (1, ncells, 1), dtype=numpy.float64)
properties = {'mu': propMu,
'lambda': propLambda,
'density': propDensity}
from pylith.tests.PhysicalProperties import check_properties
check_properties(self, filename, self.mesh, properties)
return
def test_soln(self):
"""
Check solution (displacement) field.
"""
if not self.checkResults:
return
filename = "output/%s-domain.h5" % self.outputRoot
from pylith.tests.Solution import check_displacements
check_displacements(self, filename, self.mesh)
return
def test_elastic_statevars(self):
"""
Check elastic state variables.
"""
if not self.checkResults:
return
filename = "output/%s-statevars.h5" % self.outputRoot
from pylith.tests.StateVariables import check_state_variables
stateVars = ["total_strain", "stress", "cauchy_stress"]
check_state_variables(self, filename, self.mesh, stateVars)
return
# End of file
```
#### File: 2d/quad4/TestDislocation.py
```python
import numpy
from pylith.tests import run_pylith
from TestQuad4 import TestQuad4
from dislocation_soln import AnalyticalSoln
from pylith.tests.Fault import check_vertex_fields
# ----------------------------------------------------------------------
# Local version of PyLithApp
from pylith.apps.PyLithApp import PyLithApp
class LocalApp(PyLithApp):
def __init__(self):
PyLithApp.__init__(self, name="dislocation")
return
# ----------------------------------------------------------------------
class TestDislocation(TestQuad4):
"""
Test suite for fault with prescribed slip.
"""
def setUp(self):
"""
Setup for test.
"""
TestQuad4.setUp(self)
self.mesh['nvertices'] = 81+9
self.nverticesO = 81
self.faultMesh = {'nvertices': 9,
'spaceDim': 2,
'ncells': 8,
'ncorners': 2}
run_pylith(LocalApp)
self.outputRoot = "dislocation"
self.soln = AnalyticalSoln()
return
def test_fault_info(self):
"""
Check fault information.
"""
if not self.checkResults:
return
filename = "%s-fault_info.h5" % self.outputRoot
fields = ["normal_dir", "final_slip", "slip_time"]
check_vertex_fields(self, filename, self.faultMesh, fields)
return
def test_fault_data(self):
"""
Check fault data fields.
"""
if not self.checkResults:
return
filename = "%s-fault.h5" % self.outputRoot
fields = ["slip", "traction_change"]
check_vertex_fields(self, filename, self.faultMesh, fields)
return
def calcDisplacements(self, vertices):
"""
Calculate displacement field given coordinates of vertices.
"""
return self.soln.displacement(vertices, self.nverticesO)
def calcStateVar(self, name, vertices, cells):
"""
Calculate state variable.
"""
ncells = self.mesh['ncells']
pts = numpy.zeros( (ncells, 3), dtype=numpy.float64)
if name == "total_strain":
stateVar = self.soln.strain(pts)
elif name == "stress":
stateVar = self.soln.stress(pts)
else:
raise ValueError("Unknown state variable '%s'." % name)
return stateVar
def calcFaultField(self, name, vertices):
"""
Calculate fault info.
"""
normalDir = (-1.0, 0.0)
finalSlip = -2.0
slipTime = 0.0
nvertices = self.faultMesh['nvertices']
if name == "normal_dir":
field = numpy.zeros( (1, nvertices, 2), dtype=numpy.float64)
field[0,:,0] = normalDir[0]
field[0,:,1] = normalDir[1]
elif name == "final_slip":
field = numpy.zeros( (1, nvertices, 2), dtype=numpy.float64)
field[0,:,0] = finalSlip
elif name == "slip_time":
field = slipTime*numpy.ones( (1, nvertices, 1), dtype=numpy.float64)
elif name == "slip":
field = numpy.zeros( (1, nvertices, 2), dtype=numpy.float64)
field[0,:,0] = finalSlip
elif name == "traction_change":
field = numpy.zeros( (1, nvertices, 2), dtype=numpy.float64)
field[0,:,0] = 0.0
else:
raise ValueError("Unknown fault field '%s'." % name)
return field
# ----------------------------------------------------------------------
# Local version of PyLithApp
from pylith.apps.PyLithApp import PyLithApp
class LocalApp2(PyLithApp):
def __init__(self):
PyLithApp.__init__(self, name="dislocation_np2")
return
# ----------------------------------------------------------------------
class TestDislocation2(TestDislocation):
"""
Test suite for fault with prescribed slip w/2 procs.
"""
def setUp(self):
"""
Setup for test.
"""
TestQuad4.setUp(self)
self.mesh['nvertices'] = 81+9
self.nverticesO = 81
self.faultMesh = {'nvertices': 9,
'spaceDim': 2,
'ncells': 8,
'ncorners': 2}
run_pylith(LocalApp2, nprocs=2)
self.outputRoot = "dislocation_np2"
self.soln = AnalyticalSoln()
return
# ----------------------------------------------------------------------
if __name__ == '__main__':
import unittest
from TestDislocation import TestDislocation as Tester
from TestDislocation import TestDislocation2 as Tester2
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(Tester))
suite.addTest(unittest.makeSuite(Tester2))
unittest.TextTestRunner(verbosity=2).run(suite)
# End of file
```
#### File: 2d/quad4/testpylith.py
```python
from pylith.tests.FullTestApp import FullTestApp
import unittest
class TestApp(FullTestApp):
"""
Test application.
"""
def __init__(self):
"""
Constructor.
"""
FullTestApp.__init__(self)
return
def _suite(self):
"""
Create test suite.
"""
suite = unittest.TestSuite()
from TestAxialDisp import TestAxialDisp
suite.addTest(unittest.makeSuite(TestAxialDisp))
from TestShearDisp import TestShearDisp
suite.addTest(unittest.makeSuite(TestShearDisp))
from TestDislocation import TestDislocation
suite.addTest(unittest.makeSuite(TestDislocation))
from TestLgDeformRigidBody import TestRigidBody
suite.addTest(unittest.makeSuite(TestRigidBody))
from TestLgDeformTraction import TestTraction
suite.addTest(unittest.makeSuite(TestTraction))
from TestFrictionCompression import TestFrictionCompression
suite.addTest(unittest.makeSuite(TestFrictionCompression))
from TestFrictionOpening import TestFrictionOpening
suite.addTest(unittest.makeSuite(TestFrictionOpening))
from TestFrictionShearStick import TestFrictionShearStick
suite.addTest(unittest.makeSuite(TestFrictionShearStick))
from TestFrictionShearSliding import TestFrictionShearSliding
suite.addTest(unittest.makeSuite(TestFrictionShearSliding))
from TestSlipWeakeningCompression import TestSlipWeakeningCompression
suite.addTest(unittest.makeSuite(TestSlipWeakeningCompression))
from TestSlipWeakeningOpening import TestSlipWeakeningOpening
suite.addTest(unittest.makeSuite(TestSlipWeakeningOpening))
from TestSlipWeakeningShearStick import TestSlipWeakeningShearStick
suite.addTest(unittest.makeSuite(TestSlipWeakeningShearStick))
from TestSlipWeakeningShearSliding import TestSlipWeakeningShearSliding
suite.addTest(unittest.makeSuite(TestSlipWeakeningShearSliding))
return suite
# ----------------------------------------------------------------------
if __name__ == '__main__':
app = TestApp()
app.main()
# End of file
```
#### File: tests_auto/eqinfo/TestEqInfoLine.py
```python
import numpy
from TestEqInfo import TestEqInfo, run_eqinfo
# Local version of EqInfoApp
from pylith.apps.EqInfoApp import EqInfoApp
class LineApp(EqInfoApp):
def __init__(self):
EqInfoApp.__init__(self, name="line")
return
class TestEqInfoLine(TestEqInfo):
"""
Test suite for testing pylith_eqinfo with 1-D fault meshes.
"""
def setUp(self):
"""
Setup for test.
"""
run_eqinfo(LineApp)
return
def test_stats(self):
"""
Check fault stats.
"""
import stats_line
timestamp = numpy.array([0.0, 1.0], dtype=numpy.float64)
oneE = stats_line.RuptureStats()
oneE.timestamp = timestamp
oneE.ruparea = numpy.array([2.5, 1.5], dtype=numpy.float64)
oneE.potency = numpy.array([0.7*1.0+0.9*1.5, 0.4*1.5], dtype=numpy.float64)
oneE.moment = oneE.potency*1.0e+10
twoE = stats_line.RuptureStats()
twoE.timestamp = timestamp
area0 = (1.5**2+1.0**2)**0.5
area1 = (1.0**2+1.0**2)**0.5
twoE.ruparea = numpy.array([area0+area1, area0], dtype=numpy.float64)
twoE.potency = numpy.array([0.9*area0+0.7*area1, 0.3*area0], dtype=numpy.float64)
twoE.moment = twoE.potency*1.0e+10
allE = stats_line.RuptureStats()
allE.timestamp = timestamp
allE.ruparea = oneE.ruparea + twoE.ruparea
allE.potency = oneE.potency + twoE.potency
allE.moment = oneE.moment + twoE.moment
self._check(oneE, stats_line.one)
self._check(twoE, stats_line.two)
self._check(allE, stats_line.all)
return
# ----------------------------------------------------------------------
if __name__ == '__main__':
import unittest
from TestEqInfoLine import TestEqInfoLine as Tester
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(Tester))
unittest.TextTestRunner(verbosity=2).run(suite)
# End of file
```
#### File: tests_auto/eqinfo/TestEqInfoTri3.py
```python
import numpy
from TestEqInfo import TestEqInfo, run_eqinfo
# Local version of EqInfoApp
from pylith.apps.EqInfoApp import EqInfoApp
class Tri3App(EqInfoApp):
def __init__(self):
EqInfoApp.__init__(self, name="tri3")
return
class TestEqInfoTri3(TestEqInfo):
"""
Test suite for testing pylith_eqinfo with tri3 meshes.
"""
def setUp(self):
"""
Setup for test.
"""
run_eqinfo(Tri3App)
return
def test_stats(self):
"""
Check fault stats.
"""
import stats_tri3
timestamp = numpy.array([0.0, 1.0], dtype=numpy.float64)
oneE = stats_tri3.RuptureStats()
oneE.timestamp = timestamp
oneE.ruparea = numpy.array([1.5+2.0, 1.5+2.0], dtype=numpy.float64)
slip0 = (0.2**2+0.5**2)**0.5
slip1 = (0.5**2+0.4**2)**0.5
oneE.potency = numpy.array([slip0*1.5+slip1*2.0, 0.1*1.5+0.2*2.0], dtype=numpy.float64)
oneE.moment = numpy.array([slip0*1.5*1.0e+10+slip1*2.0*2.0e+10,
0.1*1.5*1.0e+10+0.2*2.0*2.0e+10], dtype=numpy.float64)
twoE = stats_tri3.RuptureStats()
twoE.timestamp = timestamp
twoE.ruparea = numpy.array([1.5, 0.0], dtype=numpy.float64)
twoE.potency = numpy.array([0.1*1.5, 0.0], dtype=numpy.float64)
twoE.moment = numpy.array([0.1*1.5*1.0e+10, 0.0], dtype=numpy.float64)
allE = stats_tri3.RuptureStats()
allE.timestamp = timestamp
allE.ruparea = oneE.ruparea + twoE.ruparea
allE.potency = oneE.potency + twoE.potency
allE.moment = oneE.moment + twoE.moment
self._check(oneE, stats_tri3.one)
self._check(twoE, stats_tri3.two)
self._check(allE, stats_tri3.all)
return
# ----------------------------------------------------------------------
if __name__ == '__main__':
import unittest
from TestEqInfoTri3 import TestEqInfoTri3 as Tester
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(Tester))
unittest.TextTestRunner(verbosity=2).run(suite)
# End of file
```
#### File: faults/data/cohesivedyn.py
```python
cell = "tri3d"
testCase = "open"
import numpy
from numpy import *
from numpy.linalg import inv
numpy.set_printoptions(precision=12)
lengthScale = 1.0e+3
# ----------------------------------------------------------------------
def printdata(data):
"""
Print data as C array.
"""
(nrows, ncols) = data.shape
style = " %16.12f,"*ncols
for row in xrange(nrows):
print (style % tuple(data[row,:]))
return
# ----------------------------------------------------------------------
def globalToFault(v, R):
"""
Convert vector from global coordinate system to fault coordinate system.
"""
(m,ndof) = v.shape
vF = numpy.dot(R, v.reshape(m*ndof,1))
return vF.reshape((m, ndof))
# ----------------------------------------------------------------------
def faultToGlobal(v, R):
"""
Convert vector from fault coordinate system to global coordinate system.
"""
(m,ndof) = v.shape
vG = numpy.dot(R.transpose(), v.reshape(m*ndof,1))
return vG.reshape((m, ndof))
# ----------------------------------------------------------------------
if cell == "tri3" or cell == "tri3d" or cell == "quad4":
if cell == "tri3":
dlagrange1 = numpy.zeros(2)
indexL = numpy.arange(12,16)
indexN = numpy.arange(2,6)
indexP = numpy.arange(8,12)
n = 16
m = 4
DOF = 2
fieldT = numpy.array([[-8.6, 9.6],
[-8.8, 9.8]])
fieldIncr = numpy.array([[-1.6, 2.6],
[-1.8, 2.8]])
L = numpy.array([[1.0, 0.0, 0.0, 0.0,],
[0.0, 1.0, 0.0, 0.0,],
[0.0, 0.0, 1.0, 0.0,],
[0.0, 0.0, 0.0, 1.0,],]);
C = numpy.array([[0.0, +1.0, 0.0, 0.0,],
[+1.0, 0.0, 0.0, 0.0,],
[0.0, 0.0, 0.0, +1.0,],
[0.0, 0.0, +1.0, 0.0,],]);
jacobianN = numpy.array(
[[ 4.0, -1.2, -2.2, -2.3,],
[ -1.2, 5.0, -1.3, -3.2,],
[ -2.2, -1.3, 4.1, -4.3,],
[ -2.3, -3.2, -4.3, 5.1,],])
jacobianP = numpy.array(
[[ 5.0, -1.2, -2.2, -2.3,],
[ -1.2, 4.0, -1.3, -3.2,],
[ -2.2, -1.3, 5.1, -4.3,],
[ -2.3, -3.2, -4.3, 4.1,],])
disp = numpy.array([[ 8.1, 9.1,],
[ 8.2, 9.2,],
[ 8.3, 9.3,],
[ 8.4, 9.4,],
[ 8.2, 9.2,],
[ 8.3, 9.3,],
[-8.6, 9.6,],
[-8.8, 9.8,],])
if testCase == "slip":
dispIncr = numpy.array([[ 9.1, 7.1,],
[ 9.2, 7.2,],
[ 9.3, 7.3,],
[ 9.4, 7.4,],
[ 9.2, 7.2,],
[ 9.3, 7.3,],
[-1.6, 2.6,],
[-1.8, 2.8,],])
elif testCase == "open":
dispIncr = numpy.array([[ 9.1, 7.1,],
[ 9.2, 7.2,],
[ 9.3, 7.3,],
[ 9.4, 7.4,],
[ 9.2, 7.2,],
[ 9.3, 7.3,],
[ +10.6, -10.6,],
[ +10.8, -10.8,],])
elif cell == "tri3d":
dlagrange1 = numpy.zeros(3)
indexL = numpy.array([18, 19, 20, 21, 22, 23])
indexN = numpy.array([2, 3, 4, 5, 8, 9])
indexP = numpy.array([12, 13, 14, 15, 16, 17])
n = 24
m = 6
DOF = 2
fieldT = numpy.array([[-3.8,-4.8],
[-3.0, 4.0],
[3.2, -4.2]])
fieldIncr = numpy.array([[-1.8,+3.6],
[-1.0, 1.1],
[ 1.7,-1.2]])
L = numpy.array([[2.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 2.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0]])
C = numpy.array([[-0.70710678118654757, +0.70710678118654757, 0.0, 0.0, 0.0, 0.0,],
[+0.70710678118654757, +0.70710678118654757, 0.0, 0.0, 0.0, 0.0,],
[0.0, 0.0, 0.0, +1.0, 0.0, 0.0,],
[0.0, 0.0, +1.0, 0.0, 0.0, 0.0,],
[0.0, 0.0, 0.0, 0.0, -1.0, 0.0,],
[0.0, 0.0, 0.0, 0.0, 0.0, +1.0,],])
jacobianN = numpy.array(
[[+6.0, -1.0, -1.1, -1.2, -1.3, -1.4],
[-1.0, +6.1, -0.9, -0.8, -0.7, -0.6],
[-1.1, -0.9, +6.2, -2.1, 0.0, 0.0],
[-1.2, -0.8, -2.1, +6.3, 0.0, 0.0],
[-1.3, -0.7, 0.0, 0.0, +6.4, -1.1],
[-1.4, -0.6, 0.0, 0.0, -1.1, +6.5]])
jacobianP = numpy.array(
[[+5.0, -1.0, -1.1, -1.2, -1.3, -1.4],
[-1.0, +5.1, -0.9, -0.8, -0.7, -0.6],
[-1.1, -0.9, +5.2, -2.1, 0.0, 0.0],
[-1.2, -0.8, -2.1, +5.3, 0.0, 0.0],
[-1.3, -0.7, 0.0, 0.0, +5.4, -1.1],
[-1.4, -0.6, 0.0, 0.0, -1.1, +5.5]])
disp = numpy.array([[ 6.1, 8.1,],
[ 6.2, 8.2,],
[ 6.3, 8.3,],
[ 6.4, 8.4,],
[ 6.5, 8.5,],
[ 6.6, 8.6,],
[ 6.2, 8.2,],
[ 6.3, 8.3,],
[ 6.5, 8.5,],
[-3.8,-4.8,],
[-3.0, 4.0,],
[ 3.2,-4.2,],])
if testCase == "slip":
dispIncr = numpy.array([[ 1.1, 2.1,],
[ 1.2, 2.2,],
[ 1.3, 2.3,],
[ 1.4, 2.4,],
[ 1.5, 2.5,],
[ 1.6, 2.6,],
[ 1.2, 2.2,],
[ 1.3, 2.3,],
[ 1.5, 2.5,],
[-1.8,+3.6,],
[-1.0, 1.1,],
[ 1.7,-1.2,],])
elif testCase == "open":
dispIncr = numpy.array([[ 1.1, 2.1,],
[ 1.2, 2.2,],
[ 1.3, 2.3,],
[ 1.4, 2.4,],
[ 1.5, 2.5,],
[ 1.6, 2.6,],
[ 1.2, 2.2,],
[ 1.3, 2.3,],
[ 1.5, 2.5,],
[+11.8, 11.8,],
[+10.0, 0.1,],
[ 1.2, +10.2,],])
elif cell == "quad4":
dlagrange1 = numpy.zeros(2)
indexL = numpy.arange(16,20)
indexN = numpy.arange(4,8)
indexP = numpy.arange(12,16)
n = 20
m = 4
DOF = 2
fieldT = numpy.array([[-8.6, 9.6],
[-8.8, 9.8]])
fieldIncr = numpy.array([[-1.6, 2.6],
[-1.8, 2.5]])
L = numpy.array([[1.0, 0.0, 0.0, 0.0,],
[0.0, 1.0, 0.0, 0.0,],
[0.0, 0.0, 1.0, 0.0,],
[0.0, 0.0, 0.0, 1.0,],]);
C = numpy.array([[0.0, +1.0, 0.0, 0.0,],
[+1.0, 0.0, 0.0, 0.0,],
[0.0, 0.0, 0.0, +1.0,],
[0.0, 0.0, +1.0, 0.0,],]);
jacobianN = numpy.array(
[[ 4.0, -1.2, -2.2, -2.3,],
[ -1.2, 5.0, -1.3, -3.2,],
[ -2.2, -1.3, 4.1, -4.3,],
[ -2.3, -3.2, -4.3, 5.1,],])
jacobianP = numpy.array(
[[ 5.0, -1.2, -2.2, -2.3,],
[ -1.2, 4.0, -1.3, -3.2,],
[ -2.2, -1.3, 5.1, -4.3,],
[ -2.3, -3.2, -4.3, 4.1,],])
disp = numpy.array([[ 8.1, 9.1,],
[ 8.3, 9.3,],
[ 8.2, 9.2,],
[ 8.3, 9.3,],
[ 8.5, 9.5,],
[ 8.6, 9.6,],
[ 8.2, 9.6,],
[ 8.3, 9.8,],
[-8.6, 9.6,],
[-8.8, 9.8,],])
if testCase == "slip":
dispIncr = numpy.array([[ 1.1, 2.1,],
[ 1.2, 2.2,],
[ 1.2, 2.2,],
[ 1.3, 2.3,],
[ 1.5, 2.5,],
[ 1.6, 2.6,],
[ 1.2, 2.2,],
[ 1.3, 2.3,],
[-1.6, 2.6,],
[-1.8, 2.5,],])
elif testCase == "open":
dispIncr = numpy.array([[ 1.1, 2.1,],
[ 1.2, 2.2,],
[ 1.2, 2.2,],
[ 1.3, 2.3,],
[ 1.5, 2.5,],
[ 1.6, 2.6,],
[ 1.2, 2.2,],
[ 1.3, 2.3,],
[+10.6, -12.6,],
[+10.8, -12.8,],])
# ------------------------------------------------------------------
lagrangeScale = lengthScale**1
fieldTpdt = fieldT + fieldIncr
fieldTpdt = globalToFault(fieldTpdt, C)
tractionShear = abs(fieldTpdt[:,0])
tractionNormal = fieldTpdt[:,1]
print "tractionShear",tractionShear
print "tractionNormal",tractionNormal
friction = -0.6 * tractionNormal;
print "friction",friction
dlagrange0 = (friction - tractionShear) * fieldTpdt[:,0] / tractionShear
print "dlagrange0",dlagrange0
if testCase == "slip":
dLagrange = numpy.vstack((dlagrange0, dlagrange1))
dLagrange = numpy.transpose(dLagrange)
dLagrange = faultToGlobal(dLagrange, C).reshape(m)
elif testCase == "open":
dLagrange = numpy.reshape(disp+dispIncr, n)
dLagrange = -dLagrange[indexL]
print "dLagrange \n", dLagrange
L /= lengthScale**1
RHS = numpy.dot(numpy.transpose(L),dLagrange)
print "RHS",RHS
duN = numpy.dot(inv(jacobianN),RHS)
duP = -numpy.dot(inv(jacobianP),RHS)
dispRelIncr = duP - duN
dispTpdt = disp + dispIncr
dispTpdt = numpy.reshape(dispTpdt, n)
slipVertex = dispRelIncr + dispTpdt[indexP]-dispTpdt[indexN]
slipVertex = numpy.reshape(slipVertex, (m/DOF,DOF))
slipVertex = globalToFault(slipVertex, C)
mask = slipVertex[:,1] < 0.0
#slipVertex[:,1] = 0
print "slip",slipVertex
slipVertex = faultToGlobal(slipVertex, C)
slipVertex = numpy.reshape(slipVertex, m)
disp = numpy.reshape(disp, n)
slipIncrVertex = slipVertex - (disp[indexP] - disp[indexN])
print "duN \n", duN
print "duP \n", duP
dispIncrE = dispIncr
dispIncrE = numpy.reshape(dispIncrE, n)
dispIncrE[indexL] = dispIncrE[indexL] + dLagrange
dispIncrE[indexN] = dispIncrE[indexN] - 0.5*slipIncrVertex
dispIncrE[indexP] = dispIncrE[indexP] + 0.5*slipIncrVertex
dispIncrE = numpy.reshape(dispIncrE, (n/DOF,DOF))
slipVertex = numpy.reshape(slipVertex, (m/DOF,DOF))
slipVertex = globalToFault(slipVertex, C)
print "dispIncrE\n", printdata(dispIncrE)
print "slipVertexE\n", printdata(slipVertex)
# ----------------------------------------------------------------------
elif cell == "tet4" or cell == "hex8":
lagrangeScale = lengthScale**2
if cell == "tet4":
dlagrange2 = numpy.zeros(3)
indexL = numpy.arange(24,33)
indexN = numpy.arange(3,12)
indexP = numpy.arange(15,24)
n = 33
m = 9
DOF = 3
fieldT = numpy.array([[-7.7, 18.7, 19.7],
[-7.9, 18.9, 19.9],
[-7.1, 18.1, 19.1]])
fieldIncr = numpy.array([[-4.7, 5.7, 6.7],
[-4.9, 5.9, 6.9],
[-4.1, 5.1, 6.1]])
L = numpy.array([[1.0/3.0,0,0, 0.0,0,0, 0.0,0,0,],
[0,1.0/3.0,0, 0,0.0,0, 0,0.0,0,],
[0,0,1.0/3.0, 0,0,0.0, 0,0,0.0,],
[0.0,0,0, 1.0/3.0,0,0, 0.0,0,0,],
[0,0.0,0, 0,1.0/3.0,0, 0,0.0,0,],
[0,0,0.0, 0,0,1.0/3.0, 0,0,0.0,],
[0.0,0,0, 0.0,0,0, 1.0/3.0,0,0,],
[0,0.0,0, 0,0.0,0, 0,1.0/3.0,0,],
[0,0,0.0, 0,0,0.0, 0,0,1.0/3.0,]])
Cv = numpy.array([[ 0, +1, 0,],
[ 0, 0, +1,],
[ +1, 0, 0,],])
Zv = numpy.zeros([3,3])
C = numpy.vstack( (numpy.hstack((Cv, Zv, Zv)),
numpy.hstack((Zv, Cv, Zv)),
numpy.hstack((Zv, Zv, Cv)) ) )
jacobianN = numpy.array(
[[ 4.0, -1.1, -1.2, -1.3, -1.4, -1.5, -1.6, -1.7, -1.8],
[-1.1, 4.1, -2.3, -2.4, -2.5, -2.6, -2.7, -2.8, -2.9],
[-1.2, -2.3, 4.2, -1.0, -1.1, -1.2, -1.3, -1.4, -1.5],
[-1.3, -2.4, -1.0, 4.3, -0.2, -0.3, -0.4, -0.5, -0.6],
[-1.4, -2.5, -1.1, -0.2, 4.4, -0.9, -0.8, -0.7, -0.5],
[-1.5, -2.6, -1.2, -0.3, -0.9, 4.5, -1.1, -1.2, -1.3],
[-1.6, -2.7, -1.3, -0.4, -0.8, -1.1, 4.6, -1.8, -1.5],
[-1.7, -2.8, -1.4, -0.5, -0.7, -1.2, -1.8, 4.7, -1.1],
[-1.8, -2.9, -1.5, -0.6, -0.5, -1.3, -1.5, -1.1, 4.8]])
jacobianP = numpy.array(
[[ 5.0, -1.1, -1.2, -1.3, -1.4, -1.5, -1.6, -1.7, -1.8],
[-1.1, 5.1, -2.3, -2.4, -2.5, -2.6, -2.7, -2.8, -2.9],
[-1.2, -2.3, 5.2, -1.0, -1.1, -1.2, -1.3, -1.4, -1.5],
[-1.3, -2.4, -1.0, 5.3, -0.2, -0.3, -0.4, -0.5, -0.6],
[-1.4, -2.5, -1.1, -0.2, 5.4, -0.9, -0.8, -0.7, -0.5],
[-1.5, -2.6, -1.2, -0.3, -0.9, 5.5, -1.1, -1.2, -1.3],
[-1.6, -2.7, -1.3, -0.4, -0.8, -1.1, 5.6, -1.8, -1.5],
[-1.7, -2.8, -1.4, -0.5, -0.7, -1.2, -1.8, 5.7, -1.1],
[-1.8, -2.9, -1.5, -0.6, -0.5, -1.3, -1.5, -1.1, 5.8]])
disp = numpy.array([[ 7.1, 8.1, 9.1,],
[ 7.2, 8.2, 9.2,],
[ 7.3, 8.3, 9.3,],
[ 7.4, 8.4, 9.4,],
[ 7.5, 8.5, 9.5,],
[ 7.2, 8.2, 9.2,],
[ 7.3, 8.3, 9.3,],
[ 7.4, 8.4, 9.4,],
[-7.7, 18.7, 19.7,],
[-7.9, 18.9, 19.9,],
[-7.1, 18.1, 19.1,],])
if testCase == "slip":
dispIncr = numpy.array([[ 1.1, 2.1, 3.1,],
[ 1.2, 2.2, 3.2,],
[ 1.3, 2.3, 3.3,],
[ 1.4, 2.4, 3.4,],
[ 1.5, 2.5, 3.5,],
[ 1.2, 2.2, 3.2,],
[ 1.3, 2.3, 3.3,],
[ 1.4, 2.4, 3.4,],
[-4.7, 5.7, 6.7,],
[-4.9, 5.9, 6.9,],
[-4.1, 5.1, 6.1,],])
elif testCase == "open":
dispIncr = numpy.array([[ 1.1, 2.1, 3.1,],
[ 1.2, 2.2, 3.2,],
[ 1.3, 2.3, 3.3,],
[ 1.4, 2.4, 3.4,],
[ 1.5, 2.5, 3.5,],
[ 1.2, 2.2, 3.2,],
[ 1.3, 2.3, 3.3,],
[ 1.4, 2.4, 3.4,],
[+80.7, 2.7, 3.7,],
[+80.9, 2.9, 3.9,],
[+80.1, 2.1, 3.1,],])
elif cell == "hex8":
dlagrange2 = numpy.zeros(4)
indexL = numpy.arange(48,60)
indexN = numpy.arange(12,24)
indexP = numpy.arange(36,48)
n = 60
m = 12
DOF = 3
a0 = 1.0
a1 = 0.0
a2 = 0.0
L = numpy.array([[a0, 0, 0, a1, 0, 0, a1, 0, 0, a2, 0, 0],
[0, a0, 0, 0, a1, 0, 0, a1, 0, 0, a2, 0],
[0, 0, a0, 0, 0, a1, 0, 0, a1, 0, 0, a2],
[a1, 0, 0, a0, 0, 0, a2, 0, 0, a1, 0, 0],
[0, a1, 0, 0, a0, 0, 0, a2, 0, 0, a1, 0],
[0, 0, a1, 0, 0, a0, 0, 0, a2, 0, 0, a1],
[a1, 0, 0, a2, 0, 0, a0, 0, 0, a1, 0, 0],
[0, a1, 0, 0, a2, 0, 0, a0, 0, 0, a1, 0],
[0, 0, a1, 0, 0, a2, 0, 0, a0, 0, 0, a1],
[a2, 0, 0, a1, 0, 0, a1, 0, 0, a0, 0, 0],
[0, a2, 0, 0, a1, 0, 0, a1, 0, 0, a0, 0],
[0, 0, a2, 0, 0, a1, 0, 0, a1, 0, 0, a0]])
fieldT = numpy.array([[-4.4, 2.4, 3.4],
[-4.6, 2.6, 3.6],
[-4.8, 2.8, 3.8],
[-4.0, 2.0, 3.0]])
fieldIncr = numpy.array([[-1.4, 2.4, 0.4],
[-1.6, 2.6, 0.6],
[-1.8, 2.8, 0.8],
[-1.0, 2.0, 0.2]])
Cv = numpy.array([[ 0, +1, 0,],
[ 0, 0, +1,],
[ +1, 0, 0,],])
Zv = numpy.zeros([3,3])
C = numpy.vstack( (numpy.hstack((Cv, Zv, Zv, Zv)),
numpy.hstack((Zv, Cv, Zv, Zv)),
numpy.hstack((Zv, Zv, Cv, Zv)),
numpy.hstack((Zv, Zv, Zv, Cv)) ) )
jacobianN = numpy.array(
[[+6.0, -0.5, -0.6, -0.7, -0.8, -0.9, -1.0, -0.8, -0.7, -0.6, -0.5, -0.4,],
[-0.5, +6.1, -1.0, -1.1, -1.2, -1.3, -1.4, -1.3, -1.2, -1.1, -1.0, -0.9,],
[-0.6, -1.0, +6.2, -0.5, -0.6, -0.7, -0.8, -0.9, -0.8, -0.7, -0.6, -0.5,],
[-0.7, -1.1, -0.5, +6.3, -0.8, -0.7, -0.6, -0.5, -0.4, -0.3, -0.2, -0.1,],
[-0.8, -1.2, -0.6, -0.8, +6.4, -0.3, -0.4, -0.5, -0.6, -0.7, -0.8, -0.9,],
[-0.9, -1.3, -0.7, -0.7, -0.3, +6.5, -0.3, -0.8, -0.7, -0.6, -0.9, -0.7,],
[-1.0, -1.4, -0.8, -0.6, -0.4, -0.3, +6.6, -1.1, -0.8, -0.7, -0.6, -0.5,],
[-0.8, -1.3, -0.9, -0.5, -0.5, -0.8, -1.1, +6.7, -0.8, -0.9, -1.0, -1.1,],
[-0.7, -1.2, -0.8, -0.4, -0.6, -0.7, -0.8, -0.8, +6.8, -1.0, -1.1, -1.2,],
[-0.6, -1.1, -0.7, -0.3, -0.7, -0.6, -0.7, -0.9, -1.0, +6.9, -0.5, -0.4,],
[-0.5, -1.0, -0.6, -0.2, -0.8, -0.9, -0.6, -1.0, -1.1, -0.5, +6.0, -1.2,],
[-0.4, -0.9, -0.5, -0.1, -0.9, -0.7, -0.5, -1.1, -1.2, -0.4, -1.2, +6.1,],])
jacobianP = numpy.array(
[[+7.0, -0.5, -0.6, -0.7, -0.8, -0.9, -1.0, -0.8, -0.7, -0.6, -0.5, -0.4,],
[-0.5, +7.1, -1.0, -1.1, -1.2, -1.3, -1.4, -1.3, -1.2, -1.1, -1.0, -0.9,],
[-0.6, -1.0, +7.2, -0.5, -0.6, -0.7, -0.8, -0.9, -0.8, -0.7, -0.6, -0.5,],
[-0.7, -1.1, -0.5, +7.3, -0.8, -0.7, -0.6, -0.5, -0.4, -0.3, -0.2, -0.1,],
[-0.8, -1.2, -0.6, -0.8, +7.4, -0.3, -0.4, -0.5, -0.6, -0.7, -0.8, -0.9,],
[-0.9, -1.3, -0.7, -0.7, -0.3, +7.5, -0.3, -0.8, -0.7, -0.6, -0.9, -0.7,],
[-1.0, -1.4, -0.8, -0.6, -0.4, -0.3, +7.6, -1.1, -0.8, -0.7, -0.6, -0.5,],
[-0.8, -1.3, -0.9, -0.5, -0.5, -0.8, -1.1, +7.7, -0.8, -0.9, -1.0, -1.1,],
[-0.7, -1.2, -0.8, -0.4, -0.6, -0.7, -0.8, -0.8, +7.8, -1.0, -1.1, -1.2,],
[-0.6, -1.1, -0.7, -0.3, -0.7, -0.6, -0.7, -0.9, -1.0, +7.9, -0.5, -0.4,],
[-0.5, -1.0, -0.6, -0.2, -0.8, -0.9, -0.6, -1.0, -1.1, -0.5, +7.0, -1.2,],
[-0.4, -0.9, -0.5, -0.1, -0.9, -0.7, -0.5, -1.1, -1.2, -0.4, -1.2, +7.1,],])
disp = numpy.array([[ 4.1, 2.1, 3.1,],
[ 4.2, 2.2, 3.2,],
[ 4.3, 2.3, 3.3,],
[ 4.4, 2.4, 3.4,],
[ 4.5, 2.5, 3.5,],
[ 4.6, 2.6, 3.6,],
[ 4.7, 2.7, 3.7,],
[ 4.8, 2.8, 3.8,],
[ 4.9, 2.9, 3.9,],
[ 4.0, 2.0, 3.0,],
[ 4.1, 2.1, 3.1,],
[ 4.2, 2.2, 3.2,],
[ 4.5, 3.2, 4.3,],
[ 4.6, 3.5, 4.6,],
[ 4.7, 3.7, 4.6,],
[ 4.8, 3.6, 4.5,],
[-4.4, 2.4, 3.4,],
[-4.6, 2.6, 3.6,],
[-4.8, 2.8, 3.8,],
[-4.0, 2.0, 3.0,],])
if testCase == "slip":
dispIncr = numpy.array([[ 1.1, 2.1, 0.1,],
[ 1.2, 2.2, 0.2,],
[ 1.3, 2.3, 0.3,],
[ 1.4, 2.4, 0.4,],
[ 1.5, 2.5, 0.5,],
[ 1.6, 2.6, 0.6,],
[ 1.7, 2.7, 0.7,],
[ 1.8, 2.8, 0.8,],
[ 1.9, 2.9, 0.9,],
[ 1.0, 2.0, 0.0,],
[ 1.1, 2.1, 0.1,],
[ 1.2, 2.2, 0.2,],
[ 1.5, 2.5, 0.5,],
[ 1.6, 2.6, 0.6,],
[ 1.7, 2.7, 0.7,],
[ 1.8, 2.8, 0.8,],
[-1.4, 2.4, 0.4,],
[-1.6, 2.6, 0.6,],
[-1.8, 2.8, 0.8,],
[-1.0, 2.0, 0.2,],
])
elif testCase == "open":
dispIncr = numpy.array([[ 1.1, 2.1, 0.1,],
[ 1.2, 2.2, 0.2,],
[ 1.3, 2.3, 0.3,],
[ 1.4, 2.4, 0.4,],
[ 1.5, 2.5, 0.5,],
[ 1.6, 2.6, 0.6,],
[ 1.7, 2.7, 0.7,],
[ 1.8, 2.8, 0.8,],
[ 1.9, 2.9, 0.9,],
[ 1.0, 2.0, 0.0,],
[ 1.1, 2.1, 0.1,],
[ 1.2, 2.2, 0.2,],
[ 1.5, 2.5, 0.5,],
[ 1.6, 2.6, 0.6,],
[ 1.7, 2.7, 0.7,],
[ 1.8, 2.8, 0.8,],
[+20.4, 2.4, 0.4,],
[+20.6, 2.6, 0.6,],
[+20.8, 2.8, 0.8,],
[+20.0, 2.0, 0.2,],])
# ------------------------------------------------------------------
fieldTpdt = fieldT + fieldIncr
fieldTpdt = globalToFault(fieldTpdt, C)
tractionShear = (fieldTpdt[:,0]**2 + fieldTpdt[:,1]**2)**0.5
tractionNormal = fieldTpdt[:,2]
print "tractionShear",tractionShear
print "tractionNormal",tractionNormal
friction = -0.6 * tractionNormal;
print "friction",friction
dlagrange0 = (friction - tractionShear) * fieldTpdt[:,0] / tractionShear
dlagrange1 = (friction - tractionShear) * fieldTpdt[:,1] / tractionShear
print "dlagrange0",dlagrange0
print "dlagrange1",dlagrange1
if testCase == "slip":
dLagrange = numpy.vstack((dlagrange0, dlagrange1, dlagrange2))
dLagrange = numpy.transpose(dLagrange)
dLagrange = faultToGlobal(dLagrange, C).reshape(m)
elif testCase == "open":
dLagrange = numpy.reshape(disp+dispIncr, n)
dLagrange = -dLagrange[indexL]
print "dLagrange \n", dLagrange
L /= lengthScale**2
RHS = numpy.dot(numpy.transpose(L),dLagrange)
print "RHS",RHS
duN = numpy.dot(inv(jacobianN),RHS)
duP = -numpy.dot(inv(jacobianP),RHS)
dispRel = duP - duN
dispTpdt = disp + dispIncr
dispTpdt = numpy.reshape(dispTpdt, n)
slipVertex = dispRel + dispTpdt[indexP]-dispTpdt[indexN]
slipVertex = numpy.reshape(slipVertex, (m/DOF,DOF))
slipVertex = globalToFault(slipVertex, C)
if testCase == "slip":
slipVertex[:,2] = 0
mask = slipVertex[:,2] < 0.0
slipVertex[mask,2] = 0
slipVertex = faultToGlobal(slipVertex, C)
slipVertex = numpy.reshape(slipVertex, m)
disp = numpy.reshape(disp, n)
slipIncrVertex = slipVertex - (disp[indexP] - disp[indexN])
print "duN \n", duN
print "duP \n", duP
dispIncrE = dispIncr
dispIncrE = numpy.reshape(dispIncrE, n)
dispIncrE[indexL] = dispIncrE[indexL] + dLagrange
dispIncrE[indexN] = dispIncrE[indexN] - 0.5*slipIncrVertex
dispIncrE[indexP] = dispIncrE[indexP] + 0.5*slipIncrVertex
dispIncrE = numpy.reshape(dispIncrE, (n/DOF,DOF))
slipVertex = numpy.reshape(slipVertex, (m/DOF,DOF))
slipVertex = globalToFault(slipVertex, C)
print "dispIncrE\n", printdata(dispIncrE)
print "slipVertexE\n", printdata(slipVertex)
```
#### File: feassemble/data/ElasticityLgDeformApp.py
```python
from ElasticityApp import ElasticityApp
import numpy
import feutils
# ----------------------------------------------------------------------
# ElasticityLgDeformApp class
class ElasticityLgDeformApp(ElasticityApp):
"""
Python application for generating C++ data files for testing C++
elasticity integrator objects.
"""
# PUBLIC METHODS /////////////////////////////////////////////////////
def __init__(self, name="elasticitylgdeformapp"):
"""
Constructor.
"""
ElasticityApp.__init__(self, name)
return
# PRIVATE METHODS ////////////////////////////////////////////////////
def _calculateStiffnessMat(self):
"""
Calculate stiffness matrix.
"""
import feutils
K = numpy.zeros( (self.spaceDim*self.numVertices,
self.spaceDim*self.numVertices),
dtype=numpy.float64)
# Matrix of elasticity values
D = self._calculateElasticityMat()
for cell in self.cells:
cellK = numpy.zeros( (self.spaceDim*self.numBasis,
self.spaceDim*self.numBasis),
dtype=numpy.float64)
vertices = self.vertices[cell, :]
(jacobian, jacobianInv, jacobianDet, basisDeriv) = \
feutils.calculateJacobian(self.quadrature, vertices)
fieldTpdt = self.fieldT + self.fieldTIncr
for iQuad in xrange(self.numQuadPts):
wt = self.quadWts[iQuad] * jacobianDet[iQuad]
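# BL0 is the small-strain part of the linear strain-displacement matrix;
# BL1 adds the displacement-dependent (initial displacement) part.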
BL0 = self._calculateBasisDerivMatLinear0(basisDeriv, iQuad)
BL1 = self._calculateBasisDerivMatLinear1(basisDeriv, iQuad, fieldTpdt)
BL = BL0 + BL1
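# Material stiffness contribution: BL^T D BL.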
cellK[:] += wt * numpy.dot(numpy.dot(BL.transpose(), D), BL)
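# Geometric (initial stress) stiffness contribution: BNL^T S BNL.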
BNL = self._calculateBasisDerivMatNonlinear(basisDeriv, iQuad)
strain = self._calculateStrain(basisDeriv, iQuad, fieldTpdt)
S = self._calculateStress(strain, D)
cellK[:] += wt * numpy.dot(numpy.dot(BNL.transpose(), S), BNL)
feutils.assembleMat(K, cellK, cell, self.spaceDim)
return K
def _calculateBasisDerivMatLinear0(self, basisDeriv, iQuad):
"""
Calculate matrix of derivatives of basis functions.
"""
if 3 == self.spaceDim:
B = numpy.zeros( (6, self.spaceDim*self.numBasis),
dtype=numpy.float64)
for iBasis in xrange(self.numBasis):
B[0, iBasis*self.spaceDim+0] = basisDeriv[iQuad, iBasis, 0]
B[1, iBasis*self.spaceDim+1] = basisDeriv[iQuad, iBasis, 1]
B[2, iBasis*self.spaceDim+2] = basisDeriv[iQuad, iBasis, 2]
B[3, iBasis*self.spaceDim+0] = basisDeriv[iQuad, iBasis, 1]
B[3, iBasis*self.spaceDim+1] = basisDeriv[iQuad, iBasis, 0]
B[4, iBasis*self.spaceDim+1] = basisDeriv[iQuad, iBasis, 2]
B[4, iBasis*self.spaceDim+2] = basisDeriv[iQuad, iBasis, 1]
B[5, iBasis*self.spaceDim+0] = basisDeriv[iQuad, iBasis, 2]
B[5, iBasis*self.spaceDim+2] = basisDeriv[iQuad, iBasis, 0]
elif 2 == self.spaceDim:
B = numpy.zeros( (3, self.spaceDim*self.numBasis),
dtype=numpy.float64)
for iBasis in xrange(self.numBasis):
B[0, iBasis*self.spaceDim+0] = basisDeriv[iQuad, iBasis, 0]
B[1, iBasis*self.spaceDim+1] = basisDeriv[iQuad, iBasis, 1]
B[2, iBasis*self.spaceDim+0] = basisDeriv[iQuad, iBasis, 1]
B[2, iBasis*self.spaceDim+1] = basisDeriv[iQuad, iBasis, 0]
elif 1 == self.spaceDim:
B = numpy.zeros( (1, self.spaceDim*self.numBasis),
dtype=numpy.float64)
for iBasis in xrange(self.numBasis):
B[0, iBasis*self.spaceDim+0] = basisDeriv[iQuad, iBasis, 0]
else:
raise ValueError("Unknown spatial dimension '%d'." % self.spaceDim)
return B
def _calculateBasisDerivMatLinear1(self, basisDeriv, iQuad, disp):
"""
Calculate matrix of derivatives of basis functions.
"""
if 3 == self.spaceDim:
B = numpy.zeros( (6, self.spaceDim*self.numBasis),
dtype=numpy.float64)
l11 = 0.0
l12 = 0.0
l13 = 0.0
l21 = 0.0
l22 = 0.0
l23 = 0.0
l31 = 0.0
l32 = 0.0
l33 = 0.0
for kBasis in xrange(self.numBasis):
l11 += basisDeriv[iQuad, kBasis, 0]*disp[kBasis*self.spaceDim ]
l12 += basisDeriv[iQuad, kBasis, 1]*disp[kBasis*self.spaceDim ]
l13 += basisDeriv[iQuad, kBasis, 2]*disp[kBasis*self.spaceDim ]
l21 += basisDeriv[iQuad, kBasis, 0]*disp[kBasis*self.spaceDim+1]
l22 += basisDeriv[iQuad, kBasis, 1]*disp[kBasis*self.spaceDim+1]
l23 += basisDeriv[iQuad, kBasis, 2]*disp[kBasis*self.spaceDim+1]
l31 += basisDeriv[iQuad, kBasis, 0]*disp[kBasis*self.spaceDim+2]
l32 += basisDeriv[iQuad, kBasis, 1]*disp[kBasis*self.spaceDim+2]
l33 += basisDeriv[iQuad, kBasis, 2]*disp[kBasis*self.spaceDim+2]
for iBasis in xrange(self.numBasis):
B[0, iBasis*self.spaceDim+0] = basisDeriv[iQuad, iBasis, 0]*l11
B[0, iBasis*self.spaceDim+1] = basisDeriv[iQuad, iBasis, 0]*l21
B[0, iBasis*self.spaceDim+2] = basisDeriv[iQuad, iBasis, 0]*l31
B[1, iBasis*self.spaceDim+0] = basisDeriv[iQuad, iBasis, 1]*l12
B[1, iBasis*self.spaceDim+1] = basisDeriv[iQuad, iBasis, 1]*l22
B[1, iBasis*self.spaceDim+2] = basisDeriv[iQuad, iBasis, 1]*l32
B[2, iBasis*self.spaceDim+0] = basisDeriv[iQuad, iBasis, 2]*l13
B[2, iBasis*self.spaceDim+1] = basisDeriv[iQuad, iBasis, 2]*l23
B[2, iBasis*self.spaceDim+2] = basisDeriv[iQuad, iBasis, 2]*l33
B[3, iBasis*self.spaceDim+0] = \
basisDeriv[iQuad, iBasis, 1]*l11 + basisDeriv[iQuad, iBasis, 0]*l12
B[3, iBasis*self.spaceDim+1] = \
basisDeriv[iQuad, iBasis, 0]*l22 + basisDeriv[iQuad, iBasis, 1]*l21
B[3, iBasis*self.spaceDim+2] = \
basisDeriv[iQuad, iBasis, 1]*l31 + basisDeriv[iQuad, iBasis, 0]*l32
B[4, iBasis*self.spaceDim+0] = \
basisDeriv[iQuad, iBasis, 2]*l12 + basisDeriv[iQuad, iBasis, 1]*l13
B[4, iBasis*self.spaceDim+1] = \
basisDeriv[iQuad, iBasis, 2]*l22 + basisDeriv[iQuad, iBasis, 1]*l23
B[4, iBasis*self.spaceDim+2] = \
basisDeriv[iQuad, iBasis, 1]*l33 + basisDeriv[iQuad, iBasis, 2]*l32
B[5, iBasis*self.spaceDim+0] = \
basisDeriv[iQuad, iBasis, 2]*l11 + basisDeriv[iQuad, iBasis, 0]*l13
B[5, iBasis*self.spaceDim+1] = \
basisDeriv[iQuad, iBasis, 2]*l21 + basisDeriv[iQuad, iBasis, 0]*l23
B[5, iBasis*self.spaceDim+2] = \
basisDeriv[iQuad, iBasis, 0]*l33 + basisDeriv[iQuad, iBasis, 2]*l31
elif 2 == self.spaceDim:
B = numpy.zeros( (3, self.spaceDim*self.numBasis),
dtype=numpy.float64)
l11 = 0.0
l12 = 0.0
l21 = 0.0
l22 = 0.0
for kBasis in xrange(self.numBasis):
l11 += basisDeriv[iQuad, kBasis, 0]*disp[kBasis*self.spaceDim ]
l12 += basisDeriv[iQuad, kBasis, 1]*disp[kBasis*self.spaceDim ]
l21 += basisDeriv[iQuad, kBasis, 0]*disp[kBasis*self.spaceDim+1]
l22 += basisDeriv[iQuad, kBasis, 1]*disp[kBasis*self.spaceDim+1]
for iBasis in xrange(self.numBasis):
B[0, iBasis*self.spaceDim+0] = basisDeriv[iQuad, iBasis, 0]*l11
B[0, iBasis*self.spaceDim+1] = basisDeriv[iQuad, iBasis, 0]*l21
B[1, iBasis*self.spaceDim+0] = basisDeriv[iQuad, iBasis, 1]*l12
B[1, iBasis*self.spaceDim+1] = basisDeriv[iQuad, iBasis, 1]*l22
B[2, iBasis*self.spaceDim+0] = \
basisDeriv[iQuad, iBasis, 1]*l11 + basisDeriv[iQuad, iBasis, 0]*l12
B[2, iBasis*self.spaceDim+1] = \
basisDeriv[iQuad, iBasis, 0]*l22 + basisDeriv[iQuad, iBasis, 1]*l21
elif 1 == self.spaceDim:
B = numpy.zeros( (1, self.spaceDim*self.numBasis),
dtype=numpy.float64)
l11 = 0.0
for kBasis in xrange(self.numBasis):
l11 += basisDeriv[iQuad, kBasis, 0]*disp[kBasis]
for iBasis in xrange(self.numBasis):
B[0, iBasis*self.spaceDim+0] = basisDeriv[iQuad, iBasis, 0]*l11
else:
raise ValueError("Unknown spatial dimension '%d'." % self.spaceDim)
return B
def _calculateBasisDerivMatNonlinear(self, basisDeriv, iQuad):
"""
Calculate matrix of derivatives of basis functions.
"""
B = numpy.zeros( (self.spaceDim*self.spaceDim,
self.spaceDim*self.numBasis),
dtype=numpy.float64)
for iBasis in xrange(self.numBasis):
for iDim in xrange(self.spaceDim):
for jDim in xrange(self.spaceDim):
B[jDim+iDim*self.spaceDim, iBasis*self.spaceDim+iDim] = \
basisDeriv[iQuad, iBasis, jDim]
return B
def _calculateStrain(self, basisDeriv, iQuad, disp):
"""
Calculate Green-Lagrange strain. Shear strains are twice the
Green-Lagrange values for compatibility with computing the strains
using the B matrix in the infinitesimal strain case.
"""
if 3 == self.spaceDim:
strain = numpy.zeros( (1,6), dtype=numpy.float64)
l11 = 0.0
l12 = 0.0
l13 = 0.0
l21 = 0.0
l22 = 0.0
l23 = 0.0
l31 = 0.0
l32 = 0.0
l33 = 0.0
for kBasis in xrange(self.numBasis):
l11 += basisDeriv[iQuad, kBasis, 0]*disp[kBasis*self.spaceDim ]
l12 += basisDeriv[iQuad, kBasis, 1]*disp[kBasis*self.spaceDim ]
l13 += basisDeriv[iQuad, kBasis, 2]*disp[kBasis*self.spaceDim ]
l21 += basisDeriv[iQuad, kBasis, 0]*disp[kBasis*self.spaceDim+1]
l22 += basisDeriv[iQuad, kBasis, 1]*disp[kBasis*self.spaceDim+1]
l23 += basisDeriv[iQuad, kBasis, 2]*disp[kBasis*self.spaceDim+1]
l31 += basisDeriv[iQuad, kBasis, 0]*disp[kBasis*self.spaceDim+2]
l32 += basisDeriv[iQuad, kBasis, 1]*disp[kBasis*self.spaceDim+2]
l33 += basisDeriv[iQuad, kBasis, 2]*disp[kBasis*self.spaceDim+2]
strain[0, 0] = 0.5*(l11*l11 + l21*l21 + l31*l31)
strain[0, 1] = 0.5*(l12*l12 + l22*l22 + l32*l32)
strain[0, 2] = 0.5*(l13*l13 + l23*l23 + l33*l33)
strain[0, 3] = (l11*l12 + l21*l22 + l31*l32) # Use 2*e12 (D has mu)
strain[0, 4] = (l12*l13 + l22*l23 + l32*l33)
strain[0, 5] = (l11*l13 + l21*l23 + l31*l33)
for iBasis in xrange(self.numBasis):
strain[0, 0] += \
basisDeriv[iQuad, iBasis, 0]*disp[iBasis*self.spaceDim ]
strain[0, 1] += \
basisDeriv[iQuad, iBasis, 1]*disp[iBasis*self.spaceDim+1]
strain[0, 2] += \
basisDeriv[iQuad, iBasis, 2]*disp[iBasis*self.spaceDim+2]
strain[0, 3] += \
(basisDeriv[iQuad, iBasis, 0]*disp[iBasis*self.spaceDim+1] +
basisDeriv[iQuad, iBasis, 1]*disp[iBasis*self.spaceDim ])
strain[0, 4] += \
(basisDeriv[iQuad, iBasis, 1]*disp[iBasis*self.spaceDim+2] +
basisDeriv[iQuad, iBasis, 2]*disp[iBasis*self.spaceDim+1])
strain[0, 5] += \
(basisDeriv[iQuad, iBasis, 0]*disp[iBasis*self.spaceDim+2] +
basisDeriv[iQuad, iBasis, 2]*disp[iBasis*self.spaceDim ])
elif 2 == self.spaceDim:
strain = numpy.zeros( (1,3), dtype=numpy.float64)
l11 = 0.0
l12 = 0.0
l21 = 0.0
l22 = 0.0
for kBasis in xrange(self.numBasis):
l11 += basisDeriv[iQuad, kBasis, 0]*disp[kBasis*self.spaceDim ]
l12 += basisDeriv[iQuad, kBasis, 1]*disp[kBasis*self.spaceDim ]
l21 += basisDeriv[iQuad, kBasis, 0]*disp[kBasis*self.spaceDim+1]
l22 += basisDeriv[iQuad, kBasis, 1]*disp[kBasis*self.spaceDim+1]
strain[0, 0] = 0.5*(l11*l11 + l21*l21)
strain[0, 1] = 0.5*(l12*l12 + l22*l22)
strain[0, 2] = (l11*l12 + l21*l22) # Use 2*e12 (D has mu, not 2*mu)
for iBasis in xrange(self.numBasis):
strain[0, 0] += \
basisDeriv[iQuad, iBasis, 0]*disp[iBasis*self.spaceDim ]
strain[0, 1] += \
basisDeriv[iQuad, iBasis, 1]*disp[iBasis*self.spaceDim+1]
strain[0, 2] += \
(basisDeriv[iQuad, iBasis, 0]*disp[iBasis*self.spaceDim+1] +
basisDeriv[iQuad, iBasis, 1]*disp[iBasis*self.spaceDim ])
elif 1 == self.spaceDim:
strain = numpy.zeros( (1,1), dtype=numpy.float64)
l11 = 0.0
for kBasis in xrange(self.numBasis):
l11 += basisDeriv[iQuad, kBasis, 0]*disp[kBasis]
strain[0, 0] = 0.5*l11*l11
for iBasis in xrange(self.numBasis):
strain[0, 0] += basisDeriv[iQuad, iBasis, 0]*disp[iBasis]
else:
raise ValueError("Unknown spatial dimension '%d'." % self.spaceDim)
return strain
def _calculateStress(self, strain, D):
"""
Calculate the second Piola-Kirchhoff stress matrix.
"""
S = numpy.zeros( (self.spaceDim*self.spaceDim,
self.spaceDim*self.spaceDim), dtype=numpy.float64)
Svec = numpy.dot(D, strain.transpose())
if 3 == self.spaceDim:
Smat = numpy.array([[Svec[0,0], Svec[3,0], Svec[5,0]],
[Svec[3,0], Svec[1,0], Svec[4,0]],
[Svec[5,0], Svec[4,0], Svec[2,0]]],
dtype=numpy.float64)
S[0:3,0:3] = Smat[:]
S[3:6,3:6] = Smat[:]
S[6:9,6:9] = Smat[:]
elif 2 == self.spaceDim:
Smat = numpy.array([[Svec[0,0], Svec[2,0]],
[Svec[2,0], Svec[1,0]]], dtype=numpy.float64)
S[0:2,0:2] = Smat[:]
S[2:4,2:4] = Smat[:]
elif 1 == self.spaceDim:
Smat = numpy.array([[Svec[0]]], dtype=numpy.float64)
S[0:1,0:1] = Smat[:]
else:
raise ValueError("Unknown spatial dimension '%d'." % self.spaceDim)
return S
# MAIN /////////////////////////////////////////////////////////////////
if __name__ == "__main__":
app = ElasticityLgDeformApp()
app.run()
# End of file
```
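A quick standalone check of the strain convention `_calculateStrain()` implements (an illustrative sketch, not PyLith API; the stretch value is arbitrary): for a uniform 1-D stretch x → λx the displacement gradient is l11 = λ − 1, so the Green-Lagrange axial strain l11 + 0.5·l11² must equal 0.5·(λ² − 1).
```python
# Illustrative check, not part of PyLith: Green-Lagrange axial strain in 1-D.
lam = 1.1                        # assumed uniform stretch factor
l11 = lam - 1.0                  # displacement gradient du/dX
strain = l11 + 0.5 * l11 * l11   # linear term plus the 0.5*l*l term, as above
assert abs(strain - 0.5 * (lam ** 2 - 1.0)) < 1e-12
print(strain)  # 0.105
```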
#### File: pytests/bc/TestDirichletBC.py
```python
import unittest
from pylith.bc.DirichletBC import DirichletBC
# ----------------------------------------------------------------------
class TestDirichletBC(unittest.TestCase):
"""
Unit testing of DirichletBC object.
"""
def test_constructor(self):
"""
Test constructor.
"""
from pylith.bc.DirichletBC import DirichletBC
bc = DirichletBC()
return
def test_configure(self):
"""
Test _configure().
"""
from spatialdata.spatialdb.SimpleDB import SimpleDB
db = SimpleDB()
db.inventory.label = "simple database"
db._configure()
from spatialdata.spatialdb.TimeHistory import TimeHistory
th = TimeHistory()
th._configure()
from pylith.bc.DirichletBC import DirichletBC
bc = DirichletBC()
bc.inventory.label = "abc"
bc.inventory.dbInitial = db
bc.inventory.dbRate = db
bc.inventory.dbChange = db
bc.inventory.thChange = th
bc._configure()
return
def test_implementsConstraint(self):
"""
Test to make sure DirichletBC satisfies constraint requirements.
"""
bc = DirichletBC()
from pylith.feassemble.Constraint import implementsConstraint
self.failUnless(implementsConstraint(bc))
return
def test_initialize(self):
"""
Test initialize().
WARNING: This is not a rigorous test of initialize() because we
don't verify the results.
"""
(mesh, bc, field) = self._initialize()
# We should really add something here to check to make sure things
# actually initialized correctly
return
def test_numDimConstrained(self):
"""
Test numDimConstrained().
"""
(mesh, bc, field) = self._initialize()
self.assertEqual(1, bc.numDimConstrained())
return
def test_verifyConfiguration(self):
"""
Test verifyConfiguration().
WARNING: This is not a rigorous test of verifyConfiguration() because we
don't verify the results.
"""
(mesh, bc, field) = self._initialize()
field.allocate()
bc.verifyConfiguration()
# We should really add something here to check to make sure things
# actually initialized correctly
return
def test_setConstraintSizes(self):
"""
Test setConstraintSizes().
WARNING: This is not a rigorous test of setConstraintSizes() because we
don't verify the results.
"""
(mesh, bc, field) = self._initialize()
bc.setConstraintSizes(field)
# We should really add something here to check to make sure things
# actually initialized correctly
return
def test_setConstraints(self):
"""
Test setConstraints().
WARNING: This is not a rigorous test of setConstraints() because we
don't verify the results.
"""
(mesh, bc, field) = self._initialize()
bc.setConstraintSizes(field)
field.allocate()
bc.setConstraints(field)
# We should really add something here to check to make sure things
# actually initialized correctly
return
def test_setField(self):
"""
Test setField().
WARNING: This is not a rigorous test of setField() because we
don't verify the results.
"""
(mesh, bc, field) = self._initialize()
bc.setConstraintSizes(field)
field.allocate()
bc.setConstraints(field)
t = 1.0
bc.setField(t, field)
# We should really add something here to check to make sure things
# actually initialized correctly
return
def test_setFieldIncr(self):
"""
Test setFieldIncr().
WARNING: This is not a rigorous test of setFieldIncr() because we
don't verify the results.
"""
(mesh, bc, field) = self._initialize()
bc.setConstraintSizes(field)
field.allocate()
bc.setConstraints(field)
t0 = 1.0
t1 = 2.0
bc.setFieldIncr(t0, t1, field)
# We should really add something here to check to make sure things
# actually initialized correctly
return
def test_finalize(self):
"""
Test finalize().
WARNING: This is not a rigorous test of finalize() because we
neither set the input fields nor verify the results.
"""
(mesh, bc, field) = self._initialize()
bc.finalize()
# We should really add something here to check to make sure things
# actually initialized correctly.
return
def test_factory(self):
"""
Test factory method.
"""
from pylith.bc.DirichletBC import boundary_condition
bc = boundary_condition()
return
# PRIVATE METHODS ////////////////////////////////////////////////////
def _initialize(self):
"""
Initialize DirichletBC boundary condition.
"""
from spatialdata.spatialdb.SimpleDB import SimpleDB
dbInitial = SimpleDB()
dbInitial.inventory.label = "TestDirichletBC tri3"
dbInitial.inventory.iohandler.inventory.filename = "data/tri3_disp.spatialdb"
dbInitial.inventory.iohandler._configure()
dbInitial._configure()
from pylith.bc.DirichletBC import DirichletBC
bc = DirichletBC()
bc.inventory.label = "bc"
bc.inventory.bcDOF = [1]
bc.inventory.dbInitial = dbInitial
bc._configure()
from spatialdata.geocoords.CSCart import CSCart
cs = CSCart()
cs.inventory.spaceDim = 2
cs._configure()
from spatialdata.units.Nondimensional import Nondimensional
normalizer = Nondimensional()
normalizer._configure()
from pylith.meshio.MeshIOAscii import MeshIOAscii
importer = MeshIOAscii()
importer.inventory.filename = "data/tri3.mesh"
importer.inventory.coordsys = cs
importer._configure()
mesh = importer.read(debug=False, interpolate=False)
bc.preinitialize(mesh)
bc.initialize(totalTime=0.0, numTimeSteps=1, normalizer=normalizer)
# Setup field
from pylith.topology.Field import Field
field = Field(mesh)
field.newSection(field.VERTICES_FIELD, cs.spaceDim())
return (mesh, bc, field)
# End of file
```
#### File: pytests/bc/TestNeumann.py
```python
import unittest
from pylith.bc.Neumann import Neumann
# ----------------------------------------------------------------------
class TestNeumann(unittest.TestCase):
"""
Unit testing of Neumann object.
"""
def test_implementsIntegrator(self):
"""
Test to make sure Neumann satisfies constraint requirements.
"""
bc = Neumann()
from pylith.feassemble.Integrator import implementsIntegrator
self.failUnless(implementsIntegrator(bc))
return
def test_constructor(self):
"""
Test constructor.
"""
from pylith.bc.Neumann import Neumann
bc = Neumann()
return
def test_initialize(self):
"""
Test initialize().
WARNING: This is not a rigorous test of initialize() because we
don't verify the results.
"""
(mesh, bc, fields) = self._initialize()
# No testing of result.
return
def test_timeStep(self):
"""
Test timeStep().
"""
(mesh, bc, fields) = self._initialize()
dt = 0.25
bc.timeStep(dt)
return
def test_stableTimeStep(self):
"""
Test stableTimeStep().
"""
(mesh, bc, fields) = self._initialize()
from pylith.utils.utils import maxscalar
self.assertAlmostEqual(1.0, bc.stableTimeStep(mesh)/maxscalar(), 7)
return
def test_needNewJacobian(self):
"""
Test needNewJacobian().
"""
(mesh, bc, fields) = self._initialize()
self.assertEqual(True, bc.needNewJacobian())
return
def test_integrateResidual(self):
"""
Test integrateResidual().
WARNING: This is not a rigorous test of integrateResidual() because we
don't verify the results.
"""
(mesh, bc, fields) = self._initialize()
residual = fields.get("residual")
t = 0.02
bc.integrateResidual(residual, t, fields)
# No testing of result.
return
def test_integrateJacobian(self):
"""
Test integrateJacobian().
This does nothing for Neumann BC.
"""
(mesh, bc, fields) = self._initialize()
from pylith.topology.Jacobian import Jacobian
jacobian = Jacobian(fields.solution())
jacobian.zero()
t = 0.24
bc.integrateJacobian(jacobian, t, fields)
self.assertEqual(False, bc.needNewJacobian())
# No testing of result.
return
def test_poststep(self):
"""
Test poststep().
WARNING: This is not a rigorous test of poststep() because we
neither set the input fields nor verify the results.
"""
(mesh, bc, fields) = self._initialize()
t = 0.50
dt = 0.1
totalTime = 5
bc.poststep(t, dt, fields)
# No testing of result.
return
def test_finalize(self):
"""
Test finalize().
WARNING: This is not a rigorous test of finalize() because we
neither set the input fields nor verify the results.
"""
(mesh, bc, fields) = self._initialize()
bc.finalize()
# No testing of result.
return
def test_factory(self):
"""
Test factory method.
"""
from pylith.bc.Neumann import boundary_condition
bc = boundary_condition()
return
# PRIVATE METHODS ////////////////////////////////////////////////////
def _initialize(self):
"""
Initialize Neumann boundary condition.
"""
from spatialdata.spatialdb.SimpleDB import SimpleDB
db = SimpleDB()
db.inventory.label = "TestNeumann tri3"
db.inventory.iohandler.inventory.filename = "data/tri3_tractions.spatialdb"
db.inventory.iohandler._configure()
db._configure()
from pylith.feassemble.FIATSimplex import FIATSimplex
cell = FIATSimplex()
cell.inventory.dimension = 1
cell.inventory.degree = 1
cell.inventory.order = 1
cell._configure()
from pylith.feassemble.Quadrature import Quadrature
quadrature = Quadrature()
quadrature.inventory.cell = cell
quadrature._configure()
from pylith.bc.Neumann import Neumann
bc = Neumann()
bc.inventory.bcQuadrature = quadrature
bc.inventory.dbInitial = db
bc.inventory.label = "bc"
bc.inventory.output.inventory.writer._configure()
bc.inventory.output._configure()
bc._configure()
from spatialdata.geocoords.CSCart import CSCart
cs = CSCart()
cs.inventory.spaceDim = 2
cs._configure()
from spatialdata.units.Nondimensional import Nondimensional
normalizer = Nondimensional()
normalizer._configure()
from pylith.meshio.MeshIOAscii import MeshIOAscii
importer = MeshIOAscii()
importer.inventory.filename = "data/tri3.mesh"
importer.inventory.coordsys = cs
importer._configure()
mesh = importer.read(debug=False, interpolate=True)
bc.preinitialize(mesh)
bc.initialize(totalTime=0.0, numTimeSteps=1, normalizer=normalizer)
bc.timeStep(0.01)
# Setup fields
from pylith.topology.SolutionFields import SolutionFields
fields = SolutionFields(mesh)
fields.add("residual", "residual")
fields.add("disp(t)", "displacement")
fields.add("dispIncr(t->t+dt)", "displacement")
fields.solutionName("dispIncr(t->t+dt)")
residual = fields.get("residual")
residual.newSection(residual.VERTICES_FIELD, cs.spaceDim())
residual.allocate()
residual.zero()
fields.copyLayout("residual")
return (mesh, bc, fields)
# End of file
```
#### File: pytests/feassemble/TestFIATSimplex.py
```python
import unittest
import numpy
from pylith.feassemble.FIATSimplex import FIATSimplex
from pylith.utils.testarray import test_scalararray
# ----------------------------------------------------------------------
class Tri3(object):
def __init__(self):
"""
Setup tri3 cell.
"""
vertices = numpy.array([[-1.0, -1.0],
[+1.0, -1.0],
[-1.0, +1.0]])
quadPts = numpy.array([ [-1.0/3.0, -1.0/3.0] ])
quadWts = numpy.array( [2.0])
# Compute basis fns and derivatives at quadrature points
basis = numpy.zeros( (1, 3), dtype=numpy.float64)
basisDeriv = numpy.zeros( (1, 3, 2), dtype=numpy.float64)
iQuad = 0
for q in quadPts:
basis[iQuad] = numpy.array([self.N0(q), self.N1(q), self.N2(q)],
dtype=numpy.float64).reshape( (3,) )
deriv = numpy.array([[self.N0p(q), self.N0q(q)],
[self.N1p(q), self.N1q(q)],
[self.N2p(q), self.N2q(q)]])
basisDeriv[iQuad] = deriv.reshape((3, 2))
iQuad += 1
self.cellDim = 2
self.numCorners = len(vertices)
self.numQuadPts = len(quadPts)
self.vertices = vertices
self.quadPts = quadPts
self.quadWts = quadWts
self.basis = basis
self.basisDeriv = basisDeriv
return
def N0(self, p):
return 0.5*(-p[0]-p[1])
def N0p(self, p):
return -0.5
def N0q(self, p):
return -0.5
def N1(self, p):
return 0.5*(1.0+p[0])
def N1p(self, p):
return 0.5
def N1q(self, p):
return 0.0
def N2(self, p):
return 0.5*(1.0+p[1])
def N2p(self, p):
return 0.0
def N2q(self, p):
return 0.5
# ----------------------------------------------------------------------
class Tri3Collocated(Tri3):
def __init__(self):
"""
Setup tri3 cell with collocated quadrature.
"""
vertices = numpy.array([[-1.0, -1.0],
[+1.0, -1.0],
[-1.0, +1.0]])
quadPts = vertices[:]
quadWts = numpy.array( [2.0/3.0, 2.0/3.0, 2.0/3.0])
# Compute basis fns and derivatives at quadrature points
basis = numpy.zeros( (3, 3), dtype=numpy.float64)
basisDeriv = numpy.zeros( (3, 3, 2), dtype=numpy.float64)
iQuad = 0
for q in quadPts:
basis[iQuad] = numpy.array([self.N0(q), self.N1(q), self.N2(q)],
dtype=numpy.float64).reshape( (3,) )
deriv = numpy.array([[self.N0p(q), self.N0q(q)],
[self.N1p(q), self.N1q(q)],
[self.N2p(q), self.N2q(q)]])
basisDeriv[iQuad] = deriv.reshape((3, 2))
iQuad += 1
self.cellDim = 2
self.numCorners = len(vertices)
self.numQuadPts = len(quadPts)
self.vertices = vertices
self.quadPts = quadPts
self.quadWts = quadWts
self.basis = basis
self.basisDeriv = basisDeriv
return
# ----------------------------------------------------------------------
class Tri6(object):
def __init__(self):
"""
Setup tri6 cell.
"""
vertices = numpy.array([[-1.0, -1.0],
[+1.0, -1.0],
[-1.0, +1.0],
[ 0.0, -1.0],
[ 0.0, 0.0],
[-1.0, 0.0]])
quadPts = numpy.array([ [-0.64288254, -0.68989795],
[-0.84993778, 0.28989795],
[ 0.33278049, -0.68989795],
[-0.43996017, 0.28989795]])
quadWts = numpy.array( [0.63608276, 0.36391724, 0.63608276, 0.36391724])
# Compute basis fns and derivatives at quadrature points
basis = numpy.zeros( (4, 6), dtype=numpy.float64)
basisDeriv = numpy.zeros( (4, 6, 2), dtype=numpy.float64)
iQuad = 0
for q in quadPts:
basis[iQuad] = numpy.array([self.N0(q), self.N1(q), self.N2(q),
self.N3(q), self.N4(q), self.N5(q)],
dtype=numpy.float64).reshape( (6,) )
deriv = numpy.array([[self.N0p(q), self.N0q(q)],
[self.N1p(q), self.N1q(q)],
[self.N2p(q), self.N2q(q)],
[self.N3p(q), self.N3q(q)],
[self.N4p(q), self.N4q(q)],
[self.N5p(q), self.N5q(q)]])
basisDeriv[iQuad] = deriv.reshape((6, 2))
iQuad += 1
self.cellDim = 2
self.numCorners = len(vertices)
self.numQuadPts = len(quadPts)
self.vertices = vertices
self.quadPts = quadPts
self.quadWts = quadWts
self.basis = basis
self.basisDeriv = basisDeriv
return
def N0(self, p):
return 0.5*(-p[0]-p[1])*(-1.0-p[0]-p[1])
def N0p(self, p):
return 0.5+p[0]+p[1]
def N0q(self, p):
return 0.5+p[0]+p[1]
def N1(self, p):
return 0.5*(1.0+p[0])*(p[0])
def N1p(self, p):
return 0.5+p[0]
def N1q(self, p):
return 0
def N2(self, p):
return 0.5*(1.0+p[1])*(p[1])
def N2p(self, p):
return 0
def N2q(self, p):
return 0.5+p[1]
def N3(self, p):
return (-p[0]-p[1])*(1+p[0])
def N3p(self, p):
return -1.0-2*p[0]-p[1]
def N3q(self, p):
return -(1+p[0])
def N4(self, p):
return (1.0+p[0])*(1+p[1])
def N4p(self, p):
return (1+p[1])
def N4q(self, p):
return (1.0+p[0])
def N5(self, p):
return (-p[0]-p[1])*(1+p[1])
def N5p(self, p):
return -(1+p[1])
def N5q(self, p):
return -1.0-p[0]-2*p[1]
# ----------------------------------------------------------------------
class Tet4(object):
def __init__(self):
"""
Setup tet4 cell.
"""
vertices = numpy.array([[+1.0, -1.0, -1.0],
[-1.0, -1.0, -1.0],
[-1.0, +1.0, -1.0],
[-1.0, -1.0, +1.0]])
quadPts = numpy.array([ [-1.0/2.0, -1.0/2.0, -1.0/2.0] ])
quadWts = numpy.array( [4.0/3.0])
# Compute basis fns and derivatives at quadrature points
basis = numpy.zeros( (1, 4), dtype=numpy.float64)
basisDeriv = numpy.zeros( (1, 4, 3), dtype=numpy.float64)
iQuad = 0
for q in quadPts:
basis[iQuad] = numpy.array([self.N1(q), self.N0(q),
self.N2(q), self.N3(q)],
dtype=numpy.float64).reshape( (4,) )
deriv = numpy.array([[self.N1p(q), self.N1q(q), self.N1r(q)],
[self.N0p(q), self.N0q(q), self.N0r(q)],
[self.N2p(q), self.N2q(q), self.N2r(q)],
[self.N3p(q), self.N3q(q), self.N3r(q)]])
basisDeriv[iQuad] = deriv.reshape((4, 3))
iQuad += 1
self.cellDim = 3
self.numCorners = len(vertices)
self.numQuadPts = len(quadPts)
self.vertices = vertices
self.quadPts = quadPts
self.quadWts = quadWts
self.basis = basis
self.basisDeriv = basisDeriv
return
def N0(self, p):
return 0.5*(-1-p[0]-p[1]-p[2])
def N0p(self, p):
return -0.5
def N0q(self, p):
return -0.5
def N0r(self, p):
return -0.5
def N1(self, p):
return 0.5*(1.0+p[0])
def N1p(self, p):
return 0.5
def N1q(self, p):
return 0.0
def N1r(self, p):
return 0.0
def N2(self, p):
return 0.5*(1.0+p[1])
def N2p(self, p):
return 0.0
def N2q(self, p):
return 0.5
def N2r(self, p):
return 0.0
def N3(self, p):
return 0.5*(1.0+p[2])
def N3p(self, p):
return 0.0
def N3q(self, p):
return 0.0
def N3r(self, p):
return 0.5
# ----------------------------------------------------------------------
class Tet4Collocated(Tet4):
def __init__(self):
"""
Setup tet4 cell with collocated quadrature.
"""
vertices = numpy.array([[+1.0, -1.0, -1.0],
[-1.0, -1.0, -1.0],
[-1.0, +1.0, -1.0],
[-1.0, -1.0, +1.0]])
quadPts = numpy.array([[-1.0, -1.0, -1.0],
[+1.0, -1.0, -1.0],
[-1.0, +1.0, -1.0],
[-1.0, -1.0, +1.0]])
quadWts = numpy.array( [1.0/3.0, 1.0/3.0, 1.0/3.0, 1.0/3.0])
# Compute basis fns and derivatives at quadrature points
basis = numpy.zeros( (4, 4), dtype=numpy.float64)
basisDeriv = numpy.zeros( (4, 4, 3), dtype=numpy.float64)
iQuad = 0
for q in quadPts:
basis[iQuad] = numpy.array([self.N1(q), self.N0(q),
self.N2(q), self.N3(q)],
dtype=numpy.float64).reshape( (4,) )
deriv = numpy.array([[self.N1p(q), self.N1q(q), self.N1r(q)],
[self.N0p(q), self.N0q(q), self.N0r(q)],
[self.N2p(q), self.N2q(q), self.N2r(q)],
[self.N3p(q), self.N3q(q), self.N3r(q)]])
basisDeriv[iQuad] = deriv.reshape((4, 3))
iQuad += 1
self.cellDim = 3
self.numCorners = len(vertices)
self.numQuadPts = len(quadPts)
self.vertices = vertices
self.quadPts = quadPts
self.quadWts = quadWts
self.basis = basis
self.basisDeriv = basisDeriv
return
# ----------------------------------------------------------------------
class TestFIATSimplex(unittest.TestCase):
"""
Unit testing of FIATSimplex object.
"""
def test_shape(self):
"""
Test _getShape().
"""
cell = FIATSimplex()
from FIAT.reference_element import default_simplex
cell.cellDim = 1
shape = cell._getShape()
self.assertEqual(default_simplex(1).get_shape(), shape.get_shape())
cell.cellDim = 2
shape = cell._getShape()
self.assertEqual(default_simplex(2).get_shape(), shape.get_shape())
cell.cellDim = 3
shape = cell._getShape()
self.assertEqual(default_simplex(3).get_shape(), shape.get_shape())
return
def test_initialize_tri3(self):
"""
Test initialize() with tri3 cell.
"""
cell = FIATSimplex()
cell.inventory.dimension = 2
cell.inventory.degree = 1
cell._configure()
cell.initialize(spaceDim=2)
cellE = Tri3()
self._checkVals(cellE, cell)
from pylith.feassemble.CellGeometry import GeometryTri2D
self.failUnless(isinstance(cell.geometry, GeometryTri2D))
return
def test_initialize_tri3_collocated(self):
"""
Test initialize() with tri3 cell.
"""
cell = FIATSimplex()
cell.inventory.dimension = 2
cell.inventory.degree = 1
cell.inventory.collocateQuad = True
cell._configure()
cell.initialize(spaceDim=2)
cellE = Tri3Collocated()
self._checkVals(cellE, cell)
from pylith.feassemble.CellGeometry import GeometryTri2D
self.failUnless(isinstance(cell.geometry, GeometryTri2D))
return
def test_initialize_tri6(self):
"""
Test initialize() with tri6 cell.
"""
cell = FIATSimplex()
cell.inventory.dimension = 2
cell.inventory.degree = 2
cell._configure()
cell.initialize(spaceDim=2)
cellE = Tri6()
self._checkVals(cellE, cell)
from pylith.feassemble.CellGeometry import GeometryTri2D
self.failUnless(isinstance(cell.geometry, GeometryTri2D))
return
def test_initialize_tet4(self):
"""
Test initialize() with tet4 cell.
"""
cell = FIATSimplex()
cell.inventory.dimension = 3
cell.inventory.degree = 1
cell._configure()
cell.initialize(spaceDim=3)
cellE = Tet4()
self._checkVals(cellE, cell)
from pylith.feassemble.CellGeometry import GeometryTet3D
self.failUnless(isinstance(cell.geometry, GeometryTet3D))
return
def test_initialize_tet4_collocated(self):
"""
Test initialize() with tet4 cell.
"""
cell = FIATSimplex()
cell.inventory.dimension = 3
cell.inventory.degree = 1
cell.inventory.collocateQuad = True
cell._configure()
cell.initialize(spaceDim=3)
cellE = Tet4Collocated()
self._checkVals(cellE, cell)
from pylith.feassemble.CellGeometry import GeometryTet3D
self.failUnless(isinstance(cell.geometry, GeometryTet3D))
return
def test_factory(self):
"""
Test factory method.
"""
from pylith.feassemble.FIATSimplex import reference_cell
c = reference_cell()
return
def _checkVals(self, cellE, cell):
"""
Check known values against those generated by FIATSimplex.
"""
# Check basic attributes
self.assertEqual(cellE.cellDim, cell.cellDim)
self.assertEqual(cellE.numCorners, cell.numCorners)
self.assertEqual(cellE.numQuadPts, cell.numQuadPts)
# Check arrays
test_scalararray(self, cellE.vertices, cell.vertices)
test_scalararray(self, cellE.quadPts, cell.quadPts)
test_scalararray(self, cellE.quadWts, cell.quadWts)
test_scalararray(self, cellE.basis, cell.basis)
test_scalararray(self, cellE.basisDeriv, cell.basisDeriv)
return
# End of file
```
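The hand-coded Tri3 shape functions above can be sanity-checked independently of PyLith. The sketch below (assuming only numpy) verifies partition of unity and that the basis linearly reproduces point coordinates:
```python
import numpy

# Tri3 shape functions transcribed from the Tri3 class above.
def tri3_basis(p):
    return numpy.array([0.5 * (-p[0] - p[1]),
                        0.5 * (1.0 + p[0]),
                        0.5 * (1.0 + p[1])])

vertices = numpy.array([[-1.0, -1.0], [+1.0, -1.0], [-1.0, +1.0]])
for p in [(-1.0 / 3.0, -1.0 / 3.0), (0.2, -0.5)]:
    basis = tri3_basis(p)
    assert abs(basis.sum() - 1.0) < 1e-12          # partition of unity
    assert numpy.allclose(basis.dot(vertices), p)  # reproduces coordinates
print("Tri3 basis checks passed")
```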
#### File: pytests/friction/TestSlipWeakening.py
```python
import unittest
from pylith.friction.SlipWeakening import SlipWeakening
# ----------------------------------------------------------------------
class TestSlipWeakening(unittest.TestCase):
"""
Unit testing of SlipWeakening object.
"""
def setUp(self):
"""
Setup test subject.
"""
self.friction = SlipWeakening()
return
def test_constructor(self):
"""
Test constructor.
"""
return
def test_factory(self):
"""
Test factory method.
"""
from pylith.friction.SlipWeakening import friction_model
m = friction_model()
return
# End of file
```
#### File: pytests/materials/TestMaterial.py
```python
import unittest
# ----------------------------------------------------------------------
class TestMaterial(unittest.TestCase):
"""
Unit testing of Material object.
"""
def setUp(self):
"""
Setup test subject.
"""
from pylith.materials.ElasticPlaneStrain import ElasticPlaneStrain
self.material = ElasticPlaneStrain()
return
def testId(self):
"""
Test id().
"""
id = 1234
self.material.id(id)
self.assertEqual(id, self.material.id())
return
def testLabel(self):
"""
Test label().
"""
label = "material abc"
self.material.label(label)
self.assertEqual(label, self.material.label())
return
def testTimeStep(self):
"""
Test timeStep().
"""
dt = 0.5
self.material.timeStep(dt)
self.assertEqual(dt, self.material.timeStep())
return
def testDBProperties(self):
"""
Test dbProperties().
"""
from spatialdata.spatialdb.SimpleDB import SimpleDB
from spatialdata.spatialdb.SimpleIOAscii import SimpleIOAscii
iohandler = SimpleIOAscii()
iohandler.inventory.filename = "data/matinitialize.spatialdb"
iohandler._configure()
db = SimpleDB()
db.inventory.label = "material properties"
db.inventory.iohandler = iohandler
db._configure()
self.material.dbProperties(db)
# No test of result.
return
def testDBInitialState(self):
"""
Test dbInitialState().
"""
from spatialdata.spatialdb.SimpleDB import SimpleDB
from spatialdata.spatialdb.SimpleIOAscii import SimpleIOAscii
iohandler = SimpleIOAscii()
iohandler.inventory.filename = "data/matinitialize.spatialdb"
iohandler._configure()
db = SimpleDB()
db.inventory.label = "material properties"
db.inventory.iohandler = iohandler
db._configure()
self.material.dbInitialState(db)
# No test of result.
return
def testNormalizer(self):
"""
Test normalizer().
"""
from spatialdata.units.Nondimensional import Nondimensional
normalizer = Nondimensional()
normalizer._configure()
self.material.normalizer(normalizer)
# No test of result.
return
def testIsJacobianSymmetric(self):
"""
Test isJacobianSymmetric().
"""
# Default should be True.
self.failUnless(self.material.isJacobianSymmetric())
return
def test_preinitialize(self):
"""
Test preinitialize().
WARNING: This is not a rigorous test of preinitialize() because we
don't verify the results.
"""
from pylith.feassemble.FIATSimplex import FIATSimplex
cell = FIATSimplex()
cell.inventory.dimension = 2
cell.inventory.order = 1
cell.inventory.degree = 1
cell._configure()
from pylith.feassemble.Quadrature import Quadrature
quadrature = Quadrature()
quadrature.inventory.cell = cell
quadrature.inventory.minJacobian = 1.0e-4
quadrature._configure()
from spatialdata.spatialdb.SimpleDB import SimpleDB
from spatialdata.spatialdb.SimpleIOAscii import SimpleIOAscii
iohandler = SimpleIOAscii()
iohandler.inventory.filename = "data/matinitialize.spatialdb"
iohandler._configure()
db = SimpleDB()
db.inventory.label = "material properties"
db.inventory.iohandler = iohandler
db._configure()
from pylith.materials.ElasticPlaneStrain import ElasticPlaneStrain
material = ElasticPlaneStrain()
material.inventory.quadrature = quadrature
material.inventory.dbProperties = db
material.inventory.label = "my material"
material.inventory.id = 54
material._configure()
from spatialdata.geocoords.CSCart import CSCart
cs = CSCart()
cs.inventory.spaceDim = 2
cs._configure()
from spatialdata.units.Nondimensional import Nondimensional
normalizer = Nondimensional()
normalizer._configure()
from pylith.meshio.MeshIOAscii import MeshIOAscii
importer = MeshIOAscii()
importer.inventory.filename = "data/twoelems.mesh"
importer.inventory.coordsys = cs
importer._configure()
mesh = importer.read(debug=False, interpolate=False)
material.preinitialize(mesh)
# No test of result.
return
# End of file
```
#### File: pytests/meshio/testmeshio.py
```python
from pylith.tests.UnitTestApp import UnitTestApp
import unittest
class TestApp(UnitTestApp):
"""
Test application.
"""
def __init__(self):
"""
Constructor.
"""
UnitTestApp.__init__(self)
return
def _suite(self):
"""
Setup the test suite.
"""
suite = unittest.TestSuite()
from TestMeshIOAscii import TestMeshIOAscii
suite.addTest(unittest.makeSuite(TestMeshIOAscii))
from TestMeshIOLagrit import TestMeshIOLagrit
suite.addTest(unittest.makeSuite(TestMeshIOLagrit))
from TestVertexFilterVecNorm import TestVertexFilterVecNorm
suite.addTest(unittest.makeSuite(TestVertexFilterVecNorm))
from TestCellFilterAvg import TestCellFilterAvg
suite.addTest(unittest.makeSuite(TestCellFilterAvg))
from TestDataWriterVTK import TestDataWriterVTK
suite.addTest(unittest.makeSuite(TestDataWriterVTK))
from TestOutputManagerMesh import TestOutputManagerMesh
suite.addTest(unittest.makeSuite(TestOutputManagerMesh))
from TestOutputManagerSubMesh import TestOutputManagerSubMesh
suite.addTest(unittest.makeSuite(TestOutputManagerSubMesh))
from TestOutputSolnSubset import TestOutputSolnSubset
suite.addTest(unittest.makeSuite(TestOutputSolnSubset))
from TestOutputSolnPoints import TestOutputSolnPoints
suite.addTest(unittest.makeSuite(TestOutputSolnPoints))
from TestSingleOutput import TestSingleOutput
suite.addTest(unittest.makeSuite(TestSingleOutput))
#TestOutputNeumann
#TestOutputFaultKin
#TestOutputDirichlet
return suite
# ----------------------------------------------------------------------
if __name__ == '__main__':
app = TestApp()
app.run()
# End of file
```
#### File: pytests/meshio/TestXdmf.py
```python
import unittest
from pylith.meshio.Xdmf import Xdmf
# ----------------------------------------------------------------------
class TestXdmf(unittest.TestCase):
"""
Unit testing of Python Xdmf object.
"""
def test_constructor(self):
"""
Test constructor.
"""
xdmf = Xdmf()
return
def test_write(self):
files = [
"data/tri3.h5",
"data/tri3_vertex.h5",
"data/tri3_cell.h5",
"data/tri3_points.h5",
"data/tri3_points_vertex.h5",
"data/tri3_surf.h5",
"data/tri3_surf_vertex.h5",
"data/tri3_surf_cell.h5",
"data/quad4.h5",
"data/quad4_vertex.h5",
"data/quad4_cell.h5",
"data/quad4_points.h5",
"data/quad4_points_vertex.h5",
"data/quad4_surf.h5",
"data/quad4_surf_vertex.h5",
"data/quad4_surf_cell.h5",
"data/tet4.h5",
"data/tet4_vertex.h5",
"data/tet4_cell.h5",
"data/tet4_points.h5",
"data/tet4_points_vertex.h5",
"data/tet4_surf.h5",
"data/tet4_surf_vertex.h5",
"data/tet4_surf_cell.h5",
"data/hex8.h5",
"data/hex8_vertex.h5",
"data/hex8_cell.h5",
"data/hex8_points.h5",
"data/hex8_points_vertex.h5",
"data/hex8_surf.h5",
"data/hex8_surf_vertex.h5",
"data/hex8_surf_cell.h5",
]
import os
xdmf = Xdmf()
for filenameH5 in files:
filenameXdmf = os.path.split(filenameH5)[-1].replace(".h5", ".xmf")
xdmf.write(filenameH5, filenameXdmf, verbose=False)
filenameXdmfE = "data/" + filenameXdmf
self._check(filenameXdmfE, filenameXdmf)
def _check(self, filenameE, filename):
fin = open(filenameE, "r")
linesE = fin.readlines()
fin.close()
fin = open(filename, "r")
lines = fin.readlines()
fin.close()
self.assertEqual(len(linesE), len(lines), "Number of lines for Xdmf file '%s' doesn't match." % filename)
numLines = len(linesE)
for i in xrange(numLines):
self.assertEqual(linesE[i], lines[i], "Line %d of file '%s' doesn't match." % (i, filename))
return
# End of file
```
#### File: pytests/mpi/TestCommunicator.py
```python
import unittest
import pylith.mpi.Communicator as mpicomm
# ----------------------------------------------------------------------
class TestCommunicator(unittest.TestCase):
"""
Unit testing of Communicator object.
"""
def test_petsc_comm_world(self):
"""
Test petsc_comm_world().
"""
comm = mpicomm.petsc_comm_world()
return
def test_petsc_comm_self(self):
"""
Test petsc_comm_self().
"""
comm = mpicomm.petsc_comm_self()
return
def test_mpi_comm_world(self):
"""
Test mpi_comm_world().
"""
comm = mpicomm.mpi_comm_world()
return
def test_mpi_comm_self(self):
"""
Test mpi_comm_self().
"""
comm = mpicomm.mpi_comm_self()
return
def test_rank(self):
"""
Test Communicator.rank().
"""
comm = mpicomm.petsc_comm_world()
self.assertEqual(0, comm.rank)
return
def test_size(self):
"""
Test Communicator.size().
"""
comm = mpicomm.petsc_comm_world()
self.assertEqual(1, comm.size)
return
def test_barrier(self):
"""
Test Communicator.barrier().
"""
comm = mpicomm.petsc_comm_world()
comm.barrier()
return
# End of file
```
#### File: pytests/topology/TestMeshField.py
```python
import unittest
from pylith.topology.Field import Field
# ----------------------------------------------------------------------
class TestMeshField(unittest.TestCase):
"""
Unit testing of Field object.
"""
def setUp(self):
"""
Setup mesh and associated field.
"""
from spatialdata.geocoords.CSCart import CSCart
cs = CSCart()
cs.inventory.spaceDim = 2
cs._configure()
from spatialdata.units.Nondimensional import Nondimensional
normalizer = Nondimensional()
normalizer._configure()
from pylith.meshio.MeshIOAscii import MeshIOAscii
importer = MeshIOAscii()
importer.inventory.filename = "data/tri3.mesh"
importer.inventory.coordsys = cs
importer._configure()
self.mesh = importer.read(debug=False, interpolate=False)
self.field = Field(self.mesh)
self.field.allocate()
return
def test_constructorA(self):
"""
Test constructor.
"""
return
def test_mesh(self):
"""
Test mesh().
"""
mesh = self.field.mesh()
self.assertEqual(2, mesh.dimension())
return
def test_label(self):
"""
Test label().
"""
label = "field A"
self.field.label(label)
self.assertEqual(label, self.field.label())
return
def test_vectorFieldType(self):
"""
Test vectorFieldType().
"""
fieldType = Field.MULTI_SCALAR
self.field.vectorFieldType(fieldType)
self.assertEqual(fieldType, self.field.vectorFieldType())
return
def test_scale(self):
"""
Test scale().
"""
scale = 2.0
self.field.scale(scale)
self.assertEqual(scale, self.field.scale())
return
def test_dimensionalizeOkay(self):
"""
Test dimensionalizeOkay().
"""
self.assertEqual(False, self.field.dimensionalizeOkay())
self.field.dimensionalizeOkay(True)
self.assertEqual(True, self.field.dimensionalizeOkay())
return
def test_spaceDim(self):
"""
Test spaceDim().
"""
self.assertEqual(2, self.field.spaceDim())
return
def test_newSectionDomain(self):
"""
Test newSection(domain).
"""
self.field.newSection(Field.VERTICES_FIELD, 4)
# No test of result
return
def test_cloneSectionField(self):
"""
Test cloneSection().
"""
fieldB = Field(self.mesh)
fieldB.cloneSection(self.field)
# No test of result
return
def test_operatorAdd(self):
"""
Test add().
"""
fieldB = Field(self.mesh)
fieldB.allocate()
self.field.add(fieldB)
# No test of result
return
def test_copy(self):
"""
Test copy().
"""
fieldB = Field(self.mesh)
fieldB.allocate()
fieldB.copy(self.field)
# No test of result
return
# End of file
```
#### File: pytests/topology/TestRefineUniform.py
```python
import unittest
from pylith.topology.RefineUniform import RefineUniform
# ----------------------------------------------------------------------
class TestRefineUniform(unittest.TestCase):
"""
Unit testing of Python RefineUniform object.
"""
def test_constructor(self):
"""
Test constructor.
"""
io = RefineUniform()
return
def test_refineTet4NoFault(self):
"""
Test refine().
"""
filenameIn = "data/twotet4.mesh"
filenameOut = "data/twotet4_test.mesh"
filenameOutE = "data/twotet4_nofault_refined2.mesh"
self._runTest(filenameIn, filenameOut, filenameOutE)
return
def test_refineTet4Fault(self):
"""
Test refine().
"""
filenameIn = "data/twotet4.mesh"
filenameOut = "data/twotet4_test.mesh"
filenameOutE = "data/twotet4_fault_refined2.mesh"
self._runTest(filenameIn, filenameOut, filenameOutE, "fault")
return
def test_factory(self):
"""
Test factory method.
"""
from pylith.topology.RefineUniform import mesh_refiner
refiner = mesh_refiner()
return
def _runTest(self, filenameIn, filenameOut, filenameOutE, faultGroup=None):
from spatialdata.geocoords.CSCart import CSCart
cs = CSCart()
cs._configure()
from pylith.meshio.MeshIOAscii import MeshIOAscii
io = MeshIOAscii()
io.inventory.filename = filenameIn
io.inventory.coordsys = cs
io._configure()
mesh = io.read(debug=False, interpolate=True)
if faultGroup is not None:
from pylith.faults.FaultCohesiveKin import FaultCohesiveKin
fault = FaultCohesiveKin()
fault.inventory.matId = 10
fault.inventory.faultLabel = faultGroup
fault._configure()
nvertices = fault.numVerticesNoMesh(mesh)
firstFaultVertex = 0
firstLagrangeVertex = nvertices
firstFaultCell = 2*nvertices
fault.adjustTopology(mesh,
firstFaultVertex,
firstLagrangeVertex,
firstFaultCell)
from pylith.topology.RefineUniform import RefineUniform
refiner = RefineUniform()
meshRefined = refiner.refine(mesh)
return
# End of file
``` |
{
"source": "joegenius98/AlphaGoLite",
"score": 3
} |
#### File: AlphaGoLite/algorithms/neuralnet.py
```python
# use tensorflow.keras consistently; mixing keras and tensorflow.keras imports breaks under TF2
from tensorflow.keras import Input, Model
from tensorflow.keras.layers import Add, Dense, Activation, BatchNormalization, Conv2D
from tensorflow.keras.initializers import glorot_uniform
from tensorflow.keras.optimizers import RMSprop
import numpy as np
import random
def residual_layer(X, filter_size):
# Save the input value. You'll need this later to add back to the main path.
X_skip_connection = X
# First component
X = Conv2D(filters=256, kernel_size=filter_size, strides=(1, 1), kernel_initializer=glorot_uniform(seed=0))(X)
X = BatchNormalization(axis=3)(X)
X = Activation('relu')(X)
# Second component of main path same as first
X = Conv2D(filters=256, kernel_size=filter_size, strides=(1, 1), kernel_initializer=glorot_uniform(seed=0))(X)
X = BatchNormalization(axis=3)(X)
X = Activation('relu')(X)
X_skip_connection = Conv2D(filters=256, kernel_size=filter_size, strides=(1, 1), kernel_initializer=glorot_uniform(seed=0))(X_skip_connection)
X_skip_connection = Conv2D(filters=256, kernel_size=filter_size, strides=(1, 1), kernel_initializer=glorot_uniform(seed=0))(X_skip_connection)
# Final step: Add skip_connection value to main path, and pass it through a RELU activation
X = Add()([X, X_skip_connection])
X = Activation('relu')(X)
return X
def convolutional_layer(X):
# First component
X = Conv2D(filters=256, kernel_size=(1, 1), strides=(1, 1), kernel_initializer=glorot_uniform(seed=0))(X)
X = BatchNormalization(axis=3)(X)
X = Activation('relu')(X)
return X
def value_head(X):
X = Conv2D(filters=1, kernel_size=(1,1), strides=(1, 1), kernel_initializer=glorot_uniform(seed=0))(X)
X = BatchNormalization(axis=3)(X)
X = Activation('relu')(X)
X = Dense(256)(X)
X = Activation('relu')(X)
X = Dense(1)(X)
X = Activation('tanh')(X)
return X
def policy_head(X):
X = Conv2D(filters=1, kernel_size=(1, 1), strides=(1, 1), kernel_initializer=glorot_uniform(seed=0))(X)
X = Conv2D(filters=1, kernel_size=(1, 1), strides=(1, 1), kernel_initializer=glorot_uniform(seed=0))(X)
X = BatchNormalization(axis=3)(X)
X = Activation('relu')(X)
#19x19+1 -> all possible moves + pass
X = Dense(362)(X)
return X
def AlphaGoLite_Network():
inputs = Input(shape=(19,19,17))
# 1 convolutional layer
X = convolutional_layer(inputs)
# 5 residual layers instead of 40
X=residual_layer(X, (3,3))
X=residual_layer(X, (3,3))
X=residual_layer(X, (3,3))
X=residual_layer(X, (3,3))
X=residual_layer(X, (2,2))
#value head
value_head_output = value_head(X)
#policy head
policy_head_output = policy_head(X)
X = Model(inputs=inputs, outputs=[value_head_output, policy_head_output])
return X
#tester code with fake input
if __name__ =="__main__":
nn = AlphaGoLite_Network()  # the function defined above; AlphaGo_Zero_Network was a NameError
rmsprop = RMSprop(clipvalue=0.5)
nn.compile(loss=['binary_crossentropy', 'categorical_crossentropy'], optimizer=rmsprop)
x=[[[[random.choice([1,0]) for x in range(17)] for j in range(19)] for k in range(19)] for t in range(20)]
x = np.array(x, dtype="float32")
y=[[random.choice([1,0])] for x in range(20)]
y = np.array(y, dtype="float32")
z=[[[[random.choice([1,0]) for i in range(362)]]] for x in range(20)]
z = np.array(z, dtype="float32")
# model.predict(x)
history = nn.fit(x, [y, z], epochs=1, batch_size=1)
``` |
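A small shape check for the two-head network (a sketch assuming TensorFlow 2.x; the import path is an assumption based on the file location). The 1x1 spatial size follows from the valid-padding conv stack collapsing the 19x19 board:
```python
from neuralnet import AlphaGoLite_Network  # import path is an assumption

model = AlphaGoLite_Network()
# value head: one tanh scalar; policy head: 362 move logits (19*19 + pass)
print(model.output_shape)  # expected: [(None, 1, 1, 1), (None, 1, 1, 362)]
```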
{
"source": "JoeGermuska/census-data-downloader",
"score": 2
} |
#### File: census-data-downloader/census_data_downloader/__init__.py
```python
import jinja2
import logging
import pathlib
from .tables import TABLE_LIST
logger = logging.getLogger(__name__)
def download_everything(*args, **kwargs):
"""
Download all the data.
"""
logger.debug("Downloading all datasets for all geographies")
for klass in TABLE_LIST:
obj = klass(*args, **kwargs)
logger.debug(f"Downloading {klass.PROCESSED_TABLE_NAME} dataset")
obj.download_everything()
def inventory_everything(docs_dir):
"""
Create markdown files describing all of the data tables.
"""
docs_path = pathlib.Path(docs_dir)
if not docs_path.exists():
docs_path.mkdir()
template_loader = jinja2.FileSystemLoader([pathlib.Path(__file__).parent.joinpath("templates")])
template_env = jinja2.Environment(loader=template_loader)
list_template = template_env.get_template("table_list.md")
with open(docs_path.joinpath("TABLES.md"), 'w') as f:
f.write(list_template.render(object_list=TABLE_LIST))
__all__ = ("TABLE_LIST", "download_everything")
``` |
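A hypothetical invocation of the two entry points. The argument to `inventory_everything` is taken from the code above, while the keyword arguments to `download_everything` are illustrative guesses, since the function forwards them untouched to each table class:
```python
# Sketch only: the kwargs to download_everything are assumptions, not confirmed API.
from census_data_downloader import download_everything, inventory_everything

download_everything(api_key="YOUR_CENSUS_API_KEY", data_dir="./data")
inventory_everything("./docs")  # writes ./docs/TABLES.md as shown above
```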
{
"source": "joeghodsi/interview-questions",
"score": 4
} |
#### File: joeghodsi/interview-questions/balanced_parens.py
```python
def balanced_parens(str):
'''
runtime: O(n)
space : O(1)
'''
if str is None:
return True
open_count = 0
for char in str:
if char == '(':
open_count += 1
elif char == ')':
open_count -= 1
if open_count < 0:
return False
return open_count == 0
```
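A few illustrative checks (not in the original file) that could be appended to the module:
```python
# Illustrative checks; non-paren characters are ignored by the scan.
assert balanced_parens('(a(b)c)')
assert not balanced_parens(')(')  # closes before it opens
assert not balanced_parens('((')  # never closed
assert balanced_parens(None)      # None is treated as balanced
print('balanced_parens checks passed')
```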
#### File: cracking-the-coding-interview/ch1-arrays-and-strings/1.5-string-compression.py
```python
def compress_string(string):
if not isinstance(string, basestring):
return None # in prod, I'd let this raise instead of fail silently and assume result
if len(string) == 0:
return string
compressed = string[0]
count = 1
previous_char = None
for i in xrange(len(string)):
if string[i] == previous_char:
count += 1
elif previous_char is not None:
compressed += str(count) + string[i]
count = 1
previous_char = string[i]
compressed += str(count)
if len(compressed) > len(string):
return string
return compressed
print compress_string('abbbccccaa')
print compress_string('abcc')
print compress_string('a')
print compress_string('aa')
print compress_string('a2')
print compress_string('')
print compress_string(None)
```
#### File: cracking-the-coding-interview/ch5-bit-manipulation/5.3-same-1s-count.py
```python
def smaller_and_larger_with_same_1s(num):
def _1s_count(n):
mask = 1
count = 0
# Python ints never overflow, so 'while mask > 0' would loop forever;
# stop once the mask passes the highest set bit instead.
while mask <= n:
if n & mask != 0:
count += 1
mask = mask << 1
return count
num_1s = _1s_count(num)
next_greater = None
next_smaller = None
i = num
while True:
i += 1
if _1s_count(i) == num_1s:
next_greater = i
break
i = num
while True:
i -= 1
if _1s_count(i) == num_1s:
next_smaller = i
break
return next_greater, next_smaller
```
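For example, 6 is 0b110 with two set bits; the nearest numbers sharing that count are 9 (0b1001) above and 5 (0b101) below. An illustrative call, assuming it is appended to the module above:
```python
# 6 = 0b110 has two set bits; 9 = 0b1001 and 5 = 0b101 are the nearest
# larger/smaller values with the same count.
print(smaller_and_larger_with_same_1s(6))  # (9, 5)
```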
#### File: cracking-the-coding-interview/ch8-object-oriented-design/8.2-call-center.py
```python
from collections import deque  # deque provides the FIFO behavior this sketch needs
class CallCenter:
def __init__(self):
self.active_calls = []
self.waiting_calls = deque()
self.respondents = []
self.free_respondents = deque()
self.managers = []
self.directors = []
def dispatch_call(self, call):
'''dispatches a new call'''
if len(self.free_respondents) == 0:
self.waiting_calls.append(call)
return # all respondents are currently busy, please wait
self._dispatch_call(call)
def escalate(self, call):
'''escalates a call to the next employee level. can be because the employee is busy or
not equipped to handle the call'''
current_employee = call.employee
next_employee = current_employee.boss
if not next_employee.free:
next_employee = next_employee.boss # simplification: assume director is free
call.employee = next_employee
next_employee.free = False
current_employee.free = True
if current_employee.role == Role.respondent:
self.free_respondents.append(current_employee)
def call_end_receiver(self, call):
'''listens for signal that call has ended'''
self.active_calls.remove(call)
call.employee.free = True
def employee_free_receiver(self, employee):
'''listens for signal that employee has become free'''
self.free_respondents.append(employee)
if self.waiting_calls:
next_call = self.waiting_calls.popleft()
self._dispatch_call(next_call)
def _dispatch_call(self, call):
if call.employee:
return # the call is already dispatched
free_respondent = self.free_respondents.popleft()
call.employee = free_respondent
free_respondent.free = False
call.start()
self.active_calls.append(call)
class Call:
def __init__(self):
self.waited = 0 # seconds caller has waited
self.call_length = 0 # seconds call lasted once connected
self.caller_info = None # metadata. caller may have entered credit card, phone number, etc
self.employee = None # one-to-one employee taking the call
def start(self):
pass
class Employee:
def __init__(self, role):
self.info = None # metadata. name, address, phone, etc
self.role = role or Role.respondent # role enum: respondent, manager, director
self.free = True # whether they are free to take a call
self.boss = None # the person this employee reports to
class Role:
respondent = 0
manager = 1
director = 2
```
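A minimal manual driver for the sketch above. The signal wiring between `Call` and `CallCenter` is elided in the design, so the event handlers are invoked by hand here; this assumes the deque-based containers:
```python
# Manual walkthrough of the design; signal plumbing is invoked directly.
center = CallCenter()
rep = Employee(Role.respondent)
center.respondents.append(rep)
center.free_respondents.append(rep)

call = Call()
center.dispatch_call(call)      # the free respondent picks up
assert call.employee is rep and not rep.free

center.call_end_receiver(call)  # caller hangs up; respondent is freed
assert rep.free
```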
#### File: cracking-the-coding-interview/ch8-object-oriented-design/8.3-jukebox.py
```python
from collections import deque  # stands in for the pseudocode queue/circular_queue/llist imports
class Jukebox:
money = 0
playing_song = None
play_queue = deque()
focused_song = None
focused_album = None
albums = deque()  # rotate() below makes album selection circular
def __init__(self):
'''add all albums with all songs and init focused_* to first respective element'''
pass
def add_money(self, money):
self.money += money
def can_queue(self, song):
return self.money >= song.cost
def queue_song(self, song):
if self.can_queue(song):
self.play_queue.append(song)
self.money -= song.cost
def next_album(self):
self.albums.rotate(-1)
self.focused_album = self.albums[0]
def next_song(self):
# assumes __init__ pointed focused_album/focused_song at real entries
songs = self.focused_album.songs
index = songs.index(self.focused_song) + 1
if index == len(songs):
self.next_album()
songs = self.focused_album.songs
index = 0
self.focused_song = songs[index]
def song_end_receiver(self, song):
self.play_queue.remove(song)
self.playing_song = self.play_queue.popleft() if self.play_queue else None
class Album:
songs = []  # ordered list of Song objects
metadata = None # title, length, artist, etc
class Song:
album = None
cost = 0.75 # some songs might cost more?
metadata = None # title, length, artist, etc
```
#### File: joeghodsi/interview-questions/find_smallest_missing_pos_int.py
```python
def find_smallest_missing_pos_int_sort(array):
'''
runtime: O(nlogn)
space : O(1) - this depends on the sort but python's list.sort() performs an in-place sort
This sort-based solution is slow but requires no additional space, given the right sorting
algorithm.
'''
array.sort()
n = len(array)
# move to index of first positive integer; if none exist, return 1
index_of_first_pos = 0
for index_of_first_pos in xrange(n):
if array[index_of_first_pos] > 0:
break
current_pos_int = 1
for i in xrange(index_of_first_pos, n):
if array[i] != current_pos_int:
return current_pos_int
current_pos_int += 1
return current_pos_int
def find_smallest_missing_pos_int_set(array):
'''
runtime: O(n)
space : O(n)
This set-based solution is fast and readable but requires linear additional space.
'''
as_set = set(array)
n = len(array)
for x in xrange(1, n + 1):
if x not in as_set:
return x
return n + 1
def find_smallest_missing_pos_int_optimal(array):
'''
runtime: O(n)
space : O(1)
This in-place swap solution runs in linear time and requires constant space. Aside from some
constant-factor performance optimizations, this is the optimal solution.
'''
n = len(array)
def in_bounds(value):
return 1 <= value <= n
# swap integers within 1 to n into their respective cells
# all other values (x < 1, x > n, and repeats) end up in the remaining empty cells
for i in xrange(n):
while in_bounds(array[i]) and array[i] != i + 1 and array[array[i] - 1] != array[i]:
swap_cell = array[i] - 1
array[i], array[swap_cell] = array[swap_cell], array[i]
for i in xrange(n):
if array[i] != i + 1:
return i + 1
return n + 1
```
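A few illustrative inputs on which all three variants agree (Python 2 style, matching the xrange usage above; copies are passed because the sort and optimal variants mutate their input):
```python
# Illustrative checks; all three implementations should return the same value.
for array in [[3, 4, -1, 1], [1, 2, 0], [7, 8, 9], []]:
    results = (find_smallest_missing_pos_int_sort(list(array)),
               find_smallest_missing_pos_int_set(list(array)),
               find_smallest_missing_pos_int_optimal(list(array)))
    assert results[0] == results[1] == results[2]
    print results  # (2, 2, 2), (3, 3, 3), (1, 1, 1), (1, 1, 1)
```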
#### File: joeghodsi/interview-questions/reverse_linked_list.py
```python
class Node(object):
def __init__(self, value=None):
self.value = value
self.next = None
def reverse_linked_list(head):
'''
runtime: O(n)
space : O(1)
'''
prev = None
curr = head
while curr is not None:
next = curr.next
curr.next = prev
prev = curr
curr = next
return prev
```
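A small round-trip demo (illustrative, appended to the module):
```python
# Build 1 -> 2 -> 3, reverse it, and walk the result.
head = Node(1)
head.next = Node(2)
head.next.next = Node(3)

node = reverse_linked_list(head)
while node is not None:
    print(node.value)  # 3, then 2, then 1
    node = node.next
```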
#### File: interview-questions/skiena/dp-edit-distance.py
```python
MATCH, SWAP, INSERT, DELETE = 1, 2, 3, 4
class DistanceCost:
edit = 0 # one of MATCH, SWAP, INSERT, DELETE
cost = 0
def _print_path(distance_cost_matrix):
path = ''
i, j = len(distance_cost_matrix) - 1, len(distance_cost_matrix[0]) - 1
while i != 0 or j != 0:
current = distance_cost_matrix[i][j]
if current.edit == MATCH:
path = 'M' + path
i, j = i - 1, j - 1
elif current.edit == SWAP:
path = 'S' + path
i, j = i - 1, j - 1
elif current.edit == INSERT:
path = 'I' + path
i, j = i - 1, j
elif current.edit == DELETE:
path = 'D' + path
i, j = i, j - 1
print path
def edit_distance(start, end):
start = ' ' + start
end = ' ' + end
distance_cost_matrix = (
[[DistanceCost() for _ in xrange(len(start))] for _ in xrange(len(end))])
for j in xrange(len(start)):
distance_cost_matrix[0][j].cost = j
distance_cost_matrix[0][j].edit = DELETE
for i in xrange(len(end)):
distance_cost_matrix[i][0].cost = i
distance_cost_matrix[i][0].edit = INSERT
for i in xrange(1, len(end)):
for j in xrange(1, len(start)):
current = distance_cost_matrix[i][j]
current.edit = SWAP
current.cost = distance_cost_matrix[i-1][j-1].cost + 1
# INSERT consumes a character of `end` (moves along i) and DELETE consumes a
# character of `start` (moves along j), matching the initialization above and
# the traceback in _print_path; the two predecessors were swapped originally.
insert = distance_cost_matrix[i-1][j]
delete = distance_cost_matrix[i][j-1]
if start[j] == end[i]:
current.edit = MATCH
current.cost = distance_cost_matrix[i-1][j-1].cost
if insert.cost + 1 < current.cost:
current.edit = INSERT
current.cost = insert.cost + 1
if delete.cost + 1 < current.cost:
current.edit = DELETE
current.cost = delete.cost + 1
_print_path(distance_cost_matrix)
return distance_cost_matrix[-1][-1].cost
start = 'thou shalt not'
end = 'you should not'
print edit_distance(start, end)
``` |
{
"source": "joegle/hrv-biofeedback",
"score": 3
} |
#### File: python-heart/examples/redis_server.py
```python
import heart
import redis
import numpy as np
#TODO
# Epoch create epoch named file
# opt-parse
class feedback(heart.Heart_Monitor):
def __init__(self,serial_device="/dev/ttyUSB0"):
heart.Heart_Monitor.__init__(self,serial_device)
self.connection = redis.StrictRedis(host='localhost', port=6379, db=0)
def on_beat(self):
self.redis_serve()
def redis_serve(self):
self.connection.set('beat',self.beat_time)
self.connection.set('std10',round(np.std(self.RR_intervals[-10:])))
self.connection.set('std50',round(np.std(self.RR_intervals[-50:])))
self.connection.set('avg10',round(np.average(self.RR_intervals[-10:])))
def main():
session=feedback()
session.start()
if __name__ == "__main__":
main()
``` |
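The repo does not show the consumer side; a minimal polling reader, assuming the same local Redis instance and the key names set in `redis_serve()`, might look like:
```python
# Hypothetical reader for the keys published by redis_serve().
import time

import redis

connection = redis.StrictRedis(host='localhost', port=6379, db=0)
while True:
    beat = connection.get('beat')
    std10 = connection.get('std10')
    if beat is not None:
        print('beat=%s std10=%s' % (beat, std10))
    time.sleep(1)
```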
{
"source": "joegog/rock-paper-scissors-lizard-spock",
"score": 4
} |
#### File: joegog/rock-paper-scissors-lizard-spock/menu_starter.py
```python
from rnd_game import RNDGame
from player import Player
from rpsls_game import RockPaperScissorsLizardSpockGame
def enter_username():
while True:
try:
username_input = str(input().strip())
return username_input
except:
print("please insert a useful username")
def enter_game_selection():
while True:
game_input = str(input()).strip()
if game_input == '1':
return game_input
elif game_input == '2':
return game_input
print("Sorry, {0} is not a option, please try again".format(str(game_input)))
print("Please choose a game")
print("Press 1 and Enter for 'Random Number Game'")
print("Press 2 and Enter for 'Rock, Paper, Scissors, Lizard, Spock'")
class MenuStarter:
    def __init__(self):
        print(
            "Welcome to the first exercise - it consists of 'Random Number Game' and 'Rock, Paper, Scissors "
            "(extended with Spock and Lizard)'")
print("Please enter your nickname")
player1 = Player(enter_username())
print("Welcome " + player1.name + " to the first exercise :)")
print("Select the game you want to play")
print("Select 1 and Enter for 'Random Number Game'")
print("Select 2 und Enter für 'Rock, Paper, Scissors, Lizard, Spock'")
check = enter_game_selection()
if check == '1':
print("'Random Number Game' is your selection")
g = RNDGame(player1.name)
g.start_game()
elif check == '2':
print("'Rock, Paper, Scissors, Lizard, Spock' is your selection")
print("Please enter name of the second player")
player2 = Player(enter_username())
g = RockPaperScissorsLizardSpockGame(player1, player2)
print("OK let's start the game " + player1.name + " vs. " + player2.name + " have fun")
g.start_game(player1, player2)
```
#### File: joegog/rock-paper-scissors-lizard-spock/rnd_game.py
```python
from random import randrange
from termcolor import cprint
class RNDGame():
    def __init__(self, username):
self.username = username
print("Hallo " + self.username + " Welcome to 'Random Number Game'")
print("The goal of the game is that you correctly guess a random number from 1-25. You have 5 rounds to win "
"the game! Otherwise you lose. Let's go")
print("")
self.__random_number = randrange(1, 26, 1)
self.__round = 1
def start_game(self):
while self.__round <= 5:
print("this is round number: " + str(self.__round))
print("waiting for your guess.... :)")
my_guess = self.guess()
if self.__random_number == my_guess:
return self.game_won()
elif self.__random_number > my_guess:
print("Hint: the secret number you are looking for is higher than your input")
elif self.__random_number < my_guess:
print("Hint: the secret number you are looking for is lower than your input")
self.__round += 1
self.game_lost()
    def guess(self):
        while True:
            try:
                return int(input())
            except ValueError:
                print("please insert only numbers")
def game_won(self):
cprint("you won, awesome", 'green')
def game_lost(self):
cprint("you lost because 5 rounds are over, I'm soo sorry :(", 'red')
``` |
{
"source": "joegomes/BasicSR",
"score": 3
} |
#### File: basicsr/metrics/niqe.py
```python
import cv2
import math
import numpy as np
import os
from scipy.ndimage.filters import convolve
from scipy.special import gamma
from basicsr.metrics.metric_util import reorder_image, to_y_channel
from basicsr.utils.matlab_functions import imresize
from basicsr.utils.registry import METRIC_REGISTRY
def estimate_aggd_param(block):
"""Estimate AGGD (Asymmetric Generalized Gaussian Distribution) paramters.
Args:
block (ndarray): 2D Image block.
Returns:
        tuple: alpha (float), beta_l (float) and beta_r (float) for the AGGD
            distribution (estimating the parameters in Equation 7 of the paper).
"""
block = block.flatten()
gam = np.arange(0.2, 10.001, 0.001) # len = 9801
gam_reciprocal = np.reciprocal(gam)
r_gam = np.square(gamma(gam_reciprocal * 2)) / (gamma(gam_reciprocal) * gamma(gam_reciprocal * 3))
left_std = np.sqrt(np.mean(block[block < 0]**2))
right_std = np.sqrt(np.mean(block[block > 0]**2))
gammahat = left_std / right_std
rhat = (np.mean(np.abs(block)))**2 / np.mean(block**2)
rhatnorm = (rhat * (gammahat**3 + 1) * (gammahat + 1)) / ((gammahat**2 + 1)**2)
array_position = np.argmin((r_gam - rhatnorm)**2)
alpha = gam[array_position]
beta_l = left_std * np.sqrt(gamma(1 / alpha) / gamma(3 / alpha))
beta_r = right_std * np.sqrt(gamma(1 / alpha) / gamma(3 / alpha))
return (alpha, beta_l, beta_r)
def compute_feature(block):
"""Compute features.
Args:
block (ndarray): 2D Image block.
Returns:
list: Features with length of 18.
"""
feat = []
alpha, beta_l, beta_r = estimate_aggd_param(block)
feat.extend([alpha, (beta_l + beta_r) / 2])
# distortions disturb the fairly regular structure of natural images.
# This deviation can be captured by analyzing the sample distribution of
# the products of pairs of adjacent coefficients computed along
# horizontal, vertical and diagonal orientations.
shifts = [[0, 1], [1, 0], [1, 1], [1, -1]]
for i in range(len(shifts)):
shifted_block = np.roll(block, shifts[i], axis=(0, 1))
alpha, beta_l, beta_r = estimate_aggd_param(block * shifted_block)
# Eq. 8
mean = (beta_r - beta_l) * (gamma(2 / alpha) / gamma(1 / alpha))
feat.extend([alpha, mean, beta_l, beta_r])
return feat
def niqe(img, mu_pris_param, cov_pris_param, gaussian_window, block_size_h=96, block_size_w=96):
"""Calculate NIQE (Natural Image Quality Evaluator) metric.
Ref: Making a "Completely Blind" Image Quality Analyzer.
This implementation could produce almost the same results as the official
MATLAB codes: http://live.ece.utexas.edu/research/quality/niqe_release.zip
Note that we do not include block overlap height and width, since they are
always 0 in the official implementation.
    For good performance, the official implementation advises dividing the
    distorted image into patches of the same size as those used for the
    construction of the multivariate Gaussian model.
Args:
img (ndarray): Input image whose quality needs to be computed. The
image must be a gray or Y (of YCbCr) image with shape (h, w).
Range [0, 255] with float type.
mu_pris_param (ndarray): Mean of a pre-defined multivariate Gaussian
model calculated on the pristine dataset.
cov_pris_param (ndarray): Covariance of a pre-defined multivariate
Gaussian model calculated on the pristine dataset.
gaussian_window (ndarray): A 7x7 Gaussian window used for smoothing the
image.
        block_size_h (int): Height of the blocks into which the image is divided.
            Default: 96 (the official recommended value).
        block_size_w (int): Width of the blocks into which the image is divided.
            Default: 96 (the official recommended value).
"""
assert img.ndim == 2, ('Input image must be a gray or Y (of YCbCr) image with shape (h, w).')
# crop image
h, w = img.shape
num_block_h = math.floor(h / block_size_h)
num_block_w = math.floor(w / block_size_w)
img = img[0:num_block_h * block_size_h, 0:num_block_w * block_size_w]
    distparam = []  # distparam holds the multiscale NIQE features
for scale in (1, 2): # perform on two scales (1, 2)
mu = convolve(img, gaussian_window, mode='nearest')
sigma = np.sqrt(np.abs(convolve(np.square(img), gaussian_window, mode='nearest') - np.square(mu)))
# normalize, as in Eq. 1 in the paper
        img_normalized = (img - mu) / (sigma + 1)
feat = []
for idx_w in range(num_block_w):
for idx_h in range(num_block_h):
                # process each block
                block = img_normalized[idx_h * block_size_h // scale:(idx_h + 1) * block_size_h // scale,
idx_w * block_size_w // scale:(idx_w + 1) * block_size_w // scale]
feat.append(compute_feature(block))
distparam.append(np.array(feat))
if scale == 1:
img = imresize(img / 255., scale=0.5, antialiasing=True)
img = img * 255.
distparam = np.concatenate(distparam, axis=1)
# fit a MVG (multivariate Gaussian) model to distorted patch features
mu_distparam = np.nanmean(distparam, axis=0)
# use nancov. ref: https://ww2.mathworks.cn/help/stats/nancov.html
distparam_no_nan = distparam[~np.isnan(distparam).any(axis=1)]
cov_distparam = np.cov(distparam_no_nan, rowvar=False)
# compute niqe quality, Eq. 10 in the paper
invcov_param = np.linalg.pinv((cov_pris_param + cov_distparam) / 2)
quality = np.matmul(
np.matmul((mu_pris_param - mu_distparam), invcov_param), np.transpose((mu_pris_param - mu_distparam)))
quality = np.sqrt(quality)
quality = float(np.squeeze(quality))
return quality
@METRIC_REGISTRY.register()
def calculate_niqe(img, crop_border, input_order='HWC', convert_to='y', **kwargs):
"""Calculate NIQE (Natural Image Quality Evaluator) metric.
Ref: Making a "Completely Blind" Image Quality Analyzer.
This implementation could produce almost the same results as the official
MATLAB codes: http://live.ece.utexas.edu/research/quality/niqe_release.zip
> MATLAB R2021a result for tests/data/baboon.png: 5.72957338 (5.7296)
> Our re-implementation result for tests/data/baboon.png: 5.7295763 (5.7296)
We use the official params estimated from the pristine dataset.
We use the recommended block size (96, 96) without overlaps.
Args:
img (ndarray): Input image whose quality needs to be computed.
The input image must be in range [0, 255] with float/int type.
The input_order of image can be 'HW' or 'HWC' or 'CHW'. (BGR order)
If the input order is 'HWC' or 'CHW', it will be converted to gray
or Y (of YCbCr) image according to the ``convert_to`` argument.
crop_border (int): Cropped pixels in each edge of an image. These
pixels are not involved in the metric calculation.
input_order (str): Whether the input order is 'HW', 'HWC' or 'CHW'.
Default: 'HWC'.
        convert_to (str): Whether to convert to 'y' (of MATLAB YCbCr) or 'gray'.
Default: 'y'.
Returns:
float: NIQE result.
"""
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
# we use the official params estimated from the pristine dataset.
niqe_pris_params = np.load(os.path.join(ROOT_DIR, 'niqe_pris_params.npz'))
mu_pris_param = niqe_pris_params['mu_pris_param']
cov_pris_param = niqe_pris_params['cov_pris_param']
gaussian_window = niqe_pris_params['gaussian_window']
img = img.astype(np.float32)
if input_order != 'HW':
img = reorder_image(img, input_order=input_order)
if convert_to == 'y':
img = to_y_channel(img)
elif convert_to == 'gray':
img = cv2.cvtColor(img / 255., cv2.COLOR_BGR2GRAY) * 255.
img = np.squeeze(img)
if crop_border != 0:
img = img[crop_border:-crop_border, crop_border:-crop_border]
# round is necessary for being consistent with MATLAB's result
img = img.round()
niqe_result = niqe(img, mu_pris_param, cov_pris_param, gaussian_window)
return niqe_result
```
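A hedged usage sketch for the metric above (editor's addition; it assumes BasicSR is installed so that niqe_pris_params.npz ships next to the module):
```python
import numpy as np
from basicsr.metrics.niqe import calculate_niqe

# Random noise stands in for a real image; HWC, BGR order, [0, 255] range.
img = (np.random.rand(256, 256, 3) * 255).astype(np.float32)
score = calculate_niqe(img, crop_border=0, input_order='HWC', convert_to='y')
print(f'NIQE: {score:.4f}')  # lower scores indicate more natural-looking images
```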
#### File: basicsr/utils/diffjpeg.py
```python
import itertools
import numpy as np
import torch
import torch.nn as nn
from torch.nn import functional as F
# ------------------------ utils ------------------------#
y_table = np.array(
[[16, 11, 10, 16, 24, 40, 51, 61], [12, 12, 14, 19, 26, 58, 60, 55], [14, 13, 16, 24, 40, 57, 69, 56],
[14, 17, 22, 29, 51, 87, 80, 62], [18, 22, 37, 56, 68, 109, 103, 77], [24, 35, 55, 64, 81, 104, 113, 92],
[49, 64, 78, 87, 103, 121, 120, 101], [72, 92, 95, 98, 112, 100, 103, 99]],
dtype=np.float32).T
y_table = nn.Parameter(torch.from_numpy(y_table))
c_table = np.empty((8, 8), dtype=np.float32)
c_table.fill(99)
c_table[:4, :4] = np.array([[17, 18, 24, 47], [18, 21, 26, 66], [24, 26, 56, 99], [47, 66, 99, 99]]).T
c_table = nn.Parameter(torch.from_numpy(c_table))
def diff_round(x):
""" Differentiable rounding function
"""
return torch.round(x) + (x - torch.round(x))**3
def quality_to_factor(quality):
""" Calculate factor corresponding to quality
Args:
quality(float): Quality for jpeg compression.
Returns:
float: Compression factor.
"""
if quality < 50:
quality = 5000. / quality
else:
quality = 200. - quality * 2
return quality / 100.
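# Worked examples for quality_to_factor (editor's note): quality 20 gives
# 5000 / 20 / 100 = 2.5, quality 80 gives (200 - 80 * 2) / 100 = 0.4; larger
# factors scale the quantization tables up, i.e. coarser quantization.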
# ------------------------ compression ------------------------#
class RGB2YCbCrJpeg(nn.Module):
""" Converts RGB image to YCbCr
"""
def __init__(self):
super(RGB2YCbCrJpeg, self).__init__()
matrix = np.array([[0.299, 0.587, 0.114], [-0.168736, -0.331264, 0.5], [0.5, -0.418688, -0.081312]],
dtype=np.float32).T
self.shift = nn.Parameter(torch.tensor([0., 128., 128.]))
self.matrix = nn.Parameter(torch.from_numpy(matrix))
def forward(self, image):
"""
Args:
image(Tensor): batch x 3 x height x width
Returns:
Tensor: batch x height x width x 3
"""
image = image.permute(0, 2, 3, 1)
result = torch.tensordot(image, self.matrix, dims=1) + self.shift
return result.view(image.shape)
class ChromaSubsampling(nn.Module):
""" Chroma subsampling on CbCr channels
"""
def __init__(self):
super(ChromaSubsampling, self).__init__()
def forward(self, image):
"""
Args:
image(tensor): batch x height x width x 3
Returns:
y(tensor): batch x height x width
cb(tensor): batch x height/2 x width/2
cr(tensor): batch x height/2 x width/2
"""
image_2 = image.permute(0, 3, 1, 2).clone()
cb = F.avg_pool2d(image_2[:, 1, :, :].unsqueeze(1), kernel_size=2, stride=(2, 2), count_include_pad=False)
cr = F.avg_pool2d(image_2[:, 2, :, :].unsqueeze(1), kernel_size=2, stride=(2, 2), count_include_pad=False)
cb = cb.permute(0, 2, 3, 1)
cr = cr.permute(0, 2, 3, 1)
return image[:, :, :, 0], cb.squeeze(3), cr.squeeze(3)
class BlockSplitting(nn.Module):
""" Splitting image into patches
"""
def __init__(self):
super(BlockSplitting, self).__init__()
self.k = 8
def forward(self, image):
"""
Args:
image(tensor): batch x height x width
Returns:
Tensor: batch x h*w/64 x h x w
"""
height, _ = image.shape[1:3]
batch_size = image.shape[0]
image_reshaped = image.view(batch_size, height // self.k, self.k, -1, self.k)
image_transposed = image_reshaped.permute(0, 1, 3, 2, 4)
return image_transposed.contiguous().view(batch_size, -1, self.k, self.k)
class DCT8x8(nn.Module):
""" Discrete Cosine Transformation
"""
def __init__(self):
super(DCT8x8, self).__init__()
tensor = np.zeros((8, 8, 8, 8), dtype=np.float32)
for x, y, u, v in itertools.product(range(8), repeat=4):
tensor[x, y, u, v] = np.cos((2 * x + 1) * u * np.pi / 16) * np.cos((2 * y + 1) * v * np.pi / 16)
alpha = np.array([1. / np.sqrt(2)] + [1] * 7)
self.tensor = nn.Parameter(torch.from_numpy(tensor).float())
self.scale = nn.Parameter(torch.from_numpy(np.outer(alpha, alpha) * 0.25).float())
def forward(self, image):
"""
Args:
image(tensor): batch x height x width
Returns:
Tensor: batch x height x width
"""
image = image - 128
result = self.scale * torch.tensordot(image, self.tensor, dims=2)
        result = result.view(image.shape)
        return result
class YQuantize(nn.Module):
""" JPEG Quantization for Y channel
Args:
rounding(function): rounding function to use
"""
def __init__(self, rounding):
super(YQuantize, self).__init__()
self.rounding = rounding
self.y_table = y_table
def forward(self, image, factor=1):
"""
Args:
image(tensor): batch x height x width
Returns:
Tensor: batch x height x width
"""
if isinstance(factor, (int, float)):
image = image.float() / (self.y_table * factor)
else:
b = factor.size(0)
table = self.y_table.expand(b, 1, 8, 8) * factor.view(b, 1, 1, 1)
image = image.float() / table
image = self.rounding(image)
return image
class CQuantize(nn.Module):
""" JPEG Quantization for CbCr channels
Args:
rounding(function): rounding function to use
"""
def __init__(self, rounding):
super(CQuantize, self).__init__()
self.rounding = rounding
self.c_table = c_table
def forward(self, image, factor=1):
"""
Args:
image(tensor): batch x height x width
Returns:
Tensor: batch x height x width
"""
if isinstance(factor, (int, float)):
image = image.float() / (self.c_table * factor)
else:
b = factor.size(0)
table = self.c_table.expand(b, 1, 8, 8) * factor.view(b, 1, 1, 1)
image = image.float() / table
image = self.rounding(image)
return image
class CompressJpeg(nn.Module):
"""Full JPEG compression algorithm
Args:
rounding(function): rounding function to use
"""
def __init__(self, rounding=torch.round):
super(CompressJpeg, self).__init__()
self.l1 = nn.Sequential(RGB2YCbCrJpeg(), ChromaSubsampling())
self.l2 = nn.Sequential(BlockSplitting(), DCT8x8())
self.c_quantize = CQuantize(rounding=rounding)
self.y_quantize = YQuantize(rounding=rounding)
def forward(self, image, factor=1):
"""
Args:
image(tensor): batch x 3 x height x width
Returns:
dict(tensor): Compressed tensor with batch x h*w/64 x 8 x 8.
"""
y, cb, cr = self.l1(image * 255)
components = {'y': y, 'cb': cb, 'cr': cr}
for k in components.keys():
comp = self.l2(components[k])
if k in ('cb', 'cr'):
comp = self.c_quantize(comp, factor=factor)
else:
comp = self.y_quantize(comp, factor=factor)
components[k] = comp
return components['y'], components['cb'], components['cr']
# ------------------------ decompression ------------------------#
class YDequantize(nn.Module):
"""Dequantize Y channel
"""
def __init__(self):
super(YDequantize, self).__init__()
self.y_table = y_table
def forward(self, image, factor=1):
"""
Args:
image(tensor): batch x height x width
Returns:
Tensor: batch x height x width
"""
if isinstance(factor, (int, float)):
out = image * (self.y_table * factor)
else:
b = factor.size(0)
table = self.y_table.expand(b, 1, 8, 8) * factor.view(b, 1, 1, 1)
out = image * table
return out
class CDequantize(nn.Module):
"""Dequantize CbCr channel
"""
def __init__(self):
super(CDequantize, self).__init__()
self.c_table = c_table
def forward(self, image, factor=1):
"""
Args:
image(tensor): batch x height x width
Returns:
Tensor: batch x height x width
"""
if isinstance(factor, (int, float)):
out = image * (self.c_table * factor)
else:
b = factor.size(0)
table = self.c_table.expand(b, 1, 8, 8) * factor.view(b, 1, 1, 1)
out = image * table
return out
class iDCT8x8(nn.Module):
"""Inverse discrete Cosine Transformation
"""
def __init__(self):
super(iDCT8x8, self).__init__()
alpha = np.array([1. / np.sqrt(2)] + [1] * 7)
self.alpha = nn.Parameter(torch.from_numpy(np.outer(alpha, alpha)).float())
tensor = np.zeros((8, 8, 8, 8), dtype=np.float32)
for x, y, u, v in itertools.product(range(8), repeat=4):
tensor[x, y, u, v] = np.cos((2 * u + 1) * x * np.pi / 16) * np.cos((2 * v + 1) * y * np.pi / 16)
self.tensor = nn.Parameter(torch.from_numpy(tensor).float())
def forward(self, image):
"""
Args:
image(tensor): batch x height x width
Returns:
Tensor: batch x height x width
"""
image = image * self.alpha
result = 0.25 * torch.tensordot(image, self.tensor, dims=2) + 128
        result = result.view(image.shape)
        return result
class BlockMerging(nn.Module):
"""Merge patches into image
"""
def __init__(self):
super(BlockMerging, self).__init__()
def forward(self, patches, height, width):
"""
Args:
patches(tensor) batch x height*width/64, height x width
height(int)
width(int)
Returns:
Tensor: batch x height x width
"""
k = 8
batch_size = patches.shape[0]
image_reshaped = patches.view(batch_size, height // k, width // k, k, k)
image_transposed = image_reshaped.permute(0, 1, 3, 2, 4)
return image_transposed.contiguous().view(batch_size, height, width)
class ChromaUpsampling(nn.Module):
"""Upsample chroma layers
"""
def __init__(self):
super(ChromaUpsampling, self).__init__()
def forward(self, y, cb, cr):
"""
Args:
y(tensor): y channel image
cb(tensor): cb channel
cr(tensor): cr channel
Returns:
Tensor: batch x height x width x 3
"""
def repeat(x, k=2):
height, width = x.shape[1:3]
x = x.unsqueeze(-1)
x = x.repeat(1, 1, k, k)
x = x.view(-1, height * k, width * k)
return x
cb = repeat(cb)
cr = repeat(cr)
return torch.cat([y.unsqueeze(3), cb.unsqueeze(3), cr.unsqueeze(3)], dim=3)
class YCbCr2RGBJpeg(nn.Module):
"""Converts YCbCr image to RGB JPEG
"""
def __init__(self):
super(YCbCr2RGBJpeg, self).__init__()
matrix = np.array([[1., 0., 1.402], [1, -0.344136, -0.714136], [1, 1.772, 0]], dtype=np.float32).T
self.shift = nn.Parameter(torch.tensor([0, -128., -128.]))
self.matrix = nn.Parameter(torch.from_numpy(matrix))
def forward(self, image):
"""
Args:
image(tensor): batch x height x width x 3
Returns:
Tensor: batch x 3 x height x width
"""
result = torch.tensordot(image + self.shift, self.matrix, dims=1)
return result.view(image.shape).permute(0, 3, 1, 2)
class DeCompressJpeg(nn.Module):
"""Full JPEG decompression algorithm
Args:
rounding(function): rounding function to use
"""
def __init__(self, rounding=torch.round):
super(DeCompressJpeg, self).__init__()
self.c_dequantize = CDequantize()
self.y_dequantize = YDequantize()
self.idct = iDCT8x8()
self.merging = BlockMerging()
self.chroma = ChromaUpsampling()
self.colors = YCbCr2RGBJpeg()
def forward(self, y, cb, cr, imgh, imgw, factor=1):
"""
Args:
            y, cb, cr (tensor): quantized DCT blocks, batch x h*w/64 x 8 x 8
imgh(int)
imgw(int)
factor(float)
Returns:
Tensor: batch x 3 x height x width
"""
components = {'y': y, 'cb': cb, 'cr': cr}
for k in components.keys():
if k in ('cb', 'cr'):
comp = self.c_dequantize(components[k], factor=factor)
height, width = int(imgh / 2), int(imgw / 2)
else:
comp = self.y_dequantize(components[k], factor=factor)
height, width = imgh, imgw
comp = self.idct(comp)
components[k] = self.merging(comp, height, width)
image = self.chroma(components['y'], components['cb'], components['cr'])
image = self.colors(image)
image = torch.min(255 * torch.ones_like(image), torch.max(torch.zeros_like(image), image))
return image / 255
# ------------------------ main DiffJPEG ------------------------ #
class DiffJPEG(nn.Module):
"""This JPEG algorithm result is slightly differnet from cv2.
DiffJPEG supports batch processing.
Args:
differentiable(bool): If True, uses custom differentiable rounding function, if False, uses standard torch.round
"""
def __init__(self, differentiable=True):
super(DiffJPEG, self).__init__()
if differentiable:
rounding = diff_round
else:
rounding = torch.round
self.compress = CompressJpeg(rounding=rounding)
self.decompress = DeCompressJpeg(rounding=rounding)
def forward(self, x, quality):
"""
Args:
x (Tensor): Input image, bchw, rgb, [0, 1]
quality(float): Quality factor for jpeg compression scheme.
"""
factor = quality
if isinstance(factor, (int, float)):
factor = quality_to_factor(factor)
else:
for i in range(factor.size(0)):
factor[i] = quality_to_factor(factor[i])
h, w = x.size()[-2:]
h_pad, w_pad = 0, 0
        # pad to a multiple of 16: chroma is subsampled by 2 and then split into 8x8 blocks
if h % 16 != 0:
h_pad = 16 - h % 16
if w % 16 != 0:
w_pad = 16 - w % 16
x = F.pad(x, (0, w_pad, 0, h_pad), mode='constant', value=0)
y, cb, cr = self.compress(x, factor=factor)
recovered = self.decompress(y, cb, cr, (h + h_pad), (w + w_pad), factor=factor)
recovered = recovered[:, :, 0:h, 0:w]
return recovered
if __name__ == '__main__':
import cv2
from basicsr.utils import img2tensor, tensor2img
img_gt = cv2.imread('test.png') / 255.
# -------------- cv2 -------------- #
encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 20]
_, encimg = cv2.imencode('.jpg', img_gt * 255., encode_param)
img_lq = np.float32(cv2.imdecode(encimg, 1))
cv2.imwrite('cv2_JPEG_20.png', img_lq)
# -------------- DiffJPEG -------------- #
jpeger = DiffJPEG(differentiable=False).cuda()
img_gt = img2tensor(img_gt)
img_gt = torch.stack([img_gt, img_gt]).cuda()
quality = img_gt.new_tensor([20, 40])
out = jpeger(img_gt, quality=quality)
cv2.imwrite('pt_JPEG_20.png', tensor2img(out[0]))
cv2.imwrite('pt_JPEG_40.png', tensor2img(out[1]))
```
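A small sketch (editor's addition) of why diff_round keeps the pipeline differentiable: torch.round has zero gradient almost everywhere, so autograd sees only the cubic correction term.
```python
import torch
from basicsr.utils.diffjpeg import diff_round  # assumes BasicSR is installed

x = torch.tensor([0.2, 0.8, 1.4], requires_grad=True)
diff_round(x).sum().backward()
print(x.grad)  # 3 * (x - round(x))**2: nonzero between integers, unlike torch.round
```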
#### File: basicsr/utils/img_process_util.py
```python
import cv2
import numpy as np
import torch
from torch.nn import functional as F
def filter2D(img, kernel):
"""PyTorch version of cv2.filter2D
Args:
img (Tensor): (b, c, h, w)
kernel (Tensor): (b, k, k)
"""
k = kernel.size(-1)
b, c, h, w = img.size()
if k % 2 == 1:
img = F.pad(img, (k // 2, k // 2, k // 2, k // 2), mode='reflect')
else:
raise ValueError('Wrong kernel size')
ph, pw = img.size()[-2:]
if kernel.size(0) == 1:
            # apply the same kernel to all batch images
img = img.view(b * c, 1, ph, pw)
kernel = kernel.view(1, 1, k, k)
return F.conv2d(img, kernel, padding=0).view(b, c, h, w)
else:
img = img.view(1, b * c, ph, pw)
kernel = kernel.view(b, 1, k, k).repeat(1, c, 1, 1).view(b * c, 1, k, k)
return F.conv2d(img, kernel, groups=b * c).view(b, c, h, w)
def usm_sharp(img, weight=0.5, radius=50, threshold=10):
    """USM sharpening.
    Input image: I; Blurry image: B.
    1. sharp = I + weight * (I - B)
    2. Mask = 1 if abs(I - B) > threshold, else: 0
    3. Blur the mask to get a soft mask
    4. Out = Mask * sharp + (1 - Mask) * I
    Args:
        img (Numpy array): Input image, HWC, BGR; float32, [0, 1].
        weight (float): Sharp weight. Default: 0.5.
        radius (float): Kernel size of Gaussian blur. Default: 50.
        threshold (int): Mask threshold on the [0, 255] scale. Default: 10.
    """
if radius % 2 == 0:
radius += 1
blur = cv2.GaussianBlur(img, (radius, radius), 0)
residual = img - blur
mask = np.abs(residual) * 255 > threshold
mask = mask.astype('float32')
soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0)
sharp = img + weight * residual
sharp = np.clip(sharp, 0, 1)
return soft_mask * sharp + (1 - soft_mask) * img
class USMSharp(torch.nn.Module):
def __init__(self, radius=50, sigma=0):
super(USMSharp, self).__init__()
if radius % 2 == 0:
radius += 1
self.radius = radius
kernel = cv2.getGaussianKernel(radius, sigma)
kernel = torch.FloatTensor(np.dot(kernel, kernel.transpose())).unsqueeze_(0)
self.register_buffer('kernel', kernel)
def forward(self, img, weight=0.5, threshold=10):
blur = filter2D(img, self.kernel)
residual = img - blur
mask = torch.abs(residual) * 255 > threshold
mask = mask.float()
soft_mask = filter2D(mask, self.kernel)
sharp = img + weight * residual
sharp = torch.clip(sharp, 0, 1)
return soft_mask * sharp + (1 - soft_mask) * img
```
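A brief usage sketch for the sharpener above (editor's addition; shapes follow the docstrings and BasicSR is assumed to be installed):
```python
import torch
from basicsr.utils.img_process_util import USMSharp

sharpener = USMSharp(radius=50, sigma=0)  # radius is bumped to 51 internally
img = torch.rand(1, 3, 128, 128)          # b, c, h, w in [0, 1]
out = sharpener(img, weight=0.5, threshold=10)
print(out.shape)  # torch.Size([1, 3, 128, 128])
```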
#### File: basicsr/utils/matlab_functions.py
```python
import math
import numpy as np
import torch
def cubic(x):
"""cubic function used for calculate_weights_indices."""
absx = torch.abs(x)
absx2 = absx**2
absx3 = absx**3
return (1.5 * absx3 - 2.5 * absx2 + 1) * (
(absx <= 1).type_as(absx)) + (-0.5 * absx3 + 2.5 * absx2 - 4 * absx + 2) * (((absx > 1) *
(absx <= 2)).type_as(absx))
def calculate_weights_indices(in_length, out_length, scale, kernel, kernel_width, antialiasing):
"""Calculate weights and indices, used for imresize function.
Args:
in_length (int): Input length.
out_length (int): Output length.
scale (float): Scale factor.
        kernel (str): Kernel type; only 'cubic' is used here.
        kernel_width (int): Kernel width.
        antialiasing (bool): Whether to apply anti-aliasing when downsampling.
"""
if (scale < 1) and antialiasing:
# Use a modified kernel (larger kernel width) to simultaneously
# interpolate and antialias
kernel_width = kernel_width / scale
# Output-space coordinates
x = torch.linspace(1, out_length, out_length)
# Input-space coordinates. Calculate the inverse mapping such that 0.5
# in output space maps to 0.5 in input space, and 0.5 + scale in output
# space maps to 1.5 in input space.
u = x / scale + 0.5 * (1 - 1 / scale)
# What is the left-most pixel that can be involved in the computation?
left = torch.floor(u - kernel_width / 2)
# What is the maximum number of pixels that can be involved in the
# computation? Note: it's OK to use an extra pixel here; if the
# corresponding weights are all zero, it will be eliminated at the end
# of this function.
p = math.ceil(kernel_width) + 2
# The indices of the input pixels involved in computing the k-th output
# pixel are in row k of the indices matrix.
indices = left.view(out_length, 1).expand(out_length, p) + torch.linspace(0, p - 1, p).view(1, p).expand(
out_length, p)
# The weights used to compute the k-th output pixel are in row k of the
# weights matrix.
distance_to_center = u.view(out_length, 1).expand(out_length, p) - indices
# apply cubic kernel
if (scale < 1) and antialiasing:
weights = scale * cubic(distance_to_center * scale)
else:
weights = cubic(distance_to_center)
# Normalize the weights matrix so that each row sums to 1.
weights_sum = torch.sum(weights, 1).view(out_length, 1)
weights = weights / weights_sum.expand(out_length, p)
# If a column in weights is all zero, get rid of it. only consider the
# first and last column.
weights_zero_tmp = torch.sum((weights == 0), 0)
if not math.isclose(weights_zero_tmp[0], 0, rel_tol=1e-6):
indices = indices.narrow(1, 1, p - 2)
weights = weights.narrow(1, 1, p - 2)
if not math.isclose(weights_zero_tmp[-1], 0, rel_tol=1e-6):
indices = indices.narrow(1, 0, p - 2)
weights = weights.narrow(1, 0, p - 2)
weights = weights.contiguous()
indices = indices.contiguous()
sym_len_s = -indices.min() + 1
sym_len_e = indices.max() - in_length
indices = indices + sym_len_s - 1
return weights, indices, int(sym_len_s), int(sym_len_e)
@torch.no_grad()
def imresize(img, scale, antialiasing=True):
"""imresize function same as MATLAB.
It now only supports bicubic.
The same scale applies for both height and width.
Args:
img (Tensor | Numpy array):
Tensor: Input image with shape (c, h, w), [0, 1] range.
Numpy: Input image with shape (h, w, c), [0, 1] range.
scale (float): Scale factor. The same scale applies for both height
and width.
        antialiasing (bool): Whether to apply anti-aliasing when downsampling.
            Default: True.
Returns:
Tensor: Output image with shape (c, h, w), [0, 1] range, w/o round.
"""
squeeze_flag = False
if type(img).__module__ == np.__name__: # numpy type
numpy_type = True
if img.ndim == 2:
img = img[:, :, None]
squeeze_flag = True
img = torch.from_numpy(img.transpose(2, 0, 1)).float()
else:
numpy_type = False
if img.ndim == 2:
img = img.unsqueeze(0)
squeeze_flag = True
in_c, in_h, in_w = img.size()
out_h, out_w = math.ceil(in_h * scale), math.ceil(in_w * scale)
kernel_width = 4
kernel = 'cubic'
# get weights and indices
weights_h, indices_h, sym_len_hs, sym_len_he = calculate_weights_indices(in_h, out_h, scale, kernel, kernel_width,
antialiasing)
weights_w, indices_w, sym_len_ws, sym_len_we = calculate_weights_indices(in_w, out_w, scale, kernel, kernel_width,
antialiasing)
# process H dimension
# symmetric copying
img_aug = torch.FloatTensor(in_c, in_h + sym_len_hs + sym_len_he, in_w)
img_aug.narrow(1, sym_len_hs, in_h).copy_(img)
sym_patch = img[:, :sym_len_hs, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
img_aug.narrow(1, 0, sym_len_hs).copy_(sym_patch_inv)
sym_patch = img[:, -sym_len_he:, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
img_aug.narrow(1, sym_len_hs + in_h, sym_len_he).copy_(sym_patch_inv)
out_1 = torch.FloatTensor(in_c, out_h, in_w)
kernel_width = weights_h.size(1)
for i in range(out_h):
idx = int(indices_h[i][0])
for j in range(in_c):
out_1[j, i, :] = img_aug[j, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_h[i])
# process W dimension
# symmetric copying
out_1_aug = torch.FloatTensor(in_c, out_h, in_w + sym_len_ws + sym_len_we)
out_1_aug.narrow(2, sym_len_ws, in_w).copy_(out_1)
sym_patch = out_1[:, :, :sym_len_ws]
inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(2, inv_idx)
out_1_aug.narrow(2, 0, sym_len_ws).copy_(sym_patch_inv)
sym_patch = out_1[:, :, -sym_len_we:]
inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(2, inv_idx)
out_1_aug.narrow(2, sym_len_ws + in_w, sym_len_we).copy_(sym_patch_inv)
out_2 = torch.FloatTensor(in_c, out_h, out_w)
kernel_width = weights_w.size(1)
for i in range(out_w):
idx = int(indices_w[i][0])
for j in range(in_c):
out_2[j, :, i] = out_1_aug[j, :, idx:idx + kernel_width].mv(weights_w[i])
if squeeze_flag:
out_2 = out_2.squeeze(0)
if numpy_type:
out_2 = out_2.numpy()
if not squeeze_flag:
out_2 = out_2.transpose(1, 2, 0)
return out_2
def rgb2ycbcr(img, y_only=False):
"""Convert a RGB image to YCbCr image.
This function produces the same results as Matlab's `rgb2ycbcr` function.
It implements the ITU-R BT.601 conversion for standard-definition
television. See more details in
https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.
It differs from a similar function in cv2.cvtColor: `RGB <-> YCrCb`.
In OpenCV, it implements a JPEG conversion. See more details in
https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion.
Args:
img (ndarray): The input image. It accepts:
1. np.uint8 type with range [0, 255];
2. np.float32 type with range [0, 1].
y_only (bool): Whether to only return Y channel. Default: False.
Returns:
ndarray: The converted YCbCr image. The output image has the same type
and range as input image.
"""
img_type = img.dtype
img = _convert_input_type_range(img)
if y_only:
out_img = np.dot(img, [65.481, 128.553, 24.966]) + 16.0
else:
out_img = np.matmul(
img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786], [24.966, 112.0, -18.214]]) + [16, 128, 128]
out_img = _convert_output_type_range(out_img, img_type)
return out_img
def bgr2ycbcr(img, y_only=False):
"""Convert a BGR image to YCbCr image.
The bgr version of rgb2ycbcr.
It implements the ITU-R BT.601 conversion for standard-definition
television. See more details in
https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.
It differs from a similar function in cv2.cvtColor: `BGR <-> YCrCb`.
In OpenCV, it implements a JPEG conversion. See more details in
https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion.
Args:
img (ndarray): The input image. It accepts:
1. np.uint8 type with range [0, 255];
2. np.float32 type with range [0, 1].
y_only (bool): Whether to only return Y channel. Default: False.
Returns:
ndarray: The converted YCbCr image. The output image has the same type
and range as input image.
"""
img_type = img.dtype
img = _convert_input_type_range(img)
if y_only:
out_img = np.dot(img, [24.966, 128.553, 65.481]) + 16.0
else:
out_img = np.matmul(
img, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786], [65.481, -37.797, 112.0]]) + [16, 128, 128]
out_img = _convert_output_type_range(out_img, img_type)
return out_img
def ycbcr2rgb(img):
"""Convert a YCbCr image to RGB image.
This function produces the same results as Matlab's ycbcr2rgb function.
It implements the ITU-R BT.601 conversion for standard-definition
television. See more details in
https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.
It differs from a similar function in cv2.cvtColor: `YCrCb <-> RGB`.
In OpenCV, it implements a JPEG conversion. See more details in
https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion.
Args:
img (ndarray): The input image. It accepts:
1. np.uint8 type with range [0, 255];
2. np.float32 type with range [0, 1].
Returns:
ndarray: The converted RGB image. The output image has the same type
and range as input image.
"""
img_type = img.dtype
img = _convert_input_type_range(img) * 255
out_img = np.matmul(img, [[0.00456621, 0.00456621, 0.00456621], [0, -0.00153632, 0.00791071],
[0.00625893, -0.00318811, 0]]) * 255.0 + [-222.921, 135.576, -276.836] # noqa: E126
out_img = _convert_output_type_range(out_img, img_type)
return out_img
def ycbcr2bgr(img):
"""Convert a YCbCr image to BGR image.
The bgr version of ycbcr2rgb.
It implements the ITU-R BT.601 conversion for standard-definition
television. See more details in
https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.
It differs from a similar function in cv2.cvtColor: `YCrCb <-> BGR`.
In OpenCV, it implements a JPEG conversion. See more details in
https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion.
Args:
img (ndarray): The input image. It accepts:
1. np.uint8 type with range [0, 255];
2. np.float32 type with range [0, 1].
Returns:
ndarray: The converted BGR image. The output image has the same type
and range as input image.
"""
img_type = img.dtype
img = _convert_input_type_range(img) * 255
out_img = np.matmul(img, [[0.00456621, 0.00456621, 0.00456621], [0.00791071, -0.00153632, 0],
[0, -0.00318811, 0.00625893]]) * 255.0 + [-276.836, 135.576, -222.921] # noqa: E126
out_img = _convert_output_type_range(out_img, img_type)
return out_img
def _convert_input_type_range(img):
"""Convert the type and range of the input image.
It converts the input image to np.float32 type and range of [0, 1].
    It is mainly used for pre-processing the input image in colorspace
    conversion functions such as rgb2ycbcr and ycbcr2rgb.
Args:
img (ndarray): The input image. It accepts:
1. np.uint8 type with range [0, 255];
2. np.float32 type with range [0, 1].
Returns:
(ndarray): The converted image with type of np.float32 and range of
[0, 1].
"""
img_type = img.dtype
img = img.astype(np.float32)
if img_type == np.float32:
pass
elif img_type == np.uint8:
img /= 255.
else:
raise TypeError(f'The img type should be np.float32 or np.uint8, but got {img_type}')
return img
def _convert_output_type_range(img, dst_type):
"""Convert the type and range of the image according to dst_type.
It converts the image to desired type and range. If `dst_type` is np.uint8,
images will be converted to np.uint8 type with range [0, 255]. If
`dst_type` is np.float32, it converts the image to np.float32 type with
range [0, 1].
    It is mainly used for post-processing images in colorspace conversion
    functions such as rgb2ycbcr and ycbcr2rgb.
Args:
img (ndarray): The image to be converted with np.float32 type and
range [0, 255].
dst_type (np.uint8 | np.float32): If dst_type is np.uint8, it
converts the image to np.uint8 type with range [0, 255]. If
dst_type is np.float32, it converts the image to np.float32 type
with range [0, 1].
Returns:
(ndarray): The converted image with desired type and range.
"""
if dst_type not in (np.uint8, np.float32):
raise TypeError(f'The dst_type should be np.float32 or np.uint8, but got {dst_type}')
if dst_type == np.uint8:
img = img.round()
else:
img /= 255.
return img.astype(dst_type)
``` |
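A short shape-level sketch of the MATLAB-compatible helpers above (editor's addition; assumes BasicSR is installed):
```python
import numpy as np
from basicsr.utils.matlab_functions import imresize, rgb2ycbcr

img = np.random.rand(64, 48, 3).astype(np.float32)  # h, w, c in [0, 1]
small = imresize(img, 0.5)       # (32, 24, 3), MATLAB-style bicubic
y = rgb2ycbcr(img, y_only=True)  # float32 Y channel, roughly [16/255, 235/255]
print(small.shape, y.shape)      # (32, 24, 3) (64, 48)
```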