repo_name (string, 5–92 chars) | path (string, 4–232 chars) | copies (string, 19 classes) | size (string, 4–7 chars) | content (string, 721–1.04M chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51–99.9) | line_max (int64, 15–997) | alpha_frac (float64, 0.25–0.97) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
blebo/pv | pv/__init__.py | 1 | 1425 | # Copyright (c) 2010-2011 Edmund Tse
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import pv
__version__ = '0.2'
__date__ = '28 Jan 11'
_ANSI_COLOR = False # Use ANSI colouring on output
_DEBUG = False # Print debug statements
def debug():
"""
Enables printing of debug messages
"""
pv._DEBUG = True
def debug_color():
"""
Enables ANSI coloured output in debug messages
"""
pv._ANSI_COLOR = True
| mit | -3,173,980,225,585,122,300 | 34.625 | 81 | 0.74807 | false |
brownharryb/erpnext | erpnext/regional/italy/utils.py | 1 | 14732 | from __future__ import unicode_literals
import frappe, json, os
from frappe.utils import flt, cstr
from erpnext.controllers.taxes_and_totals import get_itemised_tax
from frappe import _
from frappe.core.doctype.file.file import remove_file
from six import string_types
from frappe.desk.form.load import get_attachments
from erpnext.regional.italy import state_codes
def update_itemised_tax_data(doc):
if not doc.taxes: return
itemised_tax = get_itemised_tax(doc.taxes)
for row in doc.items:
tax_rate = 0.0
if itemised_tax.get(row.item_code):
tax_rate = sum([tax.get('tax_rate', 0) for d, tax in itemised_tax.get(row.item_code).items()])
row.tax_rate = flt(tax_rate, row.precision("tax_rate"))
row.tax_amount = flt((row.net_amount * tax_rate) / 100, row.precision("net_amount"))
row.total_amount = flt((row.net_amount + row.tax_amount), row.precision("total_amount"))
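# Worked example (hypothetical numbers, not taken from ERPNext fixtures): an item
# with net_amount 100.0 and a single 22% VAT charge ends up with tax_rate = 22.0,
# tax_amount = 100.0 * 22 / 100 = 22.0 and total_amount = 122.0.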
@frappe.whitelist()
def export_invoices(filters=None):
saved_xmls = []
invoices = frappe.get_all("Sales Invoice", filters=get_conditions(filters), fields=["*"])
for invoice in invoices:
attachments = get_e_invoice_attachments(invoice)
saved_xmls += [attachment.file_name for attachment in attachments]
zip_filename = "{0}-einvoices.zip".format(frappe.utils.get_datetime().strftime("%Y%m%d_%H%M%S"))
download_zip(saved_xmls, zip_filename)
@frappe.whitelist()
def prepare_invoice(invoice, progressive_number):
#set company information
company = frappe.get_doc("Company", invoice.company)
invoice.progressive_number = progressive_number
invoice.unamended_name = get_unamended_name(invoice)
invoice.company_data = company
company_address = frappe.get_doc("Address", invoice.company_address)
invoice.company_address_data = company_address
#Set invoice type
if invoice.is_return and invoice.return_against:
invoice.type_of_document = "TD04" #Credit Note (Nota di Credito)
invoice.return_against_unamended = get_unamended_name(frappe.get_doc("Sales Invoice", invoice.return_against))
else:
invoice.type_of_document = "TD01" #Sales Invoice (Fattura)
#set customer information
invoice.customer_data = frappe.get_doc("Customer", invoice.customer)
customer_address = frappe.get_doc("Address", invoice.customer_address)
invoice.customer_address_data = customer_address
if invoice.shipping_address_name:
invoice.shipping_address_data = frappe.get_doc("Address", invoice.shipping_address_name)
if invoice.customer_data.is_public_administration:
invoice.transmission_format_code = "FPA12"
else:
invoice.transmission_format_code = "FPR12"
invoice.e_invoice_items = [item for item in invoice.items]
tax_data = get_invoice_summary(invoice.e_invoice_items, invoice.taxes)
invoice.tax_data = tax_data
#Check if stamp duty (Bollo) of 2 EUR exists.
stamp_duty_charge_row = next((tax for tax in invoice.taxes if tax.charge_type == _("Actual") and tax.tax_amount == 2.0 ), None)
if stamp_duty_charge_row:
invoice.stamp_duty = stamp_duty_charge_row.tax_amount
for item in invoice.e_invoice_items:
if item.tax_rate == 0.0 and item.tax_amount == 0.0:
item.tax_exemption_reason = tax_data["0.0"]["tax_exemption_reason"]
customer_po_data = {}
for d in invoice.e_invoice_items:
if (d.customer_po_no and d.customer_po_date
and d.customer_po_no not in customer_po_data):
customer_po_data[d.customer_po_no] = d.customer_po_date
invoice.customer_po_data = customer_po_data
return invoice
def get_conditions(filters):
filters = json.loads(filters)
conditions = {"docstatus": 1}
if filters.get("company"): conditions["company"] = filters["company"]
if filters.get("customer"): conditions["customer"] = filters["customer"]
if filters.get("from_date"): conditions["posting_date"] = (">=", filters["from_date"])
if filters.get("to_date"): conditions["posting_date"] = ("<=", filters["to_date"])
if filters.get("from_date") and filters.get("to_date"):
conditions["posting_date"] = ("between", [filters.get("from_date"), filters.get("to_date")])
return conditions
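# Example (hypothetical filter payload, for illustration only):
# get_conditions('{"company": "ACME", "from_date": "2019-01-01", "to_date": "2019-01-31"}')
# returns {"docstatus": 1, "company": "ACME",
# "posting_date": ("between", ["2019-01-01", "2019-01-31"])}.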
#TODO: Use function from frappe once PR #6853 is merged.
def download_zip(files, output_filename):
from zipfile import ZipFile
input_files = [frappe.get_site_path('private', 'files', filename) for filename in files]
output_path = frappe.get_site_path('private', 'files', output_filename)
with ZipFile(output_path, 'w') as output_zip:
for input_file in input_files:
output_zip.write(input_file, arcname=os.path.basename(input_file))
with open(output_path, 'rb') as fileobj:
filedata = fileobj.read()
frappe.local.response.filename = output_filename
frappe.local.response.filecontent = filedata
frappe.local.response.type = "download"
def get_invoice_summary(items, taxes):
summary_data = frappe._dict()
for tax in taxes:
#Include only VAT charges.
if tax.charge_type == "Actual":
continue
#Charges to appear as items in the e-invoice.
if tax.charge_type in ["On Previous Row Total", "On Previous Row Amount"]:
reference_row = next((row for row in taxes if row.idx == int(tax.row_id or 0)), None)
if reference_row:
items.append(
frappe._dict(
idx=len(items)+1,
item_code=reference_row.description,
item_name=reference_row.description,
description=reference_row.description,
rate=reference_row.tax_amount,
qty=1.0,
amount=reference_row.tax_amount,
stock_uom=frappe.db.get_single_value("Stock Settings", "stock_uom") or _("Nos"),
tax_rate=tax.rate,
tax_amount=(reference_row.tax_amount * tax.rate) / 100,
net_amount=reference_row.tax_amount,
taxable_amount=reference_row.tax_amount,
item_tax_rate={tax.account_head: tax.rate},
charges=True
)
)
#Check item tax rates if tax rate is zero.
if tax.rate == 0:
for item in items:
item_tax_rate = item.item_tax_rate
if isinstance(item.item_tax_rate, string_types):
item_tax_rate = json.loads(item.item_tax_rate)
if item_tax_rate and tax.account_head in item_tax_rate:
key = cstr(item_tax_rate[tax.account_head])
if key not in summary_data:
summary_data.setdefault(key, {"tax_amount": 0.0, "taxable_amount": 0.0,
"tax_exemption_reason": "", "tax_exemption_law": ""})
summary_data[key]["tax_amount"] += item.tax_amount
summary_data[key]["taxable_amount"] += item.net_amount
if key == "0.0":
summary_data[key]["tax_exemption_reason"] = tax.tax_exemption_reason
summary_data[key]["tax_exemption_law"] = tax.tax_exemption_law
if summary_data == {}: #Implies that Zero VAT has not been set on any item.
summary_data.setdefault("0.0", {"tax_amount": 0.0, "taxable_amount": tax.total,
"tax_exemption_reason": tax.tax_exemption_reason, "tax_exemption_law": tax.tax_exemption_law})
else:
item_wise_tax_detail = json.loads(tax.item_wise_tax_detail)
for rate_item in [tax_item for tax_item in item_wise_tax_detail.items() if tax_item[1][0] == tax.rate]:
key = cstr(tax.rate)
if not summary_data.get(key): summary_data.setdefault(key, {"tax_amount": 0.0, "taxable_amount": 0.0})
summary_data[key]["tax_amount"] += rate_item[1][1]
summary_data[key]["taxable_amount"] += sum([item.net_amount for item in items if item.item_code == rate_item[0]])
for item in items:
key = cstr(tax.rate)
if item.get("charges"):
if not summary_data.get(key): summary_data.setdefault(key, {"taxable_amount": 0.0})
summary_data[key]["taxable_amount"] += item.taxable_amount
return summary_data
#Preflight for successful e-invoice export.
def sales_invoice_validate(doc):
#Validate company
if doc.doctype != 'Sales Invoice':
return
if not doc.company_address:
frappe.throw(_("Please set an Address on the Company '%s'" % doc.company), title=_("E-Invoicing Information Missing"))
else:
validate_address(doc.company_address)
company_fiscal_regime = frappe.get_cached_value("Company", doc.company, 'fiscal_regime')
if not company_fiscal_regime:
frappe.throw(_("Fiscal Regime is mandatory, kindly set the fiscal regime in the company {0}")
.format(doc.company))
else:
doc.company_fiscal_regime = company_fiscal_regime
doc.company_tax_id = frappe.get_cached_value("Company", doc.company, 'tax_id')
doc.company_fiscal_code = frappe.get_cached_value("Company", doc.company, 'fiscal_code')
if not doc.company_tax_id and not doc.company_fiscal_code:
frappe.throw(_("Please set either the Tax ID or Fiscal Code on Company '%s'" % doc.company), title=_("E-Invoicing Information Missing"))
#Validate customer details
customer = frappe.get_doc("Customer", doc.customer)
if customer.customer_type == _("Individual"):
doc.customer_fiscal_code = customer.fiscal_code
if not doc.customer_fiscal_code:
frappe.throw(_("Please set Fiscal Code for the customer '%s'" % doc.customer), title=_("E-Invoicing Information Missing"))
else:
if customer.is_public_administration:
doc.customer_fiscal_code = customer.fiscal_code
if not doc.customer_fiscal_code:
frappe.throw(_("Please set Fiscal Code for the public administration '%s'" % doc.customer), title=_("E-Invoicing Information Missing"))
else:
doc.tax_id = customer.tax_id
if not doc.tax_id:
frappe.throw(_("Please set Tax ID for the customer '%s'" % doc.customer), title=_("E-Invoicing Information Missing"))
if not doc.customer_address:
frappe.throw(_("Please set the Customer Address"), title=_("E-Invoicing Information Missing"))
else:
validate_address(doc.customer_address)
if not len(doc.taxes):
frappe.throw(_("Please set at least one row in the Taxes and Charges Table"), title=_("E-Invoicing Information Missing"))
else:
for row in doc.taxes:
if row.rate == 0 and row.tax_amount == 0 and not row.tax_exemption_reason:
frappe.throw(_("Row {0}: Please set at Tax Exemption Reason in Sales Taxes and Charges".format(row.idx)),
title=_("E-Invoicing Information Missing"))
for schedule in doc.payment_schedule:
if schedule.mode_of_payment and not schedule.mode_of_payment_code:
schedule.mode_of_payment_code = frappe.get_cached_value('Mode of Payment',
schedule.mode_of_payment, 'mode_of_payment_code')
#Ensure payment details are valid for e-invoice.
def sales_invoice_on_submit(doc, method):
#Validate payment details
if get_company_country(doc.company) not in ['Italy',
'Italia', 'Italian Republic', 'Repubblica Italiana']:
return
if not len(doc.payment_schedule):
frappe.throw(_("Please set the Payment Schedule"), title=_("E-Invoicing Information Missing"))
else:
for schedule in doc.payment_schedule:
if not schedule.mode_of_payment:
frappe.throw(_("Row {0}: Please set the Mode of Payment in Payment Schedule".format(schedule.idx)),
title=_("E-Invoicing Information Missing"))
elif not frappe.db.get_value("Mode of Payment", schedule.mode_of_payment, "mode_of_payment_code"):
frappe.throw(_("Row {0}: Please set the correct code on Mode of Payment {1}".format(schedule.idx, schedule.mode_of_payment)),
title=_("E-Invoicing Information Missing"))
prepare_and_attach_invoice(doc)
def prepare_and_attach_invoice(doc, replace=False):
progressive_name, progressive_number = get_progressive_name_and_number(doc, replace)
invoice = prepare_invoice(doc, progressive_number)
invoice_xml = frappe.render_template('erpnext/regional/italy/e-invoice.xml', context={"doc": invoice}, is_path=True)
invoice_xml = invoice_xml.replace("&", "&amp;")
xml_filename = progressive_name + ".xml"
_file = frappe.get_doc({
"doctype": "File",
"file_name": xml_filename,
"attached_to_doctype": doc.doctype,
"attached_to_name": doc.name,
"is_private": True,
"content": invoice_xml
})
_file.save()
return _file
@frappe.whitelist()
def generate_single_invoice(docname):
doc = frappe.get_doc("Sales Invoice", docname)
e_invoice = prepare_and_attach_invoice(doc, True)
return e_invoice.file_name
@frappe.whitelist()
def download_e_invoice_file(file_name):
content = None
with open(frappe.get_site_path('private', 'files', file_name), "r") as f:
content = f.read()
frappe.local.response.filename = file_name
frappe.local.response.filecontent = content
frappe.local.response.type = "download"
#Delete e-invoice attachment on cancel.
def sales_invoice_on_cancel(doc, method):
if get_company_country(doc.company) not in ['Italy',
'Italia', 'Italian Republic', 'Repubblica Italiana']:
return
for attachment in get_e_invoice_attachments(doc):
remove_file(attachment.name, attached_to_doctype=doc.doctype, attached_to_name=doc.name)
def get_company_country(company):
return frappe.get_cached_value('Company', company, 'country')
def get_e_invoice_attachments(invoice):
out = []
attachments = get_attachments(invoice.doctype, invoice.name)
company_tax_id = invoice.company_tax_id if invoice.company_tax_id.startswith("IT") else "IT" + invoice.company_tax_id
for attachment in attachments:
if attachment.file_name and attachment.file_name.startswith(company_tax_id) and attachment.file_name.endswith(".xml"):
out.append(attachment)
return out
def validate_address(address_name):
fields = ["pincode", "city", "country_code"]
data = frappe.get_cached_value("Address", address_name, fields, as_dict=1) or {}
for field in fields:
if not data.get(field):
frappe.throw(_("Please set {0} for address {1}".format(field.replace('-',''), address_name)),
title=_("E-Invoicing Information Missing"))
def get_unamended_name(doc):
attributes = ["naming_series", "amended_from"]
for attribute in attributes:
if not hasattr(doc, attribute):
return doc.name
if doc.amended_from:
return "-".join(doc.name.split("-")[:-1])
else:
return doc.name
def get_progressive_name_and_number(doc, replace=False):
if replace:
for attachment in get_e_invoice_attachments(doc):
remove_file(attachment.name, attached_to_doctype=doc.doctype, attached_to_name=doc.name)
filename = attachment.file_name.split(".xml")[0]
return filename, filename.split("_")[1]
company_tax_id = doc.company_tax_id if doc.company_tax_id.startswith("IT") else "IT" + doc.company_tax_id
progressive_name = frappe.model.naming.make_autoname(company_tax_id + "_.#####")
progressive_number = progressive_name.split("_")[1]
return progressive_name, progressive_number
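# Example (hypothetical tax id): for company_tax_id "IT01234567890" the autoname
# pattern above yields something like "IT01234567890_00001", and the function
# returns ("IT01234567890_00001", "00001").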
def set_state_code(doc, method):
if doc.get('country_code'):
doc.country_code = doc.country_code.upper()
if not doc.get('state'):
return
if not (hasattr(doc, "state_code") and doc.country in ["Italy", "Italia", "Italian Republic", "Repubblica Italiana"]):
return
state_codes_lower = {key.lower():value for key,value in state_codes.items()}
state = doc.get('state','').lower()
if state_codes_lower.get(state):
doc.state_code = state_codes_lower.get(state)
| gpl-3.0 | -7,337,605,459,802,171,000 | 37.165803 | 139 | 0.712802 | false |
naterh/chipsec | source/tool/chipsec/utilcmd/pci_cmd.py | 1 | 4384 | #!/usr/local/bin/python
#CHIPSEC: Platform Security Assessment Framework
#Copyright (c) 2010-2015, Intel Corporation
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; Version 2.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#Contact information:
#[email protected]
#
#
# usage as a standalone utility:
#
## \addtogroup standalone
#chipsec_util pci
#-----
#~~~
#chipsec_util pci enumerate
#chipsec_util pci <bus> <device> <function> <offset> <width> [value]
#''
# Examples:
#''
# chipsec_util pci enumerate
# chipsec_util pci 0 0 0 0x88 4
# chipsec_util pci 0 0 0 0x88 byte 0x1A
# chipsec_util pci 0 0x1F 0 0xDC 1 0x1
# chipsec_util pci 0 0 0 0x98 dword 0x004E0040
#~~~
__version__ = '1.0'
import os
import sys
import time
import chipsec_util
from chipsec.logger import *
from chipsec.file import *
from chipsec.hal.pci import *
usage = "chipsec_util pci enumerate\n" + \
"chipsec_util pci <bus> <device> <function> <offset> <width> [value]\n" + \
"Examples:\n" + \
" chipsec_util pci enumerate\n" + \
" chipsec_util pci 0 0 0 0x88 4\n" + \
" chipsec_util pci 0 0 0 0x88 byte 0x1A\n" + \
" chipsec_util pci 0 0x1F 0 0xDC 1 0x1\n" + \
" chipsec_util pci 0 0 0 0x98 dword 0x004E0040\n\n"
# ###################################################################
#
# PCIe Devices and Configuration Registers
#
# ###################################################################
def pci(argv):
if 3 > len(argv):
print usage
return
op = argv[2]
t = time.time()
if ( 'enumerate' == op ):
logger().log( "[CHIPSEC] Enumerating available PCIe devices.." )
print_pci_devices( chipsec_util._cs.pci.enumerate_devices() )
logger().log( "[CHIPSEC] (pci) time elapsed %.3f" % (time.time()-t) )
return
try:
bus = int(argv[2],16)
device = int(argv[3],16)
function = int(argv[4],16)
offset = int(argv[5],16)
if 6 == len(argv):
width = 1
else:
if 'byte' == argv[6]:
width = 1
elif 'word' == argv[6]:
width = 2
elif 'dword' == argv[6]:
width = 4
else:
width = int(argv[6])
except Exception as e :
print usage
return
if 8 == len(argv):
value = int(argv[7], 16)
if 1 == width:
chipsec_util._cs.pci.write_byte( bus, device, function, offset, value )
elif 2 == width:
chipsec_util._cs.pci.write_word( bus, device, function, offset, value )
elif 4 == width:
chipsec_util._cs.pci.write_dword( bus, device, function, offset, value )
else:
print "ERROR: Unsupported width 0x%x" % width
return
logger().log( "[CHIPSEC] writing PCI %d/%d/%d, off 0x%02X: 0x%X" % (bus, device, function, offset, value) )
else:
if 1 == width:
pci_value = chipsec_util._cs.pci.read_byte(bus, device, function, offset)
elif 2 == width:
pci_value = chipsec_util._cs.pci.read_word(bus, device, function, offset)
elif 4 == width:
pci_value = chipsec_util._cs.pci.read_dword(bus, device, function, offset)
else:
print "ERROR: Unsupported width 0x%x" % width
return
logger().log( "[CHIPSEC] reading PCI B/D/F %d/%d/%d, off 0x%02X: 0x%X" % (bus, device, function, offset, pci_value) )
logger().log( "[CHIPSEC] (pci) time elapsed %.3f" % (time.time()-t) )
chipsec_util.commands['pci'] = {'func' : pci , 'start_driver' : True, 'help' : usage }
| gpl-2.0 | 8,458,805,531,248,735,000 | 30.488889 | 125 | 0.551323 | false |
jakubbrindza/gtg | GTG/gtk/browser/tag_context_menu.py | 1 | 3597 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Getting Things GNOME! - a personal organizer for the GNOME desktop
# Copyright (c) 2008-2013 - Lionel Dricot & Bertrand Rousseau
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
"""
tag_context_menu:
Implements a context (pop-up) menu for the tag item in the sidebar.
Right now it is just an empty shell. It is supposed to become a more generic
sidebar context menu for all kinds of items displayed there.
Also, it is supposed to handle more complex menus (with non-std widgets,
like a color picker)
"""
from gi.repository import Gtk
from GTG.core.translations import _
from GTG.gtk.colors import generate_tag_color, color_add, color_remove
class TagContextMenu(Gtk.Menu):
"""Context menu fo the tag i the sidebar"""
def __init__(self, req, vmanager, tag=None):
Gtk.Menu.__init__(self)
self.req = req
self.vmanager = vmanager
self.tag = tag
# Build up the menu
self.set_tag(tag)
self.__build_menu()
def __build_menu(self):
"""Build up the widget"""
# Reset the widget
for i in self:
self.remove(i)
i.destroy()
if self.tag is not None:
# Color chooser FIXME: SHOULD BECOME A COLOR PICKER
self.mi_cc = Gtk.MenuItem()
self.mi_cc.set_label(_("Edit Tag..."))
self.mi_ctag = Gtk.MenuItem()
self.mi_ctag.set_label(_("Generate Color"))
self.append(self.mi_cc)
self.append(self.mi_ctag)
self.mi_cc.connect('activate', self.on_mi_cc_activate)
self.mi_ctag.connect('activate', self.on_mi_ctag_activate)
if self.tag.is_search_tag():
self.mi_del = Gtk.MenuItem()
self.mi_del.set_label(_("Delete"))
self.append(self.mi_del)
self.mi_del.connect('activate', self.on_mi_del_activate)
# Make it visible
self.show_all()
# PUBLIC API ##############################################################
def set_tag(self, tag):
"""Update the context menu items using the tag attributes."""
self.tag = tag
self.__build_menu()
# CALLBACKS ###############################################################
def on_mi_cc_activate(self, widget):
"""Callback: show the tag editor upon request"""
self.vmanager.open_tag_editor(self.tag)
def on_mi_ctag_activate(self, widget):
random_color = generate_tag_color()
present_color = self.tag.get_attribute('color')
if(present_color is not None):
color_remove(present_color)
self.tag.set_attribute('color', random_color)
color_add(random_color)
def on_mi_del_activate(self, widget):
""" delete a selected search """
self.req.remove_tag(self.tag.get_name())
| gpl-3.0 | 4,590,780,264,420,060,700 | 38.097826 | 79 | 0.582986 | false |
creasyw/IMTAphy | modules/phy/imtaphy/testConfigs/imtaphyViewer/scenario/Probe.py | 1 | 28585 | #!/usr/bin/env python
################################################################################
# This file is part of IMTAphy
# _____________________________________________________________________________
#
# Copyright (C) 2012
# Institute of Communication Networks (LKN)
# Department of Electrical Engineering and Information Technology (EE & IT)
# Technische Universitaet Muenchen
# Arcisstr. 21
# 80333 Muenchen - Germany
# http://www.lkn.ei.tum.de/~jan/imtaphy/index.html
#
# _____________________________________________________________________________
#
# IMTAphy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# IMTAphy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with IMTAphy. If not, see <http://www.gnu.org/licenses/>.
#
#################################################################################
#
# based on code with this license:
#
###############################################################################
# This file is part of openWNS (open Wireless Network Simulator)
# _____________________________________________________________________________
#
# Copyright (C) 2004-2007
# Chair of Communication Networks (ComNets)
# Kopernikusstr. 16, D-52074 Aachen, Germany
# phone: ++49-241-80-27910,
# fax: ++49-241-80-22242
# email: [email protected]
# www: http://www.openwns.org
# _____________________________________________________________________________
#
# openWNS is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License version 2 as published by the
# Free Software Foundation;
#
# openWNS is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import TableParser
class ProbeTypeError(Exception):
"""
Raised if not a probe of desired type
"""
pass
class Probe(object):
"""
Base class to read probes from files
"""
valueNames = ["minimum", "maximum", "trials", "mean", "variance", "relativeVariance",
"standardDeviation", "relativeStandardDeviation", "skewness",
"moment2", "moment3"]
def __init__(self, probeType, filename):
"""
Raises an error if file is not available or of desired type
"""
self.filename = filename
self.absFilename = os.path.abspath(self.filename)
self.__items = self.parseFile(self.absFilename)
self.dirname, self.filenameWithoutDir = os.path.split(self.filename)
if self.dirname == "":
self.dirname = "./"
# check if is probe file of desired type
# DISABLED: Does not work during LogEval / Moments / TimeSeries migration
# evaluation = self.getValue("Evaluation")
# if not probeType in evaluation:
# raise ProbeTypeError(str(self) + " tried to read a probe of type: " + probeType)
# This name is the name provided by the probe itself. It may
# not be unique for probe files kept in a directory,
# since it is a matter of configuration ...
self.name = self.getValue("Name")
# This name is built from the filename and therefore unique, at
# least for all probes in one directory
altName, ext = os.path.splitext(self.filenameWithoutDir)
self.altName = altName
self.description = self.getValue("Description")
self.minimum = self.getValue("Minimum")
self.maximum = self.getValue("Maximum")
self.trials = self.getValue("Trials")
self.mean = self.getValue("Mean")
self.variance = self.getValue("Variance")
self.relativeVariance = self.getValue("Relative variance")
self.standardDeviation = self.getValue("Standard deviation")
self.relativeStandardDeviation = self.getValue("Relative standard deviation")
self.skewness = self.getValue("Skewness")
self.moment2 = self.getValue("2nd moment")
self.moment3 = self.getValue("3rd moment")
self.sumOfAllValues = self.getValue("Sum of all values")
self.sumOfAllValuesSquare = self.getValue("(Sum of all values)^2")
self.sumOfAllValuesCubic = self.getValue("(Sum of all values)^3")
def parseFile(fileName):
""" parses self.filename
searches for the pattern: '# key: value', returns a dict with
the found keys and values
"""
items = {}
for line in file(fileName):
# strip spaces and newlines
line = line.strip()
if line.startswith("#"):
if ":" in line:
# strip spaces and "#" at the beginning of the line
line = line.lstrip("# ")
key, value = line.split(":")
# strip spaces and new lines around key and value
key = key.strip()
value = value.strip()
if not items.has_key(key):
items[key] = value
else:
raise Exception("Tried to add '" + key + "' but this was already found.")
else:
# when no "#" is found, we can stop parsing
break
return items
parseFile = staticmethod(parseFile)
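# Illustration (hypothetical probe header, not a real measurement file): lines such as
# # Name: packet.delay
# # Trials: 1000
# are collected by parseFile() into {"Name": "packet.delay", "Trials": "1000"};
# parsing stops at the first line that does not start with '#'.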
def getValue(self, parameter):
""" Try to find the value for 'parameter'
"""
value = self.__items[parameter]
# automatic conversion
# try int, float, string (in this order)
try:
return int(value)
except ValueError:
try:
return float(value)
except ValueError:
return value
# @staticmethod (this syntax works only for python >= 2.4)
def readProbes(probeType, probeClass, dirname):
result = {}
for ff in os.listdir(dirname):
filename = os.path.join(dirname, ff)
if os.path.isfile(filename):
if filename.endswith(probeType):
try:
#print "Probe:readProbes: probe=probeClass(%s)" % filename
probe = probeClass(filename)
result[probe.filenameWithoutDir] = probe
except ProbeTypeError, e:
pass
return result
readProbes = staticmethod(readProbes)
# Moments probe specific part
class MomentsProbeBase:
probeType = "Moments"
class MomentsProbe(Probe, MomentsProbeBase):
fileNameSig = "_Moments.dat"
def __init__(self, filename):
super(MomentsProbe, self).__init__("Moments", filename)
# @staticmethod (this syntax works only for python >= 2.4)
def readProbes(dirname):
return Probe.readProbes(MomentsProbe.fileNameSig, MomentsProbe, dirname)
readProbes = staticmethod(readProbes)
# PDF probe specific part
class PDFHistogramEntry(object):
__slots__ = ["x", "cdf", "ccdf", "pdf"]
def __init__(self, listOfValues):
self.x = float(listOfValues[0])
self.cdf = float(listOfValues[1])
self.ccdf = float(listOfValues[2])
self.pdf = float(listOfValues[3])
class PDFProbeBase:
probeType = "PDF"
class PDFProbe(Probe, PDFProbeBase):
fileNameSig = "_PDF.dat"
valueNames = ["P01","P05","P50","P95","P99","minX", "maxX", "numberOfBins", "underflows", "overflows"] + Probe.valueNames
histogram = None
def __init__(self, filename):
super(PDFProbe, self).__init__("PDF", filename)
# read Percentiles
self.P01 = self.getValue("P01")
self.P05 = self.getValue("P05")
self.P50 = self.getValue("P50")
self.P95 = self.getValue("P95")
self.P99 = self.getValue("P99")
# These parameters have not been measured but configured ...
self.minX = self.getValue("Left border of x-axis")
self.maxX = self.getValue("Right border of x-axis")
self.numberOfBins = self.getValue("Resolution of x-axis")
self.underflows = self.getValue("Underflows")
self.overflows = self.getValue("Overflows")
self.__histogram = []
self.__histogramRead = False
def __getHistogram(self):
if self.__histogramRead == False:
self.__histogram = []
for line in file(self.absFilename):
if not line.startswith('#'):
self.__histogram.append(PDFHistogramEntry(line.split()))
self.__histogramRead = True
return self.__histogram
histogram = property(__getHistogram)
# @staticmethod (this syntax works only for python >= 2.4)
def readProbes(dirname):
return Probe.readProbes(PDFProbe.fileNameSig, PDFProbe, dirname)
readProbes = staticmethod(readProbes)
def __getPureHistogram(self):
# actually there is one bin more than stated in numberOfBins
if len(self.histogram) == self.numberOfBins + 3:
# underflows and overflows
return self.histogram[1:self.numberOfBins + 1]
elif len(self.histogram) == self.numberOfBins + 2:
# underflows or overflows
if self.overflows > 0:
# overflows
return self.histogram[:self.numberOfBins + 1]
elif self.underflows > 0:
# underflows
return self.histogram[1:self.numberOfBins + 2]
else:
raise "Did not expect to reach this line"
else:
# everything was fine already
return self.histogram
pureHistogram = property(__getPureHistogram)
class TimeSeriesProbeBase:
probeType = "TimeSeries"
class TimeSeriesProbe(object, TimeSeriesProbeBase):
fileNameSig = "_TimeSeries.dat"
valueNames = []
filename = None
filenameWithoutDir = None
name = None
entries = None
def __init__(self, filename):
self.filename = filename
self.dirname, self.filenameWithoutDir = os.path.split(self.filename)
if self.dirname == "":
self.dirname = "./"
# Parse the file
items = Probe.parseFile(self.filename)
self.altName = self.filenameWithoutDir.rsplit('_', 1)[0]
self.name = items["Name"]
self.description = items["Description"]
self.entries = []
for line in file(self.filename):
if not line.startswith('#'):
self.entries.append(LogEvalEntry(line.split()))
# @staticmethod (this syntax works only for python >= 2.4)
def readProbes(dirname):
result = Probe.readProbes(TimeSeriesProbe.fileNameSig, TimeSeriesProbe, dirname)
return result
readProbes = staticmethod(readProbes)
# LogEval probe specific part
class LogEvalEntry(object):
__slots__ = ["x", "y"]
def __init__(self, listOfValues):
self.x = float(listOfValues[0])
self.y = float(listOfValues[1])
class LogEvalProbe(Probe):
fileNameSig = "_Log.dat"
valueNames = Probe.valueNames
entries = None
readAllValues = True
filenameEntries = None
probeType = "LogEval"
def __init__(self, filename):
super(LogEvalProbe, self).__init__("LogEval", filename)
splitFilename = filename.split(".")
splitFilename[-2] += ".log"
self.filenameEntries = str(".").join(splitFilename)
# In the renovated LogEval Probe, the header and the data are in one and the same file
# TODO: fileNameEntries can be removed when PDataBase/SortingCriterion are abandoned
if not os.path.exists(self.filenameEntries):
self.filenameEntries = filename
self.__entries = []
self.__entriesRead = False
def __getEntries(self):
if not self.readAllValues:
return []
if self.__entriesRead == False:
self.__entries = []
for line in file(self.filenameEntries):
if not line.startswith('#'):
self.__entries.append(LogEvalEntry(line.split()))
self.__entriesRead = True
return self.__entries
entries = property(__getEntries)
# @staticmethod (this syntax works only for python >= 2.4)
def readProbes(dirname):
return Probe.readProbes(LogEvalProbe.fileNameSig, LogEvalProbe, dirname)
readProbes = staticmethod(readProbes)
class BatchMeansHistogramEntry:
def __init__(self, listOfValues):
self.x = float(listOfValues[1])
self.cdf = float(listOfValues[0])
self.pdf = float(listOfValues[3])
self.relativeError = float(listOfValues[2])
self.confidence = float(listOfValues[4])
if len(listOfValues) > 5:
self.numberOfTrialsPerInterval = int(listOfValues[5])
else:
self.numberOfTrialsPerInterval = 0
class BatchMeansProbe(Probe):
fileNameSig = "_BaM.dat"
valueNames = ["lowerBorder", "upperBorder", "numberOfIntervals", "intervalSize",
"sizeOfGroups", "maximumRelativeError", "evaluatedGroups", "underflows",
"overflows", "meanBm", "confidenceOfMeanAbsolute", "confidenceOfMeanPercent",
"relativeErrorMean", "varianceBm", "confidenceOfVarianceAbsolute", "confidenceOfVariancePercent",
"relativeErrorVariance", "sigma", "firstOrderCorrelationCoefficient"] + Probe.valueNames
histogram = None
probeType = "BatchMeans"
def __init__(self, filename):
super(BatchMeansProbe, self).__init__("BatchMeans", filename)
self.lowerBorder = self.getValue("lower border")
self.upperBorder = self.getValue("upper border")
self.numberOfIntervals = self.getValue("number of intervals")
self.intervalSize = self.getValue("interval size")
self.sizeOfGroups = self.getValue("size of groups")
self.maximumRelativeError = self.getValue("maximum relative error [%]")
self.evaluatedGroups = self.getValue("evaluated groups")
self.underflows = self.getValue("Underflows")
self.overflows = self.getValue("Overflows")
self.meanBm = self.getValue("mean (BM version)")
self.confidenceOfMeanAbsolute = self.getValue("confidence of mean absolute [+-]")
self.confidenceOfMeanPercent = self.getValue("confidence of mean [%]")
self.relativeErrorMean = self.getValue("relative error (Bayes Error)")
self.varianceBm = self.getValue("variance (BM version)")
self.confidenceOfVarianceAbsolute = self.getValue("confidence of variance absolute [+-]")
self.confidenceOfVariancePercent = self.getValue("confidence of variance [%]")
self.relativeErrorVariance = self.getValue("relative error")
self.sigma = self.getValue("sigma")
self.firstOrderCorrelationCoefficient = self.getValue("1st order correlation coefficient")
# read x, CDF, PDF, relative error, confidence, number of trials
self.histogram = []
for line in file(self.absFilename):
if not line.startswith("#"):
self.histogram.append(BatchMeansHistogramEntry(line.split()))
# @staticmethod (this syntax works only for python >= 2.4)
def readProbes(dirname):
return Probe.readProbes(BatchMeansProbe.fileNameSig, BatchMeansProbe, dirname)
readProbes = staticmethod(readProbes)
class LreHistogramEntry(object):
def __init__(self, listOfValues):
self.ordinate = float(listOfValues[0])
self.abscissa = float(listOfValues[1])
if listOfValues[2] == 'not_available':
self.relativeError = float('nan')
else:
self.relativeError = float(listOfValues[2])
if listOfValues[3] == 'not_available':
self.meanLocalCorrelationCoefficient = float('nan')
else:
self.meanLocalCorrelationCoefficient = float(listOfValues[3])
if listOfValues[4] == 'not_available':
self.deviationFromMeanLocalCC = float('nan')
else:
self.deviationFromMeanLocalCC = float(listOfValues[4])
self.numberOfTrialsPerInterval = int(listOfValues[5])
if listOfValues[6] == 'not_available':
self.numberOfTransitionsPerInterval = float('nan')
else:
self.numberOfTransitionsPerInterval = int(listOfValues[6])
self.relativeErrorWithinLimit = listOfValues[7]
class LreProbe(Probe):
fileNameSigs = ["_LREF.dat",
"_LREF_pf.dat",
"_LREG.dat",
"_LREG_pf.dat"]
valueNames = ["lreType", "maximumRelativeError", "fMax", "fMin", "scaling",
"maximumNumberOfTrialsPerLevel", "rhoN60", "rhoN50",
"rhoN40", "rhoN30", "rhoN20", "rhoN10", "rho00",
"rhoP25", "rhoP50", "rhoP75", "rhoP90", "rhoP95", "rhoP99",
"peakNumberOfSortingElements", "resultIndexOfCurrentLevel", "numberOfLevels",
"relativeErrorMean", "relativeErrorVariance", "relativeErrorStandardDeviation",
"meanLocalCorrelationCoefficientMean", "meanLocalCorrelationCoefficientVariance",
"meanLocalCorrelationCoefficientStandardDeviation", "numberOfTrialsPerIntervalMean",
"numberOfTrialsPerIntervalVariance", "numberOfTrialsPerIntervalStandardDeviation",
"numberOfTransitionsPerIntervalMean", "numberOfTransitionsPerIntervalVariance",
"numberOfTransitionsPerIntervalStandardDeviation"] + Probe.valueNames
histogram = None
probeType = "LRE"
def __init__(self, filename):
super(LreProbe, self).__init__("LRE", filename)
self.lreType = self.getValue("Evaluation")
self.maximumRelativeError = self.getValue("Maximum relative error [%]")
self.fMax = self.getValue("F max")
self.fMin = self.getValue("F min")
self.scaling = self.getValue("Scaling")
self.maximumNumberOfTrialsPerLevel = self.getValue("Maximum number of trials per level")
self.rhoN60 = self.getValue("correlated (rho = -0.60)")
self.rhoN50 = self.getValue("correlated (rho = -0.50)")
self.rhoN40 = self.getValue("correlated (rho = -0.40)")
self.rhoN30 = self.getValue("correlated (rho = -0.30)")
self.rhoN20 = self.getValue("correlated (rho = -0.20)")
self.rhoN10 = self.getValue("correlated (rho = -0.10)")
self.rho00 = self.getValue("uncorrelated (rho = 0.00)")
self.rhoP25 = self.getValue("correlated (rho = +0.25)")
self.rhoP50 = self.getValue("correlated (rho = +0.50)")
self.rhoP75 = self.getValue("correlated (rho = +0.75)")
self.rhoP90 = self.getValue("correlated (rho = +0.90)")
self.rhoP95 = self.getValue("correlated (rho = +0.95)")
self.rhoP99 = self.getValue("correlated (rho = +0.99)")
self.peakNumberOfSortingElements = self.getValue("Peak number of sorting mem. elems.")
self.resultIndexOfCurrentLevel = self.getValue("Result memory index of current level")
self.numberOfLevels = self.getValue("Number of levels")
self.relativeErrorMean = self.getValue("Relative error (Mean)")
self.relativeErrorVariance = self.getValue("Relative error (Variance)")
self.relativeErrorStandardDeviation = self.getValue("Relative error (Standard deviation)")
self.meanLocalCorrelationCoefficientMean = self.getValue("Mean local correlation coefficient (Mean)")
self.meanLocalCorrelationCoefficientVariance = self.getValue("Mean local correlation coefficient (Variance)")
self.meanLocalCorrelationCoefficientStandardDeviation = self.getValue("Mean local correlation coefficient (Standard deviation)")
self.deviationFromMeanLocalCCMean = self.getValue("Deviation from mean local c.c.(Mean)")
self.deviationFromMeanLocalCCVariance = self.getValue("Deviation from mean local c.c.(Variance)")
self.deviationFromMeanLocalCCStandardDeviation = self.getValue("Deviation from mean local c.c.(Standard deviation)")
self.numberOfTrialsPerIntervalMean = self.getValue("Number of trials per interval (Mean)")
self.numberOfTrialsPerIntervalVariance = self.getValue("Number of trials per interval (Variance)")
self.numberOfTrialsPerIntervalStandardDeviation = self.getValue("Number of trials per interval (Standard deviation)")
self.numberOfTransitionsPerIntervalMean = self.getValue("Number of transitions per interval (Mean)")
self.numberOfTransitionsPerIntervalVariance = self.getValue("Number of transitions per interval (Variance)")
self.numberOfTransitionsPerIntervalStandardDeviation = self.getValue("Number of transitions per interval (Standard deviation)")
self.histogram = []
for line in file(self.absFilename):
if not line.startswith("#"):
self.histogram.append(LreHistogramEntry(line.split()))
# @staticmethod (this syntax works only for python >= 2.4)
def readProbes(dirname):
result = {}
for suffix in LreProbe.fileNameSigs:
result.update(Probe.readProbes(suffix, LreProbe, dirname))
return result
readProbes = staticmethod(readProbes)
class DlreHistogramEntry(LreHistogramEntry):
def __init__(self, listOfValues):
super(DlreHistogramEntry, self).__init__(listOfValues)
class DlreProbe(Probe):
fileNameSigs = ["_DLREF.dat",
"_DLREG.dat",
"_DLREP.dat"]
valueNames = ["lreType", "lowerBorder", "upperBorder", "numberOfIntervals",
"intervalSize", "maximumNumberOfSamples", "maximumRelativeErrorPercent",
"evaluatedLevels", "underflows", "overflows"] + Probe.valueNames
histogram = None
probeType = "DLRE"
def __init__(self, filename):
super(DlreProbe, self).__init__("DLRE", filename)
self.dlreType = self.getValue("Evaluation")
self.lowerBorder = self.getValue("lower border")
self.upperBorder = self.getValue("upper border")
self.numberOfIntervals = self.getValue("number of intervals")
self.intervalSize = self.getValue("interval size")
self.maximumNumberOfSamples = self.getValue("maximum number of samples")
self.maximumRelativeErrorPercent = self.getValue("maximum relative error [%]")
self.evaluatedLevels = self.getValue("evaluated levels")
self.underflows = self.getValue("Underflows")
self.overflows = self.getValue("Overflows")
self.histogram = []
for line in file(self.absFilename):
if not line.startswith("#"):
self.histogram.append(DlreHistogramEntry(line.split()))
# @staticmethod (this syntax works only for python >= 2.4)
def readProbes(dirname):
result = {}
for suffix in DlreProbe.fileNameSigs:
result.update(Probe.readProbes(suffix, DlreProbe, dirname))
return result
readProbes = staticmethod(readProbes)
class TableProbe:
fileNameSigs = ['_mean.dat',
'_max.dat',
'_min.dat',
'_trials.dat',
'_var.dat',
'_mean.m',
'_max.m',
'_min.m',
'_trials.m',
'_var.m',
] # there are more than these, but these are the most commonly used ones.
valueNames = ["minimum", "maximum"]
tableParser = None
filename = None
filenameWithoutDir = None
name = None
probeType = "Table"
def __init__(self, filename):
self.tableParser = TableParser.TableParser(filename)
self.filename = filename
self.dirname, self.filenameWithoutDir = os.path.split(self.filename)
if self.dirname == "":
self.dirname = "./"
self.name = self.filenameWithoutDir.rsplit('_', 1)[0]
self.type = self.filenameWithoutDir.rsplit('_', 1)[1]
self.description = self.tableParser.getDescription()
self.minimum = self.tableParser.minimum
self.maximum = self.tableParser.maximum
self.trials = self.tableParser.trials
self.mean = "-"
self.variance = "-"
self.relativeVariance = "-"
self.standardDeviation = "-"
self.relativeStandardDeviation = "-"
self.skewness = "-"
self.moment2 = "-"
self.moment3 = "-"
# @staticmethod (this syntax works only for python >= 2.4)
def readProbes(dirname):
result = {}
for suffix in TableProbe.fileNameSigs:
#print "Calling result.update(Probe.readProbes(%s, TableProbe, %s))" % (suffix, dirname)
try:
result.update(Probe.readProbes(suffix, TableProbe, dirname))
except TableParser.WrongDimensions:
continue
return result
readProbes = staticmethod(readProbes)
def readAllProbes(dirname):
result = {}
result = PDFProbe.readProbes(dirname)
result.update(LogEvalProbe.readProbes(dirname))
result.update(TimeSeriesProbe.readProbes(dirname))
result.update(MomentsProbe.readProbes(dirname))
result.update(TableProbe.readProbes(dirname))
# @todo: update result dict with table probes when simcontrol can handle them
return result
def getProbeType(filename):
"""This function identifies and returns the type of a probe file"""
for probeType in [ MomentsProbe, PDFProbe, LogEvalProbe, TimeSeriesProbe ]:
if probeType.fileNameSig in filename:
return probeType
for tableSuffix in TableProbe.fileNameSigs:
if tableSuffix in filename:
return TableProbe
# if nothing was found
raise TypeError("Could not identify probe type from filename: "+fileName)
| gpl-2.0 | 8,648,500,860,190,995,000 | 41.600596 | 136 | 0.576141 | false |
snap-stanford/ogb | examples/nodeproppred/products/cluster_gcn.py | 1 | 6509 | import argparse
import torch
from tqdm import tqdm
import torch.nn.functional as F
from torch_geometric.data import ClusterData, ClusterLoader, NeighborSampler
from torch_geometric.nn import SAGEConv
from ogb.nodeproppred import PygNodePropPredDataset, Evaluator
from logger import Logger
class SAGE(torch.nn.Module):
def __init__(self, in_channels, hidden_channels, out_channels, num_layers,
dropout):
super(SAGE, self).__init__()
self.convs = torch.nn.ModuleList()
self.convs.append(SAGEConv(in_channels, hidden_channels))
for _ in range(num_layers - 2):
self.convs.append(SAGEConv(hidden_channels, hidden_channels))
self.convs.append(SAGEConv(hidden_channels, out_channels))
self.dropout = dropout
def reset_parameters(self):
for conv in self.convs:
conv.reset_parameters()
def forward(self, x, edge_index):
for conv in self.convs[:-1]:
x = conv(x, edge_index)
x = F.relu(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = self.convs[-1](x, edge_index)
return torch.log_softmax(x, dim=-1)
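# Note on inference() below: it evaluates layer by layer over the full graph,
# applying each SAGEConv to every node (streamed in mini-batches by
# subgraph_loader) before moving on, which avoids the neighbourhood explosion
# of sampling all hops at once.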
def inference(self, x_all, subgraph_loader, device):
pbar = tqdm(total=x_all.size(0) * len(self.convs))
pbar.set_description('Evaluating')
for i, conv in enumerate(self.convs):
xs = []
for batch_size, n_id, adj in subgraph_loader:
edge_index, _, size = adj.to(device)
x = x_all[n_id].to(device)
x_target = x[:size[1]]
x = conv((x, x_target), edge_index)
if i != len(self.convs) - 1:
x = F.relu(x)
xs.append(x.cpu())
pbar.update(batch_size)
x_all = torch.cat(xs, dim=0)
pbar.close()
return x_all
def train(model, loader, optimizer, device):
model.train()
total_loss = total_examples = 0
total_correct = total_examples = 0
for data in loader:
data = data.to(device)
if data.train_mask.sum() == 0:
continue
optimizer.zero_grad()
out = model(data.x, data.edge_index)[data.train_mask]
y = data.y.squeeze(1)[data.train_mask]
loss = F.nll_loss(out, y)
loss.backward()
optimizer.step()
num_examples = data.train_mask.sum().item()
total_loss += loss.item() * num_examples
total_examples += num_examples
total_correct += out.argmax(dim=-1).eq(y).sum().item()
total_examples += y.size(0)
return total_loss / total_examples, total_correct / total_examples
@torch.no_grad()
def test(model, data, evaluator, subgraph_loader, device):
model.eval()
out = model.inference(data.x, subgraph_loader, device)
y_true = data.y
y_pred = out.argmax(dim=-1, keepdim=True)
train_acc = evaluator.eval({
'y_true': y_true[data.train_mask],
'y_pred': y_pred[data.train_mask]
})['acc']
valid_acc = evaluator.eval({
'y_true': y_true[data.valid_mask],
'y_pred': y_pred[data.valid_mask]
})['acc']
test_acc = evaluator.eval({
'y_true': y_true[data.test_mask],
'y_pred': y_pred[data.test_mask]
})['acc']
return train_acc, valid_acc, test_acc
def main():
parser = argparse.ArgumentParser(description='OGBN-Products (Cluster-GCN)')
parser.add_argument('--device', type=int, default=0)
parser.add_argument('--log_steps', type=int, default=1)
parser.add_argument('--num_partitions', type=int, default=15000)
parser.add_argument('--num_workers', type=int, default=12)
parser.add_argument('--num_layers', type=int, default=3)
parser.add_argument('--hidden_channels', type=int, default=256)
parser.add_argument('--dropout', type=float, default=0.5)
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--epochs', type=int, default=50)
parser.add_argument('--eval_steps', type=int, default=5)
parser.add_argument('--runs', type=int, default=10)
args = parser.parse_args()
print(args)
device = f'cuda:{args.device}' if torch.cuda.is_available() else 'cpu'
device = torch.device(device)
dataset = PygNodePropPredDataset(name='ogbn-products')
split_idx = dataset.get_idx_split()
data = dataset[0]
# Convert split indices to boolean masks and add them to `data`.
for key, idx in split_idx.items():
mask = torch.zeros(data.num_nodes, dtype=torch.bool)
mask[idx] = True
data[f'{key}_mask'] = mask
cluster_data = ClusterData(data, num_parts=args.num_partitions,
recursive=False, save_dir=dataset.processed_dir)
loader = ClusterLoader(cluster_data, batch_size=args.batch_size,
shuffle=True, num_workers=args.num_workers)
subgraph_loader = NeighborSampler(data.edge_index, sizes=[-1],
batch_size=1024, shuffle=False,
num_workers=args.num_workers)
model = SAGE(data.x.size(-1), args.hidden_channels, dataset.num_classes,
args.num_layers, args.dropout).to(device)
evaluator = Evaluator(name='ogbn-products')
logger = Logger(args.runs, args)
for run in range(args.runs):
model.reset_parameters()
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
for epoch in range(1, 1 + args.epochs):
loss, train_acc = train(model, loader, optimizer, device)
if epoch % args.log_steps == 0:
print(f'Run: {run + 1:02d}, '
f'Epoch: {epoch:02d}, '
f'Loss: {loss:.4f}, '
f'Approx Train Acc: {train_acc:.4f}')
if epoch > 19 and epoch % args.eval_steps == 0:
result = test(model, data, evaluator, subgraph_loader, device)
logger.add_result(run, result)
train_acc, valid_acc, test_acc = result
print(f'Run: {run + 1:02d}, '
f'Epoch: {epoch:02d}, '
f'Train: {100 * train_acc:.2f}%, '
f'Valid: {100 * valid_acc:.2f}% '
f'Test: {100 * test_acc:.2f}%')
logger.print_statistics(run)
logger.print_statistics()
if __name__ == "__main__":
main()
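# Example invocation (flag values are assumptions that simply restate the
# defaults above, not recommended settings):
#   python cluster_gcn.py --device 0 --num_partitions 15000 --batch_size 32 --epochs 50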
| mit | -6,371,632,995,247,485,000 | 33.807487 | 79 | 0.580734 | false |
billion57/lykos | src/messages.py | 1 | 1332 | import json
import os
import src.settings as var
MESSAGES_DIR = os.path.join(os.path.dirname(__file__), "..", "messages")
ROOT_DIR = os.path.join(os.path.dirname(__file__), "..")
class Messages:
def __init__ (self):
self.lang = var.LANGUAGE
self._load_messages()
def get(self, key):
if not self.messages[key.lower()]:
raise KeyError("Key {0!r} does not exist! Add it to messages.json".format(key))
return self.messages[key.lower()]
__getitem__ = get
def _load_messages(self):
with open(os.path.join(MESSAGES_DIR, self.lang + ".json")) as f:
self.messages = json.load(f)
if not os.path.isfile(os.path.join(ROOT_DIR, "messages.json")):
return
with open(os.path.join(ROOT_DIR, "messages.json")) as f:
custom_msgs = json.load(f)
if not custom_msgs:
return
for key, message in custom_msgs.items():
if key in self.messages:
if not isinstance(message, type(self.messages[key.lower()])):
raise TypeError("messages.json: Key {0!r} must be of type {1!r}".format(key, type(self.messages[key.lower()]).__name__))
self.messages[key.lower()] = message
messages = Messages()
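# Usage sketch (hypothetical key; the real keys live in messages/<lang>.json):
#   from src.messages import messages
#   text = messages["bot_quitting"]  # get()/[] raise KeyError for unknown keys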
# Because woffle is needy
# vim: set sw=4 expandtab:
| bsd-2-clause | 7,753,922,367,016,149,000 | 30.714286 | 140 | 0.589339 | false |
sdrogers/ms2ldaviz | ms2ldaviz/decomposition/migrations/0012_auto_20170228_0712.py | 1 | 1821 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-02-28 07:12
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('decomposition', '0011_auto_20170221_1013'),
]
operations = [
migrations.CreateModel(
name='GlobalMotifsToSets',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('motif', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='decomposition.GlobalMotif')),
],
),
migrations.CreateModel(
name='MotifSet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128, unique=True)),
('featureset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='decomposition.FeatureSet')),
],
),
migrations.AddField(
model_name='documentglobalmass2motif',
name='decomposition',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='decomposition.Decomposition'),
),
migrations.AddField(
model_name='globalmotifstosets',
name='motifset',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='decomposition.MotifSet'),
),
migrations.AddField(
model_name='decomposition',
name='motifset',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='decomposition.MotifSet'),
),
]
| mit | 7,012,930,820,296,582,000 | 38.586957 | 126 | 0.610653 | false |
ljjjustin/crystal | tools/gendiagram.py | 1 | 2389 | #!/usr/bin/env python
import argparse
import os
import re
import sys
import string
import traceback
import urllib
ALL_STYLES = [
'default', 'earth', 'qsd', # traditional
'rose', 'napkin', 'mscgen', # traditional
'modern-blue', 'omegapple', 'roundgreen', # colourful
]
WEBSITE="http://www.websequencediagrams.com/"
def gen_sequence_diagram(source, target=None, style='default'):
# read source content
with open(source, 'r') as f:
text = f.read()
if not text:
print "%s is empty, exit..." % source
sys.exit(0)
# make request
request = {}
request["apiVersion"] = "1"
request["message"] = text
request["style"] = style
url = urllib.urlencode(request)
resp = urllib.urlopen(WEBSITE, url)
line = resp.readline()
resp.close()
expr = re.compile("(\?(img|pdf|png|svg)=[a-zA-Z0-9]+)")
match = expr.search(line)
if match == None:
raise Exception("Invalid response from server.")
image_url = match.group(0)
if not target:
suffix = string.split(image_url, '=')[0][1:]
dirname = os.path.dirname(source)
basename = os.path.basename(source)
filename = string.rstrip(basename, '.wsd')
output = string.join([filename , suffix], sep='.')
target = os.path.join(dirname, output)
urllib.urlretrieve(WEBSITE + image_url, target)
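# Usage sketch (hypothetical paths): render a sequence-diagram source file and
# let the server decide the image format, e.g.
#   gen_sequence_diagram('docs/login.wsd', style='modern-blue')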
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input', metavar='input file',
required=True, help='Input file name')
parser.add_argument('-o', '--output', metavar='output file',
default=None, help='Output file name')
parser.add_argument('-s', '--style', metavar='output style',
default='modern-blue',
help='Output image style, all style: %s' % ALL_STYLES)
args = parser.parse_args()
source = args.input
target = args.output
style = args.style
if not os.path.exists(source):
print "%s do not exists" % source
sys.exit(-1)
if style not in ALL_STYLES:
print "%s style do not supported" % style
sys.exit(-2)
try:
gen_sequence_diagram(source, target, style=style)
except Exception as e:
print traceback.print_exc()
sys.exit(-3)
| apache-2.0 | -4,146,312,819,985,038,300 | 25.544444 | 78 | 0.582671 | false |
rspavel/spack | lib/spack/spack/test/llnl/util/cpu.py | 1 | 10173 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import pytest
import contextlib
import os.path
import jsonschema
import llnl.util.cpu
import llnl.util.cpu.detect
import spack.paths
# This is needed to check that with repr we could create equivalent objects
from llnl.util.cpu import Microarchitecture # noqa
@pytest.fixture(params=[
'linux-ubuntu18.04-broadwell',
'linux-rhel7-broadwell',
'linux-rhel7-skylake_avx512',
'linux-rhel7-ivybridge',
'linux-rhel7-haswell',
'linux-rhel7-zen',
'linux-scientific7-k10',
'linux-scientificfermi6-bulldozer',
'linux-scientificfermi6-piledriver',
'linux-scientific7-piledriver',
'linux-rhel6-piledriver',
'linux-centos7-power8le',
'linux-centos7-thunderx2',
'linux-centos7-cascadelake',
'darwin-mojave-ivybridge',
'darwin-mojave-haswell',
'darwin-mojave-skylake',
])
def expected_target(request, monkeypatch):
cpu = llnl.util.cpu
platform, operating_system, target = request.param.split('-')
architecture_family = llnl.util.cpu.targets[target].family
monkeypatch.setattr(
cpu.detect.platform, 'machine', lambda: str(architecture_family)
)
# Monkeypatch for linux
if platform == 'linux':
monkeypatch.setattr(cpu.detect.platform, 'system', lambda: 'Linux')
@contextlib.contextmanager
def _open(not_used_arg):
filename = os.path.join(
spack.paths.test_path, 'data', 'targets', request.param
)
with open(filename) as f:
yield f
monkeypatch.setattr(cpu.detect, 'open', _open, raising=False)
elif platform == 'darwin':
monkeypatch.setattr(cpu.detect.platform, 'system', lambda: 'Darwin')
filename = os.path.join(
spack.paths.test_path, 'data', 'targets', request.param
)
info = {}
with open(filename) as f:
for line in f:
key, value = line.split(':')
info[key.strip()] = value.strip()
def _check_output(args, env):
current_key = args[-1]
return info[current_key]
monkeypatch.setattr(cpu.detect, 'check_output', _check_output)
return llnl.util.cpu.targets[target]
@pytest.fixture(params=[x for x in llnl.util.cpu.targets])
def supported_target(request):
return request.param
@pytest.mark.regression('13803')
def test_target_detection(expected_target):
detected_target = llnl.util.cpu.host()
assert detected_target == expected_target
def test_no_dashes_in_target_names(supported_target):
assert '-' not in supported_target
def test_str_conversion(supported_target):
assert supported_target == str(llnl.util.cpu.targets[supported_target])
def test_repr_conversion(supported_target):
target = llnl.util.cpu.targets[supported_target]
assert eval(repr(target)) == target
def test_equality(supported_target):
target = llnl.util.cpu.targets[supported_target]
for name, other_target in llnl.util.cpu.targets.items():
if name == supported_target:
assert other_target == target
else:
assert other_target != target
@pytest.mark.parametrize('operation,expected_result', [
# Test microarchitectures that are ordered with respect to each other
('x86_64 < skylake', True),
('icelake > skylake', True),
('piledriver <= steamroller', True),
('zen2 >= zen', True),
('zen >= zen', True),
('aarch64 <= thunderx2', True),
('aarch64 <= a64fx', True),
# Test unrelated microarchitectures
('power8 < skylake', False),
('power8 <= skylake', False),
('skylake < power8', False),
('skylake <= power8', False),
# Test microarchitectures of the same family that are not a "subset"
# of each other
('cascadelake > cannonlake', False),
('cascadelake < cannonlake', False),
('cascadelake <= cannonlake', False),
('cascadelake >= cannonlake', False),
('cascadelake == cannonlake', False),
('cascadelake != cannonlake', True)
])
def test_partial_ordering(operation, expected_result):
target, operator, other_target = operation.split()
target = llnl.util.cpu.targets[target]
other_target = llnl.util.cpu.targets[other_target]
    code = 'target ' + operator + ' other_target'
assert eval(code) is expected_result
@pytest.mark.parametrize('target_name,expected_family', [
('skylake', 'x86_64'),
('zen', 'x86_64'),
('pentium2', 'x86'),
])
def test_architecture_family(target_name, expected_family):
target = llnl.util.cpu.targets[target_name]
assert str(target.family) == expected_family
@pytest.mark.parametrize('target_name,feature', [
('skylake', 'avx2'),
('icelake', 'avx512f'),
# Test feature aliases
('icelake', 'avx512'),
('skylake', 'sse3'),
('power8', 'altivec'),
('broadwell', 'sse4.1'),
('skylake', 'clflushopt'),
('aarch64', 'neon')
])
def test_features_query(target_name, feature):
target = llnl.util.cpu.targets[target_name]
assert feature in target
@pytest.mark.parametrize('target_name,wrong_feature', [
('skylake', 1),
('bulldozer', llnl.util.cpu.targets['x86_64'])
])
def test_wrong_types_for_features_query(target_name, wrong_feature):
target = llnl.util.cpu.targets[target_name]
with pytest.raises(TypeError, match='only objects of string types'):
assert wrong_feature in target
def test_generic_microarchitecture():
generic_march = llnl.util.cpu.generic_microarchitecture('foo')
assert generic_march.name == 'foo'
assert not generic_march.features
assert not generic_march.ancestors
assert generic_march.vendor == 'generic'
def test_target_json_schema():
# The file microarchitectures.json contains static data i.e. data that is
# not meant to be modified by users directly. It is thus sufficient to
# validate it only once during unit tests.
json_data = llnl.util.cpu.schema.targets_json.data
jsonschema.validate(json_data, llnl.util.cpu.schema.schema)
@pytest.mark.parametrize('target_name,compiler,version,expected_flags', [
# Test GCC
('x86_64', 'gcc', '4.9.3', '-march=x86-64 -mtune=generic'),
('x86_64', 'gcc', '4.2.0', '-march=x86-64 -mtune=generic'),
('x86_64', 'gcc', '4.1.1', '-march=x86-64 -mtune=x86-64'),
('nocona', 'gcc', '4.9.3', '-march=nocona -mtune=nocona'),
('nehalem', 'gcc', '4.9.3', '-march=nehalem -mtune=nehalem'),
('nehalem', 'gcc', '4.8.5', '-march=corei7 -mtune=corei7'),
('sandybridge', 'gcc', '4.8.5', '-march=corei7-avx -mtune=corei7-avx'),
('thunderx2', 'gcc', '4.8.5', '-march=armv8-a'),
('thunderx2', 'gcc', '4.9.3', '-march=armv8-a+crc+crypto'),
# Test Clang / LLVM
('sandybridge', 'clang', '3.9.0', '-march=sandybridge -mtune=sandybridge'),
('icelake', 'clang', '6.0.0', '-march=icelake -mtune=icelake'),
('icelake', 'clang', '8.0.0',
'-march=icelake-client -mtune=icelake-client'),
('zen2', 'clang', '9.0.0', '-march=znver2 -mtune=znver2'),
('power9le', 'clang', '8.0.0', '-mcpu=power9 -mtune=power9'),
('thunderx2', 'clang', '6.0.0', '-mcpu=thunderx2t99'),
# Test Intel on Intel CPUs
('sandybridge', 'intel', '17.0.2', '-march=corei7-avx -mtune=corei7-avx'),
('sandybridge', 'intel', '18.0.5',
'-march=sandybridge -mtune=sandybridge'),
# Test Intel on AMD CPUs
pytest.param('steamroller', 'intel', '17.0.2', '-msse4.2',
marks=pytest.mark.filterwarnings('ignore::UserWarning')),
pytest.param('zen', 'intel', '17.0.2', '-march=core-avx2 -mtune=core-avx2',
marks=pytest.mark.filterwarnings('ignore::UserWarning')),
# Test that an unknown compiler returns an empty string
('sandybridge', 'unknown', '4.8.5', ''),
])
def test_optimization_flags(target_name, compiler, version, expected_flags):
target = llnl.util.cpu.targets[target_name]
flags = target.optimization_flags(compiler, version)
assert flags == expected_flags
@pytest.mark.parametrize('target_name,compiler,version', [
('excavator', 'gcc', '4.8.5')
])
def test_unsupported_optimization_flags(target_name, compiler, version):
target = llnl.util.cpu.targets[target_name]
with pytest.raises(
llnl.util.cpu.UnsupportedMicroarchitecture,
match='cannot produce optimized binary'
):
target.optimization_flags(compiler, version)
@pytest.mark.parametrize('operation,expected_result', [
# In the tests below we won't convert the right hand side to
# Microarchitecture, so that automatic conversion from a known
# target name will be tested
('cascadelake > cannonlake', False),
('cascadelake < cannonlake', False),
('cascadelake <= cannonlake', False),
('cascadelake >= cannonlake', False),
('cascadelake == cannonlake', False),
('cascadelake != cannonlake', True)
])
def test_automatic_conversion_on_comparisons(operation, expected_result):
target, operator, other_target = operation.split()
target = llnl.util.cpu.targets[target]
    code = 'target ' + operator + ' other_target'
assert eval(code) is expected_result
@pytest.mark.parametrize('version,expected_number,expected_suffix', [
('4.2.0', '4.2.0', ''),
('4.2.0-apple', '4.2.0', 'apple'),
('my-funny-name-with-dashes', '', 'my-funny-name-with-dashes'),
('10.3.56~svnr64537', '10.3.56', '~svnr64537')
])
def test_version_components(version, expected_number, expected_suffix):
number, suffix = llnl.util.cpu.version_components(version)
assert number == expected_number
assert suffix == expected_suffix
def test_invalid_family():
targets = llnl.util.cpu.targets
multi_parents = Microarchitecture(
name='chimera', parents=[targets['pentium4'], targets['power7']],
vendor='Imagination', features=[], compilers={}, generation=0
)
with pytest.raises(AssertionError,
match='a target is expected to belong'):
multi_parents.family
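# Illustrative usage of the API exercised above (added note, not part of the
# original test module); all names come from llnl.util.cpu as used in the tests:
#
#     host = llnl.util.cpu.host()                  # detected Microarchitecture
#     target = llnl.util.cpu.targets['skylake']    # lookup by name
#     'avx2' in target                             # feature query
#     target.optimization_flags('gcc', '9.2.0')    # compiler-specific tuning flags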
| lgpl-2.1 | 5,764,354,231,056,630,000 | 34.07931 | 79 | 0.65261 | false |
patochectp/navitia | source/tyr/tyr/resources.py | 1 | 64116 | # coding: utf-8
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, division
from flask import current_app, url_for, request
import flask_restful
from flask_restful import marshal_with, marshal, reqparse, inputs, abort
import sqlalchemy
from validate_email import validate_email
from datetime import datetime
from tyr.tyr_user_event import TyrUserEvent
from tyr.tyr_end_point_event import EndPointEventMessage, TyrEventsRabbitMq
from tyr.helper import load_instance_config
import logging
import os
import shutil
import json
from jsonschema import validate, ValidationError
from tyr.formats import poi_type_conf_format, parse_error
from navitiacommon.default_traveler_profile_params import (
default_traveler_profile_params,
acceptable_traveler_types,
)
from navitiacommon import models, utils
from navitiacommon.models import db
from navitiacommon.parser_args_type import CoordFormat, PositiveFloat, BooleanType, OptionValue, geojson_argument
from functools import wraps
from tyr.validations import datetime_format
from tyr.tasks import (
create_autocomplete_depot,
remove_autocomplete_depot,
import_autocomplete,
cities,
cosmogony2cities,
COSMOGONY_REGEXP,
)
from tyr.helper import get_instance_logger, save_in_tmp
from tyr.fields import *
from werkzeug.exceptions import BadRequest
import werkzeug
__ALL__ = ['Api', 'Instance', 'User', 'Key']
class Api(flask_restful.Resource):
def __init__(self):
pass
def get(self):
return marshal(models.Api.query.all(), api_fields)
class Index(flask_restful.Resource):
def get(self):
return {'jobs': {'href': url_for('jobs', _external=True)}}
class Job(flask_restful.Resource):
@marshal_with(jobs_fields)
def get(self, instance_name=None):
query = models.Job.query
if instance_name:
query = query.join(models.Instance)
query = query.filter(models.Instance.name == instance_name)
return {'jobs': query.order_by(models.Job.created_at.desc()).limit(30)}
def post(self, instance_name):
instance = models.Instance.query_existing().filter_by(name=instance_name).first_or_404()
if not request.files:
return {'message': 'the Data file is missing'}, 400
content = request.files['file']
logger = get_instance_logger(instance)
logger.info('content received: %s', content)
instance = load_instance_config(instance_name)
if not os.path.exists(instance.source_directory):
return ({'error': 'input folder unavailable'}, 500)
full_file_name = os.path.join(os.path.realpath(instance.source_directory), content.filename)
content.save(full_file_name + ".tmp")
shutil.move(full_file_name + ".tmp", full_file_name)
return {'message': 'OK'}, 200
def _validate_poi_types_json(poi_types_json):
"""
    poi_types configuration must follow some rules so that the binarisation is OK.
    It's checked at binarisation time, but tyr must also reject a broken config directly so that the user knows.
"""
# Check that the conf is a valid json
try:
validate(poi_types_json, poi_type_conf_format)
except ValidationError as e:
abort(400, status="error", message='{}'.format(parse_error(e)))
# Check that poi_type.id defined are unique
poi_types_map = {}
for p in poi_types_json.get('poi_types', []):
if p.get('id') in poi_types_map:
abort(400, status="error", message='POI type id {} is defined multiple times'.format(p.get('id')))
poi_types_map[p.get('id')] = p.get('name')
# Check that poi_type.id 'amenity:parking' and 'amenity:bicycle_rental' are defined.
# Those are mandatory as they are used for journey processing (BSS and car).
if not 'amenity:parking' in poi_types_map or not 'amenity:bicycle_rental' in poi_types_map:
abort(
400,
status="error",
message='The 2 POI types id=amenity:parking and id=amenity:bicycle_rental must be defined',
)
# Check that rules to affect poi_types to OSM object are using a poi_type.id defined in "poi_types" list.
for r in poi_types_json.get('rules', []):
pt_id = r.get('poi_type_id')
if not pt_id in poi_types_map:
abort(
400,
status="error",
message='Using an undefined POI type id ({}) forbidden in rules'.format(pt_id),
)
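# Illustrative only (added note, not part of the original module): a minimal
# payload satisfying the checks in _validate_poi_types_json above would contain
# the two mandatory ids and rules referencing only defined ids, e.g.
#
#     {
#         "poi_types": [
#             {"id": "amenity:parking", "name": "Parking"},
#             {"id": "amenity:bicycle_rental", "name": "Bike sharing station"}
#         ],
#         "rules": [
#             {"poi_type_id": "amenity:parking"}
#         ]
#     }
#
# The exact fields allowed inside each rule are defined by poi_type_conf_format
# and are not reproduced here.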
class InstancePoiType(flask_restful.Resource):
def get(self, instance_name):
instance = models.Instance.query_existing().filter_by(name=instance_name).first_or_404()
poi_types = '{}'
if instance.poi_type_json and instance.poi_type_json.poi_types_json:
poi_types = instance.poi_type_json.poi_types_json
return json.loads(poi_types)
def post(self, instance_name):
instance = models.Instance.query_existing().filter_by(name=instance_name).first_or_404()
try:
poi_types_json = request.get_json(silent=False)
except:
abort(400, status="error", message='Incorrect json provided')
try:
_validate_poi_types_json(poi_types_json)
poi_types = models.PoiTypeJson(
json.dumps(poi_types_json, ensure_ascii=False).encode('utf-8', 'backslashreplace'), instance
)
db.session.add(poi_types)
db.session.commit()
except Exception:
logging.exception("fail")
raise
return json.loads(instance.poi_type_json.poi_types_json), 200
def delete(self, instance_name):
instance = models.Instance.query_existing().filter_by(name=instance_name).first_or_404()
poi_types = instance.poi_type_json
try:
if poi_types:
db.session.delete(poi_types)
db.session.commit()
except Exception:
logging.exception("fail")
raise
return json.loads('{}'), 204
class AutocompletePoiType(flask_restful.Resource):
def get(self, name):
autocomplete_param = models.AutocompleteParameter.query.filter_by(name=name).first_or_404()
poi_types = '{}'
if autocomplete_param.poi_types_json:
poi_types = autocomplete_param.poi_types_json
return json.loads(poi_types)
def post(self, name):
autocomplete_param = models.AutocompleteParameter.query.filter_by(name=name).first_or_404()
try:
poi_types_json = request.get_json(silent=False)
except:
abort(400, status="error", message='Incorrect json provided')
try:
_validate_poi_types_json(poi_types_json)
autocomplete_param.poi_types_json = json.dumps(poi_types_json, ensure_ascii=False).encode(
'utf-8', 'backslashreplace'
)
db.session.commit()
except Exception:
logging.exception("fail")
raise
return json.loads(autocomplete_param.poi_types_json), 200
def delete(self, name):
autocomplete_param = models.AutocompleteParameter.query.filter_by(name=name).first_or_404()
try:
autocomplete_param.poi_types_json = None
db.session.commit()
except Exception:
logging.exception("fail")
raise
return json.loads('{}'), 204
class Instance(flask_restful.Resource):
def __init__(self):
pass
@marshal_with(instance_fields)
def get(self, id=None, name=None):
parser = reqparse.RequestParser()
parser.add_argument(
'is_free',
type=inputs.boolean,
required=False,
case_sensitive=False,
help='boolean for returning only free or private instances',
)
args = parser.parse_args()
args.update({'id': id, 'name': name})
if any(v is not None for v in args.values()):
return (
models.Instance.query_existing()
.filter_by(**{k: v for k, v in args.items() if v is not None})
.all()
)
else:
return models.Instance.query_existing().all()
def delete(self, id=None, name=None):
instance = models.Instance.get_from_id_or_name(id, name)
try:
instance.discarded = True
db.session.commit()
except Exception:
logging.exception("fail")
raise
return marshal(instance, instance_fields)
def put(self, id=None, name=None):
instance = models.Instance.get_from_id_or_name(id, name)
parser = reqparse.RequestParser()
parser.add_argument(
'scenario',
type=str,
case_sensitive=False,
help='the name of the scenario used by jormungandr',
choices=['new_default', 'distributed'],
location=('json', 'values'),
default=instance.scenario,
)
parser.add_argument(
'journey_order',
type=str,
case_sensitive=False,
help='the sort order of the journeys in jormungandr',
choices=['arrival_time', 'departure_time'],
location=('json', 'values'),
default=instance.journey_order,
)
parser.add_argument(
'max_walking_duration_to_pt',
type=int,
help='the maximum duration of walking in fallback section',
location=('json', 'values'),
default=instance.max_walking_duration_to_pt,
)
parser.add_argument(
'max_bike_duration_to_pt',
type=int,
help='the maximum duration of bike in fallback section',
location=('json', 'values'),
default=instance.max_bike_duration_to_pt,
)
parser.add_argument(
'max_bss_duration_to_pt',
type=int,
help='the maximum duration of bss in fallback section',
location=('json', 'values'),
default=instance.max_bss_duration_to_pt,
)
parser.add_argument(
'max_car_duration_to_pt',
type=int,
help='the maximum duration of car in fallback section',
location=('json', 'values'),
default=instance.max_car_duration_to_pt,
)
parser.add_argument(
'max_car_no_park_duration_to_pt',
type=int,
            help='the maximum duration of car in fallback section when no parking is used',
location=('json', 'values'),
default=instance.max_car_no_park_duration_to_pt,
)
parser.add_argument(
'max_nb_transfers',
type=int,
help='the maximum number of transfers in a journey',
location=('json', 'values'),
default=instance.max_nb_transfers,
)
parser.add_argument(
'walking_speed',
type=float,
help='the walking speed',
location=('json', 'values'),
default=instance.walking_speed,
)
parser.add_argument(
'bike_speed',
type=float,
help='the biking speed',
location=('json', 'values'),
default=instance.bike_speed,
)
parser.add_argument(
'bss_speed',
type=float,
help='the speed of bss',
location=('json', 'values'),
default=instance.bss_speed,
)
parser.add_argument(
'car_speed',
type=float,
help='the speed of car',
location=('json', 'values'),
default=instance.car_speed,
)
parser.add_argument(
'car_no_park_speed',
type=float,
            help='the speed of car when no parking is used',
location=('json', 'values'),
default=instance.car_no_park_speed,
)
parser.add_argument(
'min_bike',
type=int,
help='minimum duration of bike fallback',
location=('json', 'values'),
default=instance.min_bike,
)
parser.add_argument(
'min_bss',
type=int,
help='minimum duration of bss fallback',
location=('json', 'values'),
default=instance.min_bss,
)
parser.add_argument(
'min_car',
type=int,
help='minimum duration of car fallback',
location=('json', 'values'),
default=instance.min_car,
)
parser.add_argument(
'successive_physical_mode_to_limit_id',
type=str,
help='the id of physical_mode to limit succession, as sent by kraken to jormungandr,'
' used by _max_successive_physical_mode rule',
location=('json', 'values'),
default=instance.successive_physical_mode_to_limit_id,
)
parser.add_argument(
'max_duration',
type=int,
help='latest time point of research, in second',
location=('json', 'values'),
default=instance.max_duration,
)
parser.add_argument(
'walking_transfer_penalty',
type=int,
help='transfer penalty, in second',
location=('json', 'values'),
default=instance.walking_transfer_penalty,
)
parser.add_argument(
'night_bus_filter_max_factor',
type=float,
help='night bus filter param',
location=('json', 'values'),
default=instance.night_bus_filter_max_factor,
)
parser.add_argument(
'night_bus_filter_base_factor',
type=int,
help='night bus filter param',
location=('json', 'values'),
default=instance.night_bus_filter_base_factor,
)
parser.add_argument(
'priority',
type=int,
help='instance priority',
location=('json', 'values'),
default=instance.priority,
)
parser.add_argument(
'bss_provider',
type=inputs.boolean,
help='bss provider activation',
location=('json', 'values'),
default=instance.bss_provider,
)
parser.add_argument(
'full_sn_geometries',
type=inputs.boolean,
help='activation of full geometries',
location=('json', 'values'),
default=instance.full_sn_geometries,
)
parser.add_argument(
'is_free',
type=inputs.boolean,
help='instance doesn\'t require authorization to be used',
location=('json', 'values'),
default=instance.is_free,
)
parser.add_argument(
'is_open_data',
type=inputs.boolean,
            help='instance only uses open data',
location=('json', 'values'),
default=instance.is_open_data,
)
parser.add_argument(
'import_stops_in_mimir',
type=inputs.boolean,
help='import stops in global autocomplete',
location=('json', 'values'),
default=instance.import_stops_in_mimir,
)
parser.add_argument(
'import_ntfs_in_mimir',
type=inputs.boolean,
help='import ntfs data in global autocomplete',
location=('json', 'values'),
default=instance.import_ntfs_in_mimir,
)
parser.add_argument(
'min_nb_journeys',
type=int,
help='minimum number of different suggested journeys',
location=('json', 'values'),
default=instance.min_nb_journeys,
)
parser.add_argument(
'max_nb_journeys',
type=int,
required=False,
help='maximum number of different suggested journeys',
location=('json', 'values'),
)
parser.add_argument(
'min_journeys_calls',
type=int,
help='minimum number of calls to kraken',
location=('json', 'values'),
default=instance.min_journeys_calls,
)
parser.add_argument(
'max_successive_physical_mode',
type=int,
required=False,
help='maximum number of successive physical modes in an itinerary',
location=('json', 'values'),
)
parser.add_argument(
'final_line_filter',
type=inputs.boolean,
help='filter on vj using same lines and same stops',
location=('json', 'values'),
default=instance.final_line_filter,
)
parser.add_argument(
'max_extra_second_pass',
type=int,
help='maximum number of second pass to get more itineraries',
location=('json', 'values'),
default=instance.max_extra_second_pass,
)
parser.add_argument(
'max_nb_crowfly_by_mode',
type=dict,
help='maximum nb of crowfly, used before computing the fallback matrix,' ' in distributed scenario',
location=('json', 'values'),
default=instance.max_nb_crowfly_by_mode,
)
parser.add_argument(
'autocomplete_backend',
type=str,
case_sensitive=False,
help='the name of the backend used by jormungandr for the autocompletion',
choices=['kraken', 'bragi'],
location=('json', 'values'),
default=instance.autocomplete_backend,
)
parser.add_argument(
'additional_time_after_first_section_taxi',
type=int,
            help='additional time after the taxi section when used as first section mode',
location=('json', 'values'),
default=instance.additional_time_after_first_section_taxi,
)
parser.add_argument(
'additional_time_before_last_section_taxi',
type=int,
            help='additional time before the taxi section when used as last section mode',
location=('json', 'values'),
default=instance.additional_time_before_last_section_taxi,
)
parser.add_argument(
'max_additional_connections',
type=int,
help='maximum number of connections allowed in journeys',
location=('json', 'values'),
default=instance.max_additional_connections,
)
parser.add_argument(
'car_park_provider',
type=inputs.boolean,
help='boolean to activate / deactivate call to car parking provider',
location=('json', 'values'),
default=instance.car_park_provider,
)
args = parser.parse_args()
try:
def map_args_to_instance(attr_name):
setattr(instance, attr_name, args[attr_name])
map(
map_args_to_instance,
[
'scenario',
'journey_order',
'max_walking_duration_to_pt',
'max_bike_duration_to_pt',
'max_bss_duration_to_pt',
'max_car_duration_to_pt',
'max_car_no_park_duration_to_pt',
'max_nb_transfers',
'walking_speed',
'bike_speed',
'bss_speed',
'car_speed',
'car_no_park_speed',
'min_bike',
'min_bss',
'min_car',
'max_duration',
'walking_transfer_penalty',
'night_bus_filter_max_factor',
'night_bus_filter_base_factor',
'successive_physical_mode_to_limit_id',
'priority',
'bss_provider',
'full_sn_geometries',
'is_free',
'is_open_data',
'import_stops_in_mimir',
'import_ntfs_in_mimir',
'min_nb_journeys',
'max_nb_journeys',
'min_journeys_calls',
'max_successive_physical_mode',
'final_line_filter',
'max_extra_second_pass',
'autocomplete_backend',
'additional_time_after_first_section_taxi',
'additional_time_before_last_section_taxi',
'max_additional_connections',
'car_park_provider',
],
)
max_nb_crowfly_by_mode = args.get('max_nb_crowfly_by_mode')
import copy
new = copy.deepcopy(instance.max_nb_crowfly_by_mode)
new.update(max_nb_crowfly_by_mode)
instance.max_nb_crowfly_by_mode = new
db.session.commit()
except Exception:
logging.exception("fail")
raise
return marshal(instance, instance_fields)
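# Illustrative only (added note, not part of the original module): the PUT
# handler above accepts any subset of the parameters it declares, so a body such
# as the following would update a coverage (the endpoint path depends on the URL
# routing, which is configured elsewhere and not shown here):
#
#     {"scenario": "distributed", "max_nb_transfers": 5, "walking_speed": 1.11}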
class User(flask_restful.Resource):
def get(self, user_id=None):
parser = reqparse.RequestParser()
parser.add_argument(
'disable_geojson', type=inputs.boolean, default=True, help='remove geojson from the response'
)
if user_id:
args = parser.parse_args()
g.disable_geojson = args['disable_geojson']
user = models.User.query.get_or_404(user_id)
return marshal(user, user_fields_full)
else:
parser.add_argument('login', type=unicode, required=False, case_sensitive=False, help='login')
parser.add_argument('email', type=unicode, required=False, case_sensitive=False, help='email')
parser.add_argument('key', type=unicode, required=False, case_sensitive=False, help='key')
parser.add_argument('end_point_id', type=int)
parser.add_argument('block_until', type=datetime_format, required=False, case_sensitive=False)
args = parser.parse_args()
g.disable_geojson = args['disable_geojson']
if args['key']:
logging.debug(args['key'])
users = models.User.get_from_token(args['key'], datetime.now())
return marshal(users, user_fields)
else:
del args['disable_geojson']
# dict comprehension would be better, but it's not in python 2.6
filter_params = dict((k, v) for k, v in args.items() if v)
if filter_params:
users = models.User.query.filter_by(**filter_params).all()
return marshal(users, user_fields)
else:
users = models.User.query.all()
return marshal(users, user_fields)
def post(self):
user = None
parser = reqparse.RequestParser()
parser.add_argument(
'login',
type=unicode,
required=True,
case_sensitive=False,
help='login is required',
location=('json', 'values'),
)
parser.add_argument(
'email',
type=unicode,
required=True,
case_sensitive=False,
help='email is required',
location=('json', 'values'),
)
parser.add_argument(
'block_until',
type=datetime_format,
required=False,
help='end block date access',
location=('json', 'values'),
)
parser.add_argument(
'end_point_id', type=int, required=False, help='id of the end_point', location=('json', 'values')
)
parser.add_argument(
'billing_plan_id',
type=int,
required=False,
help='id of the billing_plan',
location=('json', 'values'),
)
parser.add_argument(
'type',
type=str,
required=False,
default='with_free_instances',
help='type of user: [with_free_instances, without_free_instances, super_user]',
location=('json', 'values'),
choices=['with_free_instances', 'without_free_instances', 'super_user'],
)
parser.add_argument('shape', type=geojson_argument, required=False, location=('json', 'values'))
parser.add_argument('default_coord', type=CoordFormat(), required=False, location=('json', 'values'))
args = parser.parse_args()
if not validate_email(
args['email'],
check_mx=current_app.config['EMAIL_CHECK_MX'],
verify=current_app.config['EMAIL_CHECK_SMTP'],
):
return ({'error': 'email invalid'}, 400)
end_point = None
if args['end_point_id']:
end_point = models.EndPoint.query.get(args['end_point_id'])
else:
end_point = models.EndPoint.get_default()
if not end_point:
return ({'error': 'end_point doesn\'t exist'}, 400)
if args['billing_plan_id']:
billing_plan = models.BillingPlan.query.get(args['billing_plan_id'])
else:
billing_plan = models.BillingPlan.get_default(end_point)
if not billing_plan:
return ({'error': 'billing plan doesn\'t exist'}, 400)
try:
user = models.User(login=args['login'], email=args['email'], block_until=args['block_until'])
user.type = args['type']
user.end_point = end_point
user.billing_plan = billing_plan
user.shape = ujson.dumps(args['shape'])
user.default_coord = args['default_coord']
db.session.add(user)
db.session.commit()
tyr_user_event = TyrUserEvent()
tyr_user_event.request(user, "create_user")
return marshal(user, user_fields_full)
except (sqlalchemy.exc.IntegrityError, sqlalchemy.orm.exc.FlushError):
return ({'error': 'duplicate user'}, 409)
except Exception:
logging.exception("fail")
raise
def put(self, user_id):
user = models.User.query.get_or_404(user_id)
parser = reqparse.RequestParser()
parser.add_argument(
'login',
type=unicode,
required=False,
default=user.login,
case_sensitive=False,
help='user identifier',
location=('json', 'values'),
)
parser.add_argument(
'email',
type=unicode,
required=False,
default=user.email,
case_sensitive=False,
help='email is required',
location=('json', 'values'),
)
parser.add_argument(
'type',
type=str,
required=False,
default=user.type,
location=('json', 'values'),
help='type of user: [with_free_instances, without_free_instances, super_user]',
choices=['with_free_instances', 'without_free_instances', 'super_user'],
)
parser.add_argument(
'end_point_id',
type=int,
default=user.end_point_id,
help='id of the end_point',
location=('json', 'values'),
)
parser.add_argument(
'block_until',
type=datetime_format,
required=False,
help='block until argument is not correct',
location=('json', 'values'),
)
parser.add_argument(
'billing_plan_id',
type=int,
default=user.billing_plan_id,
            help='id of the billing_plan',
location=('json', 'values'),
)
parser.add_argument(
'shape',
type=geojson_argument,
default=ujson.loads(user.shape),
required=False,
location=('json', 'values'),
)
parser.add_argument('default_coord', type=CoordFormat(), required=False, location=('json', 'values'))
args = parser.parse_args()
if not validate_email(
args['email'],
check_mx=current_app.config['EMAIL_CHECK_MX'],
verify=current_app.config['EMAIL_CHECK_SMTP'],
):
return ({'error': 'email invalid'}, 400)
end_point = models.EndPoint.query.get(args['end_point_id'])
billing_plan = models.BillingPlan.query.get_or_404(args['billing_plan_id'])
if not end_point:
return ({'error': 'end_point doesn\'t exist'}, 400)
if not billing_plan:
return ({'error': 'billing_plan doesn\'t exist'}, 400)
        # If the user gives the empty object, we don't change the
        # shape. This is because the empty object can be output by
        # GET to express "there is a shape, but I don't show it to you
        # as you don't care". We want feeding the result of GET back into
        # PUT to change nothing, which explains this strangeness.
if args['shape'] == {}:
args['shape'] = ujson.loads(user.shape)
try:
last_login = user.login
user.email = args['email']
user.login = args['login']
user.type = args['type']
user.block_until = args['block_until']
user.end_point = end_point
user.billing_plan = billing_plan
user.shape = ujson.dumps(args['shape'])
user.default_coord = args['default_coord']
db.session.commit()
tyr_user_event = TyrUserEvent()
tyr_user_event.request(user, "update_user", last_login)
return marshal(user, user_fields_full)
except (sqlalchemy.exc.IntegrityError, sqlalchemy.orm.exc.FlushError):
return ({'error': 'duplicate user'}, 409) # Conflict
except Exception:
logging.exception("fail")
raise
def delete(self, user_id):
user = models.User.query.get_or_404(user_id)
try:
db.session.delete(user)
db.session.commit()
tyr_user_event = TyrUserEvent()
tyr_user_event.request(user, "delete_user")
except Exception:
logging.exception("fail")
raise
return ({}, 204)
class Key(flask_restful.Resource):
def __init__(self):
pass
@marshal_with(key_fields)
def get(self, user_id, key_id=None):
try:
return models.User.query.get_or_404(user_id).keys.all()
except Exception:
logging.exception("fail")
raise
@marshal_with(user_fields_full)
def post(self, user_id):
parser = reqparse.RequestParser()
parser.add_argument(
'valid_until',
type=inputs.date,
required=False,
help='end validity date of the key',
location=('json', 'values'),
)
parser.add_argument(
'app_name',
type=str,
required=True,
help='app name associated to this key',
location=('json', 'values'),
)
args = parser.parse_args()
user = models.User.query.get_or_404(user_id)
try:
user.add_key(args['app_name'], valid_until=args['valid_until'])
db.session.commit()
except Exception:
logging.exception("fail")
raise
return user
@marshal_with(user_fields_full)
def delete(self, user_id, key_id):
user = models.User.query.get_or_404(user_id)
try:
key = user.keys.filter_by(id=key_id).first()
if not key:
abort(404)
db.session.delete(key)
db.session.commit()
except Exception:
logging.exception("fail")
raise
return user
@marshal_with(user_fields_full)
def put(self, user_id, key_id):
parser = reqparse.RequestParser()
parser.add_argument(
'valid_until',
type=inputs.date,
required=False,
help='end validity date of the key',
location=('json', 'values'),
)
parser.add_argument(
'app_name',
type=str,
required=True,
help='app name associated to this key',
location=('json', 'values'),
)
args = parser.parse_args()
user = models.User.query.get_or_404(user_id)
try:
key = user.keys.filter_by(id=key_id).first()
if not key:
abort(404)
if args['valid_until']:
key.valid_until = args['valid_until']
key.app_name = args['app_name']
db.session.commit()
except Exception:
logging.exception("fail")
raise
return user
class Authorization(flask_restful.Resource):
def __init__(self):
pass
def delete(self, user_id):
parser = reqparse.RequestParser()
parser.add_argument(
'api_id', type=int, required=True, help='api_id is required', location=('json', 'values')
)
parser.add_argument(
'instance_id', type=int, required=True, help='instance_id is required', location=('json', 'values')
)
args = parser.parse_args()
try:
user = models.User.query.get_or_404(user_id)
authorizations = [
a
for a in user.authorizations
if a.api_id == args['api_id'] and a.instance_id == args['instance_id']
]
if not authorizations:
abort(404)
for authorization in authorizations:
db.session.delete(authorization)
db.session.commit()
except Exception:
logging.exception("fail")
raise
return marshal(user, user_fields_full)
def post(self, user_id):
parser = reqparse.RequestParser()
parser.add_argument(
'api_id', type=int, required=True, help='api_id is required', location=('json', 'values')
)
parser.add_argument(
'instance_id', type=int, required=True, help='instance_id is required', location=('json', 'values')
)
args = parser.parse_args()
user = models.User.query.get_or_404(user_id)
api = models.Api.query.get_or_404(args['api_id'])
instance = models.Instance.query.get_or_404(args['instance_id'])
try:
authorization = models.Authorization()
authorization.user = user
authorization.api = api
authorization.instance = instance
user.authorizations.append(authorization)
db.session.add(authorization)
db.session.commit()
except (sqlalchemy.exc.IntegrityError, sqlalchemy.orm.exc.FlushError):
return ({'error': 'duplicate entry'}, 409)
except Exception:
logging.exception("fail")
raise
return marshal(user, user_fields_full)
class EndPoint(flask_restful.Resource):
@marshal_with(end_point_fields)
def get(self):
return models.EndPoint.query.all()
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('name', type=unicode, required=True, help='name of the endpoint', location=('json'))
args = parser.parse_args()
try:
end_point = models.EndPoint()
end_point.name = args['name']
if 'hostnames' in request.json:
for host in request.json['hostnames']:
end_point.hosts.append(models.Host(host))
db.session.add(end_point)
db.session.commit()
tyr_end_point_event = EndPointEventMessage(EndPointEventMessage.CREATE, end_point)
tyr_events_rabbit_mq = TyrEventsRabbitMq()
tyr_events_rabbit_mq.request(tyr_end_point_event)
except (sqlalchemy.exc.IntegrityError, sqlalchemy.orm.exc.FlushError) as e:
return ({'error': str(e)}, 409)
except Exception:
logging.exception("fail")
raise
return marshal(end_point, end_point_fields)
def put(self, id):
end_point = models.EndPoint.query.get_or_404(id)
parser = reqparse.RequestParser()
parser.add_argument(
'name', type=unicode, default=end_point.name, help='name of the endpoint', location=('json')
)
args = parser.parse_args()
try:
old_name = end_point.name
end_point.name = args['name']
if 'hostnames' in request.json:
end_point.hosts = []
for host in request.json['hostnames']:
end_point.hosts.append(models.Host(host))
db.session.commit()
tyr_end_point_event = EndPointEventMessage(EndPointEventMessage.UPDATE, end_point, old_name)
tyr_events_rabbit_mq = TyrEventsRabbitMq()
tyr_events_rabbit_mq.request(tyr_end_point_event)
except (sqlalchemy.exc.IntegrityError, sqlalchemy.orm.exc.FlushError) as e:
return ({'error': str(e)}, 409)
except Exception:
logging.exception("fail")
raise
return marshal(end_point, end_point_fields)
def delete(self, id):
end_point = models.EndPoint.query.get_or_404(id)
try:
db.session.delete(end_point)
db.session.commit()
tyr_end_point_event = EndPointEventMessage(EndPointEventMessage.DELETE, end_point)
tyr_events_rabbit_mq = TyrEventsRabbitMq()
tyr_events_rabbit_mq.request(tyr_end_point_event)
except Exception:
logging.exception("fail")
raise
return ({}, 204)
class TravelerProfile(flask_restful.Resource):
"""
    Traveler profile API for creating, updating and removing traveler profiles
"""
def __init__(self):
# fallback modes
fb_modes = ['walking', 'car', 'bss', 'bike', 'ridesharing']
parser = reqparse.RequestParser()
parser.add_argument('walking_speed', type=PositiveFloat(), required=False, location=('json', 'values'))
parser.add_argument('bike_speed', type=PositiveFloat(), required=False, location=('json', 'values'))
parser.add_argument('bss_speed', type=PositiveFloat(), required=False, location=('json', 'values'))
parser.add_argument('car_speed', type=PositiveFloat(), required=False, location=('json', 'values'))
parser.add_argument('wheelchair', type=BooleanType(), required=False, location=('json', 'values'))
parser.add_argument(
'max_walking_duration_to_pt',
type=PositiveFloat(),
required=False,
help='in second',
location=('json', 'values'),
)
parser.add_argument(
'max_bike_duration_to_pt',
type=PositiveFloat(),
required=False,
help='in second',
location=('json', 'values'),
)
parser.add_argument(
'max_bss_duration_to_pt',
type=PositiveFloat(),
required=False,
help='in second',
location=('json', 'values'),
)
parser.add_argument(
'max_car_duration_to_pt',
type=PositiveFloat(),
required=False,
help='in second',
location=('json', 'values'),
)
parser.add_argument(
'first_section_mode[]',
type=OptionValue(fb_modes),
case_sensitive=False,
required=False,
action='append',
dest='first_section_mode',
location='values',
)
parser.add_argument(
'last_section_mode[]',
type=OptionValue(fb_modes),
case_sensitive=False,
required=False,
action='append',
dest='last_section_mode',
location='values',
)
# flask parser returns a list for first_section_mode and last_section_mode
parser.add_argument(
'first_section_mode', type=OptionValue(fb_modes), action='append', required=False, location='json'
)
parser.add_argument(
'last_section_mode', type=OptionValue(fb_modes), action='append', required=False, location='json'
)
self.args = parser.parse_args()
def check_resources(f):
@wraps(f)
def wrapper(*args, **kwds):
tp = kwds.get('traveler_type')
if tp in acceptable_traveler_types:
return f(*args, **kwds)
return (
{'error': 'traveler profile: {0} is not one of in {1}'.format(tp, acceptable_traveler_types)},
400,
)
return wrapper
@marshal_with(traveler_profile)
@check_resources
def get(self, name=None, traveler_type=None):
try:
traveler_profiles = []
# If traveler_type is not specified, we return all existent traveler profiles of this instance
if traveler_type is None:
traveler_profiles += models.TravelerProfile.get_all_by_coverage(coverage=name)
else:
profile = models.TravelerProfile.get_by_coverage_and_type(
coverage=name, traveler_type=traveler_type
)
if profile:
traveler_profiles.append(profile)
if traveler_profiles:
return traveler_profiles
            return {'error': 'No matching traveler profiles found in db'}, 404
except (sqlalchemy.exc.IntegrityError, sqlalchemy.orm.exc.FlushError) as e:
return {'error': str(e)}, 409
except Exception:
logging.exception("fail")
raise
@marshal_with(traveler_profile)
@check_resources
def post(self, name=None, traveler_type=None):
try:
instance = models.Instance.get_by_name(name)
if instance is None:
return {'error': "Coverage: {0} doesn't exist".format(name)}
profile = models.TravelerProfile()
profile.coverage_id = instance.id
for (attr, default_value) in default_traveler_profile_params[traveler_type].iteritems():
# override hardcoded values by args if args are not None
value = default_value if self.args.get(attr) is None else self.args.get(attr)
setattr(profile, attr, value)
db.session.add(profile)
db.session.commit()
return profile
except (sqlalchemy.exc.IntegrityError, sqlalchemy.orm.exc.FlushError) as e:
return {'error': str(e)}, 409
except Exception:
logging.exception("fail")
raise
@marshal_with(traveler_profile)
@check_resources
def put(self, name=None, traveler_type=None):
profile = models.TravelerProfile.get_by_coverage_and_type(name, traveler_type)
if profile is None:
            return {'error': 'No profile found to update'}, 404
try:
for (attr, args_value) in self.args.iteritems():
# override hardcoded values by args if args are not None
if args_value is not None:
setattr(profile, attr, args_value)
db.session.commit()
return profile
except (sqlalchemy.exc.IntegrityError, sqlalchemy.orm.exc.FlushError) as e:
return {'error': str(e)}, 409
except Exception:
logging.exception("fail")
raise
@check_resources
def delete(self, name=None, traveler_type=None):
profile = models.TravelerProfile.get_by_coverage_and_type(name, traveler_type)
if profile is None:
return (
{'error': 'Instance: {0} has no such profile: {1} in db to delete'.format(name, traveler_type)},
400,
)
try:
db.session.delete(profile)
db.session.commit()
return '', 204
except sqlalchemy.orm.exc.FlushError as e:
return {'error': str(e)}, 409
except sqlalchemy.orm.exc.UnmappedInstanceError:
return {'error': 'no such profile in db to delete'}, 400
except Exception:
logging.exception("fail")
raise
class BillingPlan(flask_restful.Resource):
def get(self, billing_plan_id=None):
if billing_plan_id:
billing_plan = models.BillingPlan.query.get_or_404(billing_plan_id)
return marshal(billing_plan, billing_plan_fields_full)
else:
billing_plans = models.BillingPlan.query.all()
return marshal(billing_plans, billing_plan_fields_full)
def post(self):
parser = reqparse.RequestParser()
parser.add_argument(
'name',
type=unicode,
required=True,
case_sensitive=False,
help='name is required',
location=('json', 'values'),
)
parser.add_argument(
'max_request_count',
type=int,
required=False,
help='max request count for this billing plan',
location=('json', 'values'),
)
parser.add_argument(
'max_object_count',
type=int,
required=False,
help='max object count for this billing plan',
location=('json', 'values'),
)
parser.add_argument(
'default',
type=bool,
required=False,
default=True,
help='if this plan is the default one',
location=('json', 'values'),
)
parser.add_argument(
'end_point_id', type=int, required=False, help='id of the end_point', location=('json', 'values')
)
args = parser.parse_args()
if args['end_point_id']:
end_point = models.EndPoint.query.get(args['end_point_id'])
else:
end_point = models.EndPoint.get_default()
if not end_point:
return ({'error': 'end_point doesn\'t exist'}, 400)
try:
billing_plan = models.BillingPlan(
name=args['name'],
max_request_count=args['max_request_count'],
max_object_count=args['max_object_count'],
default=args['default'],
)
billing_plan.end_point = end_point
db.session.add(billing_plan)
db.session.commit()
return marshal(billing_plan, billing_plan_fields_full)
except Exception:
logging.exception("fail")
raise
def put(self, billing_plan_id=None):
billing_plan = models.BillingPlan.query.get_or_404(billing_plan_id)
parser = reqparse.RequestParser()
parser.add_argument(
'name',
type=unicode,
required=False,
default=billing_plan.name,
case_sensitive=False,
location=('json', 'values'),
)
parser.add_argument(
'max_request_count',
type=int,
required=False,
default=billing_plan.max_request_count,
help='max request count for this billing plan',
location=('json', 'values'),
)
parser.add_argument(
'max_object_count',
type=int,
required=False,
default=billing_plan.max_object_count,
help='max object count for this billing plan',
location=('json', 'values'),
)
parser.add_argument(
'default',
type=bool,
required=False,
default=billing_plan.default,
help='if this plan is the default one',
location=('json', 'values'),
)
parser.add_argument(
'end_point_id',
type=int,
default=billing_plan.end_point_id,
help='id of the end_point',
location=('json', 'values'),
)
args = parser.parse_args()
end_point = models.EndPoint.query.get(args['end_point_id'])
if not end_point:
return ({'error': 'end_point doesn\'t exist'}, 400)
try:
billing_plan.name = args['name']
billing_plan.max_request_count = args['max_request_count']
billing_plan.max_object_count = args['max_object_count']
billing_plan.default = args['default']
billing_plan.end_point = end_point
db.session.commit()
return marshal(billing_plan, billing_plan_fields_full)
except Exception:
logging.exception("fail")
raise
def delete(self, billing_plan_id=None):
billing_plan = models.BillingPlan.query.get_or_404(billing_plan_id)
try:
db.session.delete(billing_plan)
db.session.commit()
except (sqlalchemy.exc.IntegrityError, sqlalchemy.orm.exc.FlushError):
return ({'error': 'billing_plan used'}, 409) # Conflict
except Exception:
logging.exception("fail")
raise
return ({}, 204)
class AutocompleteParameter(flask_restful.Resource):
def get(self, name=None):
if name:
autocomplete_param = models.AutocompleteParameter.query.filter_by(name=name).first_or_404()
return marshal(autocomplete_param, autocomplete_parameter_fields)
else:
autocomplete_params = models.AutocompleteParameter.query.all()
return marshal(autocomplete_params, autocomplete_parameter_fields)
def post(self):
parser = reqparse.RequestParser()
parser.add_argument(
'name',
type=unicode,
required=True,
case_sensitive=False,
help='name is required',
location=('json', 'values'),
)
parser.add_argument(
'street',
type=str,
required=False,
default='OSM',
help='source for street: [OSM]',
location=('json', 'values'),
choices=utils.street_source_types,
)
parser.add_argument(
'address',
type=str,
required=False,
default='BANO',
help='source for address: [BANO, OpenAddresses]',
location=('json', 'values'),
choices=utils.address_source_types,
)
parser.add_argument(
'poi',
type=str,
required=False,
default='OSM',
help='source for poi: [FUSIO, OSM]',
location=('json', 'values'),
choices=utils.poi_source_types,
)
parser.add_argument(
'admin',
type=str,
required=False,
default='OSM',
help='source for admin: {}'.format(utils.admin_source_types),
location=('json', 'values'),
choices=utils.admin_source_types,
)
parser.add_argument('admin_level', type=int, action='append', required=False)
args = parser.parse_args()
try:
autocomplete_parameter = models.AutocompleteParameter()
autocomplete_parameter.name = args['name']
autocomplete_parameter.street = args['street']
autocomplete_parameter.address = args['address']
autocomplete_parameter.poi = args['poi']
autocomplete_parameter.admin = args['admin']
autocomplete_parameter.admin_level = args['admin_level']
db.session.add(autocomplete_parameter)
db.session.commit()
create_autocomplete_depot.delay(autocomplete_parameter.name)
except (sqlalchemy.exc.IntegrityError, sqlalchemy.orm.exc.FlushError):
return ({'error': 'duplicate name'}, 409)
except Exception:
logging.exception("fail")
raise
return marshal(autocomplete_parameter, autocomplete_parameter_fields)
def put(self, name=None):
autocomplete_param = models.AutocompleteParameter.query.filter_by(name=name).first_or_404()
parser = reqparse.RequestParser()
parser.add_argument(
'street',
type=str,
required=False,
default=autocomplete_param.street,
help='source for street: {}'.format(utils.street_source_types),
location=('json', 'values'),
choices=utils.street_source_types,
)
parser.add_argument(
'address',
type=str,
required=False,
default=autocomplete_param.address,
help='source for address: {}'.format(utils.address_source_types),
location=('json', 'values'),
choices=utils.address_source_types,
)
parser.add_argument(
'poi',
type=str,
required=False,
default=autocomplete_param.poi,
help='source for poi: {}'.format(utils.poi_source_types),
location=('json', 'values'),
choices=utils.poi_source_types,
)
parser.add_argument(
'admin',
type=str,
required=False,
default=autocomplete_param.admin,
help='source for admin: {}'.format(utils.admin_source_types),
location=('json', 'values'),
choices=utils.admin_source_types,
)
parser.add_argument(
'admin_level', type=int, action='append', required=False, default=autocomplete_param.admin_level
)
args = parser.parse_args()
try:
autocomplete_param.street = args['street']
autocomplete_param.address = args['address']
autocomplete_param.poi = args['poi']
autocomplete_param.admin = args['admin']
autocomplete_param.admin_level = args['admin_level']
db.session.commit()
create_autocomplete_depot.delay(autocomplete_param.name)
except Exception:
logging.exception("fail")
raise
return marshal(autocomplete_param, autocomplete_parameter_fields)
def delete(self, name=None):
autocomplete_param = models.AutocompleteParameter.query.filter_by(name=name).first_or_404()
try:
remove_autocomplete_depot.delay(name)
db.session.delete(autocomplete_param)
db.session.commit()
except Exception:
logging.exception("fail")
raise
return ({}, 204)
class InstanceDataset(flask_restful.Resource):
def get(self, instance_name):
parser = reqparse.RequestParser()
parser.add_argument(
'count',
type=int,
required=False,
help='number of last dataset to dump per type',
location=('json', 'values'),
default=1,
)
args = parser.parse_args()
instance = models.Instance.get_by_name(instance_name)
datasets = instance.last_datasets(args['count'])
return marshal(datasets, dataset_field)
class AutocompleteDataset(flask_restful.Resource):
def get(self, ac_instance_name):
parser = reqparse.RequestParser()
parser.add_argument(
'count',
type=int,
required=False,
help='number of last dataset to dump per type',
location=('json', 'values'),
default=1,
)
args = parser.parse_args()
instance = models.AutocompleteParameter.query.filter_by(name=ac_instance_name).first_or_404()
datasets = instance.last_datasets(args['count'])
return marshal(datasets, dataset_field)
class AutocompleteUpdateData(flask_restful.Resource):
def post(self, ac_instance_name):
instance = models.AutocompleteParameter.query.filter_by(name=ac_instance_name).first_or_404()
if not request.files:
return marshal({'error': {'message': 'the Data file is missing'}}, error_fields), 400
content = request.files['file']
logger = get_instance_logger(instance)
logger.info('content received: %s', content)
filename = save_in_tmp(content)
_, job = import_autocomplete([filename], instance)
job = models.db.session.merge(job) # reatache the object
return marshal({'job': job}, one_job_fields), 200
class DeleteDataset(flask_restful.Resource):
def delete(self, instance_name, type):
instance = models.Instance.get_by_name(instance_name)
if instance:
res = instance.delete_dataset(_type=type)
if res:
return_msg = 'All {} datasets deleted for instance {}'.format(type, instance_name)
else:
return_msg = 'No {} dataset to be deleted for instance {}'.format(type, instance_name)
return_status = 200
else:
return_msg = "No instance found for : {}".format(instance_name)
return_status = 404
return {'action': return_msg}, return_status
class MigrateFromPoiToOsm(flask_restful.Resource):
def put(self, instance_name):
instance = models.Instance.get_by_name(instance_name)
if instance:
instance_conf = load_instance_config(instance_name)
connection_string = "postgres://{u}:{pw}@{h}:{port}/{db}".format(
u=instance_conf.pg_username,
pw=instance_conf.pg_password,
h=instance_conf.pg_host,
db=instance_conf.pg_dbname,
port=instance_conf.pg_port,
)
engine = sqlalchemy.create_engine(connection_string)
engine.execute("""UPDATE navitia.parameters SET parse_pois_from_osm = TRUE""").close()
return_msg = 'Parameter parse_pois_from_osm activated'
return_status = 200
else:
return_msg = "No instance found for : {}".format(instance_name)
return_status = 404
return {'action': return_msg}, return_status
def check_db():
cities_db = sqlalchemy.create_engine(current_app.config['CITIES_DATABASE_URI'])
try:
cities_db.connect()
result = cities_db.execute("SELECT version_num FROM alembic_version")
for row in result:
return row['version_num']
except Exception as e:
logging.exception("cities db not created : {}".format(e.message))
return None
class CitiesStatus(flask_restful.Resource):
def get(self):
if not current_app.config['CITIES_DATABASE_URI']:
return {'message': 'cities db not configured'}, 404
msg = check_db()
if msg:
return {'message': 'cities db alembic version = {}'.format(msg)}, 200
else:
return {'message': 'cities db not reachable'}, 404
class Cities(flask_restful.Resource):
def post(self):
if not check_db():
return {'message': 'cities db not reachable'}, 404
parser = reqparse.RequestParser()
parser.add_argument('file', type=werkzeug.FileStorage, location='files')
args = parser.parse_args()
if not args['file']:
logging.info("No file provided")
return {'message': 'No file provided'}, 400
f = args['file']
file_name = f.filename
file_path = str(os.path.join(os.path.abspath(current_app.config['CITIES_OSM_FILE_PATH']), file_name))
f.save(file_path)
logging.info("file: {}".format(f))
if COSMOGONY_REGEXP.match(file_name):
# it's a cosmogony file, we import it with cosmogony2cities
cosmogony2cities.delay(file_path)
else:
# we import it the 'old' way, with cities
cities.delay(file_path)
return {'message': 'OK'}, 200
class BssProvider(flask_restful.Resource):
@marshal_with(bss_provider_list_fields)
def get(self, id=None):
if id:
try:
return {'bss_providers': [models.BssProvider.find_by_id(id)]}
except sqlalchemy.orm.exc.NoResultFound:
return {'bss_providers': []}, 404
else:
return {'bss_providers': models.BssProvider.all()}
def post(self, id=None):
if not id:
abort(400, status="error", message='id is required')
try:
input_json = request.get_json(force=True, silent=False)
# TODO validate input
except BadRequest:
abort(400, status="error", message='Incorrect json provided')
provider = models.BssProvider(id, input_json)
try:
models.db.session.add(provider)
models.db.session.commit()
except sqlalchemy.exc.IntegrityError as ex:
abort(400, status="error", message=str(ex))
return marshal(provider, bss_provider_fields), 201
def put(self, id=None):
if not id:
abort(400, status="error", message='id is required')
try:
input_json = request.get_json(force=True, silent=False)
# TODO validate input
except BadRequest:
abort(400, status="error", message='Incorrect json provided')
try:
provider = models.BssProvider.find_by_id(id)
status = 200
except sqlalchemy.orm.exc.NoResultFound:
provider = models.BssProvider(id)
models.db.session.add(provider)
status = 201
provider.from_json(input_json)
try:
models.db.session.commit()
except sqlalchemy.exc.IntegrityError as ex:
abort(400, status="error", message=str(ex))
return marshal(provider, bss_provider_fields), status
def delete(self, id=None):
if not id:
abort(400, status="error", message='id is required')
try:
provider = models.BssProvider.find_by_id(id)
provider.discarded = True
models.db.session.commit()
return None, 204
except sqlalchemy.orm.exc.NoResultFound:
abort(404, status="error", message='object not found')
| agpl-3.0 | -2,032,189,975,541,319,000 | 34.818994 | 113 | 0.559392 | false |
mvaled/sentry | src/sentry/models/projectoption.py | 1 | 4011 | from __future__ import absolute_import, print_function
from celery.signals import task_postrun
from django.core.signals import request_finished
from django.db import models
from sentry import projectoptions
from sentry.db.models import Model, FlexibleForeignKey, sane_repr
from sentry.db.models.fields import EncryptedPickledObjectField
from sentry.db.models.manager import BaseManager
from sentry.utils.cache import cache
class ProjectOptionManager(BaseManager):
def __init__(self, *args, **kwargs):
super(ProjectOptionManager, self).__init__(*args, **kwargs)
self.__cache = {}
def __getstate__(self):
d = self.__dict__.copy()
        # we can't serialize weakrefs
d.pop("_ProjectOptionManager__cache", None)
return d
def __setstate__(self, state):
self.__dict__.update(state)
self.__cache = {}
def _make_key(self, instance_id):
assert instance_id
return "%s:%s" % (self.model._meta.db_table, instance_id)
def get_value_bulk(self, instances, key):
instance_map = dict((i.id, i) for i in instances)
queryset = self.filter(project__in=instances, key=key)
result = dict((i, None) for i in instances)
for obj in queryset:
result[instance_map[obj.project_id]] = obj.value
return result
def get_value(self, project, key, default=None, validate=None):
result = self.get_all_values(project)
if key in result:
if validate is None or validate(result[key]):
return result[key]
if default is None:
well_known_key = projectoptions.lookup_well_known_key(key)
if well_known_key is not None:
return well_known_key.get_default(project)
return default
def unset_value(self, project, key):
self.filter(project=project, key=key).delete()
self.reload_cache(project.id)
def set_value(self, project, key, value):
inst, created = self.create_or_update(project=project, key=key, values={"value": value})
self.reload_cache(project.id)
return created or inst > 0
def get_all_values(self, project):
if isinstance(project, models.Model):
project_id = project.id
else:
project_id = project
if project_id not in self.__cache:
cache_key = self._make_key(project_id)
result = cache.get(cache_key)
if result is None:
result = self.reload_cache(project_id)
else:
self.__cache[project_id] = result
return self.__cache.get(project_id, {})
def clear_local_cache(self, **kwargs):
self.__cache = {}
def reload_cache(self, project_id):
cache_key = self._make_key(project_id)
result = dict((i.key, i.value) for i in self.filter(project=project_id))
cache.set(cache_key, result)
self.__cache[project_id] = result
return result
def post_save(self, instance, **kwargs):
self.reload_cache(instance.project_id)
def post_delete(self, instance, **kwargs):
self.reload_cache(instance.project_id)
def contribute_to_class(self, model, name):
super(ProjectOptionManager, self).contribute_to_class(model, name)
task_postrun.connect(self.clear_local_cache)
request_finished.connect(self.clear_local_cache)
class ProjectOption(Model):
"""
Project options apply only to an instance of a project.
Options which are specific to a plugin should namespace
their key. e.g. key='myplugin:optname'
"""
__core__ = True
project = FlexibleForeignKey("sentry.Project")
key = models.CharField(max_length=64)
value = EncryptedPickledObjectField()
objects = ProjectOptionManager()
class Meta:
app_label = "sentry"
db_table = "sentry_projectoptions"
unique_together = (("project", "key"),)
__repr__ = sane_repr("project_id", "key", "value")
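# Illustrative sketch of the manager's read-through caching, assuming `project`
# is a saved Project instance: set_value() persists the row and refreshes the
# per-project cache, so the following get_value() is served from that cache (or
# from a registered well-known default / the given default when the key is absent).
def _example_option_round_trip(project):
    ProjectOption.objects.set_value(project, "myplugin:enabled", True)
    return ProjectOption.objects.get_value(project, "myplugin:enabled", default=False)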
| bsd-3-clause | -3,860,437,345,849,938,000 | 32.705882 | 96 | 0.629519 | false |
FIWARE-TMForum/business-ecosystem-charging-backend | src/wstore/store_commons/authentication.py | 1 | 2520 | # -*- coding: utf-8 -*-
# Copyright (c) 2013 - 2015 CoNWeT Lab., Universidad Politécnica de Madrid
# This file belongs to the business-charging-backend
# of the Business API Ecosystem.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.contrib.auth import logout as django_logout
from django.http import HttpResponseRedirect
from django.conf import settings
from wstore.store_commons.utils.http import build_response
from wstore.store_commons.utils.url import add_slash
class Http403(Exception):
pass
def logout(request):
django_logout(request)
response = None
if settings.PORTALINSTANCE:
# Check if the logout request is originated in a different domain
if 'HTTP_ORIGIN' in request.META:
origin = request.META['HTTP_ORIGIN']
origin = add_slash(origin)
from wstore.views import ACCOUNT_PORTAL_URL, CLOUD_PORTAL_URL, MASHUP_PORTAL_URL, DATA_PORTAL_URL
allowed_origins = [
add_slash(ACCOUNT_PORTAL_URL),
add_slash(CLOUD_PORTAL_URL),
add_slash(MASHUP_PORTAL_URL),
add_slash(DATA_PORTAL_URL)
]
if origin in allowed_origins:
headers = {
'Access-Control-Allow-Origin': origin,
'Access-Control-Allow-Credentials': 'true'
}
response = build_response(request, 200, 'OK', headers=headers)
else:
response = build_response(request, 403, 'Forbidden')
else:
# If using the FI-LAB authentication and it is not a cross domain
# request redirect to the FI-LAB main page
response = build_response(request, 200, 'OK')
    else:
        # If not using the FI-LAB authentication redirect to the login page
        url = '/login?next=/'
        response = HttpResponseRedirect(url)
return response
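# Illustrative helper showing the origin check used above in isolation: an
# origin is only trusted when, normalised with add_slash, it matches one of the
# known portal URLs. The values passed in are assumed to be plain URL strings.
def _example_origin_allowed(origin, portal_urls):
    return add_slash(origin) in [add_slash(url) for url in portal_urls]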
| agpl-3.0 | -2,010,787,913,831,776,300 | 34.478873 | 109 | 0.662565 | false |
gemfire/py-gemfire-rest | gemfire/Region.py | 1 | 7643 | '''
Copyright (c) 2014 Pivotal Software, Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0.
Unless required by applicable law or agreed to in writing, software distributed under the License
is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
'''
import logging
import requests
import jsonpickle
class Region:
def __init__(self, name, base_url, username, password, type):
''' Initializes a Region '''
self.name = name
self.base_url = base_url
self.user = username
self.password = password
self.type = type
def get_all(self):
''' Returns all the data in a Region '''
url = self.base_url + "?ALL"
data = requests.get(url, auth=(self.user, self.password))
logging.debug("Sending request to " + url)
fdata = jsonpickle.decode(data.text)
if data.status_code == 200:
logging.debug("Response from server: " + " ,".join(data))
return fdata[self.name]
else:
self.error_response(data)
def create(self, key, value):
''' Creates a new data value in the Region if the key is absent '''
url = self.base_url + "?key=" + str(key)
headers = {'content-type': 'application/json'}
jvalue = jsonpickle.encode(value)
data = requests.post(url, data=jvalue, headers=headers, auth=(self.user, self.password))
logging.debug("Sending request to " + url)
if data.status_code == 201:
logging.debug("The value " + str(value) + " was created in the region for the key " + str(key))
return True
else:
self.error_response(data)
def put(self, key, value):
''' Updates or inserts data for a specified key '''
url = self.base_url + "/" + str(key)
headers = {'content-type': 'application/json'}
jvalue = jsonpickle.encode(value)
data = requests.put(url, data=jvalue, headers=headers, auth=(self.user, self.password))
logging.debug("Sending request to " + url)
if data.status_code == 200:
logging.debug("The value " + str(value) + " was put in the region for the key " + str(key))
return True
else:
self.error_response(data)
def keys(self):
''' Returns all keys in the Region '''
url = self.base_url + "/keys"
data = requests.get(url, auth=(self.user, self.password))
logging.debug("Sending request to " + url)
fdata = jsonpickle.decode(data.text)
if data.status_code == 200:
logging.debug("Response from server: " + " ,".join(data))
return fdata["keys"]
else:
self.error_response(data)
def get(self, *arg):
''' Returns the data value for a specified key '''
sub_url = ','.join(str(key) for key in arg)
url = self.base_url + "/" + sub_url + "?ignoreMissingKey=true"
data = requests.get(url, auth=(self.user, self.password))
logging.debug("Sending request to " + url)
if data.status_code == 200:
logging.debug("Response from server: " + " ,".join(data))
return jsonpickle.decode(data.text)
else:
self.error_response(data)
def __getitem__(self, key):
''' Method to support region[key] notion '''
url = self.base_url + "/" + str(key) + "?ignoreMissingKey=true"
data = requests.get(url, auth=(self.user, self.password))
logging.debug("Sending request to " + url)
if data.status_code == 200:
logging.debug("Response from server: " + " ,".join(data))
return jsonpickle.decode(data.text)
else:
self.error_response(data)
def put_all(self, item):
''' Insert or updates data for multiple keys specified by a hashtable '''
sub_url = ','.join(str(keys) for keys in item)
url = self.base_url + "/" + sub_url
headers = {'content-type': 'application/json'}
jvalue = jsonpickle.encode(item.values())
data = requests.put(url, data=jvalue, headers=headers, auth=(self.user, self.password))
logging.debug("Sending request to " + url)
if data.status_code == 200:
logging.debug(str(item) + " was put into the region")
return True
else:
self.error_response(data)
def update(self, key, value):
''' Updates the data in a region only if the specified key is present '''
url = self.base_url + "/" + str(key) + "?op=REPLACE"
headers = {'content-type': 'application/json'}
jvalue = jsonpickle.encode(value)
data = requests.put(url, data=jvalue, headers=headers, auth=(self.user, self.password))
logging.debug("Sending request to " + url)
if data.status_code == 200:
logging.debug("The value at key: " + str(key) + " was updated to " + str(value))
return True
else:
self.error_response(data)
def compare_and_set(self, key, oldvalue, newvalue):
''' Compares old values and if identical replaces with a new value '''
url = self.base_url + "/" + str(key) + "?op=CAS"
headers = {'content-type': 'application/json'}
value = {"@old": oldvalue, "@new": newvalue}
jvalue = jsonpickle.encode(value)
data = requests.put(url, data=jvalue, headers=headers, auth=(self.user, self.password))
logging.debug("Sending request to " + url)
if data.status_code == 200:
logging.debug(str(oldvalue) + " was replaced with " + str(newvalue) + " at the key " + str(key))
return True
else:
self.error_response(data)
def delete(self, *arg):
''' Deletes the corresponding data value for the specified key '''
sub_url = ','.join(str(key) for key in arg)
url = self.base_url + "/" + sub_url
data = requests.delete(url, auth=(self.user, self.password))
logging.debug("Sending request to " + url)
if data.status_code == 200:
logging.debug("The values for the keys: " + str(arg) + " were deleted from the region")
return True
else:
self.error_response(data)
def clear(self):
''' Deletes all data in the Region '''
if self.type == "REPLICATE":
data = requests.delete(self.base_url, auth=(self.user, self.password))
if data.status_code == 200:
logging.debug("All data was cleared from the region")
return True
else:
self.error_response(data)
if self.type == "PARTITION":
keys = self.keys()
temp = ",".join(str(key) for key in keys)
self.delete(temp)
return True
def error_response(self, data):
''' Processes HTTP error responses '''
if data != 400 or data != 409 or data != 405:
logging.warning("Response from server: " + str(data.status_code) + " " + data.reason + " - " + data.text)
print str(data.status_code) + ": " + data.reason
return False
else:
logging.debug("Response from server: " + str(data.status_code) + " " + data.reason + " - " + data.text)
return False
| apache-2.0 | -7,803,278,751,868,218,000 | 42.180791 | 117 | 0.584718 | false |
slkaczma/iot-foosball | table/table.py | 1 | 1996 | #!/usr/bin/env python
# Modified from python program written by Vance Morris for IoT Foosball table
import RPi.GPIO as GPIO
import os,json
import ibmiotf.application
import ibmiotf.device
from ibmiotf.codecs import jsonIotfCodec
import uuid
from time import sleep
import signal
import sys
import logging
# setup IoT Foundation information
# replace with your credentials
org = "ORG"
type = "table"
id = "ID"
method="token"
token="AUTH-TOKEN"
# setup sensor input pins
inputPin1 = 11 #Board 11
inputPin2 = 13 #Board 13
inputButtonPin = 15 #Board 15
GPIO.setmode(GPIO.BOARD)
GPIO.setup(inputPin1,GPIO.IN,pull_up_down=GPIO.PUD_UP)
GPIO.setup(inputPin2,GPIO.IN,pull_up_down=GPIO.PUD_UP)
GPIO.setup(inputButtonPin,GPIO.IN,pull_up_down=GPIO.PUD_UP)
# setup SIGINT handler
def signal_handler(signal, frame):
print '\nExiting.'
GPIO.cleanup()
table.disconnect()
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
# setup callbacks for sensors
def sensor1_callback(gpio_id):
data = 1
print "Goal Team 1"
table.publishEventOverHTTP("status",data)
sleep(0.4)
def sensor2_callback(gpio_id):
data = 2
print "Goal Team 2"
table.publishEventOverHTTP("status",data)
sleep(0.4)
def button_callback(gpio_id):
data = 0
print "Reset button pushed"
table.publishEventOverHTTP("status",data)
try:
options = {"org":org,"type":type,"id":id,"auth-method":method,"auth-token":token}
table = ibmiotf.device.Client(options)
table.connect()
table.setMessageEncoderModule('json',jsonIotfCodec)
table.logger.setLevel(logging.INFO)
# Set up rising edge detection on pins
GPIO.add_event_detect(inputPin1, GPIO.FALLING, callback=sensor1_callback, bouncetime=1000)
GPIO.add_event_detect(inputPin2, GPIO.FALLING, callback=sensor2_callback, bouncetime=1000)
GPIO.add_event_detect(inputButtonPin, GPIO.FALLING, callback=button_callback, bouncetime=1000)
while True:
sleep(1)
except ibmiotf.ConnectionException as e:
print e
| mit | 5,044,002,817,899,014,000 | 25.613333 | 98 | 0.742986 | false |
sigopt/sigopt-python | sigopt/runs/factory.py | 1 | 2341 | import contextlib
from ..exception import RunException
from ..interface import Connection
from .context import LiveRunContext, NullRunContext
class RunFactory(object):
CONFIG_CONTEXT_KEY = 'run_connection'
RUN_CONTEXT_KEY = 'run_context'
_global_run_context = None
_global_connection = None
_null_run_context = NullRunContext()
@classmethod
def get_global_run_context(cls):
if cls._global_run_context:
return cls._global_run_context
return cls._null_run_context
@classmethod
def from_config(cls, config):
data = config.get_context_data(cls) or {}
run_factory = cls()
run_context_data = data.get(cls.RUN_CONTEXT_KEY)
if run_context_data:
cls._push_global_run(LiveRunContext.from_json(
run_factory.connection,
run_context_data,
))
config.set_context_entry(run_factory)
return run_factory
@classmethod
def _push_global_run(cls, run_context):
if cls._global_run_context is None:
cls._global_run_context = run_context
else:
raise RunException('A global run already exists')
@classmethod
def _pop_global_run(cls):
if cls._global_run_context is None:
raise RunException('No global run exists')
global_run = cls._global_run_context
cls._global_run_context = None
return global_run
@classmethod
def _get_connection_singleton(cls):
if cls._global_connection is None:
cls._global_connection = Connection()
return cls._global_connection
def __init__(self):
self._all_assignments = {}
@property
def connection(self):
return self._get_connection_singleton()
def to_json(self):
run_context_data = self._global_run_context and self._global_run_context.to_json()
return {
self.RUN_CONTEXT_KEY: run_context_data,
}
@contextlib.contextmanager
def create_global_run(self, name=None, project=None, suggestion=None):
with self.create_run(name=name, project=project, suggestion=suggestion) as run:
self._push_global_run(run)
try:
yield run
finally:
self._pop_global_run()
def create_run(self, name=None, project=None, suggestion=None):
return LiveRunContext.create(
self.connection,
run_name=name,
project_id=project,
suggestion=suggestion,
all_assignments=self._all_assignments,
)
| mit | -8,505,193,482,968,139,000 | 26.541176 | 86 | 0.680906 | false |
kg-bot/SupyBot | plugins/SahadHelp/test.py | 1 | 1769 | ###
# Copyright (c) 2013, KG-Bot
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
from supybot.test import *
class SahadHelpTestCase(PluginTestCase):
plugins = ('SahadHelp',)
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
| gpl-3.0 | -237,627,253,507,384,420 | 45.810811 | 79 | 0.752968 | false |
alessandrod/txloadbalancer | txlb/schedulers.py | 1 | 3888 | import random
import itertools
rand = 'rand'
roundr = 'roundr'
leastc = 'leastc'
weightr = 'weightr'
weightlc = 'weightlc'
sticky = 'sticky'
def schedulerFactory(lbType, tracker):
"""
A dispatch function for a service's scheduler.
"""
if lbType == rand:
return RandomScheduler(tracker)
elif lbType == roundr:
return RoundRobinScheduler(tracker)
elif lbType == leastc:
return LeastConnsScheduler(tracker)
elif lbType == weightr:
return RandomWeightedScheduler(tracker)
else:
raise ValueError, "Unknown scheduler type `%s'" % lbType
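# Illustrative sketch of the factory in use. A tracker exposing `hosts`,
# `available` and `lastclose` (as the schedulers below expect) is assumed to be
# built elsewhere in txlb; the client address is a made-up value.
def _example_build_scheduler(tracker):
    scheduler = schedulerFactory(leastc, tracker)
    return scheduler.nextHost(('127.0.0.1', 12345))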
class BaseScheduler(object):
"""
schedulers need the following:
* access to a proxy's hosts
* a proxy's open connections
* the "lastclose" for a host (don't know what that is yet)
"""
def __init__(self, tracker):
self.tracker = tracker
self.tracker.scheduler = self
def hasHost(self):
"""
"""
if self.tracker.available.keys():
return True
return False
class RandomScheduler(BaseScheduler):
"""
Select a random proxied host to receive the next request.
"""
schedulerName = rand
def nextHost(self, clientAddr):
if not self.hasHost():
return
        # hosts live on the tracker, as in the other schedulers below
        pick = random.choice(self.tracker.hosts)
return pick
class RoundRobinScheduler(BaseScheduler):
"""
    This scheduler presents a simple algorithm for selecting hosts based on
nothing other than who's next in the list.
"""
schedulerName = roundr
counter = 0
def nextHost(self, clientAddr):
if not self.hasHost():
return
if self.counter >= len(self.tracker.hosts):
self.counter = 0
if self.tracker.hosts:
d = self.tracker.hosts[self.counter]
self.counter += 1
return d
class LeastConnsScheduler(BaseScheduler):
"""
This scheduler passes the connection to the destination with the least
number of current open connections. If multiple machines have the same
number of open connections, send to the least recently used.
"""
schedulerName = leastc
counter = 0
def nextHost(self, clientAddr):
if not self.hasHost():
return
hosts = [(x[1], self.tracker.lastclose.get(x[0],0), x[0])
for x in self.tracker.available.items()]
hosts.sort()
return hosts[0][2]
class RandomWeightedScheduler(BaseScheduler):
"""
This scheduler passes the connection in a semi-random fashion, with the
highest likelihood of selection going to the host with the largest weight
value.
    In particular, it uses hosts and their associated weights to build a
    "simulated population" of hosts. These do not get placed into memory
en-masse, thanks to the existence of iterators. A single host in the
"population" is chosen, with hosts of greater weights being selected more
often (over time).
"""
schedulerName = weightr
def nextHost(self, clientAddr):
if not self.hasHost():
return
group = self.tracker.group
# hosts is a list of (host, port) tuples
# XXX
# this is pretty slow... perhaps the weight data should also be stored
# on the tracker object and we should put the getWeightDistribution and
# getWeights methods on this scheduler...
# XXX
# or we can cache the computed values and refresh them in a tracker
hosts = self.tracker.available.keys()
population = group.getWeightDistribution(hostPorts=hosts)
populationSize = sum([weight for hostPort, weight
in group.getWeights().items() if hostPort in hosts])
index = random.randint(0, populationSize - 1)
return itertools.islice(population, index, None).next()
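# Illustrative, self-contained version of the weighted pick above, using plain
# dicts instead of the tracker/group objects. `weights` is assumed to map
# (host, port) tuples to positive integer weights and to have at least one entry.
def _example_weighted_pick(weights):
    population = itertools.chain.from_iterable(
        itertools.repeat(hostPort, weight) for hostPort, weight in weights.items())
    index = random.randint(0, sum(weights.values()) - 1)
    return itertools.islice(population, index, None).next()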
| mit | 3,526,978,727,571,597,300 | 26.188811 | 79 | 0.63786 | false |
SEL-Columbia/commcare-hq | corehq/pillows/base.py | 1 | 4512 | from pillowtop.listener import AliasedElasticPillow
from dimagi.utils.decorators.memoized import memoized
from django.conf import settings
VALUE_TAG = '#value'
def map_types(item, mapping, override_root_keys=None):
if isinstance(item, dict):
return convert_property_dict(item, mapping, override_root_keys=override_root_keys)
elif isinstance(item, list):
return [map_types(x, mapping) for x in item]
else:
return {VALUE_TAG: item}
def convert_property_dict(sub_dict, mapping, override_root_keys=None):
"""
For mapping out ALL nested properties on cases, convert everything to a dict so as to
prevent string=>object and object=>string mapping errors.
sub_dict: the doc dict you want to modify in place before sending to ES
    mapping: The mapping at the level of the properties you are at - originally passed in as the default mapping of the pillow
override_root_keys: a list of keys you want explicitly skipped at the root level and are not recursed down
"""
mapping = mapping or {}
override_root_keys = override_root_keys or []
for k, v in sub_dict.items():
if k in mapping.get('properties', {}) or k in override_root_keys:
continue
dynamic_mapping = mapping.get('dynamic', True)
sub_mapping = mapping.get('properties', {}).get(k, {})
if dynamic_mapping is not False:
sub_dict[k] = map_types(v, sub_mapping, override_root_keys=override_root_keys)
return sub_dict
def restore_property_dict(report_dict_item):
"""
Revert a converted/retrieved document from Report<index> and deconvert all its properties
back from {#value: <val>} to just <val>
"""
restored = {}
if not isinstance(report_dict_item, dict):
return report_dict_item
for k, v in report_dict_item.items():
if isinstance(v, list):
restored[k] = [restore_property_dict(x) for x in v]
elif isinstance(v, dict):
if VALUE_TAG in v:
restored[k] = v[VALUE_TAG]
else:
restored[k] = restore_property_dict(v)
else:
restored[k] = v
return restored
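# Illustrative round trip of the #value boxing on a made-up document: every
# leaf is wrapped on the way into ES and unwrapped on the way back out, so
# mixed-type dynamic properties cannot collide in the mapping.
def _example_value_tag_round_trip():
    import copy
    original = {'owner_id': 'abc123', 'case_properties': {'age': 31, 'dob': '1984-01-01'}}
    converted = convert_property_dict(copy.deepcopy(original), {})
    # converted['case_properties']['age'] is now {'#value': 31}
    return restore_property_dict(converted) == original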
class HQPillow(AliasedElasticPillow):
es_host = settings.ELASTICSEARCH_HOST
es_port = settings.ELASTICSEARCH_PORT
es_timeout = 60
default_mapping = None
es_meta = {
"settings": {
"analysis": {
"analyzer": {
"default": {
"type": "custom",
"tokenizer": "whitespace",
"filter": ["lowercase"]
},
"sortable_exact": {
"type": "custom",
"tokenizer": "keyword",
"filter": ["lowercase"]
}
}
}
}
}
def __init__(self, **kwargs):
super(HQPillow, self).__init__(**kwargs)
@memoized
def calc_meta(self):
"""
override of the meta calculator since we're separating out all the types,
        so we just do a hash of the "prototype" instead to determine the md5
"""
return self.calc_mapping_hash(self.default_mapping)
def get_domain(self, doc_dict):
"""
A cache/buffer for the _changes feed situation for xforms.
"""
return doc_dict.get('domain', None)
def get_type_string(self, doc_dict):
return self.es_type
def get_mapping_from_type(self, doc_dict):
"""
Define mapping uniquely to the domain_type document.
See below on why date_detection is False
NOTE: DO NOT MODIFY THIS UNLESS ABSOLUTELY NECESSARY. A CHANGE BELOW WILL GENERATE A NEW
HASH FOR THE INDEX NAME REQUIRING A REINDEX+RE-ALIAS. THIS IS A SERIOUSLY RESOURCE
INTENSIVE OPERATION THAT REQUIRES SOME CAREFUL LOGISTICS TO MIGRATE
"""
#the meta here is defined for when the case index + type is created for the FIRST time
#subsequent data added to it will be added automatically, but date_detection is necessary
# to be false to prevent indexes from not being created due to the way we store dates
#all are strings EXCEPT the core case properties which we need to explicitly define below.
#that way date sort and ranges will work with canonical date formats for queries.
return {
self.get_type_string(doc_dict): self.default_mapping
}
| bsd-3-clause | -1,508,023,410,971,384,000 | 35.983607 | 124 | 0.609707 | false |
kapteyn-astro/kapteyn | doc/source/EXAMPLES/allskyf4.py | 1 | 2216 | from kapteyn import maputils
import numpy
from service import *
fignum = 4
fig = plt.figure(figsize=figsize)
frame = fig.add_axes(plotbox)
mu = 2.0; phi = 180.0; theta = 60
title = r"""Slant zenithal perspective (SZP) with:
($\mu,\phi,\theta)=(2,180,60)$ with special algorithm for border (Cal. fig.7)"""
header = {'NAXIS' : 2, 'NAXIS1': 100, 'NAXIS2': 80,
'CTYPE1' : 'RA---SZP',
'CRVAL1' : 0.0, 'CRPIX1' : 50, 'CUNIT1' : 'deg', 'CDELT1' : -4.0,
'CTYPE2' : 'DEC--SZP',
'CRVAL2' : dec0, 'CRPIX2' : 20, 'CUNIT2' : 'deg', 'CDELT2' : 4.0,
'PV2_1' : mu, 'PV2_2' : phi, 'PV2_3' : theta,
}
X = numpy.arange(0,360.0,30.0)
Y = numpy.arange(-90,90,15.0)
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(axnum=(1,2),
wylim=(-90.0,90.0), wxlim=(-180,180),
startx=X, starty=Y)
grat.setp_lineswcs0(0, lw=2)
grat.setp_lineswcs1(0, lw=2)
# Special care for the boundary
# The algorithm seems to work but is not very accurate
xp = -mu * numpy.cos(theta*numpy.pi/180.0)* numpy.sin(phi*numpy.pi/180.0)
yp = mu * numpy.cos(theta*numpy.pi/180.0)* numpy.cos(phi*numpy.pi/180.0)
zp = mu * numpy.sin(theta*numpy.pi/180.0) + 1.0
a = numpy.linspace(0.0,360.0,500)
arad = a*numpy.pi/180.0
rho = zp - 1.0
sigma = xp*numpy.sin(arad) - yp*numpy.cos(arad)
sq = numpy.sqrt(rho*rho+sigma*sigma)
omega = numpy.arcsin(1/sq)
psi = numpy.arctan2(sigma,rho)
thetaxrad = psi - omega
thetax = thetaxrad * 180.0/numpy.pi + 5
g = grat.addgratline(a, thetax, pixels=False)
grat.setp_linespecial(g, lw=2, color='c')
# Select two starting points for a scan in pixels to find borders
g2 = grat.scanborder(68.26,13,3,3)
g3 = grat.scanborder(30,66.3,3,3)
grat.setp_linespecial(g2, color='r', lw=1)
grat.setp_linespecial(g3, color='r', lw=1)
lon_world = list(range(0,360,30))
lat_world = [-60, -30, 30, 60, 90]
#labkwargs0 = {'color':'r', 'va':'center', 'ha':'center'}
#labkwargs1 = {'color':'b', 'va':'bottom', 'ha':'right'}
doplot(frame, fignum, annim, grat, title,
lon_world=lon_world, lat_world=lat_world,
# labkwargs0=labkwargs0, labkwargs1=labkwargs1,
markerpos=markerpos)
| bsd-3-clause | -7,820,787,603,152,512,000 | 37.877193 | 80 | 0.630866 | false |
simplicitylab/python-experiments | flask-scribus/app/api_resources/scribusjob.py | 1 | 1806 | # Import os, json, UUD
import uuid, os, json
# Import jsonify
from flask import jsonify, request
# Import resource
from flask_restful import Resource
# Import Redis
from redis import Redis
# Import rq
from rq import Worker, Queue, Connection
# Import buildPDF method which we will pass to our redis queue
from app.modules.scribus import buildPDF
# setup redis & queue
redis_conn = Redis()
q = Queue(connection=redis_conn)
class ScribusJob(Resource):
""" REST: ScribusJob resource """
def writeJobsInfoJson(self, jobsInfoID, template, title, text, image):
""" Writes json jobs info file """
jobsInfo = {}
jobsInfo['template'] = template
jobsInfo['title'] = title
jobsInfo['text'] = text
jobsInfo['image'] = image
with open(os.path.join('jobs', str(jobsInfoID) + '.json' ), 'w') as outfile:
outfile.write(json.dumps(jobsInfo))
def post(self):
""" handle post method: submitting new jobs """
# generate job info id (not python-rq id!)
jobsInfoID = uuid.uuid1()
# save files
file = request.files['image']
fileName = ""
if file:
fileName = file.filename
file.save(os.path.join('jobs', fileName))
# store job information in a json file
self.writeJobsInfoJson(
jobsInfoID,
request.form['template'],
request.form['title'],
request.form['text'],
fileName
)
# add job to our queue
job = q.enqueue_call(
func=buildPDF,
args=(jobsInfoID,),
result_ttl=86400
)
# return our job id
return jsonify(
jobQueueID = job.get_id(),
jobsInfoID = jobsInfoID
)
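# Illustrative sketch of the consumer side, assuming it runs as a separate
# process against the same redis instance: an rq worker pops jobs from the
# queue defined above and runs buildPDF for each enqueued call.
def _example_run_worker():
    with Connection(redis_conn):
        Worker([q]).work()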
| agpl-3.0 | -5,209,896,729,705,639,000 | 25.558824 | 84 | 0.584164 | false |
deathowl/huey | huey/tests/test_queue.py | 1 | 14119 | import datetime
from huey import crontab
from huey import exceptions as huey_exceptions
from huey import RedisHuey
from huey.api import Huey
from huey.api import QueueTask
from huey.registry import registry
from huey.storage import RedisDataStore
from huey.storage import RedisQueue
from huey.storage import RedisSchedule
from huey.tests.base import b
from huey.tests.base import BaseTestCase
from huey.utils import EmptyData
from huey.utils import local_to_utc
huey = RedisHuey(result_store=None, schedule=None, events=None, blocking=False)
huey_results = RedisHuey(blocking=False)
huey_store_none = RedisHuey(store_none=True, blocking=False)
# Global state.
state = {}
@huey.task()
def put_data(key, value):
state[key] = value
@huey.task(include_task=True)
def put_data_ctx(key, value, task=None):
state['last_task_class'] = type(task).__name__
class PutTask(QueueTask):
def execute(self):
k, v = self.data
state[k] = v
class TestException(Exception):
pass
@huey.task()
def throw_error_task():
raise TestException('bampf')
@huey_results.task()
def add_values(a, b):
return a + b
@huey_results.periodic_task(crontab(minute='0'))
def hourly_task2():
state['periodic'] = 2
@huey_results.task()
def returns_none():
return None
@huey_store_none.task()
def returns_none2():
return None
class BaseQueueTestCase(BaseTestCase):
def setUp(self):
global state
state = {}
huey.flush()
huey_results.flush()
huey_store_none.flush()
self.assertEqual(len(huey), 0)
def tearDown(self):
huey.flush()
huey_results.flush()
huey_store_none.flush()
class TestHueyQueueMetadataAPIs(BaseQueueTestCase):
def test_queue_metadata(self):
put_data('k1', 'v1')
put_data('k2', 'v2')
cmd2, cmd1 = huey.pending()
self.assertEqual(cmd2.data, (('k2', 'v2'), {}))
self.assertEqual(cmd1.data, (('k1', 'v1'), {}))
huey.dequeue()
cmd1, = huey.pending()
self.assertEqual(cmd1.data, (('k2', 'v2'), {}))
def test_schedule_metadata(self):
add_values.schedule((1, 2), delay=10)
add_values.schedule((3, 4), delay=5)
self.assertEqual(len(huey_results), 2)
huey_results.add_schedule(huey.dequeue())
huey_results.add_schedule(huey.dequeue())
cmd2, cmd1 = huey_results.scheduled()
self.assertEqual(cmd1.data, ((1, 2), {}))
self.assertEqual(cmd2.data, ((3, 4), {}))
def test_results_metadata(self):
add_values(1, 2)
add_values(3, 4)
t1 = huey_results.dequeue()
t2 = huey_results.dequeue()
self.assertEqual(huey_results.all_results(), {})
huey_results.execute(t1)
self.assertEqual(list(huey_results.all_results()), [b(t1.task_id)])
huey_results.execute(t2)
self.assertEqual(sorted(huey_results.all_results().keys()),
sorted([b(t1.task_id), b(t2.task_id)]))
class TestHueyQueueAPIs(BaseQueueTestCase):
def test_enqueue(self):
# initializing the command does not enqueue it
task = PutTask(('k', 'v'))
self.assertEqual(len(huey), 0)
# ok, enqueue it, then check that it was enqueued
huey.enqueue(task)
self.assertEqual(len(huey), 1)
self.assertEqual(state, {})
# it can be enqueued multiple times
huey.enqueue(task)
self.assertEqual(len(huey), 2)
# no changes to state
self.assertEqual(state, {})
def test_enqueue_decorator(self):
put_data('k', 'v')
self.assertEqual(len(huey), 1)
put_data('k', 'v')
self.assertEqual(len(huey), 2)
# no changes to state
self.assertEqual(state, {})
def test_scheduled_time(self):
put_data('k', 'v')
task = huey.dequeue()
self.assertEqual(len(huey), 0)
self.assertEqual(task.execute_time, None)
dt = datetime.datetime(2011, 1, 1, 0, 1)
put_data.schedule(args=('k2', 'v2'), eta=dt)
self.assertEqual(len(huey), 1)
task = huey.dequeue()
self.assertEqual(task.execute_time, local_to_utc(dt))
put_data.schedule(args=('k3', 'v3'), eta=dt, convert_utc=False)
self.assertEqual(len(huey), 1)
task = huey.dequeue()
self.assertEqual(task.execute_time, dt)
def test_error_raised(self):
throw_error_task()
task = huey.dequeue()
self.assertRaises(TestException, huey.execute, task)
def test_internal_error(self):
"""
Verify that exceptions are wrapped with the special "huey"
exception classes.
"""
class SpecialException(Exception):
pass
class BrokenQueue(RedisQueue):
def read(self):
raise SpecialException('read error')
def write(self, data):
raise SpecialException('write error')
class BrokenDataStore(RedisDataStore):
def get(self, key):
raise SpecialException('get error')
def put(self, key, value):
raise SpecialException('put error')
class BrokenSchedule(RedisSchedule):
def add(self, data, ts):
raise SpecialException('add error')
def read(self, ts):
raise SpecialException('read error')
task = PutTask(('foo', 'bar'))
huey = Huey(
BrokenQueue('q', None),
BrokenDataStore('q', None),
BrokenSchedule('q', None))
self.assertRaises(
huey_exceptions.QueueWriteException,
huey.enqueue,
task)
self.assertRaises(
huey_exceptions.QueueReadException,
huey.dequeue)
self.assertRaises(
huey_exceptions.DataStorePutException,
huey.revoke,
task)
self.assertRaises(
huey_exceptions.DataStoreGetException,
huey.restore,
task)
self.assertRaises(
huey_exceptions.ScheduleAddException,
huey.add_schedule,
task)
self.assertRaises(
huey_exceptions.ScheduleReadException,
huey.read_schedule,
1)
def test_dequeueing(self):
res = huey.dequeue() # no error raised if queue is empty
self.assertEqual(res, None)
put_data('k', 'v')
task = huey.dequeue()
self.assertTrue(isinstance(task, QueueTask))
self.assertEqual(task.get_data(), (('k', 'v'), {}))
def test_execution(self):
self.assertEqual(state, {})
put_data('k', 'v')
task = huey.dequeue()
self.assertFalse('k' in state)
huey.execute(task)
self.assertEqual(state, {'k': 'v'})
put_data('k', 'X')
self.assertEqual(state, {'k': 'v'})
huey.execute(huey.dequeue())
self.assertEqual(state, {'k': 'X'})
self.assertRaises(TypeError, huey.execute, huey.dequeue())
def test_self_awareness(self):
put_data_ctx('k', 'v')
task = huey.dequeue()
huey.execute(task)
self.assertEqual(state['last_task_class'], 'queuecmd_put_data_ctx')
del state['last_task_class']
put_data('k', 'x')
huey.execute(huey.dequeue())
self.assertFalse('last_task_class' in state)
def test_call_local(self):
self.assertEqual(len(huey), 0)
self.assertEqual(state, {})
put_data.call_local('nugget', 'green')
self.assertEqual(len(huey), 0)
self.assertEqual(state, {'nugget': 'green'})
def test_revoke(self):
ac = PutTask(('k', 'v'))
ac2 = PutTask(('k2', 'v2'))
ac3 = PutTask(('k3', 'v3'))
huey_results.enqueue(ac)
huey_results.enqueue(ac2)
huey_results.enqueue(ac3)
huey_results.enqueue(ac2)
huey_results.enqueue(ac)
self.assertEqual(len(huey_results), 5)
huey_results.revoke(ac2)
while huey_results:
task = huey_results.dequeue()
if not huey_results.is_revoked(task):
huey_results.execute(task)
self.assertEqual(state, {'k': 'v', 'k3': 'v3'})
def test_revoke_periodic(self):
hourly_task2.revoke()
self.assertTrue(hourly_task2.is_revoked())
# it is still revoked
self.assertTrue(hourly_task2.is_revoked())
hourly_task2.restore()
self.assertFalse(hourly_task2.is_revoked())
hourly_task2.revoke(revoke_once=True)
self.assertTrue(hourly_task2.is_revoked()) # it is revoked once, but we are preserving that state
self.assertTrue(hourly_task2.is_revoked(peek=False)) # is revoked once, but clear state
self.assertFalse(hourly_task2.is_revoked()) # no longer revoked
d = datetime.datetime
hourly_task2.revoke(revoke_until=d(2011, 1, 1, 11, 0))
self.assertTrue(hourly_task2.is_revoked(dt=d(2011, 1, 1, 10, 0)))
self.assertTrue(hourly_task2.is_revoked(dt=d(2011, 1, 1, 10, 59)))
self.assertFalse(hourly_task2.is_revoked(dt=d(2011, 1, 1, 11, 0)))
hourly_task2.restore()
self.assertFalse(hourly_task2.is_revoked())
def test_result_store(self):
res = add_values(1, 2)
res2 = add_values(4, 5)
res3 = add_values(0, 0)
# none have been executed as yet
self.assertEqual(res.get(), None)
self.assertEqual(res2.get(), None)
self.assertEqual(res3.get(), None)
# execute the first task
huey_results.execute(huey_results.dequeue())
self.assertEqual(res.get(), 3)
self.assertEqual(res2.get(), None)
self.assertEqual(res3.get(), None)
# execute the second task
huey_results.execute(huey_results.dequeue())
self.assertEqual(res.get(), 3)
self.assertEqual(res2.get(), 9)
self.assertEqual(res3.get(), None)
# execute the 3rd, which returns a zero value
huey_results.execute(huey_results.dequeue())
self.assertEqual(res.get(), 3)
self.assertEqual(res2.get(), 9)
self.assertEqual(res3.get(), 0)
# check that it returns None when nothing is present
res = returns_none()
self.assertEqual(res.get(), None)
# execute, it will still return None, but underneath it is an EmptyResult
# indicating its actual result was not persisted
huey_results.execute(huey_results.dequeue())
self.assertEqual(res.get(), None)
self.assertEqual(res._result, EmptyData)
# execute again, this time note that we're pointing at the invoker
# that *does* accept None as a store-able result
res = returns_none2()
self.assertEqual(res.get(), None)
# it stores None
huey_store_none.execute(huey_store_none.dequeue())
self.assertEqual(res.get(), None)
self.assertEqual(res._result, None)
def test_task_store(self):
dt1 = datetime.datetime(2011, 1, 1, 0, 0)
dt2 = datetime.datetime(2035, 1, 1, 0, 0)
add_values.schedule(args=('k', 'v'), eta=dt1, convert_utc=False)
task1 = huey_results.dequeue()
add_values.schedule(args=('k2', 'v2'), eta=dt2, convert_utc=False)
task2 = huey_results.dequeue()
add_values('k3', 'v3')
task3 = huey_results.dequeue()
# add the command to the schedule
huey_results.add_schedule(task1)
self.assertEqual(len(huey_results.schedule), 1)
# add a future-dated command
huey_results.add_schedule(task2)
self.assertEqual(len(huey_results.schedule), 2)
huey_results.add_schedule(task3)
tasks = huey_results.read_schedule(dt1)
self.assertEqual(tasks, [task3, task1])
tasks = huey_results.read_schedule(dt1)
self.assertEqual(tasks, [])
tasks = huey_results.read_schedule(dt2)
self.assertEqual(tasks, [task2])
def test_ready_to_run_method(self):
dt1 = datetime.datetime(2011, 1, 1, 0, 0)
dt2 = datetime.datetime(2035, 1, 1, 0, 0)
add_values.schedule(args=('k', 'v'), eta=dt1)
task1 = huey_results.dequeue()
add_values.schedule(args=('k2', 'v2'), eta=dt2)
task2 = huey_results.dequeue()
add_values('k3', 'v3')
task3 = huey_results.dequeue()
add_values.schedule(args=('k4', 'v4'), task_id='test_task_id')
task4 = huey_results.dequeue()
# sanity check what should be run
self.assertTrue(huey_results.ready_to_run(task1))
self.assertFalse(huey_results.ready_to_run(task2))
self.assertTrue(huey_results.ready_to_run(task3))
self.assertTrue(huey_results.ready_to_run(task4))
self.assertEqual('test_task_id', task4.task_id)
def test_task_delay(self):
curr = datetime.datetime.utcnow()
curr50 = curr + datetime.timedelta(seconds=50)
curr70 = curr + datetime.timedelta(seconds=70)
add_values.schedule(args=('k', 'v'), delay=60)
task1 = huey_results.dequeue()
add_values.schedule(args=('k2', 'v2'), delay=600)
task2 = huey_results.dequeue()
add_values('k3', 'v3')
task3 = huey_results.dequeue()
# add the command to the schedule
huey_results.add_schedule(task1)
huey_results.add_schedule(task2)
huey_results.add_schedule(task3)
# sanity check what should be run
self.assertFalse(huey_results.ready_to_run(task1))
self.assertFalse(huey_results.ready_to_run(task2))
self.assertTrue(huey_results.ready_to_run(task3))
self.assertFalse(huey_results.ready_to_run(task1, curr50))
self.assertFalse(huey_results.ready_to_run(task2, curr50))
self.assertTrue(huey_results.ready_to_run(task3, curr50))
self.assertTrue(huey_results.ready_to_run(task1, curr70))
self.assertFalse(huey_results.ready_to_run(task2, curr70))
self.assertTrue(huey_results.ready_to_run(task3, curr70))
| mit | -3,633,250,256,391,840,000 | 30.375556 | 105 | 0.60323 | false |
Zerknechterer/pyload | module/plugins/hoster/MultishareCz.py | 1 | 1866 | # -*- coding: utf-8 -*-
import random
import re
from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
class MultishareCz(SimpleHoster):
__name__ = "MultishareCz"
__type__ = "hoster"
__version__ = "0.40"
__pattern__ = r'http://(?:www\.)?multishare\.cz/stahnout/(?P<ID>\d+)'
__config__ = [("use_premium", "bool", "Use premium account if available", True)]
__description__ = """MultiShare.cz hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("zoidberg", "[email protected]")]
SIZE_REPLACEMENTS = [(' ', '')]
CHECK_TRAFFIC = True
MULTI_HOSTER = True
INFO_PATTERN = ur'(?:<li>Název|Soubor): <strong>(?P<N>[^<]+)</strong><(?:/li><li|br)>Velikost: <strong>(?P<S>[^<]+)</strong>'
OFFLINE_PATTERN = ur'<h1>Stáhnout soubor</h1><p><strong>Požadovaný soubor neexistuje.</strong></p>'
def handleFree(self, pyfile):
self.download("http://www.multishare.cz/html/download_free.php", get={'ID': self.info['pattern']['ID']})
def handlePremium(self, pyfile):
self.download("http://www.multishare.cz/html/download_premium.php", get={'ID': self.info['pattern']['ID']})
def handleMulti(self, pyfile):
self.html = self.load('http://www.multishare.cz/html/mms_ajax.php', post={"link": pyfile.url}, decode=True)
self.checkInfo()
if not self.checkTrafficLeft():
self.fail(_("Not enough credit left to download file"))
self.download("http://dl%d.mms.multishare.cz/html/mms_process.php" % round(random.random() * 10000 * random.random()),
get={'u_ID' : self.acc_info['u_ID'],
'u_hash': self.acc_info['u_hash'],
'link' : pyfile.url},
disposition=True)
getInfo = create_getInfo(MultishareCz)
| gpl-3.0 | 7,182,225,593,763,931 | 33.481481 | 132 | 0.583781 | false |
common-workflow-language/cwltool | cwltool/builder.py | 1 | 25085 | import copy
import logging
import math
from typing import (
IO,
Any,
Callable,
Dict,
List,
MutableMapping,
MutableSequence,
Optional,
Set,
Tuple,
Union,
cast,
)
from rdflib import Graph, URIRef
from rdflib.namespace import OWL, RDFS
from ruamel.yaml.comments import CommentedMap
from schema_salad.avro.schema import Names, Schema, make_avsc_object
from schema_salad.exceptions import ValidationException
from schema_salad.sourceline import SourceLine
from schema_salad.utils import convert_to_dict, json_dumps
from schema_salad.validate import validate
from typing_extensions import TYPE_CHECKING, Type # pylint: disable=unused-import
from . import expression
from .errors import WorkflowException
from .loghandler import _logger
from .mutation import MutationManager
from .software_requirements import DependenciesConfiguration
from .stdfsaccess import StdFsAccess
from .utils import (
CONTENT_LIMIT,
CWLObjectType,
CWLOutputType,
aslist,
get_listing,
normalizeFilesDirs,
visit_class,
)
if TYPE_CHECKING:
from .pathmapper import PathMapper
from .provenance_profile import ProvenanceProfile # pylint: disable=unused-import
INPUT_OBJ_VOCAB: Dict[str, str] = {
"Any": "https://w3id.org/cwl/salad#Any",
"File": "https://w3id.org/cwl/cwl#File",
"Directory": "https://w3id.org/cwl/cwl#Directory",
}
def content_limit_respected_read_bytes(f): # type: (IO[bytes]) -> bytes
contents = f.read(CONTENT_LIMIT + 1)
if len(contents) > CONTENT_LIMIT:
raise WorkflowException(
"file is too large, loadContents limited to %d bytes" % CONTENT_LIMIT
)
return contents
def content_limit_respected_read(f): # type: (IO[bytes]) -> str
return content_limit_respected_read_bytes(f).decode("utf-8")
def substitute(value, replace): # type: (str, str) -> str
if replace.startswith("^"):
try:
return substitute(value[0 : value.rindex(".")], replace[1:])
except ValueError:
# No extension to remove
return value + replace.lstrip("^")
return value + replace
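# Illustrative (made-up file names): substitute() follows the CWL secondaryFiles
# pattern rules: a plain suffix is appended, and each leading "^" first strips
# one extension from the primary name.
def _example_substitute():
    assert substitute("reads.bam", ".bai") == "reads.bam.bai"
    assert substitute("reads.bam", "^.bai") == "reads.bai"
    assert substitute("sample.tar.gz", "^^.txt") == "sample.txt"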
def formatSubclassOf(
fmt: str, cls: str, ontology: Optional[Graph], visited: Set[str]
) -> bool:
"""Determine if `fmt` is a subclass of `cls`."""
if URIRef(fmt) == URIRef(cls):
return True
if ontology is None:
return False
if fmt in visited:
return False
visited.add(fmt)
uriRefFmt = URIRef(fmt)
for _s, _p, o in ontology.triples((uriRefFmt, RDFS.subClassOf, None)):
# Find parent classes of `fmt` and search upward
if formatSubclassOf(o, cls, ontology, visited):
return True
for _s, _p, o in ontology.triples((uriRefFmt, OWL.equivalentClass, None)):
# Find equivalent classes of `fmt` and search horizontally
if formatSubclassOf(o, cls, ontology, visited):
return True
for s, _p, _o in ontology.triples((None, OWL.equivalentClass, uriRefFmt)):
# Find equivalent classes of `fmt` and search horizontally
if formatSubclassOf(s, cls, ontology, visited):
return True
return False
def check_format(
actual_file: Union[CWLObjectType, List[CWLObjectType]],
input_formats: Union[List[str], str],
ontology: Optional[Graph],
) -> None:
"""Confirm that the format present is valid for the allowed formats."""
for afile in aslist(actual_file):
if not afile:
continue
if "format" not in afile:
raise ValidationException(
f"File has no 'format' defined: {json_dumps(afile, indent=4)}"
)
for inpf in aslist(input_formats):
if afile["format"] == inpf or formatSubclassOf(
afile["format"], inpf, ontology, set()
):
return
raise ValidationException(
f"File has an incompatible format: {json_dumps(afile, indent=4)}"
)
class HasReqsHints:
"""Base class for get_requirement()."""
def __init__(self) -> None:
"""Initialize this reqs decorator."""
self.requirements = [] # type: List[CWLObjectType]
self.hints = [] # type: List[CWLObjectType]
def get_requirement(
self, feature: str
) -> Tuple[Optional[CWLObjectType], Optional[bool]]:
for item in reversed(self.requirements):
if item["class"] == feature:
return (item, True)
for item in reversed(self.hints):
if item["class"] == feature:
return (item, False)
return (None, None)
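# Illustrative use of get_requirement() with made-up entries: requirements are
# consulted before hints, each list scanned from its most recently added entry,
# and the boolean reports whether the match came from the mandatory list.
def _example_get_requirement():
    obj = HasReqsHints()
    obj.hints = [{"class": "DockerRequirement", "dockerPull": "debian:stable-slim"}]
    obj.requirements = [{"class": "DockerRequirement", "dockerPull": "python:3-slim"}]
    req, required = obj.get_requirement("DockerRequirement")
    return req, required  # the requirements entry wins, required is True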
class Builder(HasReqsHints):
def __init__(
self,
job: CWLObjectType,
files: List[CWLObjectType],
bindings: List[CWLObjectType],
schemaDefs: MutableMapping[str, CWLObjectType],
names: Names,
requirements: List[CWLObjectType],
hints: List[CWLObjectType],
resources: Dict[str, Union[int, float, str]],
mutation_manager: Optional[MutationManager],
formatgraph: Optional[Graph],
make_fs_access: Type[StdFsAccess],
fs_access: StdFsAccess,
job_script_provider: Optional[DependenciesConfiguration],
timeout: float,
debug: bool,
js_console: bool,
force_docker_pull: bool,
loadListing: str,
outdir: str,
tmpdir: str,
stagedir: str,
cwlVersion: str,
) -> None:
"""Initialize this Builder."""
self.job = job
self.files = files
self.bindings = bindings
self.schemaDefs = schemaDefs
self.names = names
self.requirements = requirements
self.hints = hints
self.resources = resources
self.mutation_manager = mutation_manager
self.formatgraph = formatgraph
self.make_fs_access = make_fs_access
self.fs_access = fs_access
self.job_script_provider = job_script_provider
self.timeout = timeout
self.debug = debug
self.js_console = js_console
self.force_docker_pull = force_docker_pull
# One of "no_listing", "shallow_listing", "deep_listing"
self.loadListing = loadListing
self.outdir = outdir
self.tmpdir = tmpdir
self.stagedir = stagedir
self.cwlVersion = cwlVersion
self.pathmapper = None # type: Optional[PathMapper]
self.prov_obj = None # type: Optional[ProvenanceProfile]
self.find_default_container = None # type: Optional[Callable[[], str]]
def build_job_script(self, commands: List[str]) -> Optional[str]:
if self.job_script_provider is not None:
return self.job_script_provider.build_job_script(self, commands)
return None
def bind_input(
self,
schema: CWLObjectType,
datum: Union[CWLObjectType, List[CWLObjectType]],
discover_secondaryFiles: bool,
lead_pos: Optional[Union[int, List[int]]] = None,
tail_pos: Optional[Union[str, List[int]]] = None,
) -> List[MutableMapping[str, Union[str, List[int]]]]:
if tail_pos is None:
tail_pos = []
if lead_pos is None:
lead_pos = []
bindings = [] # type: List[MutableMapping[str, Union[str, List[int]]]]
binding = (
{}
) # type: Union[MutableMapping[str, Union[str, List[int]]], CommentedMap]
value_from_expression = False
if "inputBinding" in schema and isinstance(
schema["inputBinding"], MutableMapping
):
binding = CommentedMap(schema["inputBinding"].items())
bp = list(aslist(lead_pos))
if "position" in binding:
position = binding["position"]
if isinstance(position, str): # no need to test the CWL Version
                    # the schema for v1.0 only allows ints
binding["position"] = self.do_eval(position, context=datum)
bp.append(binding["position"])
else:
bp.extend(aslist(binding["position"]))
else:
bp.append(0)
bp.extend(aslist(tail_pos))
binding["position"] = bp
binding["datum"] = datum
if "valueFrom" in binding:
value_from_expression = True
# Handle union types
if isinstance(schema["type"], MutableSequence):
bound_input = False
for t in schema["type"]:
avsc = None # type: Optional[Schema]
if isinstance(t, str) and self.names.has_name(t, None):
avsc = self.names.get_name(t, None)
elif (
isinstance(t, MutableMapping)
and "name" in t
and self.names.has_name(cast(str, t["name"]), None)
):
avsc = self.names.get_name(cast(str, t["name"]), None)
if not avsc:
avsc = make_avsc_object(convert_to_dict(t), self.names)
if validate(avsc, datum, vocab=INPUT_OBJ_VOCAB):
schema = copy.deepcopy(schema)
schema["type"] = t
if not value_from_expression:
return self.bind_input(
schema,
datum,
lead_pos=lead_pos,
tail_pos=tail_pos,
discover_secondaryFiles=discover_secondaryFiles,
)
else:
self.bind_input(
schema,
datum,
lead_pos=lead_pos,
tail_pos=tail_pos,
discover_secondaryFiles=discover_secondaryFiles,
)
bound_input = True
if not bound_input:
raise ValidationException(
"'{}' is not a valid union {}".format(datum, schema["type"])
)
elif isinstance(schema["type"], MutableMapping):
st = copy.deepcopy(schema["type"])
if (
binding
and "inputBinding" not in st
and "type" in st
and st["type"] == "array"
and "itemSeparator" not in binding
):
st["inputBinding"] = {}
for k in ("secondaryFiles", "format", "streamable"):
if k in schema:
st[k] = schema[k]
if value_from_expression:
self.bind_input(
st,
datum,
lead_pos=lead_pos,
tail_pos=tail_pos,
discover_secondaryFiles=discover_secondaryFiles,
)
else:
bindings.extend(
self.bind_input(
st,
datum,
lead_pos=lead_pos,
tail_pos=tail_pos,
discover_secondaryFiles=discover_secondaryFiles,
)
)
else:
if schema["type"] in self.schemaDefs:
schema = self.schemaDefs[cast(str, schema["type"])]
if schema["type"] == "record":
datum = cast(CWLObjectType, datum)
for f in cast(List[CWLObjectType], schema["fields"]):
name = cast(str, f["name"])
if name in datum and datum[name] is not None:
bindings.extend(
self.bind_input(
f,
cast(CWLObjectType, datum[name]),
lead_pos=lead_pos,
tail_pos=name,
discover_secondaryFiles=discover_secondaryFiles,
)
)
else:
datum[name] = f.get("default")
if schema["type"] == "array":
for n, item in enumerate(cast(MutableSequence[CWLObjectType], datum)):
b2 = None
if binding:
b2 = cast(CWLObjectType, copy.deepcopy(binding))
b2["datum"] = item
itemschema = {
"type": schema["items"],
"inputBinding": b2,
} # type: CWLObjectType
for k in ("secondaryFiles", "format", "streamable"):
if k in schema:
itemschema[k] = schema[k]
bindings.extend(
self.bind_input(
itemschema,
item,
lead_pos=n,
tail_pos=tail_pos,
discover_secondaryFiles=discover_secondaryFiles,
)
)
binding = {}
def _capture_files(f: CWLObjectType) -> CWLObjectType:
self.files.append(f)
return f
if schema["type"] == "org.w3id.cwl.cwl.File":
datum = cast(CWLObjectType, datum)
self.files.append(datum)
loadContents_sourceline = (
None
) # type: Union[None, MutableMapping[str, Union[str, List[int]]], CWLObjectType]
if binding and binding.get("loadContents"):
loadContents_sourceline = binding
elif schema.get("loadContents"):
loadContents_sourceline = schema
if loadContents_sourceline and loadContents_sourceline["loadContents"]:
with SourceLine(
loadContents_sourceline, "loadContents", WorkflowException
):
try:
with self.fs_access.open(
cast(str, datum["location"]), "rb"
) as f2:
datum["contents"] = content_limit_respected_read(f2)
except Exception as e:
raise Exception(
"Reading {}\n{}".format(datum["location"], e)
)
if "secondaryFiles" in schema:
if "secondaryFiles" not in datum:
datum["secondaryFiles"] = []
for sf in aslist(schema["secondaryFiles"]):
if "required" in sf:
sf_required = self.do_eval(sf["required"], context=datum)
else:
sf_required = True
if "$(" in sf["pattern"] or "${" in sf["pattern"]:
sfpath = self.do_eval(sf["pattern"], context=datum)
else:
sfpath = substitute(
cast(str, datum["basename"]), sf["pattern"]
)
for sfname in aslist(sfpath):
if not sfname:
continue
found = False
if isinstance(sfname, str):
d_location = cast(str, datum["location"])
if "/" in d_location:
sf_location = (
d_location[0 : d_location.rindex("/") + 1]
+ sfname
)
else:
sf_location = d_location + sfname
sfbasename = sfname
elif isinstance(sfname, MutableMapping):
sf_location = sfname["location"]
sfbasename = sfname["basename"]
else:
raise WorkflowException(
"Expected secondaryFile expression to return type 'str' or 'MutableMapping', received '%s'"
% (type(sfname))
)
for d in cast(
MutableSequence[MutableMapping[str, str]],
datum["secondaryFiles"],
):
if not d.get("basename"):
d["basename"] = d["location"][
d["location"].rindex("/") + 1 :
]
if d["basename"] == sfbasename:
found = True
if not found:
def addsf(
files: MutableSequence[CWLObjectType],
newsf: CWLObjectType,
) -> None:
for f in files:
if f["location"] == newsf["location"]:
f["basename"] = newsf["basename"]
return
files.append(newsf)
if isinstance(sfname, MutableMapping):
addsf(
cast(
MutableSequence[CWLObjectType],
datum["secondaryFiles"],
),
sfname,
)
elif discover_secondaryFiles and self.fs_access.exists(
sf_location
):
addsf(
cast(
MutableSequence[CWLObjectType],
datum["secondaryFiles"],
),
{
"location": sf_location,
"basename": sfname,
"class": "File",
},
)
elif sf_required:
raise WorkflowException(
"Missing required secondary file '%s' from file object: %s"
% (sfname, json_dumps(datum, indent=4))
)
normalizeFilesDirs(
cast(MutableSequence[CWLObjectType], datum["secondaryFiles"])
)
if "format" in schema:
try:
check_format(
datum,
cast(Union[List[str], str], self.do_eval(schema["format"])),
self.formatgraph,
)
except ValidationException as ve:
raise WorkflowException(
"Expected value of '%s' to have format %s but\n "
" %s" % (schema["name"], schema["format"], ve)
) from ve
visit_class(
datum.get("secondaryFiles", []),
("File", "Directory"),
_capture_files,
)
if schema["type"] == "org.w3id.cwl.cwl.Directory":
datum = cast(CWLObjectType, datum)
ll = schema.get("loadListing") or self.loadListing
if ll and ll != "no_listing":
get_listing(
self.fs_access,
datum,
(ll == "deep_listing"),
)
self.files.append(datum)
if schema["type"] == "Any":
visit_class(datum, ("File", "Directory"), _capture_files)
# Position to front of the sort key
if binding:
for bi in bindings:
bi["position"] = cast(List[int], binding["position"]) + cast(
List[int], bi["position"]
)
bindings.append(binding)
return bindings
def tostr(self, value: Union[MutableMapping[str, str], Any]) -> str:
if isinstance(value, MutableMapping) and value.get("class") in (
"File",
"Directory",
):
if "path" not in value:
raise WorkflowException(
'{} object missing "path": {}'.format(value["class"], value)
)
return value["path"]
else:
return str(value)
def generate_arg(self, binding: CWLObjectType) -> List[str]:
value = binding.get("datum")
if "valueFrom" in binding:
with SourceLine(
binding,
"valueFrom",
WorkflowException,
_logger.isEnabledFor(logging.DEBUG),
):
value = self.do_eval(cast(str, binding["valueFrom"]), context=value)
prefix = cast(Optional[str], binding.get("prefix"))
sep = binding.get("separate", True)
if prefix is None and not sep:
with SourceLine(
binding,
"separate",
WorkflowException,
_logger.isEnabledFor(logging.DEBUG),
):
raise WorkflowException(
"'separate' option can not be specified without prefix"
)
argl = [] # type: MutableSequence[CWLOutputType]
if isinstance(value, MutableSequence):
if binding.get("itemSeparator") and value:
itemSeparator = cast(str, binding["itemSeparator"])
argl = [itemSeparator.join([self.tostr(v) for v in value])]
elif binding.get("valueFrom"):
value = [self.tostr(v) for v in value]
return cast(List[str], ([prefix] if prefix else [])) + cast(
List[str], value
)
elif prefix and value:
return [prefix]
else:
return []
elif isinstance(value, MutableMapping) and value.get("class") in (
"File",
"Directory",
):
argl = cast(MutableSequence[CWLOutputType], [value])
elif isinstance(value, MutableMapping):
return [prefix] if prefix else []
elif value is True and prefix:
return [prefix]
elif value is False or value is None or (value is True and not prefix):
return []
else:
argl = [value]
args = []
for j in argl:
if sep:
args.extend([prefix, self.tostr(j)])
else:
args.append(self.tostr(j) if prefix is None else prefix + self.tostr(j))
return [a for a in args if a is not None]
def do_eval(
self,
ex: Optional[CWLOutputType],
context: Optional[Any] = None,
recursive: bool = False,
strip_whitespace: bool = True,
) -> Optional[CWLOutputType]:
if recursive:
if isinstance(ex, MutableMapping):
return {k: self.do_eval(v, context, recursive) for k, v in ex.items()}
if isinstance(ex, MutableSequence):
return [self.do_eval(v, context, recursive) for v in ex]
resources = self.resources
if self.resources and "cores" in self.resources:
cores = resources["cores"]
if not isinstance(cores, str):
resources = copy.copy(resources)
resources["cores"] = int(math.ceil(cores))
return expression.do_eval(
ex,
self.job,
self.requirements,
self.outdir,
self.tmpdir,
resources,
context=context,
timeout=self.timeout,
debug=self.debug,
js_console=self.js_console,
force_docker_pull=self.force_docker_pull,
strip_whitespace=strip_whitespace,
cwlVersion=self.cwlVersion,
)
| apache-2.0 | 6,632,848,062,089,828,000 | 37.181126 | 127 | 0.467411 | false |
krischer/prov | prov/tests/test_model.py | 1 | 5958 | """
Created on Jan 25, 2012
@author: Trung Dong Huynh
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import unittest
import logging
import os
from prov.model import ProvDocument, ProvException
from prov.tests import examples
from prov.tests.attributes import TestAttributesBase
from prov.tests.qnames import TestQualifiedNamesBase
from prov.tests.statements import TestStatementsBase
from prov.tests.utility import BaseTestCase, RoundTripTestCase
logger = logging.getLogger(__name__)
EX_URI = 'http://www.example.org'
class TestExamplesBase(object):
"""This is the base class for testing support for all the examples provided in prov.tests.examples.
It is not runnable and needs to be included in a subclass of RoundTripTestCase.
"""
def test_all_examples(self):
counter = 0
for name, graph in examples.tests:
counter += 1
logger.info('%d. Testing the %s example', counter, name)
g = graph()
self.assertRoundTripEquivalence(g)
class TestLoadingProvToolboxJSON(BaseTestCase):
def setUp(self):
self.json_path = os.path.dirname(os.path.abspath(__file__)) + '/json/'
filenames = os.listdir(self.json_path)
self.fails = []
for filename in filenames:
if filename.endswith('.json'):
with open(self.json_path + filename) as json_file:
try:
g1 = ProvDocument.deserialize(json_file)
json_str = g1.serialize(indent=4)
g2 = ProvDocument.deserialize(content=json_str)
self.assertEqual(g1, g2, 'Round-trip JSON encoding/decoding failed: %s.' % filename)
except:
self.fails.append(filename)
def test_loading_all_json(self):
# self.assertFalse(fails, 'Failed to load/round-trip %d JSON files (%s)' % (len(fails), ', '.join(fails)))
# Code for debugging the failed tests
for filename in self.fails:
# Reload the failed files
filepath = self.json_path + filename
# os.rename(json_path + filename, json_path + filename + '-fail')
with open(filepath) as json_file:
logger.info("Loading %s...", filepath)
g1 = ProvDocument.deserialize(json_file)
json_str = g1.serialize(indent=4)
g2 = ProvDocument.deserialize(content=json_str)
self.assertEqual(g1, g2, 'Round-trip JSON encoding/decoding failed: %s.' % filename)
class TestFlattening(BaseTestCase):
def test_flattening(self):
for name, graph in examples.tests:
logger.info('Testing flattening of the %s example', name)
document = graph()
flattened = document.flattened()
flattened_records = set(flattened.get_records())
# counting all the records:
n_records = 0
for record in document.get_records():
n_records += 1
self.assertIn(record, flattened_records)
for bundle in document.bundles:
for record in bundle.get_records():
n_records += 1
self.assertIn(record, flattened_records)
self.assertEqual(n_records, len(flattened.get_records()))
class TestUnification(BaseTestCase):
def test_unifying(self):
# This is a very trivial test just to exercise the unified() function
# TODO: Create a proper unification test
json_path = os.path.dirname(os.path.abspath(__file__)) + '/unification/'
filenames = os.listdir(json_path)
for filename in filenames:
if not filename.endswith('.json'):
continue
filepath = json_path + filename
with open(filepath) as json_file:
logger.info('Testing unifying: %s', filename)
logger.debug("Loading %s...", filepath)
document = ProvDocument.deserialize(json_file)
flattened = document.flattened()
unified = flattened.unified()
self.assertLess(len(unified.get_records()), len(flattened.get_records()))
class TestBundleUpdate(BaseTestCase):
def test_bundle_update_simple(self):
doc = ProvDocument()
doc.set_default_namespace(EX_URI)
b1 = doc.bundle('b1')
b1.entity('e')
b2 = doc.bundle('b2')
b2.entity('e')
self.assertRaises(ProvException, lambda: b1.update(1))
self.assertRaises(ProvException, lambda: b1.update(doc))
b1.update(b2)
self.assertEqual(len(b1.get_records()), 2)
def test_document_update_simple(self):
d1 = ProvDocument()
d1.set_default_namespace(EX_URI)
d1.entity('e')
b1 = d1.bundle('b1')
b1.entity('e')
d2 = ProvDocument()
d2.set_default_namespace(EX_URI)
d2.entity('e')
b1 = d2.bundle('b1')
b1.entity('e')
b2 = d2.bundle('b2')
b2.entity('e')
self.assertRaises(ProvException, lambda: d1.update(1))
d1.update(d2)
self.assertEqual(len(d1.get_records()), 2)
self.assertEqual(len(d1.bundles), 2)
class AllTestsBase(TestExamplesBase, TestStatementsBase, TestAttributesBase, TestQualifiedNamesBase):
"""This is a test to include all available tests.
"""
pass
class RoundTripModelTest(RoundTripTestCase, AllTestsBase):
def assertRoundTripEquivalence(self, prov_doc, msg=None):
"""Exercises prov.model without the actual serialization and PROV-N generation.
"""
provn_content = prov_doc.get_provn()
# Checking for self-equality
self.assertEqual(prov_doc, prov_doc, 'The document is not self-equal:\n' + provn_content)
if __name__ == "__main__":
unittest.main()
| mit | 3,401,653,147,550,091,000 | 34.891566 | 114 | 0.606747 | false |
rocky/python2-trepan | trepan/processor/command/show_subcmd/cmdtrace.py | 1 | 1184 | # -*- coding: utf-8 -*-
# Copyright (C) 2009 Rocky Bernstein
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA.
# Our local modules
from trepan.processor.command import base_subcmd as Mbase_subcmd
class ShowCmdtrace(Mbase_subcmd.DebuggerShowBoolSubcommand):
"""*show cmdtrace**
Show debugger commands before running them
See also:
---------
`set cmdtrace`"""
min_abbrev = 4 # Need at least "show cmdt"
short_help = "Show debugger commands before running them"
pass
| gpl-3.0 | -8,622,917,256,363,378,000 | 34.878788 | 73 | 0.71875 | false |
andymckay/zamboni | mkt/ratings/models.py | 1 | 6186 | import logging
from datetime import datetime, timedelta
from django.core.cache import cache
from django.db import models
import bleach
from celeryutils import task
from tower import ugettext_lazy as _
import amo.models
from mkt.translations.fields import save_signal, TranslatedField
from mkt.users.models import UserProfile
log = logging.getLogger('z.review')
class ReviewManager(amo.models.ManagerBase):
def valid(self):
"""Get all reviews that aren't replies."""
# Use extra because Django wants to do a LEFT OUTER JOIN.
return self.extra(where=['reply_to IS NULL'])
class Review(amo.models.ModelBase):
addon = models.ForeignKey('webapps.Addon', related_name='_reviews')
version = models.ForeignKey('versions.Version', related_name='reviews',
null=True)
user = models.ForeignKey('users.UserProfile', related_name='_reviews_all')
reply_to = models.ForeignKey('self', null=True, unique=True,
related_name='replies', db_column='reply_to')
rating = models.PositiveSmallIntegerField(null=True)
title = TranslatedField(require_locale=False)
body = TranslatedField(require_locale=False)
ip_address = models.CharField(max_length=255, default='0.0.0.0')
editorreview = models.BooleanField(default=False)
flag = models.BooleanField(default=False)
sandbox = models.BooleanField(default=False)
# Denormalized fields for easy lookup queries.
# TODO: index on addon, user, latest
is_latest = models.BooleanField(
default=True, editable=False,
help_text="Is this the user's latest review for the add-on?")
previous_count = models.PositiveIntegerField(
default=0, editable=False,
help_text="How many previous reviews by the user for this add-on?")
objects = ReviewManager()
class Meta:
db_table = 'reviews'
ordering = ('-created',)
def get_url_path(self):
return '/app/%s/ratings/%s' % (self.addon.app_slug, self.id)
@classmethod
def get_replies(cls, reviews):
reviews = [r.id for r in reviews]
qs = Review.objects.filter(reply_to__in=reviews)
return dict((r.reply_to_id, r) for r in qs)
@staticmethod
def post_save(sender, instance, created, **kwargs):
if kwargs.get('raw'):
return
instance.refresh(update_denorm=created)
if created:
# Avoid slave lag with the delay.
check_spam.apply_async(args=[instance.id], countdown=600)
@staticmethod
def post_delete(sender, instance, **kwargs):
if kwargs.get('raw'):
return
instance.refresh(update_denorm=True)
def refresh(self, update_denorm=False):
from . import tasks
if update_denorm:
pair = self.addon_id, self.user_id
# Do this immediately so is_latest is correct. Use default
# to avoid slave lag.
tasks.update_denorm(pair, using='default')
# Review counts have changed, so run the task and trigger a reindex.
tasks.addon_review_aggregates.delay(self.addon_id, using='default')
@staticmethod
def transformer(reviews):
user_ids = dict((r.user_id, r) for r in reviews)
for user in UserProfile.objects.no_cache().filter(id__in=user_ids):
user_ids[user.id].user = user
models.signals.post_save.connect(Review.post_save, sender=Review,
dispatch_uid='review_post_save')
models.signals.post_delete.connect(Review.post_delete, sender=Review,
dispatch_uid='review_post_delete')
models.signals.pre_save.connect(save_signal, sender=Review,
dispatch_uid='review_translations')
# TODO: translate old flags.
class ReviewFlag(amo.models.ModelBase):
SPAM = 'review_flag_reason_spam'
LANGUAGE = 'review_flag_reason_language'
SUPPORT = 'review_flag_reason_bug_support'
OTHER = 'review_flag_reason_other'
FLAGS = (
(SPAM, _(u'Spam or otherwise non-review content')),
(LANGUAGE, _(u'Inappropriate language/dialog')),
(SUPPORT, _(u'Misplaced bug report or support request')),
(OTHER, _(u'Other (please specify)')),
)
review = models.ForeignKey(Review)
user = models.ForeignKey('users.UserProfile', null=True)
flag = models.CharField(max_length=64, default=OTHER,
choices=FLAGS, db_column='flag_name')
note = models.CharField(max_length=100, db_column='flag_notes', blank=True,
default='')
class Meta:
db_table = 'reviews_moderation_flags'
unique_together = (('review', 'user'),)
class Spam(object):
def add(self, review, reason):
reason = 'mkt:review:spam:%s' % reason
try:
reasonset = cache.get('mkt:review:spam:reasons', set())
except KeyError:
reasonset = set()
try:
idset = cache.get(reason, set())
except KeyError:
idset = set()
reasonset.add(reason)
cache.set('mkt:review:spam:reasons', reasonset)
idset.add(review.id)
cache.set(reason, idset)
return True
def reasons(self):
return cache.get('mkt:review:spam:reasons')
@task
def check_spam(review_id, **kw):
spam = Spam()
try:
review = Review.objects.using('default').get(id=review_id)
except Review.DoesNotExist:
log.error('Review does not exist, check spam for review_id: %s'
% review_id)
return
thirty_days = datetime.now() - timedelta(days=30)
others = (Review.objects.no_cache().exclude(id=review.id)
.filter(user=review.user, created__gte=thirty_days))
if len(others) > 10:
spam.add(review, 'numbers')
if (review.body is not None and
bleach.url_re.search(review.body.localized_string)):
spam.add(review, 'urls')
for other in others:
if ((review.title and review.title == other.title) or
review.body == other.body):
spam.add(review, 'matches')
break
| bsd-3-clause | 389,650,824,489,188,000 | 33.558659 | 79 | 0.626253 | false |
stczhc/neupy | neupy/algorithms/__init__.py | 1 | 1182 | from .gd.base import *
from .gd.lev_marq import *
from .gd.quasi_newton import *
from .gd.conjgrad import *
from .gd.hessian import *
from .gd.hessdiag import *
from .gd.rprop import *
from .gd.quickprop import *
from .gd.momentum import *
from .gd.adadelta import *
from .gd.adagrad import *
from .gd.rmsprop import *
from .gd.adam import *
from .gd.adamax import *
from .ensemble.dan import *
from .ensemble.mixture_of_experts import *
from .weights.weight_decay import *
from .weights.weight_elimination import *
from .steps.simple_step_minimization import *
from .steps.search_then_converge import *
from .steps.errdiff import *
from .steps.leak_step import *
from .steps.linear_search import *
from .memory.discrete_hopfield_network import *
from .memory.bam import *
from .memory.cmac import *
from .associative.oja import *
from .associative.hebb import *
from .associative.instar import *
from .associative.kohonen import *
from .competitive.sofm import *
from .competitive.art import *
from .rbfn.pnn import *
from .rbfn.rbf_kmeans import *
from .rbfn.grnn import *
from .linear.lms import *
from .linear.modify_relaxation import *
from .linear.perceptron import *
| mit | 7,409,837,597,486,790,000 | 24.695652 | 47 | 0.754653 | false |
lucacioria/socialplus-prototype | DJANGO_GAE/socialplus/routines/sync_people.py | 1 | 2291 | # -*- coding: UTF-8 -*-
import httplib2
import json
import logging
from socialplus.utils import *
from socialplus.api import create_plus_service
from socialplus.routines import update_progress, mark_as_completed
from socialplus.data.people import User, save_person
from socialplus.data.domain import Domain
from google.appengine.api import search
from google.appengine.ext import ndb
def _sync_person_profile(user):
plus = create_plus_service(user.primary_email)
statistics = {
"is_person": False,
}
# update user profile
try:
person_api = plus.people().get(userId=user.primary_email).execute()
    except: # TODO: restrict to the right exception (HttpError 404); where the hell does it get imported from
return statistics
person = save_person(person_api, user)
statistics["is_person"] = True
return statistics
def sync_people(task):
statistics = {
"total_users": 0,
"total_people": 0,
}
# get domain for progress reasons (know total num of users)
domain = ndb.Key(Domain,"main").get()
# batch size of user fetch
batch_size = 10
update_progress(task, "\nstarting update of all Domain users G+ profiles..\n", 0, 100)
n = 0
while True:
if task.sync_people_org_unit_path != None and len(task.sync_people_org_unit_path) > 0:
q = User.query(User.org_unit_path==task.sync_people_org_unit_path).fetch(limit=batch_size, offset=n*batch_size)
else:
q = User.query().fetch(limit=batch_size, offset=n*batch_size)
for user in q:
statistics["total_users"] += 1
person_statistics = _sync_person_profile(user)
if person_statistics["is_person"]:
statistics["total_people"] += 1
update_progress(task, user.primary_email + ", ", statistics["total_users"], domain.user_count)
else:
update_progress(task, ".", statistics["total_users"], domain.user_count)
if len(q) == batch_size:
n += 1
else:
break
mark_as_completed(task, "\n" + str(statistics["total_people"]) + " user profiles synced, out of " + \
str(statistics["total_users"]) + " users in the domain\n")
domain.person_count = statistics["total_people"]
domain.put()
| unlicense | -7,203,887,517,442,140,000 | 36.557377 | 123 | 0.637713 | false |
ArionMiles/MIS-Bot | mis_bot/scraper/spiders/profile_spider.py | 1 | 4860 | from os import environ
import base64
from multiprocessing import Process, Queue
from scrapy.spiders.init import InitSpider
from scrapy.http import Request, FormRequest
from scrapy_splash import SplashRequest
import scrapy.crawler as crawler
from twisted.internet import reactor
from misbot.mis_utils import solve_captcha
class ProfileSpider(InitSpider):
"""Take screenshot of ``http://report.aldel.org/student/test_marks_report.php``
and send it to the user via :py:class:`scraper.pipelines.ProfileScreenshotPipeline`
:param InitSpider: Base Spider with initialization facilities
:type InitSpider: Spider
"""
name = 'profile'
allowed_domains = ['report.aldel.org']
login_page = 'http://report.aldel.org/student_page.php'
start_urls = ['http://report.aldel.org/student/view_profile.php']
def __init__(self, username, password, chatID, *args, **kwargs):
super(ProfileSpider, self).__init__(*args, **kwargs)
self.username = username
self.password = password
self.chatID = chatID
def init_request(self):
"""This function is called before crawling starts."""
return Request(url=self.login_page, callback=self.login)
def login(self, response):
"""Generate a login request."""
session_id = str(response.headers.getlist('Set-Cookie')[0].decode().split(';')[0].split("=")[1])
captcha_answer = solve_captcha(session_id)
self.logger.info("Captcha Answer: {}".format(captcha_answer))
return FormRequest.from_response(response,
formdata={'studentid': self.username, 'studentpwd': self.password, 'captcha_code':captcha_answer},
callback=self.check_login_response)
def check_login_response(self, response):
"""Check the response returned by a login request to see if we are
successfully logged in."""
if self.username in response.body.decode():
self.logger.info("Login Successful!")
# Now the crawling can begin..
return self.initialized()
else:
self.logger.warning("Login failed! Check site status and credentials.")
# Something went wrong, we couldn't log in, so nothing happens.
def parse(self, response):
"""Send a SplashRequest and forward the response to :py:func:`parse_result`"""
url = self.start_urls[0]
splash_args = {
'html': 1,
'png': 1,
'wait':0.1,
'render_all':1
}
self.logger.info("Taking snapshot of Test Report for {}...".format(self.username))
yield SplashRequest(url, self.parse_result, endpoint='render.json', args=splash_args)
def parse_result(self, response):
"""Downloads and saves the attendance report in ``files/<Student_ID>_profile.png``
format.
"""
imgdata = base64.b64decode(response.data['png'])
filename = 'files/{}_profile.png'.format(self.username)
with open(filename, 'wb') as f:
f.write(imgdata)
self.logger.info("Saved student profile as: {}_profile.png".format(self.username))
def scrape_profile(username, password, chatID):
"""Run the spider multiple times, without hitting ``ReactorNotRestartable`` exception. Forks own process.
:param username: student's PID (format: XXXNameXXXX)
where X - integers
:type username: str
:param password: student's password for student portal
:type password: str
:param chatID: 9-Digit unique user ID
:type chatID: str
"""
def f(q):
try:
runner = crawler.CrawlerRunner({
'ITEM_PIPELINES': {'scraper.pipelines.ProfileScreenshotPipeline':300,},
'DOWNLOADER_MIDDLEWARES': {'scrapy_splash.SplashCookiesMiddleware': 723,
'scrapy_splash.SplashMiddleware': 725,
'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 810,},
'SPLASH_URL':environ['SPLASH_INSTANCE'],
'SPIDER_MIDDLEWARES':{'scrapy_splash.SplashDeduplicateArgsMiddleware': 100,},
'DUPEFILTER_CLASS':'scrapy_splash.SplashAwareDupeFilter',
'USER_AGENT': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36',
})
deferred = runner.crawl(ProfileSpider, username=username, password=password, chatID=chatID)
deferred.addBoth(lambda _: reactor.stop())
reactor.run()
q.put(None)
except Exception as e:
q.put(e)
q = Queue()
p = Process(target=f, args=(q,))
p.start()
result = q.get()
p.join()
if result is not None:
raise result
| mit | -7,269,594,011,955,169,000 | 41.26087 | 148 | 0.626955 | false |
dollarcoins/source | contrib/bitrpc/bitrpc.py | 1 | 7835 | from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:8146")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:8146")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Bitcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Bitcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
			print access.getwork(data)
except:
			print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported" | mit | -4,328,271,154,730,532,400 | 23.185185 | 79 | 0.661774 | false |
americanpezza/SecureBox | src/sync/actions.py | 1 | 6989 | import os, traceback, textwrap, shutil
from settings import APP_PATH
from dropbox.rest import ErrorResponse
from repo.dropbox.wrapper import DropboxWrapper
from cStringIO import StringIO
from crypto.file import FileMeta
from crypto.exceptions import AuthenticationException
__author__ = 'pezza'
class ActionFailedException(Exception):
pass
class FileAction(DropboxWrapper):
def __init__(self, cl, *args, **kwargs):
self.action = cl
self.args = args
self.kwargs = kwargs
self.fileObj = None
self.metas = []
def execute(self):
self.setup()
def setup(self):
self.fileObj = self.action(*self.args, **self.kwargs)
def getMetas(self):
return self.metas
def removeLocal(self, file):
try:
os.unlink(file)
print "Removed %s" % file
rootFolder = os.path.dirname(file)
if rootFolder != APP_PATH:
files = os.listdir(rootFolder)
if len(files) == 0:
os.rmdir(rootFolder)
print "Removed empty folder %s" % rootFolder
except Exception, e:
print "Cannot remove %s:%s" % ( file, e )
#print traceback.format_exc()
class DeleteAction(FileAction):
def __init__(self, meta):
FileAction.__init__(self, None)
self.metas.append(meta)
def setup(self):
pass
def execute(self, client, local_index):
pass
class DeleteRemoteFromMetaAction(DeleteAction):
def __init__(self, meta, path):
DeleteAction.__init__(self, meta)
self.path = path
def execute(self, client, local_index):
FileAction.execute(self)
print "Deleting %s remotely" % self.path
# get the remote "view" on this local path
try:
self.removeRemote(self.metas[0].asUrl(), client)
local_index.removeFile(self.path)
print "* index updated"
except ErrorResponse, er:
status = er.status
# if the remote file is gone already, remove it from the index
if status == 404:
local_index.removeFile(self.path)
print "* file has been removed already, updated index"
class DeleteLocalAction(DeleteAction):
def __init__(self, root, path):
self.file = os.path.join(root, path)
self.path = path
self.metas = []
def execute(self, client, local_index):
FileAction.execute(self)
print "Deleting %s (complete path is %s)" % (self.path, self.file)
self.removeLocal(self.file)
self.metas.append( local_index.getFileMeta(self.path) )
local_index.removeFile(self.path)
print "* index updated"
class UploadAction(FileAction):
def __init__(self, cl, remotePath, currentRemotePath, *args, **kwargs):
FileAction.__init__(self, cl, *args, **kwargs)
self.remotePath = remotePath
self.currentRemotePath = currentRemotePath
def execute(self, client, local_index):
FileAction.execute(self)
self.metas.append(self.fileObj.getFileMeta())
fileLength = os.path.getsize(self.fileObj.getFileFullPath())
print "Uploading %s" % self.fileObj.getFilePath()
# upoad in one go if the file is small
if fileLength < 8192:
print "* using single chunk method"
self.checkCurrentRemotePath(client, local_index)
self.upload(client, self.remotePath)
else:
# If the file has been removed, this will cause a deadlock,
# since actions cannot fail.
# TODO Investigate ways to lock the file before adding the action
print "* using streaming method"
uploader = self.uploadChunks(client)
self.checkCurrentRemotePath(client, local_index)
print "* upload complete, moving to final destination"
self.finalizeUpload(uploader, self.remotePath)
# update index
local_index.addFile(self.fileObj.getFilePath(), self.fileObj.getFileMeta().getMeta())
print "* index updated"
def checkCurrentRemotePath(self, client, local_index):
# Remove the remotely stored version before uploading a new one
# Note that the fileMeta stored in the fileObj is *not* the
# one used remotely, due to encryption/salting
# In other words, if you don't remove the "old" version before uploading a new one
# you'll end up with 2 encrypted version of the same file!
if self.currentRemotePath is not None:
print "* removing remote copy before uploading a more recent version"
try:
self.removeRemote(self.currentRemotePath, client)
local_index.removeFile(self.fileObj.getFilePath())
oldMeta = "".join(self.currentRemotePath.split('/'))
# return the additional meta, so that we'll keep it into account
# when handling changes next time
self.metas.append(FileMeta(oldMeta.encode("utf8")))
except ErrorResponse, er:
status = er.status
# generic error
if status == 400 or status == 406:
print traceback.format_exc()
# not found
elif status == 404:
local_index.removeFile(self.fileObj.getFilePath())
print "* remote copy has been deleted already, updating local index"
else:
print traceback.format_exc()
class DownloadAction(FileAction):
def __init__(self, cl, remotePath, *args, **kwargs):
FileAction.__init__(self, cl, *args, **kwargs)
self.remotePath = remotePath
def execute(self, client, local_index):
FileAction.execute(self)
self.meta = self.fileObj.getFileMeta()
print "Downloading %s" % self.fileObj.getFilePath()
f = self.fileObj.getFileFullPath()
if os.path.isfile(f):
self.removeLocal(f)
local_index.removeFile(self.fileObj.getFilePath())
print "* removed old copy"
try:
self.download(client, self.fileObj.getFilePath(), self.remotePath )
print "* download complete"
# update indexes
local_index.addFile(self.fileObj.getFilePath(), self.fileObj.getFileMeta().getMeta())
print "* index updated"
except ErrorResponse, er:
status = er.status
if status == 404:
pass
else:
raise er
except AuthenticationException, e:
# remove any file fragment
self.removeLocal(self.fileObj.getFileFullPath())
# remove file from index
local_index.removeFile(self.fileObj.getFilePath())
print "* WARNING: failed authentication, file invalid. Removed from filesystem"
| mit | -4,650,268,618,778,500,000 | 34.120603 | 97 | 0.601087 | false |
osu-cass/working-waterfronts-api | working_waterfronts/working_waterfronts_api/tests/views/test_pois_categories.py | 1 | 43228 | from django.test import TestCase
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
import json
class POIsCategoriesTestCase(TestCase):
fixtures = ['test_fixtures']
def setUp(self):
self.maxDiff = None
self.expected_json = """
{
"error": {
"status": false,
"name": null,
"text": null,
"debug": null,
"level": null
},
"pointsofinterest": [
{
"street": "123 Fake St",
"alt_name": "",
"contact_name": "",
"facts": "It's a lighthouse",
"lng": -124.10534,
"id": 1,
"city": "Newport",
"zip": "11234",
"hazards": [
{
"name": "Falling Rocks",
"description": "If these fall on you, you're dead.",
"id": 1
}
],
"ext": {
},
"state": "Oregon",
"email": "",
"website": "",
"description": "A pretty nice lighthouse",
"phone": null,
"lat": 43.966874,
"categories": [
{
"category": "Cool Stuff",
"id": 1
}
],
"videos": [
{
"caption": "Traveling at the speed of light!",
"link": "http://www.youtube.com/watch?v=efgDdSWDg0g",
"name": "A Starship"
}
],
"images": [
{
"caption": "Woof!",
"link": "/media/dog.jpg",
"name": "A dog"
}
],
"name": "Newport Lighthouse",
"created": "2014-08-08T23:27:05.568Z",
"modified": "2014-08-08T23:27:05.568Z",
"location_description": "out on the cape over there",
"history": "It was built at some time in the past"
}
]
}"""
self.limited_pois_error = """
{
"error": {
"status": false,
"name": null,
"text": null,
"debug": null,
"level": null
}
}"""
def test_url_endpoint(self):
url = reverse('pois-categories', kwargs={'id': '1'})
self.assertEqual(url, '/1/pois/categories/1')
def test_no_location_parameter(self):
response = self.client.get(
reverse('pois-categories', kwargs={'id': '1'})).content
parsed_answer = json.loads(response)
expected_answer = json.loads(self.expected_json)
self.assertEqual(parsed_answer, expected_answer)
def test_limit_parameter(self):
response = self.client.get(
"%s?limit=1" % reverse(
'pois-categories', kwargs={'id': '2'})
).content
parsed_answer = json.loads(response)
expected_error = json.loads(self.limited_pois_error)
self.assertEqual(parsed_answer['error'], expected_error['error'])
self.assertEqual(len(parsed_answer['pointsofinterest']), 1)
class POIsCategoriesLocationTestCase(TestCase):
"""
Test whether the /pois/categories/<id> view returns the correct results
when given a coordinate to center on.
This is an individual class to allow the use of different fixture sets.
"""
fixtures = ['location_fixtures']
# These tests are made assuming a proximity of 20. If this default value
# is changed, then the tests would break without overriding it.
@override_settings(DEFAULT_PROXIMITY='20')
def setUp(self):
self.maxDiff = None
# No POIs. This is the return for location queries from
# the middle of nowhere.
self.expected_no_pois = """
{
"error": {
"debug": "",
"status": true,
"level": "Information",
"text": "No PointsOfInterest found for category 1",
"name": "No PointsOfInterest"
},
"pointsofinterest": []
}"""
# Nearby POIs for category 1.
# This JSON contains the two halibut stores in Newport and Waldport,
# but not Portland. This is the return for a good coordinates.
self.expected_cat1 = """
{
"error": {
"level": null,
"status": false,
"name": null,
"debug": null,
"text": null
},
"pointsofinterest": [
{
"videos": [
{
"caption": "Traveling at the speed of light!",
"link": "http://www.youtube.com/watch?v=efgDdSWDg0g",
"name": "A Starship"
}
],
"images": [
{
"caption": "Meow!",
"link": "/media/cat.jpg",
"name": "A cat"
}
],
"street": "146 SE Bay Blvd",
"alt_name": "",
"contact_name": "",
"facts": "",
"lng": -124.050122,
"city": "Newport",
"zip": "97365",
"hazards": [
{
"id": 1,
"name": "Falling Rocks",
"description": "If these fall on you, you're dead."
},
{
"id": 3,
"name": "Falling Rocks 3",
"description": "If these fall on you, you're dead."
},
{
"id": 5,
"name": "Falling Rocks 5",
"description": "If these fall on you, you're dead."
}
],
"location_description": "Located on Bay Blvd in Newport",
"id": 3,
"state": "OR",
"email": "",
"website": "",
"description": "Fake Newport Tuna",
"phone": null,
"lat": 44.631592,
"categories": [
{
"category": "Cool Stuff",
"id": 1
},
{
"category": "Fish Shops",
"id": 3
},
{
"category": "Large Obelisks",
"id": 5
}
],
"name": "Newport Tuna",
"created": "2014-08-08T23:27:05.568Z",
"modified": "2014-08-08T23:27:05.568Z",
"ext": {
},
"history": ""
},
{
"videos": [
{
"caption": "Traveling at the speed of light!",
"link": "http://www.youtube.com/watch?v=efgDdSWDg0g",
"name": "A Starship"
}
],
"images": [
{
"caption": "Meow!",
"link": "/media/cat.jpg",
"name": "A cat"
}
],
"street": "522 NW Spring St",
"alt_name": "",
"contact_name": "",
"facts": "",
"lng": -124.066166,
"city": "Waldport",
"zip": "97394",
"hazards": [
{
"id": 1,
"name": "Falling Rocks",
"description": "If these fall on you, you're dead."
},
{
"id": 3,
"name": "Falling Rocks 3",
"description": "If these fall on you, you're dead."
},
{
"id": 5,
"name": "Falling Rocks 5",
"description": "If these fall on you, you're dead."
}
],
"location_description": "Located on Spring St in Waldport",
"id": 5,
"state": "OR",
"email": "",
"website": "",
"description": "Fake Waldport Tuna",
"phone": null,
"lat": 44.427761,
"categories": [
{
"category": "Cool Stuff",
"id": 1
},
{
"category": "Fish Shops",
"id": 3
},
{
"category": "Large Obelisks",
"id": 5
}
],
"name": "Waldport Tuna",
"created": "2014-08-08T23:27:05.568Z",
"modified": "2014-08-08T23:27:05.568Z",
"ext": {
},
"history": ""
}]
}"""
# Nearby pois for cat1, with 50 mile range.
# This JSON contains the POIs in Newport, Waldport, and
# Pacific City, but not Portland. This is the return for a good
# coordinates.
self.expected_cat1_extended = """
{
"error": {
"debug": null,
"status": false,
"text": null,
"name": null,
"level": null
},
"pointsofinterest": [
{
"videos": [
{
"caption": "Traveling at the speed of light!",
"link": "http://www.youtube.com/watch?v=efgDdSWDg0g",
"name": "A Starship"
}
],
"images": [
{
"caption": "Meow!",
"link": "/media/cat.jpg",
"name": "A cat"
}
],
"street": "146 SE Bay Blvd",
"alt_name": "",
"contact_name": "",
"facts": "",
"lng": -124.050122,
"city": "Newport",
"zip": "97365",
"hazards": [
{
"id": 1,
"name": "Falling Rocks",
"description": "If these fall on you, you're dead."
},
{
"id": 3,
"name": "Falling Rocks 3",
"description": "If these fall on you, you're dead."
},
{
"id": 5,
"name": "Falling Rocks 5",
"description": "If these fall on you, you're dead."
}
],
"location_description": "Located on Bay Blvd in Newport",
"id": 3,
"state": "OR",
"email": "",
"website": "",
"description": "Fake Newport Tuna",
"phone": null,
"lat": 44.631592,
"categories": [
{
"category": "Cool Stuff",
"id": 1
},
{
"category": "Fish Shops",
"id": 3
},
{
"category": "Large Obelisks",
"id": 5
}
],
"name": "Newport Tuna",
"created": "2014-08-08T23:27:05.568Z",
"modified": "2014-08-08T23:27:05.568Z",
"ext": {
},
"history": ""
},
{
"videos": [
{
"caption": "Traveling at the speed of light!",
"link": "http://www.youtube.com/watch?v=efgDdSWDg0g",
"name": "A Starship"
}
],
"images": [
{
"caption": "Meow!",
"link": "/media/cat.jpg",
"name": "A cat"
}
],
"street": "522 NW Spring St",
"alt_name": "",
"contact_name": "",
"facts": "",
"lng": -124.066166,
"city": "Waldport",
"zip": "97394",
"hazards": [
{
"id": 1,
"name": "Falling Rocks",
"description": "If these fall on you, you're dead."
},
{
"id": 3,
"name": "Falling Rocks 3",
"description": "If these fall on you, you're dead."
},
{
"id": 5,
"name": "Falling Rocks 5",
"description": "If these fall on you, you're dead."
}
],
"location_description": "Located on Spring St in Waldport",
"id": 5,
"state": "OR",
"email": "",
"website": "",
"description": "Fake Waldport Tuna",
"phone": null,
"lat": 44.427761,
"categories": [
{
"category": "Cool Stuff",
"id": 1
},
{
"category": "Fish Shops",
"id": 3
},
{
"category": "Large Obelisks",
"id": 5
}
],
"name": "Waldport Tuna",
"created": "2014-08-08T23:27:05.568Z",
"modified": "2014-08-08T23:27:05.568Z",
"ext": {
},
"history": ""
},
{
"videos": [
{
"caption": "Traveling at the speed of light!",
"link": "http://www.youtube.com/watch?v=efgDdSWDg0g",
"name": "A Starship"
}
],
"facts": "",
"street": "35650 Roger Ave",
"alt_name": "",
"contact_name": "",
"images": [
{
"caption": "Meow!",
"link": "/media/cat.jpg",
"name": "A cat"
}
],
"id": 7,
"city": "Cloverdale",
"zip": "97112",
"hazards": [
{
"id": 1,
"name": "Falling Rocks",
"description": "If these fall on you, you're dead."
},
{
"id": 3,
"name": "Falling Rocks 3",
"description": "If these fall on you, you're dead."
},
{
"id": 5,
"name": "Falling Rocks 5",
"description": "If these fall on you, you're dead."
}
],
"ext": {
},
"state": "OR",
"email": "",
"website": "",
"description": "Fake Pacific City Tuna",
"phone": null,
"lat": 45.197105,
"categories": [
{
"category": "Cool Stuff",
"id": 1
},
{
"category": "Fish Shops",
"id": 3
},
{
"category": "Large Obelisks",
"id": 5
}
],
"name": "Pacific City Tuna",
"created": "2014-08-08T23:27:05.568Z",
"modified": "2014-08-08T23:27:05.568Z",
"location_description": "Located on Roger Ave in Pacific City",
"lng": -123.958093,
"history": ""
}
]
}"""
# Nearby POIs for Cat1
# This JSON contains the two halibut stores in Newport and Waldport,
# but not Portland. This is the return for a good coordinates.
self.expected_cat1_bad_limit = """
{
"error": {
"level": "Warning",
"status": true,
"name": "Bad Limit",
"debug": "ValueError: invalid literal for int() with base 10: 'cat'",
"text": "Invalid limit. Returning all results."
},
"pointsofinterest": [
{
"videos": [
{
"caption": "Traveling at the speed of light!",
"link": "http://www.youtube.com/watch?v=efgDdSWDg0g",
"name": "A Starship"
}
],
"images": [
{
"caption": "Meow!",
"link": "/media/cat.jpg",
"name": "A cat"
}
],
"street": "146 SE Bay Blvd",
"alt_name": "",
"contact_name": "",
"facts": "",
"lng": -124.050122,
"city": "Newport",
"zip": "97365",
"hazards": [
{
"id": 1,
"name": "Falling Rocks",
"description": "If these fall on you, you're dead."
},
{
"id": 3,
"name": "Falling Rocks 3",
"description": "If these fall on you, you're dead."
},
{
"id": 5,
"name": "Falling Rocks 5",
"description": "If these fall on you, you're dead."
}
],
"location_description": "Located on Bay Blvd in Newport",
"id": 3,
"state": "OR",
"email": "",
"website": "",
"description": "Fake Newport Tuna",
"phone": null,
"lat": 44.631592,
"categories": [
{
"category": "Cool Stuff",
"id": 1
},
{
"category": "Fish Shops",
"id": 3
},
{
"category": "Large Obelisks",
"id": 5
}
],
"name": "Newport Tuna",
"created": "2014-08-08T23:27:05.568Z",
"modified": "2014-08-08T23:27:05.568Z",
"ext": {
},
"history": ""
},
{
"videos": [
{
"caption": "Traveling at the speed of light!",
"link": "http://www.youtube.com/watch?v=efgDdSWDg0g",
"name": "A Starship"
}
],
"images": [
{
"caption": "Meow!",
"link": "/media/cat.jpg",
"name": "A cat"
}
],
"street": "522 NW Spring St",
"alt_name": "",
"contact_name": "",
"facts": "",
"lng": -124.066166,
"city": "Waldport",
"zip": "97394",
"hazards": [
{
"id": 1,
"name": "Falling Rocks",
"description": "If these fall on you, you're dead."
},
{
"id": 3,
"name": "Falling Rocks 3",
"description": "If these fall on you, you're dead."
},
{
"id": 5,
"name": "Falling Rocks 5",
"description": "If these fall on you, you're dead."
}
],
"location_description": "Located on Spring St in Waldport",
"id": 5,
"state": "OR",
"email": "",
"website": "",
"description": "Fake Waldport Tuna",
"phone": null,
"lat": 44.427761,
"categories": [
{
"category": "Cool Stuff",
"id": 1
},
{
"category": "Fish Shops",
"id": 3
},
{
"category": "Large Obelisks",
"id": 5
}
],
"name": "Waldport Tuna",
"created": "2014-08-08T23:27:05.568Z",
"modified": "2014-08-08T23:27:05.568Z",
"ext": {
},
"history": ""
}]
}"""
self.expected_cat1_limit_1 = """
{
"error": {
"level": null,
"status": false,
"name": null,
"debug": null,
"text": null
},
"pointsofinterest": [
{
"videos": [
{
"caption": "Traveling at the speed of light!",
"link": "http://www.youtube.com/watch?v=efgDdSWDg0g",
"name": "A Starship"
}
],
"images": [
{
"caption": "Meow!",
"link": "/media/cat.jpg",
"name": "A cat"
}
],
"street": "146 SE Bay Blvd",
"alt_name": "",
"contact_name": "",
"facts": "",
"lng": -124.050122,
"city": "Newport",
"zip": "97365",
"hazards": [
{
"id": 1,
"name": "Falling Rocks",
"description": "If these fall on you, you're dead."
},
{
"id": 3,
"name": "Falling Rocks 3",
"description": "If these fall on you, you're dead."
},
{
"id": 5,
"name": "Falling Rocks 5",
"description": "If these fall on you, you're dead."
}
],
"location_description": "Located on Bay Blvd in Newport",
"id": 3,
"state": "OR",
"email": "",
"website": "",
"description": "Fake Newport Tuna",
"phone": null,
"lat": 44.631592,
"categories": [
{
"category": "Cool Stuff",
"id": 1
},
{
"category": "Fish Shops",
"id": 3
},
{
"category": "Large Obelisks",
"id": 5
}
],
"name": "Newport Tuna",
"created": "2014-08-08T23:27:05.568Z",
"modified": "2014-08-08T23:27:05.568Z",
"ext": {
},
"history": ""
}]
}"""
# All POIs for Category 1
# This JSON contains the POIs in Newport, Waldport,
# and Portland. This is the return for a bad coordinates.
self.expected_all_pois_cat1 = """
{
"error": {
"level": "Warning",
"status": true,
"text": "There was an error with the given coordinates \
not_a_latitude, not_a_longitude",
"name": "Bad location",
"debug": "ValueError: String or unicode input unrecognized \
as WKT EWKT, and HEXEWKB."
},
"pointsofinterest": [
{
"id": 1,
"website": "",
"street": "720 SW Broadway",
"city": "Portland",
"zip": "97204",
"location_description": "Located on Broadway in Portland",
"lng": -122.67963,
"state": "OR",
"email": "",
"modified": "2014-08-08T23:27:05.568Z",
"description": "Fake Portland Tuna",
"phone": null,
"lat": 45.518962,
"name": "Portland Tuna",
"created": "2014-08-08T23:27:05.568Z",
"alt_name": "",
"contact_name": "",
"history": "",
"facts": "",
"ext": {
},
"images": [{
"link": "/media/cat.jpg",
"caption": "Meow!",
"name": "A cat"}],
"videos": [{
"link": "http://www.youtube.com/watch?v=efgDdSWDg0g",
"name": "A Starship",
"caption": "Traveling at the speed of light!"}
],
"categories": [
{
"category": "Cool Stuff",
"id": 1
},
{
"category": "Fish Shops",
"id": 3
},
{
"category": "Large Obelisks",
"id": 5
}
],
"hazards": [
{
"name": "Falling Rocks",
"description": "If these fall on you, you're dead.",
"id": 1
},
{
"name": "Falling Rocks 3",
"description": "If these fall on you, you're dead.",
"id": 3
},
{
"name": "Falling Rocks 5",
"description": "If these fall on you, you're dead.",
"id": 5
}
]
},
{
"id": 3,
"website": "",
"street": "146 SE Bay Blvd",
"city": "Newport",
"zip": "97365",
"location_description": "Located on Bay Blvd in Newport",
"lng": -124.050122,
"state": "OR",
"email": "",
"modified": "2014-08-08T23:27:05.568Z",
"description": "Fake Newport Tuna",
"phone": null,
"lat": 44.631592,
"name": "Newport Tuna",
"created": "2014-08-08T23:27:05.568Z",
"alt_name": "",
"contact_name": "",
"history": "",
"facts": "",
"ext": {
},
"images": [{
"link": "/media/cat.jpg",
"caption": "Meow!",
"name": "A cat"}],
"videos": [{
"link": "http://www.youtube.com/watch?v=efgDdSWDg0g",
"name": "A Starship",
"caption": "Traveling at the speed of light!"}
],
"categories": [
{
"category": "Cool Stuff",
"id": 1
},
{
"category": "Fish Shops",
"id": 3
},
{
"category": "Large Obelisks",
"id": 5
}
],
"hazards": [
{
"name": "Falling Rocks",
"description": "If these fall on you, you're dead.",
"id": 1
},
{
"name": "Falling Rocks 3",
"description": "If these fall on you, you're dead.",
"id": 3
},
{
"name": "Falling Rocks 5",
"description": "If these fall on you, you're dead.",
"id": 5
}
]
},
{
"id": 5,
"website": "",
"street": "522 NW Spring St",
"city": "Waldport",
"zip": "97394",
"location_description": "Located on Spring St in Waldport",
"lng": -124.066166,
"state": "OR",
"email": "",
"modified": "2014-08-08T23:27:05.568Z",
"description": "Fake Waldport Tuna",
"phone": null,
"lat": 44.427761,
"name": "Waldport Tuna",
"created": "2014-08-08T23:27:05.568Z",
"alt_name": "",
"contact_name": "",
"history": "",
"facts": "",
"ext": {
},
"images": [{
"link": "/media/cat.jpg",
"caption": "Meow!",
"name": "A cat"}],
"videos": [{
"link": "http://www.youtube.com/watch?v=efgDdSWDg0g",
"name": "A Starship",
"caption": "Traveling at the speed of light!"}
],
"categories": [
{
"category": "Cool Stuff",
"id": 1
},
{
"category": "Fish Shops",
"id": 3
},
{
"category": "Large Obelisks",
"id": 5
}
],
"hazards": [
{
"name": "Falling Rocks",
"description": "If these fall on you, you're dead.",
"id": 1
},
{
"name": "Falling Rocks 3",
"description": "If these fall on you, you're dead.",
"id": 3
},
{
"name": "Falling Rocks 5",
"description": "If these fall on you, you're dead.",
"id": 5
}
]
},
{
"city": "Cloverdale",
"website": "",
"modified": "2014-08-08T23:27:05.568Z",
"description": "Fake Pacific City Tuna",
"zip": "97112",
"created": "2014-08-08T23:27:05.568Z",
"alt_name": "",
"contact_name": "",
"history": "",
"facts": "",
"ext": {
},
"location_description": "Located on Roger Ave in Pacific City",
"lng": -123.958093,
"email": "",
"phone": null,
"state": "OR",
"street": "35650 Roger Ave",
"lat": 45.197105,
"id": 7,
"name": "Pacific City Tuna",
"images": [{
"link": "/media/cat.jpg",
"caption": "Meow!",
"name": "A cat"}],
"videos": [{
"link": "http://www.youtube.com/watch?v=efgDdSWDg0g",
"name": "A Starship",
"caption": "Traveling at the speed of light!"}
],
"categories": [
{
"category": "Cool Stuff",
"id": 1
},
{
"category": "Fish Shops",
"id": 3
},
{
"category": "Large Obelisks",
"id": 5
}
],
"hazards": [
{
"name": "Falling Rocks",
"description": "If these fall on you, you're dead.",
"id": 1
},
{
"name": "Falling Rocks 3",
"description": "If these fall on you, you're dead.",
"id": 3
},
{
"name": "Falling Rocks 5",
"description": "If these fall on you, you're dead.",
"id": 5
}
]
}
]
}"""
# All POIs for category 1 This is the return for a bad proximity with
# good location -- the default proximity of 20 miles.
self.expected_cat1_bad_prox = """
{
"error": {
"level": "Warning",
"status": true,
"text": "There was an error finding PointsOfInterest within cat miles",
"name": "Bad proximity",
"debug": "ValueError: invalid literal for int() with base 10: 'cat'"
},
"pointsofinterest": [
{
"videos": [
{
"caption": "Traveling at the speed of light!",
"link": "http://www.youtube.com/watch?v=efgDdSWDg0g",
"name": "A Starship"
}
],
"images": [
{
"caption": "Meow!",
"link": "/media/cat.jpg",
"name": "A cat"
}
],
"street": "146 SE Bay Blvd",
"alt_name": "",
"contact_name": "",
"facts": "",
"lng": -124.050122,
"city": "Newport",
"zip": "97365",
"hazards": [
{
"id": 1,
"name": "Falling Rocks",
"description": "If these fall on you, you're dead."
},
{
"id": 3,
"name": "Falling Rocks 3",
"description": "If these fall on you, you're dead."
},
{
"id": 5,
"name": "Falling Rocks 5",
"description": "If these fall on you, you're dead."
}
],
"location_description": "Located on Bay Blvd in Newport",
"id": 3,
"state": "OR",
"email": "",
"website": "",
"description": "Fake Newport Tuna",
"phone": null,
"lat": 44.631592,
"categories": [
{
"category": "Cool Stuff",
"id": 1
},
{
"category": "Fish Shops",
"id": 3
},
{
"category": "Large Obelisks",
"id": 5
}
],
"name": "Newport Tuna",
"created": "2014-08-08T23:27:05.568Z",
"modified": "2014-08-08T23:27:05.568Z",
"ext": {
},
"history": ""
},
{
"videos": [
{
"caption": "Traveling at the speed of light!",
"link": "http://www.youtube.com/watch?v=efgDdSWDg0g",
"name": "A Starship"
}
],
"images": [
{
"caption": "Meow!",
"link": "/media/cat.jpg",
"name": "A cat"
}
],
"street": "522 NW Spring St",
"alt_name": "",
"contact_name": "",
"facts": "",
"lng": -124.066166,
"city": "Waldport",
"zip": "97394",
"hazards": [
{
"id": 1,
"name": "Falling Rocks",
"description": "If these fall on you, you're dead."
},
{
"id": 3,
"name": "Falling Rocks 3",
"description": "If these fall on you, you're dead."
},
{
"id": 5,
"name": "Falling Rocks 5",
"description": "If these fall on you, you're dead."
}
],
"location_description": "Located on Spring St in Waldport",
"id": 5,
"state": "OR",
"email": "",
"website": "",
"description": "Fake Waldport Tuna",
"phone": null,
"lat": 44.427761,
"categories": [
{
"category": "Cool Stuff",
"id": 1
},
{
"category": "Fish Shops",
"id": 3
},
{
"category": "Large Obelisks",
"id": 5
}
],
"name": "Waldport Tuna",
"created": "2014-08-08T23:27:05.568Z",
"modified": "2014-08-08T23:27:05.568Z",
"ext": {
},
"history": ""
}]
}"""
# All POIs for category 1. This is the return for a bad coordinates.
self.expected_all_missing_long = """
{
"error": {
"level": "Warning",
"status": true,
"name": "Bad location",
"text": "There was an error with the given coordinates -45.232, None",
"debug": "GEOSException: Error encountered checking Geometry returned \
from GEOS C function \\"GEOSWKTReader_read_r\\"."
},
"pointsofinterest": [
{
"id": 1,
"website": "",
"street": "720 SW Broadway",
"city": "Portland",
"zip": "97204",
"location_description": "Located on Broadway in Portland",
"lng": -122.67963,
"state": "OR",
"email": "",
"modified": "2014-08-08T23:27:05.568Z",
"description": "Fake Portland Tuna",
"phone": null,
"lat": 45.518962,
"name": "Portland Tuna",
"created": "2014-08-08T23:27:05.568Z",
"alt_name": "",
"contact_name": "",
"history": "",
"facts": "",
"ext": {
},
"images": [{
"link": "/media/cat.jpg",
"caption": "Meow!",
"name": "A cat"}],
"videos": [{
"link": "http://www.youtube.com/watch?v=efgDdSWDg0g",
"name": "A Starship",
"caption": "Traveling at the speed of light!"}
],
"categories": [
{
"category": "Cool Stuff",
"id": 1
},
{
"category": "Fish Shops",
"id": 3
},
{
"category": "Large Obelisks",
"id": 5
}
],
"hazards": [
{
"name": "Falling Rocks",
"description": "If these fall on you, you're dead.",
"id": 1
},
{
"name": "Falling Rocks 3",
"description": "If these fall on you, you're dead.",
"id": 3
},
{
"name": "Falling Rocks 5",
"description": "If these fall on you, you're dead.",
"id": 5
}
]
},
{
"id": 3,
"website": "",
"street": "146 SE Bay Blvd",
"city": "Newport",
"zip": "97365",
"location_description": "Located on Bay Blvd in Newport",
"lng": -124.050122,
"state": "OR",
"email": "",
"modified": "2014-08-08T23:27:05.568Z",
"description": "Fake Newport Tuna",
"phone": null,
"lat": 44.631592,
"name": "Newport Tuna",
"created": "2014-08-08T23:27:05.568Z",
"alt_name": "",
"contact_name": "",
"history": "",
"facts": "",
"ext": {
},
"images": [{
"link": "/media/cat.jpg",
"caption": "Meow!",
"name": "A cat"}],
"videos": [{
"link": "http://www.youtube.com/watch?v=efgDdSWDg0g",
"name": "A Starship",
"caption": "Traveling at the speed of light!"}
],
"categories": [
{
"category": "Cool Stuff",
"id": 1
},
{
"category": "Fish Shops",
"id": 3
},
{
"category": "Large Obelisks",
"id": 5
}
],
"hazards": [
{
"name": "Falling Rocks",
"description": "If these fall on you, you're dead.",
"id": 1
},
{
"name": "Falling Rocks 3",
"description": "If these fall on you, you're dead.",
"id": 3
},
{
"name": "Falling Rocks 5",
"description": "If these fall on you, you're dead.",
"id": 5
}
]
},
{
"id": 5,
"website": "",
"street": "522 NW Spring St",
"city": "Waldport",
"zip": "97394",
"location_description": "Located on Spring St in Waldport",
"lng": -124.066166,
"state": "OR",
"email": "",
"modified": "2014-08-08T23:27:05.568Z",
"description": "Fake Waldport Tuna",
"phone": null,
"lat": 44.427761,
"name": "Waldport Tuna",
"created": "2014-08-08T23:27:05.568Z",
"alt_name": "",
"contact_name": "",
"history": "",
"facts": "",
"ext": {
},
"images": [{
"link": "/media/cat.jpg",
"caption": "Meow!",
"name": "A cat"}],
"videos": [{
"link": "http://www.youtube.com/watch?v=efgDdSWDg0g",
"name": "A Starship",
"caption": "Traveling at the speed of light!"}
],
"categories": [
{
"category": "Cool Stuff",
"id": 1
},
{
"category": "Fish Shops",
"id": 3
},
{
"category": "Large Obelisks",
"id": 5
}
],
"hazards": [
{
"name": "Falling Rocks",
"description": "If these fall on you, you're dead.",
"id": 1
},
{
"name": "Falling Rocks 3",
"description": "If these fall on you, you're dead.",
"id": 3
},
{
"name": "Falling Rocks 5",
"description": "If these fall on you, you're dead.",
"id": 5
}
]
},
{
"city": "Cloverdale",
"website": "",
"modified": "2014-08-08T23:27:05.568Z",
"description": "Fake Pacific City Tuna",
"zip": "97112",
"created": "2014-08-08T23:27:05.568Z",
"alt_name": "",
"contact_name": "",
"history": "",
"facts": "",
"ext": {
},
"location_description": "Located on Roger Ave in Pacific City",
"lng": -123.958093,
"email": "",
"phone": null,
"state": "OR",
"street": "35650 Roger Ave",
"lat": 45.197105,
"id": 7,
"name": "Pacific City Tuna",
"images": [{
"link": "/media/cat.jpg",
"caption": "Meow!",
"name": "A cat"}],
"videos": [{
"link": "http://www.youtube.com/watch?v=efgDdSWDg0g",
"name": "A Starship",
"caption": "Traveling at the speed of light!"}
],
"categories": [
{
"category": "Cool Stuff",
"id": 1
},
{
"category": "Fish Shops",
"id": 3
},
{
"category": "Large Obelisks",
"id": 5
}
],
"hazards": [
{
"name": "Falling Rocks",
"description": "If these fall on you, you're dead.",
"id": 1
},
{
"name": "Falling Rocks 3",
"description": "If these fall on you, you're dead.",
"id": 3
},
{
"name": "Falling Rocks 5",
"description": "If these fall on you, you're dead.",
"id": 5
}
]
}
]
}"""
        # All POIs with category 1. This is the response returned for bad coordinates.
self.expected_all_missing_lat = """
{
"error": {
"level": "Warning",
"status": true,
"name": "Bad location",
"text": "There was an error with the given coordinates None, -45.232",
"debug": "GEOSException: Error encountered checking Geometry \
returned from GEOS C function \\"GEOSWKTReader_read_r\\"."
},
"pointsofinterest": [
{
"id": 1,
"website": "",
"street": "720 SW Broadway",
"city": "Portland",
"zip": "97204",
"location_description": "Located on Broadway in Portland",
"lng": -122.67963,
"state": "OR",
"email": "",
"modified": "2014-08-08T23:27:05.568Z",
"description": "Fake Portland Tuna",
"phone": null,
"lat": 45.518962,
"name": "Portland Tuna",
"created": "2014-08-08T23:27:05.568Z",
"alt_name": "",
"contact_name": "",
"history": "",
"facts": "",
"ext": {
},
"images": [{
"link": "/media/cat.jpg",
"caption": "Meow!",
"name": "A cat"}],
"videos": [{
"link": "http://www.youtube.com/watch?v=efgDdSWDg0g",
"name": "A Starship",
"caption": "Traveling at the speed of light!"}
],
"categories": [
{
"category": "Cool Stuff",
"id": 1
},
{
"category": "Fish Shops",
"id": 3
},
{
"category": "Large Obelisks",
"id": 5
}
],
"hazards": [
{
"name": "Falling Rocks",
"description": "If these fall on you, you're dead.",
"id": 1
},
{
"name": "Falling Rocks 3",
"description": "If these fall on you, you're dead.",
"id": 3
},
{
"name": "Falling Rocks 5",
"description": "If these fall on you, you're dead.",
"id": 5
}
]
},
{
"id": 3,
"website": "",
"street": "146 SE Bay Blvd",
"city": "Newport",
"zip": "97365",
"location_description": "Located on Bay Blvd in Newport",
"lng": -124.050122,
"state": "OR",
"email": "",
"modified": "2014-08-08T23:27:05.568Z",
"description": "Fake Newport Tuna",
"phone": null,
"lat": 44.631592,
"name": "Newport Tuna",
"created": "2014-08-08T23:27:05.568Z",
"alt_name": "",
"contact_name": "",
"history": "",
"facts": "",
"ext": {
},
"images": [{
"link": "/media/cat.jpg",
"caption": "Meow!",
"name": "A cat"}],
"videos": [{
"link": "http://www.youtube.com/watch?v=efgDdSWDg0g",
"name": "A Starship",
"caption": "Traveling at the speed of light!"}
],
"categories": [
{
"category": "Cool Stuff",
"id": 1
},
{
"category": "Fish Shops",
"id": 3
},
{
"category": "Large Obelisks",
"id": 5
}
],
"hazards": [
{
"name": "Falling Rocks",
"description": "If these fall on you, you're dead.",
"id": 1
},
{
"name": "Falling Rocks 3",
"description": "If these fall on you, you're dead.",
"id": 3
},
{
"name": "Falling Rocks 5",
"description": "If these fall on you, you're dead.",
"id": 5
}
]
},
{
"id": 5,
"website": "",
"street": "522 NW Spring St",
"city": "Waldport",
"zip": "97394",
"location_description": "Located on Spring St in Waldport",
"lng": -124.066166,
"state": "OR",
"email": "",
"modified": "2014-08-08T23:27:05.568Z",
"description": "Fake Waldport Tuna",
"phone": null,
"lat": 44.427761,
"name": "Waldport Tuna",
"created": "2014-08-08T23:27:05.568Z",
"alt_name": "",
"contact_name": "",
"history": "",
"facts": "",
"ext": {
},
"images": [{
"link": "/media/cat.jpg",
"caption": "Meow!",
"name": "A cat"}],
"videos": [{
"link": "http://www.youtube.com/watch?v=efgDdSWDg0g",
"name": "A Starship",
"caption": "Traveling at the speed of light!"}
],
"categories": [
{
"category": "Cool Stuff",
"id": 1
},
{
"category": "Fish Shops",
"id": 3
},
{
"category": "Large Obelisks",
"id": 5
}
],
"hazards": [
{
"name": "Falling Rocks",
"description": "If these fall on you, you're dead.",
"id": 1
},
{
"name": "Falling Rocks 3",
"description": "If these fall on you, you're dead.",
"id": 3
},
{
"name": "Falling Rocks 5",
"description": "If these fall on you, you're dead.",
"id": 5
}
]
},
{
"city": "Cloverdale",
"website": "",
"modified": "2014-08-08T23:27:05.568Z",
"description": "Fake Pacific City Tuna",
"zip": "97112",
"created": "2014-08-08T23:27:05.568Z",
"alt_name": "",
"contact_name": "",
"history": "",
"facts": "",
"ext": {
},
"location_description": "Located on Roger Ave in Pacific City",
"lng": -123.958093,
"email": "",
"phone": null,
"state": "OR",
"street": "35650 Roger Ave",
"lat": 45.197105,
"id": 7,
"name": "Pacific City Tuna",
"images": [{
"link": "/media/cat.jpg",
"caption": "Meow!",
"name": "A cat"}],
"videos": [{
"link": "http://www.youtube.com/watch?v=efgDdSWDg0g",
"name": "A Starship",
"caption": "Traveling at the speed of light!"}
],
"categories": [
{
"category": "Cool Stuff",
"id": 1
},
{
"category": "Fish Shops",
"id": 3
},
{
"category": "Large Obelisks",
"id": 5
}
],
"hazards": [
{
"name": "Falling Rocks",
"description": "If these fall on you, you're dead.",
"id": 1
},
{
"name": "Falling Rocks 3",
"description": "If these fall on you, you're dead.",
"id": 3
},
{
"name": "Falling Rocks 5",
"description": "If these fall on you, you're dead.",
"id": 5
}
]
}
]
}"""
def test_no_pois_nearby_poi_categories(self):
"""
Test that, when there are no pois, we get an empty list back for the
pois/categories endpoint.
"""
no_poi_data = json.loads(
self.client.get(
'%s?lat=44.015225&lng=-123.016873' % reverse(
'pois-categories', kwargs={'id': '1'})).content)
expected_answer = json.loads(self.expected_no_pois)
self.assertEqual(no_poi_data, expected_answer)
def test_successful_location_by_poi_category(self):
"""
Test that good parameters return poi/category results ordered by
location. There will also be a default limit of 20 miles.
"""
response_cat1 = json.loads(self.client.get(
'%s?lat=44.609079&lng=-124.052538' % reverse('pois-categories',
kwargs={'id': '1'})
).content)
expected_answer = json.loads(self.expected_cat1)
self.assertEqual(response_cat1, expected_answer)
def test_good_limit_by_poi_category(self):
"""
        Test that a valid limit parameter restricts the number of
        poi/category results returned.
"""
response_cat1_limit = json.loads(self.client.get(
'%s?lat=44.609079&lng=-124.052538&limit=1' % reverse(
'pois-categories', kwargs={'id': '1'})).content)
expected_answer = json.loads(self.expected_cat1_limit_1)
self.assertEqual(response_cat1_limit, expected_answer)
def test_limit_larger_than_length_all_categories(self):
"""
Test that a limit larger than the length of the list does not
affect the list.
"""
response_cat1 = json.loads(self.client.get(
'%s?lat=44.609079&lng=-124.052538&limit=10' % reverse(
'pois-categories', kwargs={'id': '1'})).content)
expected_answer = json.loads(self.expected_cat1)
self.assertEqual(response_cat1, expected_answer)
def test_bad_limit_by_poi_category(self):
"""
        Test that a non-numeric limit parameter is handled gracefully by the
        pois/categories endpoint.
"""
response_cat1_limit = json.loads(self.client.get(
'%s?lat=44.609079&lng=-124.052538&limit=cat' % reverse(
'pois-categories', kwargs={'id': '1'})).content)
expected_answer = json.loads(self.expected_cat1_bad_limit)
self.assertEqual(response_cat1_limit, expected_answer)
def test_bad_location_parameters_poi_categories(self):
"""
Test that only one parameter (only lat/only long) returns a Warning,
and that bad parameter values (text) return Warning, for the
pois/categories endpoint.
"""
# Coordinates are not numbers
broken_data = json.loads(self.client.get(
'%s?lat=not_a_latitude&lng=not_a_longitude' % reverse(
'pois-categories', kwargs={'id': '1'})).content)
expected_answer = json.loads(self.expected_all_pois_cat1)
self.assertEqual(broken_data, expected_answer)
# lat is missing
broken_data = json.loads(self.client.get(
'%s?lng=-45.232' % reverse(
'pois-categories', kwargs={'id': '1'})).content)
expected_answer = json.loads(self.expected_all_missing_lat)
self.assertEqual(broken_data, expected_answer)
# long is missing
broken_data = json.loads(self.client.get(
'%s?lat=-45.232' % reverse(
'pois-categories', kwargs={'id': '1'})).content)
expected_answer = json.loads(self.expected_all_missing_long)
self.assertEqual(broken_data, expected_answer)
def test_successful_location_by_poi_category_extended_proximity(self):
"""
Test that good parameters return poi/category results ordered by
location, with an extended proximity of 50 miles. This will include
the Pacific City location.
"""
response_cat1_extended = json.loads(self.client.get(
'%s?lat=44.609079&lng=-124.052538'
'&proximity=50' % reverse(
'pois-categories', kwargs={'id': '1'})).content)
expected_answer = json.loads(self.expected_cat1_extended)
self.assertEqual(response_cat1_extended, expected_answer)
def test_proximity_bad_location_poi_categories(self):
"""
Test that bad location returns a Warning.
"""
# Good proximity, bad location
broken_data = json.loads(self.client.get(
'%s?lat=not_a_latitude&lng=not_a_longitude&proximity=50' % reverse(
'pois-categories', kwargs={'id': '1'})).content)
expected_answer = json.loads(self.expected_all_pois_cat1)
self.assertEqual(broken_data, expected_answer)
def test_bad_proximity_good_location_poi_categories(self):
"""
Test that bad proximity returns a Warning.
"""
broken_data = json.loads(self.client.get(
'%s?lat=44.609079&lng=-124.052538&proximity=cat' % reverse(
'pois-categories', kwargs={'id': '1'})).content)
expected_answer = json.loads(self.expected_cat1_bad_prox)
self.assertEqual(broken_data, expected_answer)
| apache-2.0 | -1,015,574,004,897,092,500 | 22.341253 | 79 | 0.495512 | false |
Didacti/elixir | tests/test_sa_integration.py | 1 | 3367 | """
test integrating Elixir entities with plain SQLAlchemy defined classes
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import six
from sqlalchemy.orm import *
from sqlalchemy import *
from elixir import *
class TestSQLAlchemyToElixir(object):
def setup(self):
metadata.bind = "sqlite://"
def teardown(self):
cleanup_all(True)
def test_simple(self):
class A(Entity):
name = Field(String(60))
# Remember the entity need to be setup before you can refer to it from
# SQLAlchemy.
setup_all(True)
b_table = Table('b', metadata,
Column('id', Integer, primary_key=True),
Column('name', String(60)),
Column('a_id', Integer, ForeignKey(A.id))
)
b_table.create()
class B(object):
pass
mapper(B, b_table, properties={
'a': relation(A)
})
b1 = B()
b1.name = 'b1'
b1.a = A(name='a1')
session.add(b1)
session.commit()
session.close()
b = session.query(B).one()
assert b.a.name == 'a1'
class TestElixirToSQLAlchemy(object):
def setup(self):
metadata.bind = "sqlite://"
def teardown(self):
cleanup_all(True)
def test_m2o(self):
a_table = Table('a', metadata,
Column('id', Integer, primary_key=True),
Column('name', String(60)),
)
a_table.create()
class A(object):
pass
mapper(A, a_table)
class B(Entity):
name = Field(String(60))
a = ManyToOne(A)
setup_all(True)
a1 = A()
a1.name = 'a1'
b1 = B(name='b1', a=a1)
session.add(b1)
session.commit()
session.close()
b = B.query.one()
assert b.a.name == 'a1'
def test_m2o_non_pk_target(self):
a_table = Table('a', metadata,
Column('id', Integer, primary_key=True),
Column('name', String(60), unique=True)
)
a_table.create()
class A(object):
pass
mapper(A, a_table)
class B(Entity):
name = Field(String(60))
a = ManyToOne(A, target_column=['name'])
# currently fails
# c = ManyToOne('C', target_column=['id', 'name'])
# class C(Entity):
# name = Field(String(60), unique=True)
setup_all(True)
a1 = A()
a1.name = 'a1'
b1 = B(name='b1', a=a1)
session.commit()
session.close()
b = B.query.one()
assert b.a.name == 'a1'
# def test_m2m(self):
# a_table = Table('a', metadata,
# Column('id', Integer, primary_key=True),
# Column('name', String(60), unique=True)
# )
# a_table.create()
#
# class A(object):
# pass
#
# mapper(A, a_table)
#
# class B(Entity):
# name = Field(String(60))
# many_a = ManyToMany(A)
#
# setup_all(True)
#
# a1 = A()
# a1.name = 'a1'
# b1 = B(name='b1', many_a=[a1])
#
# session.commit()
# session.close()
#
# b = B.query.one()
#
# assert b.many_a[0].name == 'a1'
| mit | 431,422,153,868,239,550 | 20.722581 | 78 | 0.501931 | false |
bmng-dev/PyBitmessage | src/helper_sql.py | 1 | 1747 | import Queue
import threading
# SQLITE3 is so thread-unsafe that they won't even let you call it from
# different threads using your own locks. SQL objects can only be called from
# one thread.
sqlSubmitQueue = Queue.Queue()
sqlReturnQueue = Queue.Queue()
sqlLock = threading.Lock()
def sqlQuery(sqlStatement, *args):
sqlLock.acquire()
sqlSubmitQueue.put(sqlStatement)
if args == ():
sqlSubmitQueue.put('')
elif type(args[0]) in [list, tuple]:
sqlSubmitQueue.put(args[0])
else:
sqlSubmitQueue.put(args)
queryreturn, rowcount = sqlReturnQueue.get()
sqlLock.release()
return queryreturn
def sqlExecute(sqlStatement, *args):
sqlLock.acquire()
sqlSubmitQueue.put(sqlStatement)
if args == ():
sqlSubmitQueue.put('')
else:
sqlSubmitQueue.put(args)
queryreturn, rowcount = sqlReturnQueue.get()
sqlSubmitQueue.put('commit')
sqlLock.release()
return rowcount
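# Illustrative usage sketch (not part of the original module): both helpers
# assume a separate worker thread is consuming sqlSubmitQueue and replying on
# sqlReturnQueue; the table and column names below are hypothetical.
#   rows = sqlQuery('SELECT somefield FROM sometable WHERE id=?', some_id)
#   changed_rows = sqlExecute('UPDATE sometable SET somefield=? WHERE id=?',
#                             new_value, some_id)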
def sqlStoredProcedure(procName):
sqlLock.acquire()
sqlSubmitQueue.put(procName)
sqlLock.release()
class SqlBulkExecute:
def __enter__(self):
sqlLock.acquire()
return self
def __exit__(self, type, value, traceback):
sqlSubmitQueue.put('commit')
sqlLock.release()
def execute(self, sqlStatement, *args):
sqlSubmitQueue.put(sqlStatement)
if args == ():
sqlSubmitQueue.put('')
else:
sqlSubmitQueue.put(args)
sqlReturnQueue.get()
def query(self, sqlStatement, *args):
sqlSubmitQueue.put(sqlStatement)
if args == ():
sqlSubmitQueue.put('')
else:
sqlSubmitQueue.put(args)
return sqlReturnQueue.get()
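# Illustrative usage sketch (not part of the original module; table and field
# names are hypothetical). SqlBulkExecute holds sqlLock for the whole block and
# commits once on exit, which batches many statements cheaply:
#   with SqlBulkExecute() as sql:
#       for key, value in updates.items():
#           sql.execute('UPDATE sometable SET somefield=? WHERE id=?',
#                       value, key)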
| mit | 7,283,002,600,752,957,000 | 24.318841 | 189 | 0.637092 | false |
joshfalter/alexa-zillow-py | zillow.py | 1 | 2977 | import logging
from pyzillow.pyzillow import ZillowWrapper, GetDeepSearchResults
from flask import Flask
from flask_ask import Ask, request, session, question, statement
zillow_data=ZillowWrapper('YOUR ZILLOW WEBSERVICES ID')
app = Flask(__name__)
ask = Ask(app, '/')
logging.getLogger('flask_ask').setLevel(logging.DEBUG)
ADDRESS_KEY= "ADDRESS"
ZIP_KEY="ZIPCODE"
@ask.launch
def launch():
speech_text = 'Welcome to Zillow. What address would you like to know more about?'
reprompt_speech_text = 'What address would you like to know more about?'
return question(speech_text).reprompt(reprompt_speech_text).simple_card('Launch', speech_text)
@ask.intent('LookUpAddressIntent', mapping={'location': 'Address'})
def change_address(location):
address=location
session.attributes[ADDRESS_KEY] = address
speech_text = 'Okay. What\'s the zipcode for that address?'
return question(speech_text).reprompt(speech_text).simple_card('Address changed to:', address)
@ask.intent('ZipcodeIntent', mapping={'zip': 'Zipcode'})
def change_zip(zip):
zipcode=zip
session.attributes[ZIP_KEY] = zipcode
speech_text = 'Great, now what would you like to know? Say help for a list of commands.'
return question(speech_text).reprompt(speech_text).simple_card('Zip code changed to:', zipcode)
def search_results():
address = session.attributes.get(ADDRESS_KEY)
zipcode = session.attributes.get(ZIP_KEY)
deep_search_response=zillow_data.get_deep_search_results(address, zipcode)
result=GetDeepSearchResults(deep_search_response)
return result
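# Illustrative note (not part of the original file): search_results() reads the
# address and zipcode stored in the session attributes above and returns a
# pyzillow GetDeepSearchResults object whose fields, e.g.
# result.zestimate_amount and result.latitude, are read by the intents below.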
#search with zillow api
#In case they want to stop
@ask.intent('NoIntent')
def no():
speech_text = 'Goodbye.'
return statement(speech_text).simple_card('Goodbye.', speech_text)
@ask.intent('ValueofHomeIntent')
def value_of_home():
speech_text = 'The z-estimate value of your house is $' + search_results().zestimate_amount +' What else would you like to know?'
return question(speech_text).simple_card('Zestimate of Home', speech_text)
@ask.intent('LatitudeIntent')
def latitude_of_home():
speech_text = 'The latitude of the house is ' + search_results().latitude + ' degrees. What else would you like to know?'
    return question(speech_text).simple_card('Latitude of Home', speech_text)
@ask.intent('AMAZON.HelpIntent')
def help():
speech_text = 'I can tell you the following about an address. Latitude, longitude, coordinates, tax value, the year it was built, property size, home size, number of bedrooms and bathrooms, the last year it was sold, the date it was sold, the price it sold for, the z estimate, and the valuation range.'
return question(speech_text).reprompt(speech_text).simple_card('Help: Commands ', speech_text)
@ask.session_ended
def session_ended():
return "", 200
if __name__ == '__main__':
app.run(debug=True)
| mit | -6,332,835,460,785,553,000 | 34.753086 | 307 | 0.701713 | false |
gergo-/pylibjit | examples/classes.py | 1 | 2076 | # pylibjit can be used to statically resolve method calls in object-oriented
# code. A call from a compiled method to a compiled method (in the same or
# in a different class) bypasses all dynamic lookups if the dynamic type of
# the receiver is declared. Note that all instances at runtime must be of
# exactly the declared type, *not* a subclass.
import jit, pylibjit
class Foo:
@pylibjit.compile(return_type=object,
argument_types=[object],
dump_code=False)
def hello(self):
print('hi there from Foo')
# The call self.hello() will be statically bound to Foo.hello because
# - say_hello is compiled;
# - the receiver (self) is declared to be an instance of exactly this
# class (Foo); and
# - Foo.hello is compiled.
# The use of pylibjit.this_class() is a hack that is necessary because the
# name Foo is not visible in the global namespace at this point, only
# after the class definition is finished.
@pylibjit.compile(return_type=object,
argument_types=[pylibjit.this_class()],
dump_code=False)
def say_hello(self):
print('will call Foo.hello')
self.hello()
class Bar:
# This call to Foo.hello will be resolved statically, as above.
@pylibjit.compile(return_type=object,
argument_types=[object, Foo],
dump_code=False)
def say_hello_foo(self, some_foo_object):
print('will call Foo.hello from Bar')
some_foo_object.hello()
def hello(self):
print('hello from Bar')
# This looks like Foo.say_hello, but the call to Bar.hello will *not* be
# resolved statically because Bar.hello is not a compiled function.
@pylibjit.compile(return_type=object,
argument_types=[pylibjit.this_class()],
dump_code=False)
def say_hello(self):
print('will call Bar.hello')
self.hello()
foo = Foo()
bar = Bar()
foo.say_hello()
bar.say_hello_foo(foo)
bar.say_hello()
| gpl-2.0 | 6,963,909,362,068,810,000 | 35.421053 | 78 | 0.632466 | false |
libyal/esedb-kb | tests/catalog_extractor.py | 1 | 2077 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the ESE database catalog extractor."""
import unittest
from esedbrc import catalog_extractor
from tests import test_lib
class TestOutputWriter(object):
"""Test output writer."""
def Close(self):
"""Closes the output writer object."""
return
def Open(self, database_type): # pylint: disable=unused-argument
"""Opens the output writer object.
Args:
database_type (str): ESE database type.
Returns:
bool: True if successful or False if not.
"""
return True
def WriteDatabaseDefinition(self, database_definition): # pylint: disable=unused-argument
"""Writes the database definition.
Args:
database_definition (EseDatabaseDefinition): database definition.
"""
return
def WriteTableDefinitions(self, table_definitions): # pylint: disable=unused-argument
"""Writes the table definitions.
Args:
table_definitions (list[EseTableDefinition]): table definitions.
"""
return
class EseDbCatalogExtractorTest(test_lib.BaseTestCase):
"""Tests for the ESE database catalog extractor."""
# pylint: disable=protected-access
def testInitialize(self):
"""Tests the __init__ function."""
test_extractor = catalog_extractor.EseDbCatalogExtractor()
self.assertIsNotNone(test_extractor)
def testDetermineDatabaseType(self):
"""Tests the _DetermineDatabaseType function."""
test_extractor = catalog_extractor.EseDbCatalogExtractor()
database_type = test_extractor._DetermineDatabaseType([
'SystemIndex_0A', 'SystemIndex_Gthr'])
self.assertEqual(database_type, 'search')
def testExtractCatalog(self):
"""Tests the ExtractCatalog function."""
test_file_path = self._GetTestFilePath(['WebCacheV01.dat'])
self._SkipIfPathNotExists(test_file_path)
test_extractor = catalog_extractor.EseDbCatalogExtractor()
test_output_writer = TestOutputWriter()
test_extractor.ExtractCatalog(test_file_path, test_output_writer)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 9,209,506,612,418,782,000 | 26.328947 | 92 | 0.705826 | false |
BurtBiel/azure-cli | scripts/smoke_test_install/test_install_linux.py | 1 | 8694 | #---------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#---------------------------------------------------------------------------------------------
from _common import (create_vm,
install_cli_interactive,
verify_basic,
verify_tab_complete,
AZURE_CLI_PACKAGE_VERSION_PREV)
## Ubuntu 14.04 LTS
class Ubuntu1404Base(object):
@classmethod
def setUpClass(cls):
cls.vm = create_vm(cls.__name__, 'Canonical:UbuntuServer:14.04.4-LTS:latest')
cls.vm(['sudo', 'apt-get', 'update'])
cls.vm(['sudo', 'apt-get', 'install', '-y', 'libssl-dev', 'libffi-dev'])
cls.vm(['sudo', 'apt-get', 'install', '-y', 'python-dev'])
class TestUbuntu1404_global(Ubuntu1404Base):
def test(self):
install_cli_interactive(self.vm, sudo=True)
verify_basic(self.vm)
class TestUbuntu1404_local(Ubuntu1404Base):
def test(self):
install_cli_interactive(self.vm, install_directory='~/myaz', exec_directory='~/myaz', tab_completion_ans='n', sudo=False)
verify_basic(self.vm, az='~/myaz/az')
class TestUbuntu1404_global_tab(Ubuntu1404Base):
def test(self):
install_cli_interactive(self.vm, install_directory=None, exec_directory=None, tab_completion_ans='y', sudo=True)
verify_basic(self.vm)
verify_tab_complete(self.vm)
class TestUbuntu1404_b2b(Ubuntu1404Base):
def test(self):
install_cli_interactive(self.vm, sudo=True, nightly_version=AZURE_CLI_PACKAGE_VERSION_PREV)
verify_basic(self.vm)
install_cli_interactive(self.vm, sudo=True)
verify_basic(self.vm)
# Ubuntu 12.04 LTS
class Ubuntu1204Base(object):
@classmethod
def setUpClass(cls):
cls.vm = create_vm(cls.__name__, 'Canonical:UbuntuServer:12.04.5-LTS:12.04.201605160')
cls.vm(['sudo', 'apt-get', 'update'])
cls.vm(['sudo', 'apt-get', 'install', '-y', 'libssl-dev', 'libffi-dev'])
cls.vm(['sudo', 'apt-get', 'install', '-y', 'python-dev'])
class TestUbuntu1204_global(Ubuntu1204Base):
def test(self):
install_cli_interactive(self.vm, sudo=True)
verify_basic(self.vm)
class TestUbuntu1204_local(Ubuntu1204Base):
def test(self):
install_cli_interactive(self.vm, install_directory='~/myaz', exec_directory='~/myaz', tab_completion_ans='n', sudo=False)
verify_basic(self.vm, az='~/myaz/az')
# Ubuntu 15.10
class Ubuntu1510Base(object):
@classmethod
def setUpClass(cls):
cls.vm = create_vm(cls.__name__, 'Canonical:UbuntuServer:15.10:15.10.201605160')
cls.vm(['sudo', 'apt-get', 'update'])
cls.vm(['sudo', 'apt-get', 'install', '-y', 'libssl-dev', 'libffi-dev'])
cls.vm(['sudo', 'apt-get', 'install', '-y', 'python-dev'])
cls.vm(['sudo', 'apt-get', 'install', '-y', 'build-essential'])
class TestUbuntu1510_global(Ubuntu1510Base):
def test(self):
install_cli_interactive(self.vm, sudo=True)
verify_basic(self.vm)
class TestUbuntu1510_local(Ubuntu1510Base):
def test(self):
install_cli_interactive(self.vm, install_directory='~/myaz', exec_directory='~/myaz', tab_completion_ans='n', sudo=False)
verify_basic(self.vm, az='~/myaz/az')
# Ubuntu 16.04 LTS
class Ubuntu1604Base(object):
@classmethod
def setUpClass(cls):
cls.vm = create_vm(cls.__name__, 'Canonical:UbuntuServer:16.04.0-LTS:16.04.201605161')
cls.vm(['sudo', 'apt-get', 'update'])
cls.vm(['sudo', 'apt-get', 'install', '-y', 'libssl-dev', 'libffi-dev'])
cls.vm(['sudo', 'apt-get', 'install', '-y', 'python-dev'])
cls.vm(['sudo', 'apt-get', 'install', '-y', 'build-essential'])
class TestUbuntu1604_global(Ubuntu1604Base):
def test(self):
install_cli_interactive(self.vm, sudo=True)
verify_basic(self.vm)
class TestUbuntu1604_local(Ubuntu1604Base):
def test(self):
install_cli_interactive(self.vm, install_directory='~/myaz', exec_directory='~/myaz', tab_completion_ans='n', sudo=False)
verify_basic(self.vm, az='~/myaz/az')
# centos 7.1
class Centos71Base(object):
@classmethod
def setUpClass(cls):
cls.vm = create_vm(cls.__name__, 'OpenLogic:CentOS:7.1:7.1.20160308')
cls.vm(['sudo', 'yum', 'check-update'], _ok_code=[0, 100])
cls.vm(['sudo', 'yum', 'install', '-y', 'gcc', 'libffi-devel', 'python-devel', 'openssl-devel'])
class TestCentos71_global(Centos71Base):
def test(self):
install_cli_interactive(self.vm, sudo=True)
verify_basic(self.vm)
class TestCentos71_local(Centos71Base):
def test(self):
install_cli_interactive(self.vm, install_directory='~/myaz', exec_directory='~/myaz', tab_completion_ans='n', sudo=False)
verify_basic(self.vm, az='~/myaz/az')
# centos 7.2
class Centos72Base(object):
@classmethod
def setUpClass(cls):
cls.vm = create_vm(cls.__name__, 'OpenLogic:CentOS:7.2:7.2.20160308')
cls.vm(['sudo', 'yum', 'check-update'], _ok_code=[0, 100])
cls.vm(['sudo', 'yum', 'install', '-y', 'gcc', 'libffi-devel', 'python-devel', 'openssl-devel'])
class TestCentos72_global(Centos72Base):
def test(self):
install_cli_interactive(self.vm, sudo=True)
verify_basic(self.vm)
class TestCentos72_local(Centos72Base):
def test(self):
install_cli_interactive(self.vm, install_directory='~/myaz', exec_directory='~/myaz', tab_completion_ans='n', sudo=False)
verify_basic(self.vm, az='~/myaz/az')
# debian 8
class Debian8Base(object):
@classmethod
def setUpClass(cls):
cls.vm = create_vm(cls.__name__, 'credativ:Debian:8:8.0.201604200')
cls.vm(['sudo', 'apt-get', 'update'])
cls.vm(['sudo', 'apt-get', 'install', '-y', 'curl'])
cls.vm(['sudo', 'apt-get', 'install', '-y', 'libssl-dev', 'libffi-dev', 'python-dev'])
cls.vm(['sudo', 'apt-get', 'install', '-y', 'build-essential'])
class TestDebian8_global(Debian8Base):
def test(self):
install_cli_interactive(self.vm, sudo=True)
verify_basic(self.vm)
class TestDebian8_local(Debian8Base):
def test(self):
install_cli_interactive(self.vm, install_directory='~/myaz', exec_directory='~/myaz', tab_completion_ans='n', sudo=False)
verify_basic(self.vm, az='~/myaz/az')
# debian 7
class Debian7Base(object):
@classmethod
def setUpClass(cls):
cls.vm = create_vm(cls.__name__, 'credativ:Debian:7:7.0.201604200')
cls.vm(['sudo', 'apt-get', 'update'])
cls.vm(['sudo', 'apt-get', 'install', '-y', 'curl'])
cls.vm(['sudo', 'apt-get', 'install', '-y', 'libssl-dev', 'libffi-dev', 'python-dev'])
class TestDebian7_global(Debian7Base):
def test(self):
install_cli_interactive(self.vm, sudo=True)
verify_basic(self.vm)
class TestDebian7_local(Debian7Base):
def test(self):
install_cli_interactive(self.vm, install_directory='~/myaz', exec_directory='~/myaz', tab_completion_ans='n', sudo=False)
verify_basic(self.vm, az='~/myaz/az')
# RedHat RHEL 7.2
class RHEL72Base(object):
@classmethod
def setUpClass(cls):
cls.vm = create_vm(cls.__name__, 'RedHat:RHEL:7.2:7.2.20160302')
cls.vm(['sudo', 'yum', 'check-update'], _ok_code=[0, 100])
cls.vm(['sudo', 'yum', 'install', '-y', 'gcc', 'libffi-devel', 'python-devel', 'openssl-devel'])
class TestRHEL72_global(RHEL72Base):
def test(self):
install_cli_interactive(self.vm, sudo=True)
verify_basic(self.vm)
class TestRHEL72_local(RHEL72Base):
def test(self):
install_cli_interactive(self.vm, install_directory='~/myaz', exec_directory='~/myaz', tab_completion_ans='n', sudo=False)
verify_basic(self.vm, az='~/myaz/az')
# SUSE OpenSUSE 13.2
class SUSE132Base(object):
@classmethod
def setUpClass(cls):
cls.vm = create_vm(cls.__name__, 'SUSE:openSUSE:13.2:2016.03.02')
cls.vm(['sudo', 'zypper', 'refresh'])
cls.vm(['sudo', 'zypper', '--non-interactive', 'install', 'gcc', 'libffi-devel', 'python-devel', 'openssl-devel'])
class TestSUSE132_global(SUSE132Base):
def test(self):
install_cli_interactive(self.vm, sudo=True)
verify_basic(self.vm)
class TestSUSE132_local(SUSE132Base):
def test(self):
install_cli_interactive(self.vm, install_directory='~/myaz', exec_directory='~/myaz', tab_completion_ans='n', sudo=False)
verify_basic(self.vm, az='~/myaz/az')
| mit | 9,013,378,225,437,108,000 | 37.986547 | 129 | 0.615597 | false |
esacosta/u-mooc | models/courses.py | 1 | 67128 | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common classes and methods for managing Courses."""
__author__ = 'Pavel Simakov ([email protected])'
import copy
from datetime import datetime
import logging
import os
import pickle
import sys
import appengine_config
from common.schema_fields import FieldRegistry
from common.schema_fields import SchemaField
import common.tags
from tools import verify
import yaml
from models import MemcacheManager
import progress
import review
import transforms
import vfs
COURSE_MODEL_VERSION_1_2 = '1.2'
COURSE_MODEL_VERSION_1_3 = '1.3'
# Date format string for validating input in ISO 8601 format without a
# timezone. All such strings are assumed to refer to UTC datetimes.
# Example: '2013-03-21 13:00'
ISO_8601_DATE_FORMAT = '%Y-%m-%d %H:%M'
def deep_dict_merge(real_values_dict, default_values_dict):
"""Merges default and real value dictionaries recursively."""
def _deep_merge(real_values, default_values):
"""Updates real with default values recursively."""
# Recursively merge dictionaries.
for key, value in real_values.items():
default_value = default_values.get(key)
if (default_value and isinstance(
value, dict) and isinstance(default_value, dict)):
_deep_merge(value, default_value)
# Copy over other values.
for key, value in default_values.items():
if not key in real_values:
real_values[key] = value
result = {}
if real_values_dict:
result = copy.deepcopy(real_values_dict)
_deep_merge(result, default_values_dict)
return result
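# Illustrative behavior sketch (not part of the original file; values are
# hypothetical): real values win and defaults fill in the gaps recursively.
#   deep_dict_merge({'course': {'title': 'My Course'}},
#                   {'course': {'title': 'UNTITLED', 'locale': 'en_US'}})
#   => {'course': {'title': 'My Course', 'locale': 'en_US'}}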
# Here are the defaults for a new course.
DEFAULT_COURSE_YAML_DICT = {
'course': {
'title': 'UNTITLED COURSE',
'locale': 'en_US',
'main_image': {},
'now_available': False},
'base': {
'show_gplus_button': True},
'institution': {
'logo': {},
'url': ''},
'preview': {},
'unit': {},
'reg_form': {
'can_register': True,
'additional_registration_fields': (
'<!-- reg_form.additional_registration_fields -->')}
}
# Here are the defaults for an existing course.
DEFAULT_EXISTING_COURSE_YAML_DICT = deep_dict_merge(
{'course': {
'now_available': True}},
DEFAULT_COURSE_YAML_DICT)
# Here is the default course.yaml for a new course.
EMPTY_COURSE_YAML = u"""# my new course.yaml
course:
title: 'New Course by %s'
now_available: False
"""
# Here are the default assessment weights corresponding to the sample course.
DEFAULT_LEGACY_ASSESSMENT_WEIGHTS = {'Pre': 0, 'Mid': 30, 'Fin': 70}
# Indicates that an assessment is graded automatically.
AUTO_GRADER = 'auto'
# Indicates that an assessment is graded by a human.
HUMAN_GRADER = 'human'
# Allowed graders.
ALLOWED_GRADERS = [AUTO_GRADER, HUMAN_GRADER]
# Keys in unit.workflow (when it is converted to a dict).
GRADER_KEY = 'grader'
MATCHER_KEY = 'matcher'
SUBMISSION_DUE_DATE_KEY = 'submission_due_date'
REVIEW_DUE_DATE_KEY = 'review_due_date'
REVIEW_MIN_COUNT_KEY = 'review_min_count'
REVIEW_WINDOW_MINS_KEY = 'review_window_mins'
DEFAULT_REVIEW_MIN_COUNT = 2
DEFAULT_REVIEW_WINDOW_MINS = 60
# Keys specific to human-graded assessments.
HUMAN_GRADED_ASSESSMENT_KEY_LIST = [
MATCHER_KEY, REVIEW_MIN_COUNT_KEY, REVIEW_WINDOW_MINS_KEY,
SUBMISSION_DUE_DATE_KEY, REVIEW_DUE_DATE_KEY
]
# The name for the peer review assessment used in the sample v1.2 CSV file.
# This is here so that a peer review assessment example is available when
# U-MOOC loads with the sample course. However, in general, peer
# review assessments should only be specified in U-MOOC v1.4 or
# later (via the web interface).
LEGACY_REVIEW_ASSESSMENT = 'ReviewAssessmentExample'
# This value is the default workflow for assessment grading.
DEFAULT_AUTO_GRADER_WORKFLOW = yaml.safe_dump({
GRADER_KEY: AUTO_GRADER
}, default_flow_style=False)
# This value is meant to be used only for the human-reviewed assessments in the
# sample v1.2 Power Searching course.
LEGACY_HUMAN_GRADER_WORKFLOW = yaml.safe_dump({
GRADER_KEY: HUMAN_GRADER,
MATCHER_KEY: review.PEER_MATCHER,
SUBMISSION_DUE_DATE_KEY: '2014-03-14 12:00',
REVIEW_DUE_DATE_KEY: '2014-03-21 12:00',
REVIEW_MIN_COUNT_KEY: DEFAULT_REVIEW_MIN_COUNT,
REVIEW_WINDOW_MINS_KEY: DEFAULT_REVIEW_WINDOW_MINS,
}, default_flow_style=False)
def is_editable_fs(app_context):
return isinstance(app_context.fs.impl, vfs.DatastoreBackedFileSystem)
def copy_attributes(source, target, converter):
"""Copies source object attributes into a target using a converter."""
for source_name, value in converter.items():
if value:
target_name = value[0]
target_type = value[1]
setattr(
target, target_name, target_type(getattr(source, source_name)))
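# Illustrative converter shape (not part of the original file; field names are
# hypothetical): each source attribute maps to a (target_name, target_type)
# pair, e.g.
#   copy_attributes(csv_row, unit,
#                   {'id': ('unit_id', int), 'title': ('title', unicode)})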
def load_csv_course(app_context):
"""Loads course data from the CSV files."""
logging.info('Initializing datastore from CSV files.')
unit_file = os.path.join(app_context.get_data_home(), 'unit.csv')
lesson_file = os.path.join(app_context.get_data_home(), 'lesson.csv')
# Check files exist.
if (not app_context.fs.isfile(unit_file) or
not app_context.fs.isfile(lesson_file)):
return None, None
unit_stream = app_context.fs.open(unit_file)
lesson_stream = app_context.fs.open(lesson_file)
# Verify CSV file integrity.
units = verify.read_objects_from_csv_stream(
unit_stream, verify.UNITS_HEADER, verify.Unit)
lessons = verify.read_objects_from_csv_stream(
lesson_stream, verify.LESSONS_HEADER, verify.Lesson)
verifier = verify.Verifier()
verifier.verify_unit_fields(units)
verifier.verify_lesson_fields(lessons)
verifier.verify_unit_lesson_relationships(units, lessons)
assert verifier.errors == 0
assert verifier.warnings == 0
# Load data from CSV files into a datastore.
units = verify.read_objects_from_csv_stream(
app_context.fs.open(unit_file), verify.UNITS_HEADER, Unit12,
converter=verify.UNIT_CSV_TO_DB_CONVERTER)
lessons = verify.read_objects_from_csv_stream(
app_context.fs.open(lesson_file), verify.LESSONS_HEADER, Lesson12,
converter=verify.LESSON_CSV_TO_DB_CONVERTER)
return units, lessons
def index_units_and_lessons(course):
"""Index all 'U' type units and their lessons. Indexes are 1-based."""
unit_index = 1
for unit in course.get_units():
if verify.UNIT_TYPE_UNIT == unit.type:
unit._index = unit_index # pylint: disable-msg=protected-access
unit_index += 1
lesson_index = 1
for lesson in course.get_lessons(unit.unit_id):
lesson._index = ( # pylint: disable-msg=protected-access
lesson_index)
lesson_index += 1
def create_course_registry():
"""Create the registry for course properties."""
reg = FieldRegistry('Basic Course Settings', description='Course Settings')
# Course level settings.
course_opts = reg.add_sub_registry('course', 'Course Config')
course_opts.add_property(
SchemaField('course:title', 'Course Name', 'string'))
course_opts.add_property(
SchemaField(
'course:admin_user_emails', 'Course Admin Emails', 'string',
description='A space-separated list of email addresses of course '
'administrators. Each email address must be placed between \'[\' '
'and \']\'.'))
course_opts.add_property(
SchemaField(
'course:forum_email', 'Forum Email', 'string', optional=True,
description='Email for the forum, e.g. '
'\'[email protected]\'.'))
course_opts.add_property(SchemaField(
'course:announcement_list_email', 'Announcement List Email', 'string',
optional=True, description='Email for the mailing list where students '
'can register to receive course announcements, e.g. '
'\'[email protected]\''))
course_opts.add_property(SchemaField('course:locale', 'Locale', 'string'))
course_opts.add_property(SchemaField(
'course:start_date', 'Course Start Date', 'string', optional=True))
course_opts.add_property(SchemaField(
'course:now_available', 'Make Course Available', 'boolean'))
# Course registration settings.
reg_opts = reg.add_sub_registry('reg_form', 'Student Registration Options')
reg_opts.add_property(SchemaField(
'reg_form:can_register', 'Enable Registrations', 'boolean',
description='Checking this box allows new students to register for '
'the course.'))
# Course homepage settings.
homepage_opts = reg.add_sub_registry('homepage', 'Homepage Settings')
homepage_opts.add_property(SchemaField(
'course:instructor_details', 'Instructor Details', 'html',
optional=True))
homepage_opts.add_property(SchemaField(
'course:blurb', 'Course Abstract', 'html', optional=True,
description='Text, shown on the course homepage, that explains what '
'the course is about.',
extra_schema_dict_values={
'supportCustomTags': common.tags.CAN_USE_DYNAMIC_TAGS.value}))
homepage_opts.add_property(SchemaField(
'course:main_video:url', 'Course Video', 'url', optional=True,
description='URL for the preview video shown on the course homepage.'))
homepage_opts.add_property(SchemaField(
'course:main_image:url', 'Course Image', 'string', optional=True,
description='URL for the preview image shown on the course homepage. '
'This will only be shown if no course video is specified.'))
homepage_opts.add_property(SchemaField(
'course:main_image:alt_text', 'Alternate Text', 'string',
optional=True,
description='Alt text for the preview image on the course homepage.'))
return reg
class AbstractCachedObject(object):
"""Abstract serializable versioned object that can stored in memcache."""
@classmethod
def _make_key(cls):
        # The course content files may change between deployments. To avoid
        # reading old cached values with a new version of the application, we
        # add the deployment version to the key. This way each version of the
        # application can put/get its own version of the course for its own
        # deployment.
return 'course:model:pickle:%s:%s' % (
cls.VERSION, os.environ.get('CURRENT_VERSION_ID'))
@classmethod
def new_memento(cls):
"""Creates new empty memento instance; must be pickle serializable."""
raise Exception('Not implemented')
@classmethod
def instance_from_memento(cls, unused_app_context, unused_memento):
"""Creates instance from serializable memento."""
raise Exception('Not implemented')
@classmethod
def memento_from_instance(cls, unused_instance):
"""Creates serializable memento from instance."""
raise Exception('Not implemented')
@classmethod
def load(cls, app_context):
"""Loads instance from memcache; does not fail on errors."""
try:
binary_data = MemcacheManager.get(
cls._make_key(),
namespace=app_context.get_namespace_name())
if binary_data:
memento = cls.new_memento()
memento.deserialize(binary_data)
return cls.instance_from_memento(app_context, memento)
except Exception as e: # pylint: disable-msg=broad-except
logging.error(
'Failed to load object \'%s\' from memcache. %s',
cls._make_key(), e)
return None
@classmethod
def save(cls, app_context, instance):
"""Saves instance to memcache."""
MemcacheManager.set(
cls._make_key(),
cls.memento_from_instance(instance).serialize(),
namespace=app_context.get_namespace_name())
@classmethod
def delete(cls, app_context):
"""Deletes instance from memcache."""
MemcacheManager.delete(
cls._make_key(),
namespace=app_context.get_namespace_name())
def serialize(self):
"""Saves instance to a pickle representation."""
return pickle.dumps(self.__dict__)
def deserialize(self, binary_data):
"""Loads instance from a pickle representation."""
adict = pickle.loads(binary_data)
if not self.version == adict.get('version'):
raise Exception('Expected version %s, found %s.' % (
self.version, adict.get('version')))
self.__dict__.update(adict)
class Unit12(object):
"""An object to represent a Unit, Assessment or Link (version 1.2)."""
def __init__(self):
self.unit_id = '' # primary key
self.type = ''
self.title = ''
self.release_date = ''
self.now_available = False
# Units of 'U' types have 1-based index. An index is automatically
# computed.
self._index = None
@property
def href(self):
assert verify.UNIT_TYPE_LINK == self.type
return self.unit_id
@property
def index(self):
assert verify.UNIT_TYPE_UNIT == self.type
return self._index
@property
def workflow_yaml(self):
"""Returns the workflow as a YAML text string."""
assert verify.UNIT_TYPE_ASSESSMENT == self.type
if self.unit_id == LEGACY_REVIEW_ASSESSMENT:
return LEGACY_HUMAN_GRADER_WORKFLOW
else:
return DEFAULT_AUTO_GRADER_WORKFLOW
@property
def workflow(self):
"""Returns the workflow as an object."""
return Workflow(self.workflow_yaml)
class Lesson12(object):
"""An object to represent a Lesson (version 1.2)."""
def __init__(self):
self.lesson_id = 0 # primary key
self.unit_id = 0 # unit.unit_id of parent
self.title = ''
self.objectives = ''
self.video = ''
self.notes = ''
self.duration = ''
self.activity = ''
self.activity_title = ''
self.activity_listed = True
# Lessons have 1-based index inside the unit they belong to. An index
# is automatically computed.
self._index = None
@property
def now_available(self):
return True
@property
def index(self):
return self._index
class CachedCourse12(AbstractCachedObject):
"""A representation of a Course12 optimized for storing in memcache."""
VERSION = COURSE_MODEL_VERSION_1_2
def __init__(self, units=None, lessons=None, unit_id_to_lessons=None):
self.version = self.VERSION
self.units = units
self.lessons = lessons
self.unit_id_to_lessons = unit_id_to_lessons
@classmethod
def new_memento(cls):
return CachedCourse12()
@classmethod
def instance_from_memento(cls, app_context, memento):
return CourseModel12(
app_context, units=memento.units, lessons=memento.lessons,
unit_id_to_lessons=memento.unit_id_to_lessons)
@classmethod
def memento_from_instance(cls, course):
return CachedCourse12(
units=course.units, lessons=course.lessons,
unit_id_to_lessons=course.unit_id_to_lessons)
class CourseModel12(object):
"""A course defined in terms of CSV files (version 1.2)."""
VERSION = COURSE_MODEL_VERSION_1_2
@classmethod
def load(cls, app_context):
"""Loads course data into a model."""
course = CachedCourse12.load(app_context)
if not course:
units, lessons = load_csv_course(app_context)
if units and lessons:
course = CourseModel12(app_context, units, lessons)
if course:
CachedCourse12.save(app_context, course)
return course
@classmethod
def _make_unit_id_to_lessons_lookup_dict(cls, lessons):
"""Creates an index of unit.unit_id to unit.lessons."""
unit_id_to_lessons = {}
for lesson in lessons:
key = str(lesson.unit_id)
if not key in unit_id_to_lessons:
unit_id_to_lessons[key] = []
unit_id_to_lessons[key].append(lesson)
return unit_id_to_lessons
def __init__(
self, app_context,
units=None, lessons=None, unit_id_to_lessons=None):
self._app_context = app_context
self._units = []
self._lessons = []
self._unit_id_to_lessons = {}
if units:
self._units = units
if lessons:
self._lessons = lessons
if unit_id_to_lessons:
self._unit_id_to_lessons = unit_id_to_lessons
else:
self._unit_id_to_lessons = (
self._make_unit_id_to_lessons_lookup_dict(self._lessons))
index_units_and_lessons(self)
@property
def app_context(self):
return self._app_context
@property
def units(self):
return self._units
@property
def lessons(self):
return self._lessons
@property
def unit_id_to_lessons(self):
return self._unit_id_to_lessons
def get_units(self):
return self._units[:]
def get_lessons(self, unit_id):
return self._unit_id_to_lessons.get(str(unit_id), [])
def find_unit_by_id(self, unit_id):
"""Finds a unit given its id."""
for unit in self._units:
if str(unit.unit_id) == str(unit_id):
return unit
return None
def get_review_form_filename(self, unit_id):
"""Returns the corresponding review form filename."""
return 'assets/js/review-%s.js' % unit_id
def get_assessment_filename(self, unit_id):
"""Returns assessment base filename."""
unit = self.find_unit_by_id(unit_id)
assert unit and verify.UNIT_TYPE_ASSESSMENT == unit.type
return 'assets/js/assessment-%s.js' % unit.unit_id
def _get_assessment_as_dict(self, filename):
"""Returns the Python dict representation of an assessment file."""
root_name = 'assessment'
context = self._app_context
assessment_content = context.fs.impl.get(os.path.join(
context.get_home(), filename)).read()
content, noverify_text = verify.convert_javascript_to_python(
assessment_content, root_name)
assessment = verify.evaluate_python_expression_from_text(
content, root_name, verify.Assessment().scope, noverify_text)
return assessment
def get_assessment_content(self, unit):
"""Returns the schema for an assessment as a Python dict."""
return self._get_assessment_as_dict(
self.get_assessment_filename(unit.unit_id))
def get_review_form_content(self, unit):
"""Returns the schema for a review form as a Python dict."""
return self._get_assessment_as_dict(
self.get_review_form_filename(unit.unit_id))
def get_activity_filename(self, unit_id, lesson_id):
"""Returns activity base filename."""
return 'assets/js/activity-%s.%s.js' % (unit_id, lesson_id)
def find_lesson_by_id(self, unit, lesson_id):
"""Finds a lesson given its id (or 1-based index in this model)."""
index = int(lesson_id) - 1
return self.get_lessons(unit.unit_id)[index]
def to_json(self):
"""Creates JSON representation of this instance."""
adict = copy.deepcopy(self)
del adict._app_context
return transforms.dumps(
adict,
indent=4, sort_keys=True,
default=lambda o: o.__dict__)
class Unit13(object):
"""An object to represent a Unit, Assessment or Link (version 1.3)."""
def __init__(self):
self.unit_id = 0 # primary key
self.type = ''
self.title = ''
self.release_date = ''
self.now_available = False
# Units of 'U' types have 1-based index. An index is automatically
# computed.
self._index = None
# Only valid for the unit.type == verify.UNIT_TYPE_LINK.
self.href = None
# Only valid for the unit.type == verify.UNIT_TYPE_ASSESSMENT.
self.weight = 0
# Only valid for the unit.type == verify.UNIT_TYPE_ASSESSMENT.
self.workflow_yaml = DEFAULT_AUTO_GRADER_WORKFLOW
@property
def index(self):
assert verify.UNIT_TYPE_UNIT == self.type
return self._index
@property
def workflow(self):
"""Returns the workflow as an object."""
assert verify.UNIT_TYPE_ASSESSMENT == self.type
workflow = Workflow(self.workflow_yaml)
return workflow
class Lesson13(object):
"""An object to represent a Lesson (version 1.3)."""
def __init__(self):
self.lesson_id = 0 # primary key
self.unit_id = 0 # unit.unit_id of parent
self.title = ''
self.objectives = ''
self.video = ''
self.notes = ''
self.duration = ''
self.now_available = False
self.has_activity = False
self.activity_title = ''
self.activity_listed = True
# Lessons have 1-based index inside the unit they belong to. An index
# is automatically computed.
self._index = None
@property
def index(self):
return self._index
@property
def activity(self):
"""A symbolic name to old attribute."""
return self.has_activity
class PersistentCourse13(object):
"""A representation of a Course13 optimized for persistence."""
COURSES_FILENAME = 'data/course.json'
def __init__(self, next_id=None, units=None, lessons=None):
self.version = CourseModel13.VERSION
self.next_id = next_id
self.units = units
self.lessons = lessons
def to_dict(self):
"""Saves object attributes into a dict."""
result = {}
result['version'] = str(self.version)
result['next_id'] = int(self.next_id)
units = []
for unit in self.units:
units.append(transforms.instance_to_dict(unit))
result['units'] = units
lessons = []
for lesson in self.lessons:
lessons.append(transforms.instance_to_dict(lesson))
result['lessons'] = lessons
return result
def _from_dict(self, adict):
"""Loads instance attributes from the dict."""
self.next_id = int(adict.get('next_id'))
self.units = []
unit_dicts = adict.get('units')
if unit_dicts:
for unit_dict in unit_dicts:
unit = Unit13()
defaults = {'workflow_yaml': DEFAULT_AUTO_GRADER_WORKFLOW}
transforms.dict_to_instance(unit_dict, unit, defaults=defaults)
self.units.append(unit)
self.lessons = []
lesson_dicts = adict.get('lessons')
if lesson_dicts:
for lesson_dict in lesson_dicts:
lesson = Lesson13()
defaults = {'activity_listed': True}
transforms.dict_to_instance(
lesson_dict, lesson, defaults=defaults)
self.lessons.append(lesson)
@classmethod
def save(cls, app_context, course):
"""Saves course to datastore."""
persistent = PersistentCourse13(
next_id=course.next_id,
units=course.units, lessons=course.lessons)
fs = app_context.fs.impl
filename = fs.physical_to_logical(cls.COURSES_FILENAME)
app_context.fs.put(filename, vfs.FileStreamWrapped(
None, persistent.serialize()))
@classmethod
def load(cls, app_context):
"""Loads course from datastore."""
fs = app_context.fs.impl
filename = fs.physical_to_logical(cls.COURSES_FILENAME)
if app_context.fs.isfile(filename):
persistent = PersistentCourse13()
persistent.deserialize(app_context.fs.get(filename))
return CourseModel13(
app_context, next_id=persistent.next_id,
units=persistent.units, lessons=persistent.lessons)
return None
def serialize(self):
"""Saves instance to a JSON representation."""
adict = self.to_dict()
json_text = transforms.dumps(adict)
return json_text.encode('utf-8')
def deserialize(self, binary_data):
"""Loads instance from a JSON representation."""
json_text = binary_data.decode('utf-8')
adict = transforms.loads(json_text)
if not self.version == adict.get('version'):
raise Exception('Expected version %s, found %s.' % (
self.version, adict.get('version')))
self._from_dict(adict)
class CachedCourse13(AbstractCachedObject):
"""A representation of a Course13 optimized for storing in memcache."""
VERSION = COURSE_MODEL_VERSION_1_3
def __init__(
self, next_id=None, units=None, lessons=None,
unit_id_to_lesson_ids=None):
self.version = self.VERSION
self.next_id = next_id
self.units = units
self.lessons = lessons
# This is almost the same as PersistentCourse13 above, but it also
# stores additional indexes used for performance optimizations. There
# is no need to persist these indexes in durable storage, but it is
# nice to have them in memcache.
self.unit_id_to_lesson_ids = unit_id_to_lesson_ids
@classmethod
def new_memento(cls):
return CachedCourse13()
@classmethod
def instance_from_memento(cls, app_context, memento):
return CourseModel13(
app_context, next_id=memento.next_id,
units=memento.units, lessons=memento.lessons,
unit_id_to_lesson_ids=memento.unit_id_to_lesson_ids)
@classmethod
def memento_from_instance(cls, course):
return CachedCourse13(
next_id=course.next_id,
units=course.units, lessons=course.lessons,
unit_id_to_lesson_ids=course.unit_id_to_lesson_ids)
class CourseModel13(object):
"""A course defined in terms of objects (version 1.3)."""
VERSION = COURSE_MODEL_VERSION_1_3
@classmethod
def load(cls, app_context):
"""Loads course from memcache or persistence."""
course = CachedCourse13.load(app_context)
if not course:
course = PersistentCourse13.load(app_context)
if course:
CachedCourse13.save(app_context, course)
return course
@classmethod
def _make_unit_id_to_lessons_lookup_dict(cls, lessons):
"""Creates an index of unit.unit_id to unit.lessons."""
unit_id_to_lesson_ids = {}
for lesson in lessons:
key = str(lesson.unit_id)
if not key in unit_id_to_lesson_ids:
unit_id_to_lesson_ids[key] = []
unit_id_to_lesson_ids[key].append(str(lesson.lesson_id))
return unit_id_to_lesson_ids
def __init__(
self, app_context, next_id=None, units=None, lessons=None,
unit_id_to_lesson_ids=None):
# Init default values.
self._app_context = app_context
self._next_id = 1 # a counter for creating sequential entity ids
self._units = []
self._lessons = []
self._unit_id_to_lesson_ids = {}
# These array keep dirty object in current transaction.
self._dirty_units = []
self._dirty_lessons = []
self._deleted_units = []
self._deleted_lessons = []
# Set provided values.
if next_id:
self._next_id = next_id
if units:
self._units = units
if lessons:
self._lessons = lessons
if unit_id_to_lesson_ids:
self._unit_id_to_lesson_ids = unit_id_to_lesson_ids
else:
self._index()
@property
def app_context(self):
return self._app_context
@property
def next_id(self):
return self._next_id
@property
def units(self):
return self._units
@property
def lessons(self):
return self._lessons
@property
def unit_id_to_lesson_ids(self):
return self._unit_id_to_lesson_ids
def _get_next_id(self):
"""Allocates next id in sequence."""
next_id = self._next_id
self._next_id += 1
return next_id
def _index(self):
"""Indexes units and lessons."""
self._unit_id_to_lesson_ids = self._make_unit_id_to_lessons_lookup_dict(
self._lessons)
index_units_and_lessons(self)
def is_dirty(self):
"""Checks if course object has been modified and needs to be saved."""
return self._dirty_units or self._dirty_lessons
def _flush_deleted_objects(self):
"""Delete files owned by deleted objects."""
# TODO(psimakov): handle similarly add_unit() and set_assessment()
        # To delete an activity/assessment one must look up its filename. This
        # requires a valid unit/lesson. If a unit was deleted it is no longer
        # found in _units, and the same goes for lessons. So we temporarily
        # install the deleted unit/lesson arrays instead of the actual ones,
        # and temporarily clear _unit_id_to_lesson_ids so it is not
        # accidentally used. This is a hack, and we will improve it as the
        # object model gets more complex, but for now it works fine.
units = self._units
lessons = self._lessons
unit_id_to_lesson_ids = self._unit_id_to_lesson_ids
try:
self._units = self._deleted_units
self._lessons = self._deleted_lessons
self._unit_id_to_lesson_ids = None
# Delete owned assessments.
for unit in self._deleted_units:
if verify.UNIT_TYPE_ASSESSMENT == unit.type:
self._delete_assessment(unit)
# Delete owned activities.
for lesson in self._deleted_lessons:
if lesson.has_activity:
self._delete_activity(lesson)
finally:
self._units = units
self._lessons = lessons
self._unit_id_to_lesson_ids = unit_id_to_lesson_ids
def _update_dirty_objects(self):
"""Update files owned by course."""
fs = self.app_context.fs
# Update state of owned assessments.
for unit in self._dirty_units:
unit = self.find_unit_by_id(unit.unit_id)
if not unit or verify.UNIT_TYPE_ASSESSMENT != unit.type:
continue
path = fs.impl.physical_to_logical(
self.get_assessment_filename(unit.unit_id))
if fs.isfile(path):
fs.put(
path, None, metadata_only=True,
is_draft=not unit.now_available)
# Update state of owned activities.
for lesson in self._dirty_lessons:
lesson = self.find_lesson_by_id(None, lesson.lesson_id)
if not lesson or not lesson.has_activity:
continue
path = fs.impl.physical_to_logical(
self.get_activity_filename(None, lesson.lesson_id))
if fs.isfile(path):
fs.put(
path, None, metadata_only=True,
is_draft=not lesson.now_available)
def save(self):
"""Saves course to datastore and memcache."""
self._flush_deleted_objects()
self._update_dirty_objects()
self._dirty_units = []
self._dirty_lessons = []
self._deleted_units = []
self._deleted_lessons = []
self._index()
PersistentCourse13.save(self._app_context, self)
CachedCourse13.delete(self._app_context)
def get_units(self):
return self._units[:]
def get_lessons(self, unit_id):
lesson_ids = self._unit_id_to_lesson_ids.get(str(unit_id))
lessons = []
if lesson_ids:
for lesson_id in lesson_ids:
lessons.append(self.find_lesson_by_id(None, lesson_id))
return lessons
def get_assessment_filename(self, unit_id):
"""Returns assessment base filename."""
unit = self.find_unit_by_id(unit_id)
assert unit
assert verify.UNIT_TYPE_ASSESSMENT == unit.type
return 'assets/js/assessment-%s.js' % unit.unit_id
def get_review_form_filename(self, unit_id):
"""Returns review form filename."""
unit = self.find_unit_by_id(unit_id)
assert unit
assert verify.UNIT_TYPE_ASSESSMENT == unit.type
return 'assets/js/review-%s.js' % unit.unit_id
def get_activity_filename(self, unused_unit_id, lesson_id):
"""Returns activity base filename."""
lesson = self.find_lesson_by_id(None, lesson_id)
assert lesson
if lesson.has_activity:
return 'assets/js/activity-%s.js' % lesson_id
return None
def find_unit_by_id(self, unit_id):
"""Finds a unit given its id."""
for unit in self._units:
if str(unit.unit_id) == str(unit_id):
return unit
return None
def find_lesson_by_id(self, unused_unit, lesson_id):
"""Finds a lesson given its id."""
for lesson in self._lessons:
if str(lesson.lesson_id) == str(lesson_id):
return lesson
return None
def add_unit(self, unit_type, title):
"""Adds a brand new unit."""
assert unit_type in verify.UNIT_TYPES
unit = Unit13()
unit.type = unit_type
unit.unit_id = self._get_next_id()
unit.title = title
unit.now_available = False
self._units.append(unit)
self._index()
self._dirty_units.append(unit)
return unit
def add_lesson(self, unit, title):
"""Adds brand new lesson to a unit."""
unit = self.find_unit_by_id(unit.unit_id)
assert unit
lesson = Lesson13()
lesson.lesson_id = self._get_next_id()
lesson.unit_id = unit.unit_id
lesson.title = title
lesson.now_available = False
self._lessons.append(lesson)
self._index()
self._dirty_lessons.append(lesson)
return lesson
def move_lesson_to(self, lesson, unit):
"""Moves a lesson to another unit."""
unit = self.find_unit_by_id(unit.unit_id)
assert unit
assert verify.UNIT_TYPE_UNIT == unit.type
lesson = self.find_lesson_by_id(None, lesson.lesson_id)
assert lesson
lesson.unit_id = unit.unit_id
self._index()
return lesson
def _delete_activity(self, lesson):
"""Deletes activity."""
filename = self._app_context.fs.impl.physical_to_logical(
self.get_activity_filename(None, lesson.lesson_id))
if self.app_context.fs.isfile(filename):
self.app_context.fs.delete(filename)
return True
return False
def _delete_assessment(self, unit):
"""Deletes assessment."""
files_deleted_count = 0
filenames = [
self._app_context.fs.impl.physical_to_logical(
self.get_assessment_filename(unit.unit_id)),
self._app_context.fs.impl.physical_to_logical(
self.get_review_form_filename(unit.unit_id))]
for filename in filenames:
if self.app_context.fs.isfile(filename):
self.app_context.fs.delete(filename)
files_deleted_count += 1
return bool(files_deleted_count)
def delete_all(self):
"""Deletes all course files."""
for entity in self._app_context.fs.impl.list(
appengine_config.BUNDLE_ROOT):
self._app_context.fs.impl.delete(entity)
assert not self._app_context.fs.impl.list(appengine_config.BUNDLE_ROOT)
CachedCourse13.delete(self._app_context)
def delete_lesson(self, lesson):
"""Delete a lesson."""
lesson = self.find_lesson_by_id(None, lesson.lesson_id)
if not lesson:
return False
self._lessons.remove(lesson)
self._index()
self._deleted_lessons.append(lesson)
self._dirty_lessons.append(lesson)
return True
def delete_unit(self, unit):
"""Deletes a unit."""
unit = self.find_unit_by_id(unit.unit_id)
if not unit:
return False
for lesson in self.get_lessons(unit.unit_id):
self.delete_lesson(lesson)
self._units.remove(unit)
self._index()
self._deleted_units.append(unit)
self._dirty_units.append(unit)
return True
def update_unit(self, unit):
"""Updates an existing unit."""
existing_unit = self.find_unit_by_id(unit.unit_id)
if not existing_unit:
return False
existing_unit.title = unit.title
existing_unit.release_date = unit.release_date
existing_unit.now_available = unit.now_available
if verify.UNIT_TYPE_LINK == existing_unit.type:
existing_unit.href = unit.href
if verify.UNIT_TYPE_ASSESSMENT == existing_unit.type:
existing_unit.weight = unit.weight
existing_unit.workflow_yaml = unit.workflow_yaml
self._dirty_units.append(existing_unit)
return existing_unit
def update_lesson(self, lesson):
"""Updates an existing lesson."""
existing_lesson = self.find_lesson_by_id(
lesson.unit_id, lesson.lesson_id)
if not existing_lesson:
return False
existing_lesson.title = lesson.title
existing_lesson.unit_id = lesson.unit_id
existing_lesson.objectives = lesson.objectives
existing_lesson.video = lesson.video
existing_lesson.notes = lesson.notes
existing_lesson.activity_title = lesson.activity_title
self._index()
self._dirty_lessons.append(existing_lesson)
return existing_lesson
def reorder_units(self, order_data):
"""Reorder the units and lessons based on the order data given.
Args:
order_data: list of dict, in the following format:
[
{'id': 0, 'lessons': [{'id': 0}, {'id': 1}, {'id': 2}]},
{'id': 1},
{'id': 2, 'lessons': [{'id': 0}, {'id': 1}]}
...
]
"""
reordered_units = []
unit_ids = set()
for unit_data in order_data:
unit_id = unit_data['id']
unit = self.find_unit_by_id(unit_id)
assert unit
reordered_units.append(self.find_unit_by_id(unit_id))
unit_ids.add(unit_id)
assert len(unit_ids) == len(self._units)
self._units = reordered_units
reordered_lessons = []
lesson_ids = set()
for unit_data in order_data:
unit_id = unit_data['id']
unit = self.find_unit_by_id(unit_id)
assert unit
if verify.UNIT_TYPE_UNIT != unit.type:
continue
for lesson_data in unit_data['lessons']:
lesson_id = lesson_data['id']
reordered_lessons.append(
self.find_lesson_by_id(None, lesson_id))
lesson_ids.add((unit_id, lesson_id))
assert len(lesson_ids) == len(self._lessons)
self._lessons = reordered_lessons
self._index()
def _get_assessment_as_dict(self, filename):
"""Gets the content of an assessment file as a Python dict."""
path = self._app_context.fs.impl.physical_to_logical(filename)
root_name = 'assessment'
assessment_content = self.app_context.fs.get(path)
content, noverify_text = verify.convert_javascript_to_python(
assessment_content, root_name)
assessment = verify.evaluate_python_expression_from_text(
content, root_name, verify.Assessment().scope, noverify_text)
return assessment
def get_assessment_content(self, unit):
"""Returns the schema for an assessment as a Python dict."""
return self._get_assessment_as_dict(
self.get_assessment_filename(unit.unit_id))
def get_review_form_content(self, unit):
"""Returns the schema for a review form as a Python dict."""
return self._get_assessment_as_dict(
self.get_review_form_filename(unit.unit_id))
def set_assessment_file_content(
self, unit, assessment_content, dest_filename, errors=None):
"""Updates the content of an assessment file on the file system."""
if errors is None:
errors = []
path = self._app_context.fs.impl.physical_to_logical(dest_filename)
root_name = 'assessment'
try:
content, noverify_text = verify.convert_javascript_to_python(
assessment_content, root_name)
assessment = verify.evaluate_python_expression_from_text(
content, root_name, verify.Assessment().scope, noverify_text)
except Exception: # pylint: disable-msg=broad-except
errors.append('Unable to parse %s:\n%s' % (
root_name,
str(sys.exc_info()[1])))
return
verifier = verify.Verifier()
try:
verifier.verify_assessment_instance(assessment, path)
except verify.SchemaException:
errors.append('Error validating %s\n' % root_name)
return
fs = self.app_context.fs
fs.put(
path, vfs.string_to_stream(assessment_content),
is_draft=not unit.now_available)
def set_assessment_content(self, unit, assessment_content, errors=None):
"""Updates the content of an assessment."""
self.set_assessment_file_content(
unit,
assessment_content,
self.get_assessment_filename(unit.unit_id),
errors=errors
)
def set_review_form(self, unit, review_form, errors=None):
"""Sets the content of a review form."""
self.set_assessment_file_content(
unit,
review_form,
self.get_review_form_filename(unit.unit_id),
errors=errors
)
def set_activity_content(self, lesson, activity_content, errors=None):
"""Updates the content of an activity."""
if errors is None:
errors = []
path = self._app_context.fs.impl.physical_to_logical(
self.get_activity_filename(lesson.unit_id, lesson.lesson_id))
root_name = 'activity'
try:
content, noverify_text = verify.convert_javascript_to_python(
activity_content, root_name)
activity = verify.evaluate_python_expression_from_text(
content, root_name, verify.Activity().scope, noverify_text)
except Exception: # pylint: disable-msg=broad-except
errors.append('Unable to parse %s:\n%s' % (
root_name,
str(sys.exc_info()[1])))
return
verifier = verify.Verifier()
try:
verifier.verify_activity_instance(activity, path)
except verify.SchemaException:
errors.append('Error validating %s\n' % root_name)
return
fs = self.app_context.fs
fs.put(
path, vfs.string_to_stream(activity_content),
is_draft=not lesson.now_available)
def import_from(self, src_course, errors):
"""Imports a content of another course into this course."""
def copy_unit12_into_unit13(src_unit, dst_unit):
"""Copies unit object attributes between versions."""
assert dst_unit.type == src_unit.type
dst_unit.title = src_unit.title
dst_unit.release_date = src_unit.release_date
dst_unit.now_available = src_unit.now_available
if verify.UNIT_TYPE_LINK == dst_unit.type:
dst_unit.href = src_unit.href
# Copy over the assessment. Note that we copy files directly and
# avoid all logical validations of their content. This is done for
# a purpose - at this layer we don't care what is in those files.
if verify.UNIT_TYPE_ASSESSMENT == dst_unit.type:
if dst_unit.unit_id in DEFAULT_LEGACY_ASSESSMENT_WEIGHTS:
dst_unit.weight = (
DEFAULT_LEGACY_ASSESSMENT_WEIGHTS[dst_unit.unit_id])
filepath_mappings = [{
'src': src_course.get_assessment_filename(src_unit.unit_id),
'dst': self.get_assessment_filename(dst_unit.unit_id)
}, {
'src': src_course.get_review_form_filename(
src_unit.unit_id),
'dst': self.get_review_form_filename(dst_unit.unit_id)
}]
for mapping in filepath_mappings:
src_filename = os.path.join(
src_course.app_context.get_home(), mapping['src'])
if src_course.app_context.fs.isfile(src_filename):
astream = src_course.app_context.fs.open(src_filename)
if astream:
dst_filename = os.path.join(
self.app_context.get_home(), mapping['dst'])
self.app_context.fs.put(dst_filename, astream)
dst_unit.workflow_yaml = src_unit.workflow_yaml
def copy_lesson12_into_lesson13(
src_unit, src_lesson, unused_dst_unit, dst_lesson):
"""Copies lessons object attributes between versions."""
dst_lesson.objectives = src_lesson.objectives
dst_lesson.video = src_lesson.video
dst_lesson.notes = src_lesson.notes
dst_lesson.duration = src_lesson.duration
dst_lesson.has_activity = src_lesson.activity
dst_lesson.activity_title = src_lesson.activity_title
# Old model does not have this flag, but all lessons are available.
dst_lesson.now_available = True
# Copy over the activity. Note that we copy files directly and
# avoid all logical validations of their content. This is done for a
# purpose - at this layer we don't care what is in those files.
if src_lesson.activity:
src_filename = os.path.join(
src_course.app_context.get_home(),
src_course.get_activity_filename(
src_unit.unit_id, src_lesson.lesson_id))
if src_course.app_context.fs.isfile(src_filename):
astream = src_course.app_context.fs.open(src_filename)
if astream:
dst_filename = os.path.join(
self.app_context.get_home(),
self.get_activity_filename(
None, dst_lesson.lesson_id))
self.app_context.fs.put(dst_filename, astream)
if not is_editable_fs(self._app_context):
errors.append(
'Target course %s must be '
'on read-write media.' % self.app_context.raw)
return None, None
if self.get_units():
errors.append(
'Target course %s must be '
'empty.' % self.app_context.raw)
return None, None
# Iterate over course structure and assets and import each item.
for unit in src_course.get_units():
new_unit = self.add_unit(unit.type, unit.title)
copy_unit12_into_unit13(unit, new_unit)
for lesson in src_course.get_lessons(unit.unit_id):
new_lesson = self.add_lesson(new_unit, lesson.title)
copy_lesson12_into_lesson13(unit, lesson, new_unit, new_lesson)
return src_course, self
def to_json(self):
"""Creates JSON representation of this instance."""
persistent = PersistentCourse13(
next_id=self._next_id, units=self._units, lessons=self._lessons)
return transforms.dumps(
persistent.to_dict(),
indent=4, sort_keys=True,
default=lambda o: o.__dict__)
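# --- Illustrative sketch (added for clarity; not part of the original module) ---
# The _dirty_*/_deleted_* bookkeeping above is only flushed when save() is
# called, so a typical editing session looks like the sequence below. The
# app_context argument is an assumption: any object exposing the fs/namespace
# interface CourseModel13 relies on would do.
def _example_edit_and_save(app_context):
    """Hedged usage sketch: add a unit with one lesson, then persist."""
    model = CourseModel13(app_context)
    unit = model.add_unit(verify.UNIT_TYPE_UNIT, 'Sample Unit')
    lesson = model.add_lesson(unit, 'Sample Lesson')
    model.save()  # flushes deletions, updates draft state, re-indexes, persists
    return unit, lesson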
class Workflow(object):
"""Stores workflow specifications for assessments."""
def __init__(self, yaml_str):
"""Sets yaml_str (the workflow spec), without doing any validation."""
self._yaml_str = yaml_str
def to_yaml(self):
return self._yaml_str
def to_dict(self):
if not self._yaml_str:
return {}
obj = yaml.safe_load(self._yaml_str)
assert isinstance(obj, dict)
return obj
def _convert_date_string_to_datetime(self, date_str):
"""Returns a datetime object."""
return datetime.strptime(date_str, ISO_8601_DATE_FORMAT)
def get_grader(self):
"""Returns the associated grader."""
return self.to_dict().get(GRADER_KEY)
def get_matcher(self):
return self.to_dict().get(MATCHER_KEY)
def get_submission_due_date(self):
date_str = self.to_dict().get(SUBMISSION_DUE_DATE_KEY)
if date_str is None:
return None
return self._convert_date_string_to_datetime(date_str)
def get_review_due_date(self):
date_str = self.to_dict().get(REVIEW_DUE_DATE_KEY)
if date_str is None:
return None
return self._convert_date_string_to_datetime(date_str)
def get_review_min_count(self):
return self.to_dict().get(REVIEW_MIN_COUNT_KEY)
def get_review_window_mins(self):
return self.to_dict().get(REVIEW_WINDOW_MINS_KEY)
def _ensure_value_is_nonnegative_int(self, workflow_dict, key, errors):
"""Checks that workflow_dict[key] is a non-negative integer."""
value = workflow_dict[key]
if not isinstance(value, int):
errors.append('%s should be an integer' % key)
elif not value >= 0:
errors.append('%s should be a non-negative integer' % key)
def validate(self, errors=None):
"""Tests whether the current Workflow object is valid."""
if errors is None:
errors = []
try:
# Validate the workflow specification (in YAML format).
assert self._yaml_str, 'missing key: %s.' % GRADER_KEY
workflow_dict = yaml.safe_load(self._yaml_str)
assert isinstance(workflow_dict, dict), (
'expected the YAML representation of a dict')
assert GRADER_KEY in workflow_dict, 'missing key: %s.' % GRADER_KEY
assert workflow_dict[GRADER_KEY] in ALLOWED_GRADERS, (
'invalid grader, should be one of: %s' %
', '.join(ALLOWED_GRADERS))
if workflow_dict[GRADER_KEY] == HUMAN_GRADER:
missing_keys = []
for key in HUMAN_GRADED_ASSESSMENT_KEY_LIST:
if key not in workflow_dict:
missing_keys.append(key)
assert not missing_keys, (
'missing key(s) for a human-reviewed assessment: %s.' %
', '.join(missing_keys))
workflow_errors = []
if (workflow_dict[MATCHER_KEY] not in
review.ALLOWED_MATCHERS):
workflow_errors.append(
'invalid matcher, should be one of: %s' %
', '.join(review.ALLOWED_MATCHERS))
self._ensure_value_is_nonnegative_int(
workflow_dict, REVIEW_MIN_COUNT_KEY, workflow_errors)
self._ensure_value_is_nonnegative_int(
workflow_dict, REVIEW_WINDOW_MINS_KEY, workflow_errors)
try:
submission_due_date = self._convert_date_string_to_datetime(
workflow_dict[SUBMISSION_DUE_DATE_KEY])
review_due_date = self._convert_date_string_to_datetime(
workflow_dict[REVIEW_DUE_DATE_KEY])
if not submission_due_date <= review_due_date:
workflow_errors.append(
'submission due date should be earlier than '
'review due date')
except Exception as e: # pylint: disable-msg=broad-except
workflow_errors.append(
'dates should be formatted as YYYY-MM-DD hh:mm '
'(e.g. 1997-07-16 19:20) and be specified in the UTC '
'timezone')
if workflow_errors:
raise Exception('%s.' % '; '.join(workflow_errors))
return True
except Exception as e: # pylint: disable-msg=broad-except
errors.append('Error validating workflow specification: %s' % e)
return False
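# --- Illustrative sketch (added for clarity; not part of the original module) ---
# A Workflow wraps a small YAML document keyed by the GRADER_KEY / MATCHER_KEY /
# due-date constants referenced in validate() above; those are module-level
# constants defined elsewhere in this file, so the sketch builds the YAML from
# them instead of hard-coding literals. The date strings follow the
# "YYYY-MM-DD hh:mm" format mentioned in the validation error message.
def _example_workflow_spec():
    """Hedged sketch: build and validate a human-graded workflow spec."""
    spec = {
        GRADER_KEY: HUMAN_GRADER,
        MATCHER_KEY: review.PEER_MATCHER,
        SUBMISSION_DUE_DATE_KEY: '2014-05-01 12:00',
        REVIEW_DUE_DATE_KEY: '2014-05-15 12:00',
        REVIEW_MIN_COUNT_KEY: 2,
        REVIEW_WINDOW_MINS_KEY: 60,
    }
    workflow = Workflow(yaml.safe_dump(spec))
    errors = []
    is_valid = workflow.validate(errors=errors)
    return is_valid, errors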
class Course(object):
"""Manages a course and all of its components."""
@classmethod
def get_environ(cls, app_context):
"""Returns currently defined course settings as a dictionary."""
course_yaml = None
course_yaml_dict = None
course_data_filename = app_context.get_config_filename()
if app_context.fs.isfile(course_data_filename):
course_yaml = app_context.fs.open(course_data_filename)
if not course_yaml:
return DEFAULT_COURSE_YAML_DICT
try:
course_yaml_dict = yaml.safe_load(
course_yaml.read().decode('utf-8'))
except Exception as e: # pylint: disable-msg=broad-except
logging.info(
'Error: course.yaml file at %s not accessible, '
'loading defaults. %s', course_data_filename, e)
if not course_yaml_dict:
return DEFAULT_COURSE_YAML_DICT
return deep_dict_merge(
course_yaml_dict, DEFAULT_EXISTING_COURSE_YAML_DICT)
@property
def version(self):
return self._model.VERSION
@classmethod
def create_new_default_course(cls, app_context):
return CourseModel13(app_context)
@classmethod
def custom_new_default_course_for_test(cls, app_context):
# There is an expectation in our tests of automatic import
# of data/*.csv files. This method can be used in tests to achieve
# exactly that.
model = CourseModel12.load(app_context)
if model:
return model
return CourseModel13(app_context)
@classmethod
def _load(cls, app_context):
"""Loads course data from persistence storage into this instance."""
if not is_editable_fs(app_context):
model = CourseModel12.load(app_context)
if model:
return model
else:
model = CourseModel13.load(app_context)
if model:
return model
return cls.create_new_default_course(app_context)
def __init__(self, handler, app_context=None):
self._app_context = app_context if app_context else handler.app_context
self._namespace = self._app_context.get_namespace_name()
self._model = self._load(self._app_context)
self._tracker = None
self._reviews_processor = None
@property
def app_context(self):
return self._app_context
def to_json(self):
return self._model.to_json()
def get_progress_tracker(self):
if not self._tracker:
self._tracker = progress.UnitLessonCompletionTracker(self)
return self._tracker
def get_reviews_processor(self):
if not self._reviews_processor:
self._reviews_processor = review.ReviewsProcessor(self)
return self._reviews_processor
def get_units(self):
return self._model.get_units()
def get_lessons(self, unit_id):
return self._model.get_lessons(unit_id)
def save(self):
return self._model.save()
def find_unit_by_id(self, unit_id):
return self._model.find_unit_by_id(unit_id)
def find_lesson_by_id(self, unit, lesson_id):
return self._model.find_lesson_by_id(unit, lesson_id)
def is_last_assessment(self, unit):
"""Checks whether the given unit is the last of all the assessments."""
for current_unit in reversed(self.get_units()):
if current_unit.type == verify.UNIT_TYPE_ASSESSMENT:
return current_unit.unit_id == unit.unit_id
return False
def add_unit(self):
"""Adds new unit to a course."""
return self._model.add_unit('U', 'New Unit')
def add_link(self):
"""Adds new link (other) to a course."""
return self._model.add_unit('O', 'New Link')
def add_assessment(self):
"""Adds new assessment to a course."""
return self._model.add_unit('A', 'New Assessment')
def add_lesson(self, unit):
return self._model.add_lesson(unit, 'New Lesson')
def update_unit(self, unit):
return self._model.update_unit(unit)
def update_lesson(self, lesson):
return self._model.update_lesson(lesson)
def move_lesson_to(self, lesson, unit):
return self._model.move_lesson_to(lesson, unit)
def delete_all(self):
return self._model.delete_all()
def delete_unit(self, unit):
return self._model.delete_unit(unit)
def delete_lesson(self, lesson):
return self._model.delete_lesson(lesson)
def get_score(self, student, assessment_id):
"""Gets a student's score for a particular assessment."""
assert self.is_valid_assessment_id(assessment_id)
scores = transforms.loads(student.scores) if student.scores else {}
return scores.get(assessment_id) if scores else None
def get_overall_score(self, student):
"""Gets the overall course score for a student."""
score_list = self.get_all_scores(student)
overall_score = 0
total_weight = 0
for unit in score_list:
if not unit['human_graded']:
total_weight += unit['weight']
overall_score += unit['weight'] * unit['score']
if total_weight == 0:
return None
return int(float(overall_score) / total_weight)
def get_overall_result(self, student):
"""Gets the overall result based on a student's score profile."""
score = self.get_overall_score(student)
if score is None:
return None
# This can be replaced with a custom definition for an overall result
# string.
return 'pass' if self.get_overall_score(student) >= 70 else 'fail'
def get_all_scores(self, student):
"""Gets all score data for a student.
Args:
student: the student whose scores should be retrieved.
Returns:
an array of dicts, each representing an assessment. Each dict has
the keys 'id', 'title', 'weight' and 'score' (if available),
representing the unit id, the assessment title, the weight
contributed by the assessment to the final score, and the
assessment score.
"""
assessment_list = self.get_assessment_list()
scores = transforms.loads(student.scores) if student.scores else {}
unit_progress = self.get_progress_tracker().get_unit_progress(student)
assessment_score_list = []
for unit in assessment_list:
# Compute the weight for this assessment.
weight = 0
if hasattr(unit, 'weight'):
weight = unit.weight
elif unit.unit_id in DEFAULT_LEGACY_ASSESSMENT_WEIGHTS:
weight = DEFAULT_LEGACY_ASSESSMENT_WEIGHTS[unit.unit_id]
completed = unit_progress[unit.unit_id]
# If a human-reviewed assessment is completed, ensure that the
# required reviews have also been completed.
if completed and self.needs_human_grader(unit):
reviews = self.get_reviews_processor().get_review_steps_by(
unit.unit_id, student.get_key())
review_min_count = unit.workflow.get_review_min_count()
if not review.ReviewUtils.has_completed_enough_reviews(
reviews, review_min_count):
completed = False
assessment_score_list.append({
'id': str(unit.unit_id),
'title': unit.title,
'weight': weight,
'completed': completed,
'human_graded': self.needs_human_grader(unit),
'score': (scores[str(unit.unit_id)]
if str(unit.unit_id) in scores else 0),
})
return assessment_score_list
def get_assessment_list(self):
"""Returns a list of units that are assessments."""
# TODO(psimakov): Streamline this so that it does not require a full
# iteration on each request, probably by modifying the index() method.
assessment_list = []
for unit in self.get_units():
if verify.UNIT_TYPE_ASSESSMENT == unit.type:
assessment_list.append(unit)
return copy.deepcopy(assessment_list)
def get_peer_reviewed_units(self):
"""Returns a list of units that are peer-reviewed assessments.
Returns:
A list of units that are peer-reviewed assessments. Each unit
in the list has a unit_id of type string.
"""
assessment_list = self.get_assessment_list()
units = copy.deepcopy([unit for unit in assessment_list if (
unit.workflow.get_grader() == HUMAN_GRADER and
unit.workflow.get_matcher() == review.PEER_MATCHER)])
for unit in units:
unit.unit_id = str(unit.unit_id)
return units
def get_assessment_filename(self, unit_id):
return self._model.get_assessment_filename(unit_id)
def get_review_form_filename(self, unit_id):
return self._model.get_review_form_filename(unit_id)
def get_activity_filename(self, unit_id, lesson_id):
return self._model.get_activity_filename(unit_id, lesson_id)
def needs_human_grader(self, unit):
return unit.workflow.get_grader() == HUMAN_GRADER
def reorder_units(self, order_data):
return self._model.reorder_units(order_data)
def get_assessment_content(self, unit):
"""Returns the schema for an assessment as a Python dict."""
return self._model.get_assessment_content(unit)
def get_review_form_content(self, unit):
"""Returns the schema for a review form as a Python dict."""
return self._model.get_review_form_content(unit)
def set_assessment_content(self, unit, assessment_content, errors=None):
return self._model.set_assessment_content(
unit, assessment_content, errors=errors)
def set_review_form(self, unit, review_form, errors=None):
return self._model.set_review_form(unit, review_form, errors=errors)
def set_activity_content(self, lesson, activity_content, errors=None):
return self._model.set_activity_content(
lesson, activity_content, errors=errors)
def is_valid_assessment_id(self, assessment_id):
"""Tests whether the given assessment id is valid."""
for unit in self.get_units():
if (verify.UNIT_TYPE_ASSESSMENT == unit.type and
str(assessment_id) == str(unit.unit_id)):
return True
return False
def is_valid_unit_lesson_id(self, unit_id, lesson_id):
"""Tests whether the given unit id and lesson id are valid."""
for unit in self.get_units():
if str(unit.unit_id) == str(unit_id):
for lesson in self.get_lessons(unit_id):
if str(lesson.lesson_id) == str(lesson_id):
return True
return False
def import_from(self, app_context, errors=None):
"""Import course structure and assets from another courses."""
src_course = Course(None, app_context=app_context)
if errors is None:
errors = []
# Import 1.2 -> 1.3
if (src_course.version == CourseModel12.VERSION and
self.version == CourseModel13.VERSION):
return self._model.import_from(src_course, errors)
# import 1.3 -> 1.3
if (src_course.version == CourseModel13.VERSION and
self.version == CourseModel13.VERSION):
return self._model.import_from(src_course, errors)
errors.append(
'Import of '
'course %s (version %s) into '
'course %s (version %s) '
'is not supported.' % (
app_context.raw, src_course.version,
self.app_context.raw, self.version))
return None, None
def get_course_announcement_list_email(self):
"""Get Announcement email address for the course."""
course_env = self.get_environ(self._app_context)
if not course_env:
return None
if 'course' not in course_env:
return None
course_dict = course_env['course']
if 'announcement_list_email' not in course_dict:
return None
announcement_list_email = course_dict['announcement_list_email']
if announcement_list_email:
return announcement_list_email
return None
def init_new_course_settings(self, title, admin_email):
"""Initializes new course.yaml file if it does not yet exists."""
fs = self.app_context.fs.impl
course_yaml = fs.physical_to_logical('/course.yaml')
if fs.isfile(course_yaml):
return False
title = title.replace('\'', '\'\'')
course_yaml_text = u"""# my new course.yaml
course:
title: '%s'
admin_user_emails: '[%s]'
now_available: False
""" % (title, admin_email)
fs.put(course_yaml, vfs.string_to_stream(course_yaml_text))
return True
| apache-2.0 | 801,299,094,941,830,500 | 34.955008 | 80 | 0.599079 | false |
xccui/flink | flink-python/pyflink/common/typeinfo.py | 1 | 30454 | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import calendar
import datetime
import time
from abc import ABC
from typing import List, Union
from py4j.java_gateway import JavaClass, JavaObject
from pyflink.java_gateway import get_gateway
class TypeInformation(ABC):
"""
TypeInformation is the core class of Flink's type system. Flink requires type information
for all types that are used as input or return type of a user function. This type information
class acts as the tool to generate serializers and comparators, and to perform semantic checks
such as whether the fields that are used as join/grouping keys actually exist.
The type information also bridges between the programming language's object model and a logical
flat schema. It maps fields from the types to columns (fields) in a flat schema. Not all fields
from a type are mapped to separate fields in the flat schema and often, entire types are
mapped to one field. It is important to notice that the schema must hold for all instances of a
type. For that reason, elements in lists and arrays are not assigned to individual fields, but
the lists and arrays are considered to be one field in total, to account for different lengths
in the arrays.
a) Basic types are indivisible and are considered as a single field.
b) Arrays and collections are one field.
c) Tuples represents as many fields as the class has fields.
To represent this properly, each type has an arity (the number of fields it contains directly),
and a total number of fields (number of fields in the entire schema of this type, including
nested types).
"""
class WrapperTypeInfo(TypeInformation):
"""
A wrapper class for java TypeInformation Objects.
"""
def __init__(self, j_typeinfo):
self._j_typeinfo = j_typeinfo
def get_java_type_info(self) -> JavaObject:
return self._j_typeinfo
def __eq__(self, o) -> bool:
if type(o) is type(self):
return self._j_typeinfo.equals(o._j_typeinfo)
else:
return False
def __hash__(self) -> int:
return hash(self._j_typeinfo)
def __str__(self):
return self._j_typeinfo.toString()
def need_conversion(self):
"""
Does this type need conversion between Python objects and internal Wrapper objects.
"""
return False
def to_internal_type(self, obj):
"""
Converts a Python object into an internal object.
"""
return obj
def from_internal_type(self, obj):
"""
Converts an internal object into a native Python object.
"""
return obj
class BasicTypeInfo(TypeInformation, ABC):
"""
Type information for primitive types (int, long, double, byte, ...), String, BigInteger,
and BigDecimal.
"""
@staticmethod
def STRING_TYPE_INFO():
return WrapperTypeInfo(get_gateway().jvm
.org.apache.flink.api.common.typeinfo.BasicTypeInfo.STRING_TYPE_INFO)
@staticmethod
def BOOLEAN_TYPE_INFO():
return WrapperTypeInfo(
get_gateway().jvm.org.apache.flink.api.common.typeinfo.BasicTypeInfo.BOOLEAN_TYPE_INFO)
@staticmethod
def BYTE_TYPE_INFO():
return WrapperTypeInfo(
get_gateway().jvm.org.apache.flink.api.common.typeinfo.BasicTypeInfo.BYTE_TYPE_INFO)
@staticmethod
def SHORT_TYPE_INFO():
return WrapperTypeInfo(
get_gateway().jvm.org.apache.flink.api.common.typeinfo.BasicTypeInfo.SHORT_TYPE_INFO)
@staticmethod
def INT_TYPE_INFO():
return WrapperTypeInfo(
get_gateway().jvm.org.apache.flink.api.common.typeinfo.BasicTypeInfo.INT_TYPE_INFO)
@staticmethod
def LONG_TYPE_INFO():
return WrapperTypeInfo(
get_gateway().jvm.org.apache.flink.api.common.typeinfo.BasicTypeInfo.LONG_TYPE_INFO)
@staticmethod
def FLOAT_TYPE_INFO():
return WrapperTypeInfo(
get_gateway().jvm.org.apache.flink.api.common.typeinfo.BasicTypeInfo.FLOAT_TYPE_INFO)
@staticmethod
def DOUBLE_TYPE_INFO():
return WrapperTypeInfo(
get_gateway().jvm.org.apache.flink.api.common.typeinfo.BasicTypeInfo.DOUBLE_TYPE_INFO)
@staticmethod
def CHAR_TYPE_INFO():
return WrapperTypeInfo(
get_gateway().jvm.org.apache.flink.api.common.typeinfo.BasicTypeInfo.CHAR_TYPE_INFO)
@staticmethod
def BIG_INT_TYPE_INFO():
return WrapperTypeInfo(
get_gateway().jvm.org.apache.flink.api.common.typeinfo.BasicTypeInfo.BIG_INT_TYPE_INFO)
@staticmethod
def BIG_DEC_TYPE_INFO():
return WrapperTypeInfo(
get_gateway().jvm.org.apache.flink.api.common.typeinfo.BasicTypeInfo.BIG_DEC_TYPE_INFO)
class SqlTimeTypeInfo(TypeInformation, ABC):
"""
SqlTimeTypeInfo enables users to get Sql Time TypeInfo.
"""
@staticmethod
def DATE():
return DateTypeInfo(
get_gateway().jvm.org.apache.flink.api.common.typeinfo.SqlTimeTypeInfo.DATE)
@staticmethod
def TIME():
return TimeTypeInfo(
get_gateway().jvm.org.apache.flink.api.common.typeinfo.SqlTimeTypeInfo.TIME)
@staticmethod
def TIMESTAMP():
return TimestampTypeInfo(
get_gateway().jvm.org.apache.flink.api.common.typeinfo.SqlTimeTypeInfo.TIMESTAMP)
class PrimitiveArrayTypeInfo(WrapperTypeInfo, ABC):
"""
A TypeInformation for arrays of primitive types (int, long, double, ...).
Supports the creation of dedicated efficient serializers for these types.
"""
@staticmethod
def BOOLEAN_PRIMITIVE_ARRAY_TYPE_INFO():
return WrapperTypeInfo(
get_gateway().jvm.org.apache.flink.api.common.typeinfo
.PrimitiveArrayTypeInfo.BOOLEAN_PRIMITIVE_ARRAY_TYPE_INFO)
@staticmethod
def BYTE_PRIMITIVE_ARRAY_TYPE_INFO():
return WrapperTypeInfo(
get_gateway().jvm.org.apache.flink.api.common.typeinfo
.PrimitiveArrayTypeInfo.BYTE_PRIMITIVE_ARRAY_TYPE_INFO)
@staticmethod
def SHORT_PRIMITIVE_ARRAY_TYPE_INFO():
return WrapperTypeInfo(
get_gateway().jvm.org.apache.flink.api.common.typeinfo
.PrimitiveArrayTypeInfo.SHORT_PRIMITIVE_ARRAY_TYPE_INFO)
@staticmethod
def INT_PRIMITIVE_ARRAY_TYPE_INFO():
return WrapperTypeInfo(
get_gateway().jvm.org.apache.flink.api.common.typeinfo
.PrimitiveArrayTypeInfo.INT_PRIMITIVE_ARRAY_TYPE_INFO)
@staticmethod
def LONG_PRIMITIVE_ARRAY_TYPE_INFO():
return WrapperTypeInfo(
get_gateway().jvm.org.apache.flink.api.common.typeinfo
.PrimitiveArrayTypeInfo.LONG_PRIMITIVE_ARRAY_TYPE_INFO)
@staticmethod
def FLOAT_PRIMITIVE_ARRAY_TYPE_INFO():
return WrapperTypeInfo(
get_gateway().jvm.org.apache.flink.api.common.typeinfo
.PrimitiveArrayTypeInfo.FLOAT_PRIMITIVE_ARRAY_TYPE_INFO)
@staticmethod
def DOUBLE_PRIMITIVE_ARRAY_TYPE_INFO():
return WrapperTypeInfo(
get_gateway().jvm.org.apache.flink.api.common.typeinfo
.PrimitiveArrayTypeInfo.DOUBLE_PRIMITIVE_ARRAY_TYPE_INFO)
@staticmethod
def CHAR_PRIMITIVE_ARRAY_TYPE_INFO():
return WrapperTypeInfo(
get_gateway().jvm.org.apache.flink.api.common.typeinfo
.PrimitiveArrayTypeInfo.CHAR_PRIMITIVE_ARRAY_TYPE_INFO)
def is_primitive_array_type_info(type_info: TypeInformation):
return type_info in {
PrimitiveArrayTypeInfo.BOOLEAN_PRIMITIVE_ARRAY_TYPE_INFO(),
PrimitiveArrayTypeInfo.BYTE_PRIMITIVE_ARRAY_TYPE_INFO(),
PrimitiveArrayTypeInfo.SHORT_PRIMITIVE_ARRAY_TYPE_INFO(),
PrimitiveArrayTypeInfo.INT_PRIMITIVE_ARRAY_TYPE_INFO(),
PrimitiveArrayTypeInfo.LONG_PRIMITIVE_ARRAY_TYPE_INFO(),
PrimitiveArrayTypeInfo.FLOAT_PRIMITIVE_ARRAY_TYPE_INFO(),
PrimitiveArrayTypeInfo.DOUBLE_PRIMITIVE_ARRAY_TYPE_INFO(),
PrimitiveArrayTypeInfo.CHAR_PRIMITIVE_ARRAY_TYPE_INFO()
}
class BasicArrayTypeInfo(WrapperTypeInfo, ABC):
"""
A TypeInformation for arrays of boxed primitive types (Integer, Long, Double, ...).
Supports the creation of dedicated efficient serializers for these types.
"""
@staticmethod
def BOOLEAN_ARRAY_TYPE_INFO():
return WrapperTypeInfo(
get_gateway().jvm.org.apache.flink.api.common.typeinfo
.BasicArrayTypeInfo.BOOLEAN_ARRAY_TYPE_INFO)
@staticmethod
def BYTE_ARRAY_TYPE_INFO():
return WrapperTypeInfo(
get_gateway().jvm.org.apache.flink.api.common.typeinfo
.BasicArrayTypeInfo.BYTE_ARRAY_TYPE_INFO)
@staticmethod
def SHORT_ARRAY_TYPE_INFO():
return WrapperTypeInfo(
get_gateway().jvm.org.apache.flink.api.common.typeinfo
.BasicArrayTypeInfo.SHORT_ARRAY_TYPE_INFO)
@staticmethod
def INT_ARRAY_TYPE_INFO():
return WrapperTypeInfo(
get_gateway().jvm.org.apache.flink.api.common.typeinfo
.BasicArrayTypeInfo.INT_ARRAY_TYPE_INFO)
@staticmethod
def LONG_ARRAY_TYPE_INFO():
return WrapperTypeInfo(
get_gateway().jvm.org.apache.flink.api.common.typeinfo
.BasicArrayTypeInfo.LONG_ARRAY_TYPE_INFO)
@staticmethod
def FLOAT_ARRAY_TYPE_INFO():
return WrapperTypeInfo(
get_gateway().jvm.org.apache.flink.api.common.typeinfo
.BasicArrayTypeInfo.FLOAT_ARRAY_TYPE_INFO)
@staticmethod
def DOUBLE_ARRAY_TYPE_INFO():
return WrapperTypeInfo(
get_gateway().jvm.org.apache.flink.api.common.typeinfo
.BasicArrayTypeInfo.DOUBLE_ARRAY_TYPE_INFO)
@staticmethod
def CHAR_ARRAY_TYPE_INFO():
return WrapperTypeInfo(
get_gateway().jvm.org.apache.flink.api.common.typeinfo
.BasicArrayTypeInfo.CHAR_ARRAY_TYPE_INFO)
@staticmethod
def STRING_ARRAY_TYPE_INFO():
return WrapperTypeInfo(
get_gateway().jvm.org.apache.flink.api.common.typeinfo
.BasicArrayTypeInfo.STRING_ARRAY_TYPE_INFO)
def is_basic_array_type_info(type_info: TypeInformation):
return type_info in {
BasicArrayTypeInfo.BOOLEAN_ARRAY_TYPE_INFO(),
BasicArrayTypeInfo.BYTE_ARRAY_TYPE_INFO(),
BasicArrayTypeInfo.SHORT_ARRAY_TYPE_INFO(),
BasicArrayTypeInfo.INT_ARRAY_TYPE_INFO(),
BasicArrayTypeInfo.LONG_ARRAY_TYPE_INFO(),
BasicArrayTypeInfo.FLOAT_ARRAY_TYPE_INFO(),
BasicArrayTypeInfo.DOUBLE_ARRAY_TYPE_INFO(),
BasicArrayTypeInfo.CHAR_ARRAY_TYPE_INFO(),
BasicArrayTypeInfo.STRING_ARRAY_TYPE_INFO()
}
class PickledBytesTypeInfo(WrapperTypeInfo, ABC):
"""
A PickledBytesTypeInfo indicates the data is a primitive byte array generated by pickle
serializer.
"""
@staticmethod
def PICKLED_BYTE_ARRAY_TYPE_INFO():
return WrapperTypeInfo(get_gateway().jvm.org.apache.flink.streaming.api.typeinfo.python
.PickledByteArrayTypeInfo.PICKLED_BYTE_ARRAY_TYPE_INFO)
class RowTypeInfo(WrapperTypeInfo):
"""
TypeInformation for Row.
"""
def __init__(self, types: List[TypeInformation], field_names: List[str] = None):
self.types = types
self.field_names = field_names
self.j_types_array = get_gateway().new_array(
get_gateway().jvm.org.apache.flink.api.common.typeinfo.TypeInformation, len(types))
for i in range(len(types)):
wrapper_typeinfo = types[i]
if isinstance(wrapper_typeinfo, WrapperTypeInfo):
self.j_types_array[i] = wrapper_typeinfo.get_java_type_info()
if field_names is None:
self._j_typeinfo = get_gateway().jvm.org.apache.flink.api.java.typeutils.RowTypeInfo(
self.j_types_array)
else:
j_names_array = get_gateway().new_array(get_gateway().jvm.java.lang.String,
len(field_names))
for i in range(len(field_names)):
j_names_array[i] = field_names[i]
self._j_typeinfo = get_gateway().jvm.org.apache.flink.api.java.typeutils.RowTypeInfo(
self.j_types_array, j_names_array)
self._need_conversion = [f.need_conversion() if isinstance(f, WrapperTypeInfo) else None
for f in types]
self._need_serialize_any_field = any(self._need_conversion)
super(RowTypeInfo, self).__init__(self._j_typeinfo)
def get_field_names(self) -> List[str]:
j_field_names = self._j_typeinfo.getFieldNames()
field_names = [name for name in j_field_names]
return field_names
def get_field_index(self, field_name: str) -> int:
return self._j_typeinfo.getFieldIndex(field_name)
def get_field_types(self) -> List[TypeInformation]:
return self.types
def __eq__(self, other) -> bool:
return self._j_typeinfo.equals(other._j_typeinfo)
def __hash__(self) -> int:
return self._j_typeinfo.hashCode()
def __str__(self) -> str:
return "RowTypeInfo(%s)" % ', '.join([field_name + ': ' + field_type.__str__()
for field_name, field_type in
zip(self.get_field_names(),
self.get_field_types())])
def need_conversion(self):
return True
def to_internal_type(self, obj):
if obj is None:
return
if self._need_serialize_any_field:
# Only calling to_internal_type function for fields that need conversion
if isinstance(obj, dict):
return tuple(f.to_internal_type(obj.get(n)) if c else obj.get(n)
for n, f, c in zip(self._j_typeinfo.getFieldNames(), self.types,
self._need_conversion))
elif isinstance(obj, (tuple, list)):
return tuple(f.to_internal_type(v) if c else v
for f, v, c in zip(self.types, obj, self._need_conversion))
elif hasattr(obj, "__dict__"):
d = obj.__dict__
return tuple(f.to_internal_type(d.get(n)) if c else d.get(n)
for n, f, c in zip(self._j_typeinfo.getFieldNames(), self.types,
self._need_conversion))
else:
raise ValueError("Unexpected tuple %r with RowTypeInfo" % obj)
else:
if isinstance(obj, dict):
return tuple(obj.get(n) for n in self._j_typeinfo.getFieldNames())
elif isinstance(obj, (list, tuple)):
return tuple(obj)
elif hasattr(obj, "__dict__"):
d = obj.__dict__
return tuple(d.get(n) for n in self._j_typeinfo.getFieldNames())
else:
raise ValueError("Unexpected tuple %r with RowTypeInfo" % obj)
def from_internal_type(self, obj):
if obj is None:
return
if isinstance(obj, (tuple, list)):
# it's already converted by pickler
return obj
if self._need_serialize_any_field:
# Only calling from_internal_type function for fields that need conversion
values = [f.from_internal_type(v) if c else v
for f, v, c in zip(self.types, obj, self._need_conversion)]
else:
values = obj
return tuple(values)
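# --- Illustrative sketch (added for clarity; not part of the original module) ---
# to_internal_type() flattens a dict, tuple/list or object with __dict__ into a
# plain tuple ordered by the declared field names, converting only the fields
# whose type needs conversion. The field layout below is an assumption for
# illustration, and a JVM gateway must be available because RowTypeInfo builds a
# Java RowTypeInfo under the hood.
def _example_row_conversion():
    """Hedged sketch of the dict -> internal tuple conversion."""
    row_info = Types.ROW_NAMED(['id', 'born'], [Types.INT(), Types.SQL_DATE()])
    internal = row_info.to_internal_type({'id': 7, 'born': datetime.date(1970, 1, 2)})
    # internal == (7, 1): the date is turned into days since the epoch by
    # DateTypeInfo, while the int field is passed through untouched.
    return internal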
class TupleTypeInfo(WrapperTypeInfo):
"""
TypeInformation for Tuple.
"""
def __init__(self, types: List[TypeInformation]):
self.types = types
j_types_array = get_gateway().new_array(
get_gateway().jvm.org.apache.flink.api.common.typeinfo.TypeInformation, len(types))
for i in range(len(types)):
field_type = types[i]
if isinstance(field_type, WrapperTypeInfo):
j_types_array[i] = field_type.get_java_type_info()
j_typeinfo = get_gateway().jvm \
.org.apache.flink.api.java.typeutils.TupleTypeInfo(j_types_array)
super(TupleTypeInfo, self).__init__(j_typeinfo=j_typeinfo)
def get_field_types(self) -> List[TypeInformation]:
return self.types
def __eq__(self, other) -> bool:
return self._j_typeinfo.equals(other._j_typeinfo)
def __hash__(self) -> int:
return self._j_typeinfo.hashCode()
def __str__(self) -> str:
return "TupleTypeInfo(%s)" % ', '.join([field_type.__str__() for field_type in self.types])
class DateTypeInfo(WrapperTypeInfo):
"""
TypeInformation for Date.
"""
def __init__(self, j_typeinfo):
super(DateTypeInfo, self).__init__(j_typeinfo)
EPOCH_ORDINAL = datetime.datetime(1970, 1, 1).toordinal()
def need_conversion(self):
return True
def to_internal_type(self, d):
if d is not None:
return d.toordinal() - self.EPOCH_ORDINAL
def from_internal_type(self, v):
if v is not None:
return datetime.date.fromordinal(v + self.EPOCH_ORDINAL)
class TimeTypeInfo(WrapperTypeInfo):
"""
TypeInformation for Time.
"""
EPOCH_ORDINAL = calendar.timegm(time.localtime(0)) * 10 ** 6
def __init__(self, j_typeinfo):
super(TimeTypeInfo, self).__init__(j_typeinfo)
def need_conversion(self):
return True
def to_internal_type(self, t):
if t is not None:
if t.tzinfo is not None:
offset = t.utcoffset()
offset = offset if offset else datetime.timedelta()
offset_microseconds =\
(offset.days * 86400 + offset.seconds) * 10 ** 6 + offset.microseconds
else:
offset_microseconds = self.EPOCH_ORDINAL
minutes = t.hour * 60 + t.minute
seconds = minutes * 60 + t.second
return seconds * 10 ** 6 + t.microsecond - offset_microseconds
def from_internal_type(self, t):
if t is not None:
seconds, microseconds = divmod(t + self.EPOCH_ORDINAL, 10 ** 6)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
return datetime.time(hours, minutes, seconds, microseconds)
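# --- Worked example (added for clarity; not part of the original module) ---
# TimeTypeInfo stores a time of day as microseconds since midnight, shifted by
# the local-epoch offset in TimeTypeInfo.EPOCH_ORDINAL above. For a naive time:
#   to_internal_type(datetime.time(1, 2, 3, 4))
#       == (1 * 3600 + 2 * 60 + 3) * 10 ** 6 + 4 - TimeTypeInfo.EPOCH_ORDINAL
# and from_internal_type() reverses the divmod chain to rebuild the same
# datetime.time(1, 2, 3, 4).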
class TimestampTypeInfo(WrapperTypeInfo):
"""
TypeInformation for Timestamp.
"""
def __init__(self, j_typeinfo):
super(TimestampTypeInfo, self).__init__(j_typeinfo)
def need_conversion(self):
return True
def to_internal_type(self, dt):
if dt is not None:
seconds = (calendar.timegm(dt.utctimetuple()) if dt.tzinfo
else time.mktime(dt.timetuple()))
return int(seconds) * 10 ** 6 + dt.microsecond
def from_internal_type(self, ts):
if ts is not None:
return datetime.datetime.fromtimestamp(ts // 10 ** 6).replace(microsecond=ts % 10 ** 6)
class Types(object):
"""
This class gives access to the type information of the most common types for which Flink has
built-in serializers and comparators.
"""
STRING = BasicTypeInfo.STRING_TYPE_INFO
BYTE = BasicTypeInfo.BYTE_TYPE_INFO
BOOLEAN = BasicTypeInfo.BOOLEAN_TYPE_INFO
SHORT = BasicTypeInfo.SHORT_TYPE_INFO
INT = BasicTypeInfo.INT_TYPE_INFO
LONG = BasicTypeInfo.LONG_TYPE_INFO
FLOAT = BasicTypeInfo.FLOAT_TYPE_INFO
DOUBLE = BasicTypeInfo.DOUBLE_TYPE_INFO
CHAR = BasicTypeInfo.CHAR_TYPE_INFO
BIG_INT = BasicTypeInfo.BIG_INT_TYPE_INFO
BIG_DEC = BasicTypeInfo.BIG_DEC_TYPE_INFO
SQL_DATE = SqlTimeTypeInfo.DATE
SQL_TIME = SqlTimeTypeInfo.TIME
SQL_TIMESTAMP = SqlTimeTypeInfo.TIMESTAMP
PICKLED_BYTE_ARRAY = PickledBytesTypeInfo.PICKLED_BYTE_ARRAY_TYPE_INFO
@staticmethod
def ROW(types: List[TypeInformation]):
"""
Returns type information for Row with fields of the given types. A row itself must not be
null.
:param types: the types of the row fields, e.g., Types.STRING(), Types.INT()
"""
return RowTypeInfo(types)
@staticmethod
def ROW_NAMED(names: List[str], types: List[TypeInformation]):
"""
Returns type information for Row with fields of the given types and with given names. A row
must not be null.
:param names: array of field names.
:param types: array of field types.
"""
return RowTypeInfo(types, names)
@staticmethod
def TUPLE(types: List[TypeInformation]):
"""
Returns type information for Tuple with fields of the given types. A Tuple itself must not
be null.
:param types: array of field types.
"""
return TupleTypeInfo(types)
@staticmethod
def PRIMITIVE_ARRAY(element_type: TypeInformation):
"""
Returns type information for arrays of primitive type (such as byte[]). The array must not
be null.
:param element_type: element type of the array (e.g. Types.BOOLEAN(), Types.INT(),
Types.DOUBLE())
"""
if element_type == Types.BOOLEAN():
return PrimitiveArrayTypeInfo.BOOLEAN_PRIMITIVE_ARRAY_TYPE_INFO()
elif element_type == Types.BYTE():
return PrimitiveArrayTypeInfo.BYTE_PRIMITIVE_ARRAY_TYPE_INFO()
elif element_type == Types.SHORT():
return PrimitiveArrayTypeInfo.SHORT_PRIMITIVE_ARRAY_TYPE_INFO()
elif element_type == Types.INT():
return PrimitiveArrayTypeInfo.INT_PRIMITIVE_ARRAY_TYPE_INFO()
elif element_type == Types.LONG():
return PrimitiveArrayTypeInfo.LONG_PRIMITIVE_ARRAY_TYPE_INFO()
elif element_type == Types.FLOAT():
return PrimitiveArrayTypeInfo.FLOAT_PRIMITIVE_ARRAY_TYPE_INFO()
elif element_type == Types.DOUBLE():
return PrimitiveArrayTypeInfo.DOUBLE_PRIMITIVE_ARRAY_TYPE_INFO()
elif element_type == Types.CHAR():
return PrimitiveArrayTypeInfo.CHAR_PRIMITIVE_ARRAY_TYPE_INFO()
else:
raise TypeError("Invalid element type for a primitive array.")
@staticmethod
def BASIC_ARRAY(element_type: TypeInformation) -> TypeInformation:
"""
Returns type information for arrays of boxed primitive type (such as Integer[]).
:param element_type: element type of the array (e.g. Types.BOOLEAN(), Types.INT(),
Types.DOUBLE())
"""
if element_type == Types.BOOLEAN():
return BasicArrayTypeInfo.BOOLEAN_ARRAY_TYPE_INFO()
elif element_type == Types.BYTE():
return BasicArrayTypeInfo.BYTE_ARRAY_TYPE_INFO()
elif element_type == Types.SHORT():
return BasicArrayTypeInfo.SHORT_ARRAY_TYPE_INFO()
elif element_type == Types.INT():
return BasicArrayTypeInfo.INT_ARRAY_TYPE_INFO()
elif element_type == Types.LONG():
return BasicArrayTypeInfo.LONG_ARRAY_TYPE_INFO()
elif element_type == Types.FLOAT():
return BasicArrayTypeInfo.FLOAT_ARRAY_TYPE_INFO()
elif element_type == Types.DOUBLE():
return BasicArrayTypeInfo.DOUBLE_ARRAY_TYPE_INFO()
elif element_type == Types.CHAR():
return BasicArrayTypeInfo.CHAR_ARRAY_TYPE_INFO()
elif element_type == Types.STRING():
return BasicArrayTypeInfo.STRING_ARRAY_TYPE_INFO()
else:
raise TypeError("Invalid element type for a boxed primitive array: %s" %
str(element_type))
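# --- Illustrative sketch (added for clarity; not part of the original module) ---
# PRIMITIVE_ARRAY maps to unboxed Java arrays (int[], double[], ...) and
# therefore has no STRING variant, while BASIC_ARRAY maps to boxed arrays
# (Integer[], String[], ...) and does. Both factories accept only the element
# types listed above and raise TypeError otherwise; a running JVM gateway is
# assumed.
def _example_array_type_infos():
    """Hedged sketch contrasting the two array factories."""
    unboxed = Types.PRIMITIVE_ARRAY(Types.INT())  # int[]
    boxed = Types.BASIC_ARRAY(Types.STRING())     # String[]
    return unboxed, boxed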
def _from_java_type(j_type_info: JavaObject) -> TypeInformation:
gateway = get_gateway()
JBasicTypeInfo = gateway.jvm.org.apache.flink.api.common.typeinfo.BasicTypeInfo
if _is_instance_of(j_type_info, JBasicTypeInfo.STRING_TYPE_INFO):
return Types.STRING()
elif _is_instance_of(j_type_info, JBasicTypeInfo.BOOLEAN_TYPE_INFO):
return Types.BOOLEAN()
elif _is_instance_of(j_type_info, JBasicTypeInfo.BYTE_TYPE_INFO):
return Types.BYTE()
elif _is_instance_of(j_type_info, JBasicTypeInfo.SHORT_TYPE_INFO):
return Types.SHORT()
elif _is_instance_of(j_type_info, JBasicTypeInfo.INT_TYPE_INFO):
return Types.INT()
elif _is_instance_of(j_type_info, JBasicTypeInfo.LONG_TYPE_INFO):
return Types.LONG()
elif _is_instance_of(j_type_info, JBasicTypeInfo.FLOAT_TYPE_INFO):
return Types.FLOAT()
elif _is_instance_of(j_type_info, JBasicTypeInfo.DOUBLE_TYPE_INFO):
return Types.DOUBLE()
elif _is_instance_of(j_type_info, JBasicTypeInfo.CHAR_TYPE_INFO):
return Types.CHAR()
elif _is_instance_of(j_type_info, JBasicTypeInfo.BIG_INT_TYPE_INFO):
return Types.BIG_INT()
elif _is_instance_of(j_type_info, JBasicTypeInfo.BIG_DEC_TYPE_INFO):
return Types.BIG_DEC()
JSqlTimeTypeInfo = gateway.jvm.org.apache.flink.api.common.typeinfo.SqlTimeTypeInfo
if _is_instance_of(j_type_info, JSqlTimeTypeInfo.DATE):
return Types.SQL_DATE()
elif _is_instance_of(j_type_info, JSqlTimeTypeInfo.TIME):
return Types.SQL_TIME()
elif _is_instance_of(j_type_info, JSqlTimeTypeInfo.TIMESTAMP):
return Types.SQL_TIMESTAMP()
JPrimitiveArrayTypeInfo = gateway.jvm.org.apache.flink.api.common.typeinfo \
.PrimitiveArrayTypeInfo
if _is_instance_of(j_type_info, JPrimitiveArrayTypeInfo.BOOLEAN_PRIMITIVE_ARRAY_TYPE_INFO):
return Types.PRIMITIVE_ARRAY(Types.BOOLEAN())
elif _is_instance_of(j_type_info, JPrimitiveArrayTypeInfo.BYTE_PRIMITIVE_ARRAY_TYPE_INFO):
return Types.PRIMITIVE_ARRAY(Types.BYTE())
elif _is_instance_of(j_type_info, JPrimitiveArrayTypeInfo.SHORT_PRIMITIVE_ARRAY_TYPE_INFO):
return Types.PRIMITIVE_ARRAY(Types.SHORT())
elif _is_instance_of(j_type_info, JPrimitiveArrayTypeInfo.INT_PRIMITIVE_ARRAY_TYPE_INFO):
return Types.PRIMITIVE_ARRAY(Types.INT())
elif _is_instance_of(j_type_info, JPrimitiveArrayTypeInfo.LONG_PRIMITIVE_ARRAY_TYPE_INFO):
return Types.PRIMITIVE_ARRAY(Types.LONG())
elif _is_instance_of(j_type_info, JPrimitiveArrayTypeInfo.FLOAT_PRIMITIVE_ARRAY_TYPE_INFO):
return Types.PRIMITIVE_ARRAY(Types.FLOAT())
elif _is_instance_of(j_type_info, JPrimitiveArrayTypeInfo.DOUBLE_PRIMITIVE_ARRAY_TYPE_INFO):
return Types.PRIMITIVE_ARRAY(Types.DOUBLE())
elif _is_instance_of(j_type_info, JPrimitiveArrayTypeInfo.CHAR_PRIMITIVE_ARRAY_TYPE_INFO):
return Types.PRIMITIVE_ARRAY(Types.CHAR())
JBasicArrayTypeInfo = gateway.jvm.org.apache.flink.api.common.typeinfo \
.BasicArrayTypeInfo
if _is_instance_of(j_type_info, JBasicArrayTypeInfo.BOOLEAN_ARRAY_TYPE_INFO):
return Types.BASIC_ARRAY(Types.BOOLEAN())
elif _is_instance_of(j_type_info, JBasicArrayTypeInfo.BYTE_ARRAY_TYPE_INFO):
return Types.BASIC_ARRAY(Types.BYTE())
elif _is_instance_of(j_type_info, JBasicArrayTypeInfo.SHORT_ARRAY_TYPE_INFO):
return Types.BASIC_ARRAY(Types.SHORT())
elif _is_instance_of(j_type_info, JBasicArrayTypeInfo.INT_ARRAY_TYPE_INFO):
return Types.BASIC_ARRAY(Types.INT())
elif _is_instance_of(j_type_info, JBasicArrayTypeInfo.LONG_ARRAY_TYPE_INFO):
return Types.BASIC_ARRAY(Types.LONG())
elif _is_instance_of(j_type_info, JBasicArrayTypeInfo.FLOAT_ARRAY_TYPE_INFO):
return Types.BASIC_ARRAY(Types.FLOAT())
elif _is_instance_of(j_type_info, JBasicArrayTypeInfo.DOUBLE_ARRAY_TYPE_INFO):
return Types.BASIC_ARRAY(Types.DOUBLE())
elif _is_instance_of(j_type_info, JBasicArrayTypeInfo.CHAR_ARRAY_TYPE_INFO):
return Types.BASIC_ARRAY(Types.CHAR())
elif _is_instance_of(j_type_info, JBasicArrayTypeInfo.STRING_ARRAY_TYPE_INFO):
return Types.BASIC_ARRAY(Types.STRING())
JPickledBytesTypeInfo = gateway.jvm \
.org.apache.flink.streaming.api.typeinfo.python.PickledByteArrayTypeInfo\
.PICKLED_BYTE_ARRAY_TYPE_INFO
if _is_instance_of(j_type_info, JPickledBytesTypeInfo):
return Types.PICKLED_BYTE_ARRAY()
JRowTypeInfo = gateway.jvm.org.apache.flink.api.java.typeutils.RowTypeInfo
if _is_instance_of(j_type_info, JRowTypeInfo):
j_row_field_names = j_type_info.getFieldNames()
j_row_field_types = j_type_info.getFieldTypes()
row_field_types = [_from_java_type(j_row_field_type) for j_row_field_type in
j_row_field_types]
return Types.ROW_NAMED(j_row_field_names, row_field_types)
JTupleTypeInfo = gateway.jvm.org.apache.flink.api.java.typeutils.TupleTypeInfo
if _is_instance_of(j_type_info, JTupleTypeInfo):
j_field_types = []
for i in range(j_type_info.getArity()):
j_field_types.append(j_type_info.getTypeAt(i))
field_types = [_from_java_type(j_field_type) for j_field_type in j_field_types]
return TupleTypeInfo(field_types)
raise TypeError("The java type info: %s is not supported in PyFlink currently." % j_type_info)
def _is_instance_of(java_object: JavaObject, java_type: Union[JavaObject, JavaClass]) -> bool:
if isinstance(java_type, JavaObject):
return java_object.equals(java_type)
elif isinstance(java_type, JavaClass):
return java_object.getClass().isAssignableFrom(java_type._java_lang_class)
return False
| apache-2.0 | 6,553,027,169,598,011,000 | 38.80915 | 100 | 0.645958 | false |
timm/timmnix | pypy3-v5.5.0-linux64/lib_pypy/identity_dict.py | 1 | 1502 | try:
from __pypy__ import identity_dict as idict
except ImportError:
idict = None
from UserDict import DictMixin
class IdentityDictPurePython(object, DictMixin):
__slots__ = "_dict _keys".split()
def __init__(self):
self._dict = {}
self._keys = {} # id(obj) -> obj
def __getitem__(self, arg):
return self._dict[id(arg)]
def __setitem__(self, arg, val):
self._keys[id(arg)] = arg
self._dict[id(arg)] = val
def __delitem__(self, arg):
del self._keys[id(arg)]
del self._dict[id(arg)]
def keys(self):
return self._keys.values()
def __contains__(self, arg):
return id(arg) in self._dict
def copy(self):
d = type(self)()
d.update(self.items())
assert len(d) == len(self)
return d
class IdentityDictPyPy(object, DictMixin):
__slots__ = ["_dict"]
def __init__(self):
self._dict = idict()
def __getitem__(self, arg):
return self._dict[arg]
def __setitem__(self, arg, val):
self._dict[arg] = val
def __delitem__(self, arg):
del self._dict[arg]
def keys(self):
return self._dict.keys()
def __contains__(self, arg):
return arg in self._dict
def copy(self):
d = type(self)()
d.update(self.items())
assert len(d) == len(self)
return d
if idict is None:
identity_dict = IdentityDictPurePython
else:
identity_dict = IdentityDictPyPy
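# --- Illustrative sketch (added for clarity; not part of the original module) ---
# Whichever implementation was selected above, lookups are keyed by object
# identity rather than equality, so two equal-but-distinct keys occupy separate
# slots. This assumes the module imported successfully on the running
# interpreter.
def _example_identity_semantics():
    """Hedged sketch: equal lists are distinct keys in an identity_dict."""
    d = identity_dict()
    key_a, key_b = [1, 2], [1, 2]  # equal, but not the same object
    d[key_a] = 'a'
    d[key_b] = 'b'
    assert d[key_a] == 'a' and d[key_b] == 'b'
    assert len(d.keys()) == 2
    return d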
| mit | 1,525,193,313,299,822,000 | 20.15493 | 48 | 0.548602 | false |
thekatiebr/NEMO_II | Classifiers/ML_Controller.py | 1 | 3672 | from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
import pandas
from pandas import DataFrame
import pandas.io.sql as psql
import KnowledgeBase
import NeuralNetworkController
import DecisionTreeController
import RandomForestController
import SVMController
import random
##############################################################################################################
# ML-Controller class #
# SKLearn interface for NEMO #
##############################################################################################################
# ***** INSTANCE VARIABLES***** #
# data - attributes as retrieved from the DATA table #
# target - classes as retrieved from the the DATA table #
# kb - instance of knowledge base object #
##############################################################################################################
class ML_Controller:
#Constructor
#imports data from knowledge base
#Preconditions:
# * A knowledge base has been set up and read data
#Postconditions:
# * Data will be imported from the knowledge base
def __init__(self, kb, algorithm_type):
cols = ",".join(kb.X)
stmt = "select " + cols + " from DATA;"
#print stmt
self.data = pandas.read_sql_query(stmt, kb.db)
#print self.data
#print "data length = " + str(len(self.data))
stmt = "select " + kb.Y + " from DATA"
#print stmt
self.target = pandas.read_sql_query(stmt, kb.db)
#print self.target
#print "target length = " + str(len(self.target))
self.kb = kb
self.isCurrentlyOptimizing = False #True when model is in optimization queue, false otherwise
self.algorithm = None
#print algorithm_type
if algorithm_type == "Neural Network":
self.algorithm = NeuralNetworkController.NeuralNetworkController(self.kb)
if algorithm_type == "Decision Tree":
self.algorithm = DecisionTreeController.DecisionTreeController(self.kb)
if algorithm_type == 'SVM':
self.algorithm = SVMController.SVMController(self.kb)
if algorithm_type == "Random Forest":
self.algorithm = RandomForestController.RandomForestController(self.kb)
def get_params(self):
return self.algorithm.get_params()
def createModel(self, id=None):
if id is None:
self.algorithm.createModel(self.data, self.target)
else:
self.algorithm.createModelFromID(self.data, self.target, id)
def createModelPreSplit(self, xtrain, xtest, ytrain, ytest, attributes=None):
if attributes is None and self.algorithm.isModelCreated():
attributes = self.get_params()
self.algorithm.createModelPreSplit(xtrain, xtest, ytrain, ytest, attributes)
def copyModel(self, id):
self.algorithm.copyModel(self.data, self.target, id)
def fit(self, x, y):
self.algorithm.fit(x,y)
def predict(self, x):
return self.algorithm.predict(x)
def runAlgorithm(self, x = None, y = None):
results = self.algorithm.runModel(self.kb.multi, x, y)
#self.kb.updateDatabaseWithResults(self.algorithm)
return results
def updateDatabaseWithResults(self):
self.kb.updateDatabaseWithResults(self.algorithm)
def getName(self):
return self.algorithm.algorithm_name
def getID(self):
return self.algorithm.algorithm_id
def optimizeAlgorithm(self):
curr_id = self.algorithm.algorithm_id
self.algorithm = self.algorithm.optimize('Accuracy', 'Coordinate Ascent')
self.algorithm.algorithm_id = curr_id
self.algorithm.results['ID'] = curr_id
self.kb.updateDatabaseWithResults(self.algorithm)
#self.kb.removeModelFromRepository(self.algorithm)
self.kb.updateDatabaseWithModel(self.algorithm) | apache-2.0 | 4,953,417,685,003,964,000 | 36.479592 | 110 | 0.656046 | false |
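# Illustrative usage sketch: a minimal way to drive the ML_Controller class above, based only
# on the methods it defines. The KnowledgeBase constructor call is an assumption (its real
# arguments live in KnowledgeBase.py); "Decision Tree" is one of the algorithm_type strings
# checked in __init__.
#
#   kb = KnowledgeBase.KnowledgeBase(config)        # hypothetical constructor/arguments
#   controller = ML_Controller(kb, "Decision Tree")
#   controller.createModel()                        # trains on the DATA table pulled from kb.db
#   results = controller.runAlgorithm()
#   controller.updateDatabaseWithResults()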
foospidy/DbDat | plugins/db2/__init__.py | 1 | 1889 | from check_information_version import *
from check_information_db2system import *
from check_configuration_audit_buffer import *
from check_configuration_authentication_mechanism import *
from check_configuration_catalog_noauth import *
from check_configuration_datalinks import *
from check_configuration_dftdbpath import *
from check_configuration_diaglevel import *
from check_configuration_diagpath import *
from check_configuration_discover import *
from check_configuration_discover_inst import *
from check_configuration_discover_db import *
from check_configuration_fed_noauth import *
from check_configuration_health_mon import *
from check_configuration_keepfenced import *
from check_configuration_notifylevel import *
from check_configuration_srvcon_auth import *
from check_configuration_archretrydelay import *
from check_configuration_numarchretry import *
from check_configuration_logarchmeth import *
from check_configuration_failarchpath import *
from check_configuration_num_db_backups import *
from check_configuration_autorestart import *
from check_configuration_mirrorlogpath import *
from check_configuration_dasadm_group import *
from check_configuration_exec_exp_task import *
from check_configuration_sched_enable import *
from check_configuration_max_connection_limits import *
from check_configuration_auto_maint import *
from check_privilege_syscat_views import *
from check_privilege_tablespaces import *
from check_privilege_group_entitlements import *
from check_privilege_secadm import *
from check_privilege_dbadm import *
from check_privilege_createtab import *
from check_privilege_bindadd import *
from check_privilege_connect import *
from check_privilege_nofence import *
from check_privilege_implschema import *
from check_privilege_load import *
from check_privilege_external_routine import *
from check_privilege_external_quiesceconnect import *
| gpl-2.0 | 3,025,238,743,346,959,400 | 43.97619 | 58 | 0.833245 | false |
Florianboux/zds-site | zds/utils/templatetags/interventions.py | 1 | 6597 | # coding: utf-8
from datetime import datetime, timedelta
import time
from django import template
from django.db.models import Q, F
from zds.article.models import Reaction, ArticleRead
from zds.forum.models import TopicFollowed, never_read as never_read_topic, Post, TopicRead
from zds.mp.models import PrivateTopic, PrivateTopicRead
from zds.tutorial.models import Note, TutorialRead
from zds.utils.models import Alert
register = template.Library()
@register.filter('is_read')
def is_read(topic):
    return not never_read_topic(topic)
@register.filter('humane_delta')
def humane_delta(value):
    # maps each period key to its (French) display label
const = {1: "Aujourd'hui", 2: "Hier", 3: "Cette semaine", 4: "Ce mois-ci", 5: "Cette année"}
return const[value]
@register.filter('followed_topics')
def followed_topics(user):
topicsfollowed = TopicFollowed.objects.select_related("topic").filter(user=user)\
.order_by('-topic__last_message__pubdate')[:10]
    # 'period' maps each period key (Today, Yesterday, This week, This month, This year)
    # to the number of days a last message may be old while still belonging to that period.
    # For example, the tuple (2, 1) means that period "2" ("Yesterday" according to
    # humane_delta) still applies as long as the pubdate is at most one day old.
    # The numeric key also keeps the resulting dict easy to sort.
period = ((1, 0), (2, 1), (3, 7), (4, 30), (5, 360))
topics = {}
for tf in topicsfollowed:
for p in period:
if tf.topic.last_message.pubdate.date() >= (datetime.now() - timedelta(days=int(p[1]),
hours=0, minutes=0,
seconds=0)).date():
if p[0] in topics:
topics[p[0]].append(tf.topic)
else:
topics[p[0]] = [tf.topic]
break
return topics
def comp(d1, d2):
v1 = int(time.mktime(d1['pubdate'].timetuple()))
v2 = int(time.mktime(d2['pubdate'].timetuple()))
if v1 > v2:
return -1
elif v1 < v2:
return 1
else:
return 0
@register.filter('interventions_topics')
def interventions_topics(user):
topicsfollowed = TopicFollowed.objects.filter(user=user).values("topic").distinct().all()
topics_never_read = TopicRead.objects\
.filter(user=user)\
.filter(topic__in=topicsfollowed)\
.select_related("topic")\
.exclude(post=F('topic__last_message'))
articlesfollowed = Reaction.objects\
.filter(author=user)\
.values('article')\
.distinct().all()
articles_never_read = ArticleRead.objects\
.filter(user=user)\
.filter(article__in=articlesfollowed)\
.select_related("article")\
.exclude(reaction=F('article__last_reaction'))
tutorialsfollowed = Note.objects\
.filter(author=user)\
.values('tutorial')\
.distinct().all()
tutorials_never_read = TutorialRead.objects\
.filter(user=user)\
.filter(tutorial__in=tutorialsfollowed)\
.exclude(note=F('tutorial__last_note'))
posts_unread = []
for art in articles_never_read:
content = art.article.first_unread_reaction()
posts_unread.append({'pubdate': content.pubdate,
'author': content.author,
'title': art.article.title,
'url': content.get_absolute_url()})
for tuto in tutorials_never_read:
content = tuto.tutorial.first_unread_note()
posts_unread.append({'pubdate': content.pubdate,
'author': content.author,
'title': tuto.tutorial.title,
'url': content.get_absolute_url()})
for top in topics_never_read:
content = top.topic.first_unread_post()
if content is None:
content = top.topic.last_message
posts_unread.append({'pubdate': content.pubdate,
'author': content.author,
'title': top.topic.title,
'url': content.get_absolute_url()})
posts_unread.sort(cmp=comp)
return posts_unread
@register.filter('interventions_privatetopics')
def interventions_privatetopics(user):
topics_never_read = list(PrivateTopicRead.objects
.filter(user=user)
.filter(privatepost=F('privatetopic__last_message')).all())
tnrs = []
for tnr in topics_never_read:
tnrs.append(tnr.privatetopic.pk)
privatetopics_unread = PrivateTopic.objects\
.filter(Q(author=user) | Q(participants__in=[user]))\
.exclude(pk__in=tnrs)\
.select_related("privatetopic")\
.order_by("-pubdate")\
.distinct()
return {'unread': privatetopics_unread}
@register.filter(name='alerts_list')
def alerts_list(user):
total = []
alerts = Alert.objects.select_related("author").all().order_by('-pubdate')[:10]
for alert in alerts:
if alert.scope == Alert.FORUM:
post = Post.objects.select_related("topic").get(pk=alert.comment.pk)
total.append({'title': post.topic.title,
'url': post.get_absolute_url(),
'pubdate': post.pubdate,
'author': alert.author,
'text': alert.text})
if alert.scope == Alert.ARTICLE:
reaction = Reaction.objects.select_related("article").get(pk=alert.comment.pk)
total.append({'title': reaction.article.title,
'url': reaction.get_absolute_url(),
'pubdate': reaction.pubdate,
'author': alert.author,
'text': alert.text})
if alert.scope == Alert.TUTORIAL:
note = Note.objects.select_related("tutorial").get(pk=alert.comment.pk)
total.append({'title': note.tutorial.title,
'url': note.get_absolute_url(),
'pubdate': note.pubdate,
'author': alert.author,
'text': alert.text})
return total
@register.filter(name='alerts_count')
def alerts_count(user):
if user.is_authenticated():
return Alert.objects.count()
else:
return 0
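# Illustrative template usage sketch: the filters above are registered on 'register', so a
# Django template loads this library by its module name ("interventions") and applies them
# to a topic or user. The surrounding template markup below is only an example.
#
#   {% load interventions %}
#   {% if topic|is_read %}already read{% endif %}
#   {{ 2|humane_delta }}                                  {# -> "Hier" #}
#   {% with user|interventions_privatetopics as mp %}{{ mp.unread.count }}{% endwith %}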
| gpl-3.0 | -6,155,662,566,906,783,000 | 34.272727 | 102 | 0.568526 | false |
guyuanlin/try-talk | trytalk/settings/local.py | 1 | 1831 | from default import *
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# Using SpatialLite backend
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'trytalk',
'USER': 'test',
'PASSWORD': 'test',
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
# settings for request application
REQUEST_IGNORE_PATHS = (
r'^admin/',
)
REQUEST_ONLY_ERRORS = True
REQUEST_HEAD_FIELDS = [
'HTTP_AUTHORIZATION',
'CONTENT_TYPE',
]
# mobile_notifications settings
APNS_USE_SANDBOX = False
APNS_CERT_PEM = 'mobile_notifications/certificates/apns-production-cert.pem'
APNS_KEY_PEM = 'mobile_notifications/certificates/apns-production-key-noenc.pem'
# LOGGING CONFIGURATION
# A logging configuration that writes log messages to the console.
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
# Formatting of messages.
'formatters': {
# Don't need to show the time when logging to console.
'console': {
'format': '%(levelname)s %(name)s.%(funcName)s (%(lineno)d) %(message)s'
}
},
# The handlers decide what we should do with a logging message - do we email
# it, ditch it, or write it to a file?
'handlers': {
# Writing to console. Use only in dev.
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'console'
},
# Send logs to /dev/null.
'null': {
'level': 'DEBUG',
'class': 'django.utils.log.NullHandler',
},
},
# Loggers decide what is logged.
'loggers': {
'': {
# Default (suitable for dev) is to log to console.
'handlers': ['console'],
'level': 'INFO',
'propagate': False,
},
# logging of SQL statements. Default is to ditch them (send them to
# null). Note that this logger only works if DEBUG = True.
'django.db.backends': {
'handlers': ['null'],
'level': 'DEBUG',
'propagate': False,
},
}
} | apache-2.0 | 4,495,261,597,991,617,500 | 22.792208 | 80 | 0.652649 | false |
randyzingle/tools | kub/services/archive/cdk/python/sample-app/.env/lib/python3.6/site-packages/aws_cdk/aws_logs/__init__.py | 1 | 94375 | import abc
import datetime
import enum
import typing
import jsii
import jsii.compat
import publication
from jsii.python import classproperty
import aws_cdk.aws_cloudwatch
import aws_cdk.aws_iam
import aws_cdk.core
__jsii_assembly__ = jsii.JSIIAssembly.load("@aws-cdk/aws-logs", "1.15.0", __name__, "aws-logs@1.15.0.jsii.tgz")
class CfnDestination(aws_cdk.core.CfnResource, metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-logs.CfnDestination"):
"""A CloudFormation ``AWS::Logs::Destination``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-logs-destination.html
cloudformationResource:
:cloudformationResource:: AWS::Logs::Destination
"""
def __init__(self, scope: aws_cdk.core.Construct, id: str, *, destination_name: str, destination_policy: str, role_arn: str, target_arn: str) -> None:
"""Create a new ``AWS::Logs::Destination``.
:param scope: - scope in which this resource is defined.
:param id: - scoped id of the resource.
:param props: - resource properties.
:param destination_name: ``AWS::Logs::Destination.DestinationName``.
:param destination_policy: ``AWS::Logs::Destination.DestinationPolicy``.
:param role_arn: ``AWS::Logs::Destination.RoleArn``.
:param target_arn: ``AWS::Logs::Destination.TargetArn``.
"""
props = CfnDestinationProps(destination_name=destination_name, destination_policy=destination_policy, role_arn=role_arn, target_arn=target_arn)
jsii.create(CfnDestination, self, [scope, id, props])
@jsii.member(jsii_name="renderProperties")
def _render_properties(self, props: typing.Mapping[str,typing.Any]) -> typing.Mapping[str,typing.Any]:
"""
:param props: -
"""
return jsii.invoke(self, "renderProperties", [props])
@classproperty
@jsii.member(jsii_name="CFN_RESOURCE_TYPE_NAME")
def CFN_RESOURCE_TYPE_NAME(cls) -> str:
"""The CloudFormation resource type name for this resource class."""
return jsii.sget(cls, "CFN_RESOURCE_TYPE_NAME")
@property
@jsii.member(jsii_name="attrArn")
def attr_arn(self) -> str:
"""
cloudformationAttribute:
:cloudformationAttribute:: Arn
"""
return jsii.get(self, "attrArn")
@property
@jsii.member(jsii_name="cfnProperties")
def _cfn_properties(self) -> typing.Mapping[str,typing.Any]:
return jsii.get(self, "cfnProperties")
@property
@jsii.member(jsii_name="destinationName")
def destination_name(self) -> str:
"""``AWS::Logs::Destination.DestinationName``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-logs-destination.html#cfn-logs-destination-destinationname
"""
return jsii.get(self, "destinationName")
@destination_name.setter
def destination_name(self, value: str):
return jsii.set(self, "destinationName", value)
@property
@jsii.member(jsii_name="destinationPolicy")
def destination_policy(self) -> str:
"""``AWS::Logs::Destination.DestinationPolicy``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-logs-destination.html#cfn-logs-destination-destinationpolicy
"""
return jsii.get(self, "destinationPolicy")
@destination_policy.setter
def destination_policy(self, value: str):
return jsii.set(self, "destinationPolicy", value)
@property
@jsii.member(jsii_name="roleArn")
def role_arn(self) -> str:
"""``AWS::Logs::Destination.RoleArn``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-logs-destination.html#cfn-logs-destination-rolearn
"""
return jsii.get(self, "roleArn")
@role_arn.setter
def role_arn(self, value: str):
return jsii.set(self, "roleArn", value)
@property
@jsii.member(jsii_name="targetArn")
def target_arn(self) -> str:
"""``AWS::Logs::Destination.TargetArn``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-logs-destination.html#cfn-logs-destination-targetarn
"""
return jsii.get(self, "targetArn")
@target_arn.setter
def target_arn(self, value: str):
return jsii.set(self, "targetArn", value)
@jsii.data_type(jsii_type="@aws-cdk/aws-logs.CfnDestinationProps", jsii_struct_bases=[], name_mapping={'destination_name': 'destinationName', 'destination_policy': 'destinationPolicy', 'role_arn': 'roleArn', 'target_arn': 'targetArn'})
class CfnDestinationProps():
def __init__(self, *, destination_name: str, destination_policy: str, role_arn: str, target_arn: str):
"""Properties for defining a ``AWS::Logs::Destination``.
:param destination_name: ``AWS::Logs::Destination.DestinationName``.
:param destination_policy: ``AWS::Logs::Destination.DestinationPolicy``.
:param role_arn: ``AWS::Logs::Destination.RoleArn``.
:param target_arn: ``AWS::Logs::Destination.TargetArn``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-logs-destination.html
"""
self._values = {
'destination_name': destination_name,
'destination_policy': destination_policy,
'role_arn': role_arn,
'target_arn': target_arn,
}
@property
def destination_name(self) -> str:
"""``AWS::Logs::Destination.DestinationName``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-logs-destination.html#cfn-logs-destination-destinationname
"""
return self._values.get('destination_name')
@property
def destination_policy(self) -> str:
"""``AWS::Logs::Destination.DestinationPolicy``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-logs-destination.html#cfn-logs-destination-destinationpolicy
"""
return self._values.get('destination_policy')
@property
def role_arn(self) -> str:
"""``AWS::Logs::Destination.RoleArn``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-logs-destination.html#cfn-logs-destination-rolearn
"""
return self._values.get('role_arn')
@property
def target_arn(self) -> str:
"""``AWS::Logs::Destination.TargetArn``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-logs-destination.html#cfn-logs-destination-targetarn
"""
return self._values.get('target_arn')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'CfnDestinationProps(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
class CfnLogGroup(aws_cdk.core.CfnResource, metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-logs.CfnLogGroup"):
"""A CloudFormation ``AWS::Logs::LogGroup``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-logs-loggroup.html
cloudformationResource:
:cloudformationResource:: AWS::Logs::LogGroup
"""
def __init__(self, scope: aws_cdk.core.Construct, id: str, *, log_group_name: typing.Optional[str]=None, retention_in_days: typing.Optional[jsii.Number]=None) -> None:
"""Create a new ``AWS::Logs::LogGroup``.
:param scope: - scope in which this resource is defined.
:param id: - scoped id of the resource.
:param props: - resource properties.
:param log_group_name: ``AWS::Logs::LogGroup.LogGroupName``.
:param retention_in_days: ``AWS::Logs::LogGroup.RetentionInDays``.
"""
props = CfnLogGroupProps(log_group_name=log_group_name, retention_in_days=retention_in_days)
jsii.create(CfnLogGroup, self, [scope, id, props])
@jsii.member(jsii_name="renderProperties")
def _render_properties(self, props: typing.Mapping[str,typing.Any]) -> typing.Mapping[str,typing.Any]:
"""
:param props: -
"""
return jsii.invoke(self, "renderProperties", [props])
@classproperty
@jsii.member(jsii_name="CFN_RESOURCE_TYPE_NAME")
def CFN_RESOURCE_TYPE_NAME(cls) -> str:
"""The CloudFormation resource type name for this resource class."""
return jsii.sget(cls, "CFN_RESOURCE_TYPE_NAME")
@property
@jsii.member(jsii_name="attrArn")
def attr_arn(self) -> str:
"""
cloudformationAttribute:
:cloudformationAttribute:: Arn
"""
return jsii.get(self, "attrArn")
@property
@jsii.member(jsii_name="cfnProperties")
def _cfn_properties(self) -> typing.Mapping[str,typing.Any]:
return jsii.get(self, "cfnProperties")
@property
@jsii.member(jsii_name="logGroupName")
def log_group_name(self) -> typing.Optional[str]:
"""``AWS::Logs::LogGroup.LogGroupName``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-logs-loggroup.html#cfn-cwl-loggroup-loggroupname
"""
return jsii.get(self, "logGroupName")
@log_group_name.setter
def log_group_name(self, value: typing.Optional[str]):
return jsii.set(self, "logGroupName", value)
@property
@jsii.member(jsii_name="retentionInDays")
def retention_in_days(self) -> typing.Optional[jsii.Number]:
"""``AWS::Logs::LogGroup.RetentionInDays``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-logs-loggroup.html#cfn-cwl-loggroup-retentionindays
"""
return jsii.get(self, "retentionInDays")
@retention_in_days.setter
def retention_in_days(self, value: typing.Optional[jsii.Number]):
return jsii.set(self, "retentionInDays", value)
@jsii.data_type(jsii_type="@aws-cdk/aws-logs.CfnLogGroupProps", jsii_struct_bases=[], name_mapping={'log_group_name': 'logGroupName', 'retention_in_days': 'retentionInDays'})
class CfnLogGroupProps():
def __init__(self, *, log_group_name: typing.Optional[str]=None, retention_in_days: typing.Optional[jsii.Number]=None):
"""Properties for defining a ``AWS::Logs::LogGroup``.
:param log_group_name: ``AWS::Logs::LogGroup.LogGroupName``.
:param retention_in_days: ``AWS::Logs::LogGroup.RetentionInDays``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-logs-loggroup.html
"""
self._values = {
}
if log_group_name is not None: self._values["log_group_name"] = log_group_name
if retention_in_days is not None: self._values["retention_in_days"] = retention_in_days
@property
def log_group_name(self) -> typing.Optional[str]:
"""``AWS::Logs::LogGroup.LogGroupName``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-logs-loggroup.html#cfn-cwl-loggroup-loggroupname
"""
return self._values.get('log_group_name')
@property
def retention_in_days(self) -> typing.Optional[jsii.Number]:
"""``AWS::Logs::LogGroup.RetentionInDays``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-logs-loggroup.html#cfn-cwl-loggroup-retentionindays
"""
return self._values.get('retention_in_days')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'CfnLogGroupProps(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
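# Illustrative usage sketch: a helper showing how the raw CfnLogGroup resource above can be
# instantiated. The construct id, log group name and retention value are made up; 'scope'
# would normally be an aws_cdk.core.Stack.
def _example_cfn_log_group(scope: aws_cdk.core.Construct) -> CfnLogGroup:
    # Creates an AWS::Logs::LogGroup with a 30-day retention policy.
    return CfnLogGroup(scope, "ExampleLogGroup",
                       log_group_name="/my/app/example",
                       retention_in_days=30)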
class CfnLogStream(aws_cdk.core.CfnResource, metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-logs.CfnLogStream"):
"""A CloudFormation ``AWS::Logs::LogStream``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-logs-logstream.html
cloudformationResource:
:cloudformationResource:: AWS::Logs::LogStream
"""
def __init__(self, scope: aws_cdk.core.Construct, id: str, *, log_group_name: str, log_stream_name: typing.Optional[str]=None) -> None:
"""Create a new ``AWS::Logs::LogStream``.
:param scope: - scope in which this resource is defined.
:param id: - scoped id of the resource.
:param props: - resource properties.
:param log_group_name: ``AWS::Logs::LogStream.LogGroupName``.
:param log_stream_name: ``AWS::Logs::LogStream.LogStreamName``.
"""
props = CfnLogStreamProps(log_group_name=log_group_name, log_stream_name=log_stream_name)
jsii.create(CfnLogStream, self, [scope, id, props])
@jsii.member(jsii_name="renderProperties")
def _render_properties(self, props: typing.Mapping[str,typing.Any]) -> typing.Mapping[str,typing.Any]:
"""
:param props: -
"""
return jsii.invoke(self, "renderProperties", [props])
@classproperty
@jsii.member(jsii_name="CFN_RESOURCE_TYPE_NAME")
def CFN_RESOURCE_TYPE_NAME(cls) -> str:
"""The CloudFormation resource type name for this resource class."""
return jsii.sget(cls, "CFN_RESOURCE_TYPE_NAME")
@property
@jsii.member(jsii_name="cfnProperties")
def _cfn_properties(self) -> typing.Mapping[str,typing.Any]:
return jsii.get(self, "cfnProperties")
@property
@jsii.member(jsii_name="logGroupName")
def log_group_name(self) -> str:
"""``AWS::Logs::LogStream.LogGroupName``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-logs-logstream.html#cfn-logs-logstream-loggroupname
"""
return jsii.get(self, "logGroupName")
@log_group_name.setter
def log_group_name(self, value: str):
return jsii.set(self, "logGroupName", value)
@property
@jsii.member(jsii_name="logStreamName")
def log_stream_name(self) -> typing.Optional[str]:
"""``AWS::Logs::LogStream.LogStreamName``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-logs-logstream.html#cfn-logs-logstream-logstreamname
"""
return jsii.get(self, "logStreamName")
@log_stream_name.setter
def log_stream_name(self, value: typing.Optional[str]):
return jsii.set(self, "logStreamName", value)
@jsii.data_type(jsii_type="@aws-cdk/aws-logs.CfnLogStreamProps", jsii_struct_bases=[], name_mapping={'log_group_name': 'logGroupName', 'log_stream_name': 'logStreamName'})
class CfnLogStreamProps():
def __init__(self, *, log_group_name: str, log_stream_name: typing.Optional[str]=None):
"""Properties for defining a ``AWS::Logs::LogStream``.
:param log_group_name: ``AWS::Logs::LogStream.LogGroupName``.
:param log_stream_name: ``AWS::Logs::LogStream.LogStreamName``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-logs-logstream.html
"""
self._values = {
'log_group_name': log_group_name,
}
if log_stream_name is not None: self._values["log_stream_name"] = log_stream_name
@property
def log_group_name(self) -> str:
"""``AWS::Logs::LogStream.LogGroupName``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-logs-logstream.html#cfn-logs-logstream-loggroupname
"""
return self._values.get('log_group_name')
@property
def log_stream_name(self) -> typing.Optional[str]:
"""``AWS::Logs::LogStream.LogStreamName``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-logs-logstream.html#cfn-logs-logstream-logstreamname
"""
return self._values.get('log_stream_name')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'CfnLogStreamProps(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
class CfnMetricFilter(aws_cdk.core.CfnResource, metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-logs.CfnMetricFilter"):
"""A CloudFormation ``AWS::Logs::MetricFilter``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-logs-metricfilter.html
cloudformationResource:
:cloudformationResource:: AWS::Logs::MetricFilter
"""
def __init__(self, scope: aws_cdk.core.Construct, id: str, *, filter_pattern: str, log_group_name: str, metric_transformations: typing.Union[aws_cdk.core.IResolvable, typing.List[typing.Union["MetricTransformationProperty", aws_cdk.core.IResolvable]]]) -> None:
"""Create a new ``AWS::Logs::MetricFilter``.
:param scope: - scope in which this resource is defined.
:param id: - scoped id of the resource.
:param props: - resource properties.
:param filter_pattern: ``AWS::Logs::MetricFilter.FilterPattern``.
:param log_group_name: ``AWS::Logs::MetricFilter.LogGroupName``.
:param metric_transformations: ``AWS::Logs::MetricFilter.MetricTransformations``.
"""
props = CfnMetricFilterProps(filter_pattern=filter_pattern, log_group_name=log_group_name, metric_transformations=metric_transformations)
jsii.create(CfnMetricFilter, self, [scope, id, props])
@jsii.member(jsii_name="renderProperties")
def _render_properties(self, props: typing.Mapping[str,typing.Any]) -> typing.Mapping[str,typing.Any]:
"""
:param props: -
"""
return jsii.invoke(self, "renderProperties", [props])
@classproperty
@jsii.member(jsii_name="CFN_RESOURCE_TYPE_NAME")
def CFN_RESOURCE_TYPE_NAME(cls) -> str:
"""The CloudFormation resource type name for this resource class."""
return jsii.sget(cls, "CFN_RESOURCE_TYPE_NAME")
@property
@jsii.member(jsii_name="cfnProperties")
def _cfn_properties(self) -> typing.Mapping[str,typing.Any]:
return jsii.get(self, "cfnProperties")
@property
@jsii.member(jsii_name="filterPattern")
def filter_pattern(self) -> str:
"""``AWS::Logs::MetricFilter.FilterPattern``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-logs-metricfilter.html#cfn-cwl-metricfilter-filterpattern
"""
return jsii.get(self, "filterPattern")
@filter_pattern.setter
def filter_pattern(self, value: str):
return jsii.set(self, "filterPattern", value)
@property
@jsii.member(jsii_name="logGroupName")
def log_group_name(self) -> str:
"""``AWS::Logs::MetricFilter.LogGroupName``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-logs-metricfilter.html#cfn-cwl-metricfilter-loggroupname
"""
return jsii.get(self, "logGroupName")
@log_group_name.setter
def log_group_name(self, value: str):
return jsii.set(self, "logGroupName", value)
@property
@jsii.member(jsii_name="metricTransformations")
def metric_transformations(self) -> typing.Union[aws_cdk.core.IResolvable, typing.List[typing.Union["MetricTransformationProperty", aws_cdk.core.IResolvable]]]:
"""``AWS::Logs::MetricFilter.MetricTransformations``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-logs-metricfilter.html#cfn-cwl-metricfilter-metrictransformations
"""
return jsii.get(self, "metricTransformations")
@metric_transformations.setter
def metric_transformations(self, value: typing.Union[aws_cdk.core.IResolvable, typing.List[typing.Union["MetricTransformationProperty", aws_cdk.core.IResolvable]]]):
return jsii.set(self, "metricTransformations", value)
@jsii.data_type(jsii_type="@aws-cdk/aws-logs.CfnMetricFilter.MetricTransformationProperty", jsii_struct_bases=[], name_mapping={'metric_name': 'metricName', 'metric_namespace': 'metricNamespace', 'metric_value': 'metricValue', 'default_value': 'defaultValue'})
class MetricTransformationProperty():
def __init__(self, *, metric_name: str, metric_namespace: str, metric_value: str, default_value: typing.Optional[jsii.Number]=None):
"""
:param metric_name: ``CfnMetricFilter.MetricTransformationProperty.MetricName``.
:param metric_namespace: ``CfnMetricFilter.MetricTransformationProperty.MetricNamespace``.
:param metric_value: ``CfnMetricFilter.MetricTransformationProperty.MetricValue``.
:param default_value: ``CfnMetricFilter.MetricTransformationProperty.DefaultValue``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-logs-metricfilter-metrictransformation.html
"""
self._values = {
'metric_name': metric_name,
'metric_namespace': metric_namespace,
'metric_value': metric_value,
}
if default_value is not None: self._values["default_value"] = default_value
@property
def metric_name(self) -> str:
"""``CfnMetricFilter.MetricTransformationProperty.MetricName``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-logs-metricfilter-metrictransformation.html#cfn-cwl-metricfilter-metrictransformation-metricname
"""
return self._values.get('metric_name')
@property
def metric_namespace(self) -> str:
"""``CfnMetricFilter.MetricTransformationProperty.MetricNamespace``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-logs-metricfilter-metrictransformation.html#cfn-cwl-metricfilter-metrictransformation-metricnamespace
"""
return self._values.get('metric_namespace')
@property
def metric_value(self) -> str:
"""``CfnMetricFilter.MetricTransformationProperty.MetricValue``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-logs-metricfilter-metrictransformation.html#cfn-cwl-metricfilter-metrictransformation-metricvalue
"""
return self._values.get('metric_value')
@property
def default_value(self) -> typing.Optional[jsii.Number]:
"""``CfnMetricFilter.MetricTransformationProperty.DefaultValue``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-logs-metricfilter-metrictransformation.html#cfn-cwl-metricfilter-metrictransformation-defaultvalue
"""
return self._values.get('default_value')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'MetricTransformationProperty(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
@jsii.data_type(jsii_type="@aws-cdk/aws-logs.CfnMetricFilterProps", jsii_struct_bases=[], name_mapping={'filter_pattern': 'filterPattern', 'log_group_name': 'logGroupName', 'metric_transformations': 'metricTransformations'})
class CfnMetricFilterProps():
def __init__(self, *, filter_pattern: str, log_group_name: str, metric_transformations: typing.Union[aws_cdk.core.IResolvable, typing.List[typing.Union["CfnMetricFilter.MetricTransformationProperty", aws_cdk.core.IResolvable]]]):
"""Properties for defining a ``AWS::Logs::MetricFilter``.
:param filter_pattern: ``AWS::Logs::MetricFilter.FilterPattern``.
:param log_group_name: ``AWS::Logs::MetricFilter.LogGroupName``.
:param metric_transformations: ``AWS::Logs::MetricFilter.MetricTransformations``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-logs-metricfilter.html
"""
self._values = {
'filter_pattern': filter_pattern,
'log_group_name': log_group_name,
'metric_transformations': metric_transformations,
}
@property
def filter_pattern(self) -> str:
"""``AWS::Logs::MetricFilter.FilterPattern``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-logs-metricfilter.html#cfn-cwl-metricfilter-filterpattern
"""
return self._values.get('filter_pattern')
@property
def log_group_name(self) -> str:
"""``AWS::Logs::MetricFilter.LogGroupName``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-logs-metricfilter.html#cfn-cwl-metricfilter-loggroupname
"""
return self._values.get('log_group_name')
@property
def metric_transformations(self) -> typing.Union[aws_cdk.core.IResolvable, typing.List[typing.Union["CfnMetricFilter.MetricTransformationProperty", aws_cdk.core.IResolvable]]]:
"""``AWS::Logs::MetricFilter.MetricTransformations``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-logs-metricfilter.html#cfn-cwl-metricfilter-metrictransformations
"""
return self._values.get('metric_transformations')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'CfnMetricFilterProps(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
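# Illustrative usage sketch: a helper showing how CfnMetricFilter and its nested
# MetricTransformationProperty defined above fit together. The construct id, metric names
# and the "ERROR" pattern are made up; 'scope' would normally be an aws_cdk.core.Stack.
def _example_cfn_metric_filter(scope: aws_cdk.core.Construct, log_group_name: str) -> CfnMetricFilter:
    # Counts log events containing the literal term "ERROR" as a custom metric.
    return CfnMetricFilter(scope, "ExampleErrorFilter",
                           filter_pattern="ERROR",
                           log_group_name=log_group_name,
                           metric_transformations=[
                               CfnMetricFilter.MetricTransformationProperty(
                                   metric_name="ErrorCount",
                                   metric_namespace="MyApp",
                                   metric_value="1")])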
class CfnSubscriptionFilter(aws_cdk.core.CfnResource, metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-logs.CfnSubscriptionFilter"):
"""A CloudFormation ``AWS::Logs::SubscriptionFilter``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-logs-subscriptionfilter.html
cloudformationResource:
:cloudformationResource:: AWS::Logs::SubscriptionFilter
"""
def __init__(self, scope: aws_cdk.core.Construct, id: str, *, destination_arn: str, filter_pattern: str, log_group_name: str, role_arn: typing.Optional[str]=None) -> None:
"""Create a new ``AWS::Logs::SubscriptionFilter``.
:param scope: - scope in which this resource is defined.
:param id: - scoped id of the resource.
:param props: - resource properties.
:param destination_arn: ``AWS::Logs::SubscriptionFilter.DestinationArn``.
:param filter_pattern: ``AWS::Logs::SubscriptionFilter.FilterPattern``.
:param log_group_name: ``AWS::Logs::SubscriptionFilter.LogGroupName``.
:param role_arn: ``AWS::Logs::SubscriptionFilter.RoleArn``.
"""
props = CfnSubscriptionFilterProps(destination_arn=destination_arn, filter_pattern=filter_pattern, log_group_name=log_group_name, role_arn=role_arn)
jsii.create(CfnSubscriptionFilter, self, [scope, id, props])
@jsii.member(jsii_name="renderProperties")
def _render_properties(self, props: typing.Mapping[str,typing.Any]) -> typing.Mapping[str,typing.Any]:
"""
:param props: -
"""
return jsii.invoke(self, "renderProperties", [props])
@classproperty
@jsii.member(jsii_name="CFN_RESOURCE_TYPE_NAME")
def CFN_RESOURCE_TYPE_NAME(cls) -> str:
"""The CloudFormation resource type name for this resource class."""
return jsii.sget(cls, "CFN_RESOURCE_TYPE_NAME")
@property
@jsii.member(jsii_name="cfnProperties")
def _cfn_properties(self) -> typing.Mapping[str,typing.Any]:
return jsii.get(self, "cfnProperties")
@property
@jsii.member(jsii_name="destinationArn")
def destination_arn(self) -> str:
"""``AWS::Logs::SubscriptionFilter.DestinationArn``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-logs-subscriptionfilter.html#cfn-cwl-subscriptionfilter-destinationarn
"""
return jsii.get(self, "destinationArn")
@destination_arn.setter
def destination_arn(self, value: str):
return jsii.set(self, "destinationArn", value)
@property
@jsii.member(jsii_name="filterPattern")
def filter_pattern(self) -> str:
"""``AWS::Logs::SubscriptionFilter.FilterPattern``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-logs-subscriptionfilter.html#cfn-cwl-subscriptionfilter-filterpattern
"""
return jsii.get(self, "filterPattern")
@filter_pattern.setter
def filter_pattern(self, value: str):
return jsii.set(self, "filterPattern", value)
@property
@jsii.member(jsii_name="logGroupName")
def log_group_name(self) -> str:
"""``AWS::Logs::SubscriptionFilter.LogGroupName``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-logs-subscriptionfilter.html#cfn-cwl-subscriptionfilter-loggroupname
"""
return jsii.get(self, "logGroupName")
@log_group_name.setter
def log_group_name(self, value: str):
return jsii.set(self, "logGroupName", value)
@property
@jsii.member(jsii_name="roleArn")
def role_arn(self) -> typing.Optional[str]:
"""``AWS::Logs::SubscriptionFilter.RoleArn``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-logs-subscriptionfilter.html#cfn-cwl-subscriptionfilter-rolearn
"""
return jsii.get(self, "roleArn")
@role_arn.setter
def role_arn(self, value: typing.Optional[str]):
return jsii.set(self, "roleArn", value)
@jsii.data_type(jsii_type="@aws-cdk/aws-logs.CfnSubscriptionFilterProps", jsii_struct_bases=[], name_mapping={'destination_arn': 'destinationArn', 'filter_pattern': 'filterPattern', 'log_group_name': 'logGroupName', 'role_arn': 'roleArn'})
class CfnSubscriptionFilterProps():
def __init__(self, *, destination_arn: str, filter_pattern: str, log_group_name: str, role_arn: typing.Optional[str]=None):
"""Properties for defining a ``AWS::Logs::SubscriptionFilter``.
:param destination_arn: ``AWS::Logs::SubscriptionFilter.DestinationArn``.
:param filter_pattern: ``AWS::Logs::SubscriptionFilter.FilterPattern``.
:param log_group_name: ``AWS::Logs::SubscriptionFilter.LogGroupName``.
:param role_arn: ``AWS::Logs::SubscriptionFilter.RoleArn``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-logs-subscriptionfilter.html
"""
self._values = {
'destination_arn': destination_arn,
'filter_pattern': filter_pattern,
'log_group_name': log_group_name,
}
if role_arn is not None: self._values["role_arn"] = role_arn
@property
def destination_arn(self) -> str:
"""``AWS::Logs::SubscriptionFilter.DestinationArn``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-logs-subscriptionfilter.html#cfn-cwl-subscriptionfilter-destinationarn
"""
return self._values.get('destination_arn')
@property
def filter_pattern(self) -> str:
"""``AWS::Logs::SubscriptionFilter.FilterPattern``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-logs-subscriptionfilter.html#cfn-cwl-subscriptionfilter-filterpattern
"""
return self._values.get('filter_pattern')
@property
def log_group_name(self) -> str:
"""``AWS::Logs::SubscriptionFilter.LogGroupName``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-logs-subscriptionfilter.html#cfn-cwl-subscriptionfilter-loggroupname
"""
return self._values.get('log_group_name')
@property
def role_arn(self) -> typing.Optional[str]:
"""``AWS::Logs::SubscriptionFilter.RoleArn``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-logs-subscriptionfilter.html#cfn-cwl-subscriptionfilter-rolearn
"""
return self._values.get('role_arn')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'CfnSubscriptionFilterProps(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
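# Illustrative usage sketch: a helper wiring up the raw CfnSubscriptionFilter above. The
# construct id and the ARNs are placeholders; a real destination would be a Kinesis stream
# or Lambda function ARN, plus a role CloudWatch Logs can assume when targeting Kinesis.
def _example_cfn_subscription_filter(scope: aws_cdk.core.Construct,
                                     log_group_name: str,
                                     destination_arn: str,
                                     role_arn: str) -> CfnSubscriptionFilter:
    # Forwards every log event (an empty pattern matches all events) to the destination.
    return CfnSubscriptionFilter(scope, "ExampleSubscription",
                                 destination_arn=destination_arn,
                                 filter_pattern="",
                                 log_group_name=log_group_name,
                                 role_arn=role_arn)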
@jsii.data_type(jsii_type="@aws-cdk/aws-logs.ColumnRestriction", jsii_struct_bases=[], name_mapping={'comparison': 'comparison', 'number_value': 'numberValue', 'string_value': 'stringValue'})
class ColumnRestriction():
def __init__(self, *, comparison: str, number_value: typing.Optional[jsii.Number]=None, string_value: typing.Optional[str]=None):
"""
:param comparison: Comparison operator to use.
:param number_value: Number value to compare to. Exactly one of 'stringValue' and 'numberValue' must be set.
:param string_value: String value to compare to. Exactly one of 'stringValue' and 'numberValue' must be set.
"""
self._values = {
'comparison': comparison,
}
if number_value is not None: self._values["number_value"] = number_value
if string_value is not None: self._values["string_value"] = string_value
@property
def comparison(self) -> str:
"""Comparison operator to use."""
return self._values.get('comparison')
@property
def number_value(self) -> typing.Optional[jsii.Number]:
"""Number value to compare to.
Exactly one of 'stringValue' and 'numberValue' must be set.
"""
return self._values.get('number_value')
@property
def string_value(self) -> typing.Optional[str]:
"""String value to compare to.
Exactly one of 'stringValue' and 'numberValue' must be set.
"""
return self._values.get('string_value')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'ColumnRestriction(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
@jsii.data_type(jsii_type="@aws-cdk/aws-logs.CrossAccountDestinationProps", jsii_struct_bases=[], name_mapping={'role': 'role', 'target_arn': 'targetArn', 'destination_name': 'destinationName'})
class CrossAccountDestinationProps():
def __init__(self, *, role: aws_cdk.aws_iam.IRole, target_arn: str, destination_name: typing.Optional[str]=None):
"""Properties for a CrossAccountDestination.
:param role: The role to assume that grants permissions to write to 'target'. The role must be assumable by 'logs.{REGION}.amazonaws.com'.
:param target_arn: The log destination target's ARN.
:param destination_name: The name of the log destination. Default: Automatically generated
"""
self._values = {
'role': role,
'target_arn': target_arn,
}
if destination_name is not None: self._values["destination_name"] = destination_name
@property
def role(self) -> aws_cdk.aws_iam.IRole:
"""The role to assume that grants permissions to write to 'target'.
The role must be assumable by 'logs.{REGION}.amazonaws.com'.
"""
return self._values.get('role')
@property
def target_arn(self) -> str:
"""The log destination target's ARN."""
return self._values.get('target_arn')
@property
def destination_name(self) -> typing.Optional[str]:
"""The name of the log destination.
default
:default: Automatically generated
"""
return self._values.get('destination_name')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'CrossAccountDestinationProps(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
class FilterPattern(metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-logs.FilterPattern"):
"""A collection of static methods to generate appropriate ILogPatterns."""
def __init__(self) -> None:
jsii.create(FilterPattern, self, [])
@jsii.member(jsii_name="all")
@classmethod
def all(cls, *patterns: "JsonPattern") -> "JsonPattern":
"""A JSON log pattern that matches if all given JSON log patterns match.
:param patterns: -
"""
return jsii.sinvoke(cls, "all", [*patterns])
@jsii.member(jsii_name="allEvents")
@classmethod
def all_events(cls) -> "IFilterPattern":
"""A log pattern that matches all events."""
return jsii.sinvoke(cls, "allEvents", [])
@jsii.member(jsii_name="allTerms")
@classmethod
def all_terms(cls, *terms: str) -> "IFilterPattern":
"""A log pattern that matches if all the strings given appear in the event.
:param terms: The words to search for. All terms must match.
"""
return jsii.sinvoke(cls, "allTerms", [*terms])
@jsii.member(jsii_name="any")
@classmethod
def any(cls, *patterns: "JsonPattern") -> "JsonPattern":
"""A JSON log pattern that matches if any of the given JSON log patterns match.
:param patterns: -
"""
return jsii.sinvoke(cls, "any", [*patterns])
@jsii.member(jsii_name="anyTerm")
@classmethod
def any_term(cls, *terms: str) -> "IFilterPattern":
"""A log pattern that matches if any of the strings given appear in the event.
:param terms: The words to search for. Any terms must match.
"""
return jsii.sinvoke(cls, "anyTerm", [*terms])
@jsii.member(jsii_name="anyTermGroup")
@classmethod
def any_term_group(cls, *term_groups: typing.List[str]) -> "IFilterPattern":
"""A log pattern that matches if any of the given term groups matches the event.
A term group matches an event if all the terms in it appear in the event string.
:param term_groups: A list of term groups to search for. Any one of the clauses must match.
"""
return jsii.sinvoke(cls, "anyTermGroup", [*term_groups])
@jsii.member(jsii_name="booleanValue")
@classmethod
def boolean_value(cls, json_field: str, value: bool) -> "JsonPattern":
"""A JSON log pattern that matches if the field exists and equals the boolean value.
:param json_field: Field inside JSON. Example: "$.myField"
:param value: The value to match.
"""
return jsii.sinvoke(cls, "booleanValue", [json_field, value])
@jsii.member(jsii_name="exists")
@classmethod
def exists(cls, json_field: str) -> "JsonPattern":
"""A JSON log patter that matches if the field exists.
This is a readable convenience wrapper over 'field = *'
:param json_field: Field inside JSON. Example: "$.myField"
"""
return jsii.sinvoke(cls, "exists", [json_field])
@jsii.member(jsii_name="isNull")
@classmethod
def is_null(cls, json_field: str) -> "JsonPattern":
"""A JSON log pattern that matches if the field exists and has the special value 'null'.
:param json_field: Field inside JSON. Example: "$.myField"
"""
return jsii.sinvoke(cls, "isNull", [json_field])
@jsii.member(jsii_name="literal")
@classmethod
def literal(cls, log_pattern_string: str) -> "IFilterPattern":
"""Use the given string as log pattern.
See https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html
for information on writing log patterns.
:param log_pattern_string: The pattern string to use.
"""
return jsii.sinvoke(cls, "literal", [log_pattern_string])
@jsii.member(jsii_name="notExists")
@classmethod
def not_exists(cls, json_field: str) -> "JsonPattern":
"""A JSON log pattern that matches if the field does not exist.
:param json_field: Field inside JSON. Example: "$.myField"
"""
return jsii.sinvoke(cls, "notExists", [json_field])
@jsii.member(jsii_name="numberValue")
@classmethod
def number_value(cls, json_field: str, comparison: str, value: jsii.Number) -> "JsonPattern":
"""A JSON log pattern that compares numerical values.
This pattern only matches if the event is a JSON event, and the indicated field inside
compares with the value in the indicated way.
        Use '$' to indicate the root of the JSON structure. The comparison operator can be
        one of =, !=, <, <=, > or >=.
For more information, see:
https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html
:param json_field: Field inside JSON. Example: "$.myField"
:param comparison: Comparison to carry out. One of =, !=, <, <=, >, >=.
:param value: The numerical value to compare to.
"""
return jsii.sinvoke(cls, "numberValue", [json_field, comparison, value])
@jsii.member(jsii_name="spaceDelimited")
@classmethod
def space_delimited(cls, *columns: str) -> "SpaceDelimitedTextPattern":
"""A space delimited log pattern matcher.
The log event is divided into space-delimited columns (optionally
enclosed by "" or [] to capture spaces into column values), and names
are given to each column.
'...' may be specified once to match any number of columns.
Afterwards, conditions may be added to individual columns.
:param columns: The columns in the space-delimited log stream.
"""
return jsii.sinvoke(cls, "spaceDelimited", [*columns])
@jsii.member(jsii_name="stringValue")
@classmethod
def string_value(cls, json_field: str, comparison: str, value: str) -> "JsonPattern":
"""A JSON log pattern that compares string values.
This pattern only matches if the event is a JSON event, and the indicated field inside
compares with the string value.
Use '$' to indicate the root of the JSON structure. The comparison operator can only
        compare equality or inequality. The '*' wildcard may appear in the value at the
start or at the end.
For more information, see:
https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html
:param json_field: Field inside JSON. Example: "$.myField"
:param comparison: Comparison to carry out. Either = or !=.
:param value: The string value to compare to. May use '*' as wildcard at start or end of string.
"""
return jsii.sinvoke(cls, "stringValue", [json_field, comparison, value])
@jsii.interface(jsii_type="@aws-cdk/aws-logs.IFilterPattern")
class IFilterPattern(jsii.compat.Protocol):
"""Interface for objects that can render themselves to log patterns."""
@staticmethod
def __jsii_proxy_class__():
return _IFilterPatternProxy
@property
@jsii.member(jsii_name="logPatternString")
def log_pattern_string(self) -> str:
...
class _IFilterPatternProxy():
"""Interface for objects that can render themselves to log patterns."""
__jsii_type__ = "@aws-cdk/aws-logs.IFilterPattern"
@property
@jsii.member(jsii_name="logPatternString")
def log_pattern_string(self) -> str:
return jsii.get(self, "logPatternString")
@jsii.interface(jsii_type="@aws-cdk/aws-logs.ILogGroup")
class ILogGroup(aws_cdk.core.IResource, jsii.compat.Protocol):
@staticmethod
def __jsii_proxy_class__():
return _ILogGroupProxy
@property
@jsii.member(jsii_name="logGroupArn")
def log_group_arn(self) -> str:
"""The ARN of this log group.
attribute:
:attribute:: true
"""
...
@property
@jsii.member(jsii_name="logGroupName")
def log_group_name(self) -> str:
"""The name of this log group.
attribute:
:attribute:: true
"""
...
@jsii.member(jsii_name="addMetricFilter")
def add_metric_filter(self, id: str, *, filter_pattern: "IFilterPattern", metric_name: str, metric_namespace: str, default_value: typing.Optional[jsii.Number]=None, metric_value: typing.Optional[str]=None) -> "MetricFilter":
"""Create a new Metric Filter on this Log Group.
:param id: Unique identifier for the construct in its parent.
:param props: Properties for creating the MetricFilter.
:param filter_pattern: Pattern to search for log events.
:param metric_name: The name of the metric to emit.
:param metric_namespace: The namespace of the metric to emit.
:param default_value: The value to emit if the pattern does not match a particular event. Default: No metric emitted.
:param metric_value: The value to emit for the metric. Can either be a literal number (typically "1"), or the name of a field in the structure to take the value from the matched event. If you are using a field value, the field value must have been matched using the pattern. If you want to specify a field from a matched JSON structure, use '$.fieldName', and make sure the field is in the pattern (if only as '$.fieldName = *'). If you want to specify a field from a matched space-delimited structure, use '$fieldName'. Default: "1"
"""
...
@jsii.member(jsii_name="addStream")
def add_stream(self, id: str, *, log_stream_name: typing.Optional[str]=None) -> "LogStream":
"""Create a new Log Stream for this Log Group.
:param id: Unique identifier for the construct in its parent.
:param props: Properties for creating the LogStream.
:param log_stream_name: The name of the log stream to create. The name must be unique within the log group. Default: Automatically generated
"""
...
@jsii.member(jsii_name="addSubscriptionFilter")
def add_subscription_filter(self, id: str, *, destination: "ILogSubscriptionDestination", filter_pattern: "IFilterPattern") -> "SubscriptionFilter":
"""Create a new Subscription Filter on this Log Group.
:param id: Unique identifier for the construct in its parent.
:param props: Properties for creating the SubscriptionFilter.
:param destination: The destination to send the filtered events to. For example, a Kinesis stream or a Lambda function.
:param filter_pattern: Log events matching this pattern will be sent to the destination.
"""
...
@jsii.member(jsii_name="extractMetric")
def extract_metric(self, json_field: str, metric_namespace: str, metric_name: str) -> aws_cdk.aws_cloudwatch.Metric:
"""Extract a metric from structured log events in the LogGroup.
Creates a MetricFilter on this LogGroup that will extract the value
of the indicated JSON field in all records where it occurs.
The metric will be available in CloudWatch Metrics under the
indicated namespace and name.
:param json_field: JSON field to extract (example: '$.myfield').
:param metric_namespace: Namespace to emit the metric under.
:param metric_name: Name to emit the metric under.
return
:return: A Metric object representing the extracted metric
"""
...
@jsii.member(jsii_name="grant")
def grant(self, grantee: aws_cdk.aws_iam.IGrantable, *actions: str) -> aws_cdk.aws_iam.Grant:
"""Give the indicated permissions on this log group and all streams.
:param grantee: -
:param actions: -
"""
...
@jsii.member(jsii_name="grantWrite")
def grant_write(self, grantee: aws_cdk.aws_iam.IGrantable) -> aws_cdk.aws_iam.Grant:
"""Give permissions to write to create and write to streams in this log group.
:param grantee: -
"""
...
class _ILogGroupProxy(jsii.proxy_for(aws_cdk.core.IResource)):
__jsii_type__ = "@aws-cdk/aws-logs.ILogGroup"
@property
@jsii.member(jsii_name="logGroupArn")
def log_group_arn(self) -> str:
"""The ARN of this log group.
attribute:
:attribute:: true
"""
return jsii.get(self, "logGroupArn")
@property
@jsii.member(jsii_name="logGroupName")
def log_group_name(self) -> str:
"""The name of this log group.
attribute:
:attribute:: true
"""
return jsii.get(self, "logGroupName")
@jsii.member(jsii_name="addMetricFilter")
def add_metric_filter(self, id: str, *, filter_pattern: "IFilterPattern", metric_name: str, metric_namespace: str, default_value: typing.Optional[jsii.Number]=None, metric_value: typing.Optional[str]=None) -> "MetricFilter":
"""Create a new Metric Filter on this Log Group.
:param id: Unique identifier for the construct in its parent.
:param props: Properties for creating the MetricFilter.
:param filter_pattern: Pattern to search for log events.
:param metric_name: The name of the metric to emit.
:param metric_namespace: The namespace of the metric to emit.
:param default_value: The value to emit if the pattern does not match a particular event. Default: No metric emitted.
:param metric_value: The value to emit for the metric. Can either be a literal number (typically "1"), or the name of a field in the structure to take the value from the matched event. If you are using a field value, the field value must have been matched using the pattern. If you want to specify a field from a matched JSON structure, use '$.fieldName', and make sure the field is in the pattern (if only as '$.fieldName = *'). If you want to specify a field from a matched space-delimited structure, use '$fieldName'. Default: "1"
"""
props = MetricFilterOptions(filter_pattern=filter_pattern, metric_name=metric_name, metric_namespace=metric_namespace, default_value=default_value, metric_value=metric_value)
return jsii.invoke(self, "addMetricFilter", [id, props])
@jsii.member(jsii_name="addStream")
def add_stream(self, id: str, *, log_stream_name: typing.Optional[str]=None) -> "LogStream":
"""Create a new Log Stream for this Log Group.
:param id: Unique identifier for the construct in its parent.
:param props: Properties for creating the LogStream.
:param log_stream_name: The name of the log stream to create. The name must be unique within the log group. Default: Automatically generated
"""
props = StreamOptions(log_stream_name=log_stream_name)
return jsii.invoke(self, "addStream", [id, props])
@jsii.member(jsii_name="addSubscriptionFilter")
def add_subscription_filter(self, id: str, *, destination: "ILogSubscriptionDestination", filter_pattern: "IFilterPattern") -> "SubscriptionFilter":
"""Create a new Subscription Filter on this Log Group.
:param id: Unique identifier for the construct in its parent.
:param props: Properties for creating the SubscriptionFilter.
:param destination: The destination to send the filtered events to. For example, a Kinesis stream or a Lambda function.
:param filter_pattern: Log events matching this pattern will be sent to the destination.
"""
props = SubscriptionFilterOptions(destination=destination, filter_pattern=filter_pattern)
return jsii.invoke(self, "addSubscriptionFilter", [id, props])
@jsii.member(jsii_name="extractMetric")
def extract_metric(self, json_field: str, metric_namespace: str, metric_name: str) -> aws_cdk.aws_cloudwatch.Metric:
"""Extract a metric from structured log events in the LogGroup.
Creates a MetricFilter on this LogGroup that will extract the value
of the indicated JSON field in all records where it occurs.
The metric will be available in CloudWatch Metrics under the
indicated namespace and name.
:param json_field: JSON field to extract (example: '$.myfield').
:param metric_namespace: Namespace to emit the metric under.
:param metric_name: Name to emit the metric under.
return
:return: A Metric object representing the extracted metric
"""
return jsii.invoke(self, "extractMetric", [json_field, metric_namespace, metric_name])
@jsii.member(jsii_name="grant")
def grant(self, grantee: aws_cdk.aws_iam.IGrantable, *actions: str) -> aws_cdk.aws_iam.Grant:
"""Give the indicated permissions on this log group and all streams.
:param grantee: -
:param actions: -
"""
return jsii.invoke(self, "grant", [grantee, *actions])
@jsii.member(jsii_name="grantWrite")
def grant_write(self, grantee: aws_cdk.aws_iam.IGrantable) -> aws_cdk.aws_iam.Grant:
"""Give permissions to write to create and write to streams in this log group.
:param grantee: -
"""
return jsii.invoke(self, "grantWrite", [grantee])
@jsii.interface(jsii_type="@aws-cdk/aws-logs.ILogStream")
class ILogStream(aws_cdk.core.IResource, jsii.compat.Protocol):
@staticmethod
def __jsii_proxy_class__():
return _ILogStreamProxy
@property
@jsii.member(jsii_name="logStreamName")
def log_stream_name(self) -> str:
"""The name of this log stream.
attribute:
:attribute:: true
"""
...
class _ILogStreamProxy(jsii.proxy_for(aws_cdk.core.IResource)):
__jsii_type__ = "@aws-cdk/aws-logs.ILogStream"
@property
@jsii.member(jsii_name="logStreamName")
def log_stream_name(self) -> str:
"""The name of this log stream.
attribute:
:attribute:: true
"""
return jsii.get(self, "logStreamName")
@jsii.interface(jsii_type="@aws-cdk/aws-logs.ILogSubscriptionDestination")
class ILogSubscriptionDestination(jsii.compat.Protocol):
"""Interface for classes that can be the destination of a log Subscription."""
@staticmethod
def __jsii_proxy_class__():
return _ILogSubscriptionDestinationProxy
@jsii.member(jsii_name="bind")
def bind(self, scope: aws_cdk.core.Construct, source_log_group: "ILogGroup") -> "LogSubscriptionDestinationConfig":
"""Return the properties required to send subscription events to this destination.
If necessary, the destination can use the properties of the SubscriptionFilter
object itself to configure its permissions to allow the subscription to write
to it.
The destination may reconfigure its own permissions in response to this
function call.
:param scope: -
:param source_log_group: -
"""
...
class _ILogSubscriptionDestinationProxy():
"""Interface for classes that can be the destination of a log Subscription."""
__jsii_type__ = "@aws-cdk/aws-logs.ILogSubscriptionDestination"
@jsii.member(jsii_name="bind")
def bind(self, scope: aws_cdk.core.Construct, source_log_group: "ILogGroup") -> "LogSubscriptionDestinationConfig":
"""Return the properties required to send subscription events to this destination.
If necessary, the destination can use the properties of the SubscriptionFilter
object itself to configure its permissions to allow the subscription to write
to it.
The destination may reconfigure its own permissions in response to this
function call.
:param scope: -
:param source_log_group: -
"""
return jsii.invoke(self, "bind", [scope, source_log_group])
@jsii.implements(ILogSubscriptionDestination)
class CrossAccountDestination(aws_cdk.core.Resource, metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-logs.CrossAccountDestination"):
"""A new CloudWatch Logs Destination for use in cross-account scenarios.
CrossAccountDestinations are used to subscribe a Kinesis stream in a
different account to a CloudWatch Subscription.
Consumers will hardly ever need to use this class. Instead, directly
subscribe a Kinesis stream using the integration class in the
``@aws-cdk/aws-logs-destinations`` package; if necessary, a
``CrossAccountDestination`` will be created automatically.
resource:
:resource:: AWS::Logs::Destination
"""
def __init__(self, scope: aws_cdk.core.Construct, id: str, *, role: aws_cdk.aws_iam.IRole, target_arn: str, destination_name: typing.Optional[str]=None) -> None:
"""
:param scope: -
:param id: -
:param props: -
:param role: The role to assume that grants permissions to write to 'target'. The role must be assumable by 'logs.{REGION}.amazonaws.com'.
:param target_arn: The log destination target's ARN.
:param destination_name: The name of the log destination. Default: Automatically generated
"""
props = CrossAccountDestinationProps(role=role, target_arn=target_arn, destination_name=destination_name)
jsii.create(CrossAccountDestination, self, [scope, id, props])
@jsii.member(jsii_name="addToPolicy")
def add_to_policy(self, statement: aws_cdk.aws_iam.PolicyStatement) -> None:
"""
:param statement: -
"""
return jsii.invoke(self, "addToPolicy", [statement])
@jsii.member(jsii_name="bind")
def bind(self, _scope: aws_cdk.core.Construct, _source_log_group: "ILogGroup") -> "LogSubscriptionDestinationConfig":
"""Return the properties required to send subscription events to this destination.
If necessary, the destination can use the properties of the SubscriptionFilter
object itself to configure its permissions to allow the subscription to write
to it.
The destination may reconfigure its own permissions in response to this
function call.
:param _scope: -
:param _source_log_group: -
"""
return jsii.invoke(self, "bind", [_scope, _source_log_group])
@property
@jsii.member(jsii_name="destinationArn")
def destination_arn(self) -> str:
"""The ARN of this CrossAccountDestination object.
attribute:
:attribute:: true
"""
return jsii.get(self, "destinationArn")
@property
@jsii.member(jsii_name="destinationName")
def destination_name(self) -> str:
"""The name of this CrossAccountDestination object.
attribute:
:attribute:: true
"""
return jsii.get(self, "destinationName")
@property
@jsii.member(jsii_name="policyDocument")
def policy_document(self) -> aws_cdk.aws_iam.PolicyDocument:
"""Policy object of this CrossAccountDestination object."""
return jsii.get(self, "policyDocument")
@jsii.implements(IFilterPattern)
class JsonPattern(metaclass=jsii.JSIIAbstractClass, jsii_type="@aws-cdk/aws-logs.JsonPattern"):
"""Base class for patterns that only match JSON log events."""
@staticmethod
def __jsii_proxy_class__():
return _JsonPatternProxy
def __init__(self, json_pattern_string: str) -> None:
"""
:param json_pattern_string: -
"""
jsii.create(JsonPattern, self, [json_pattern_string])
@property
@jsii.member(jsii_name="jsonPatternString")
def json_pattern_string(self) -> str:
return jsii.get(self, "jsonPatternString")
@property
@jsii.member(jsii_name="logPatternString")
def log_pattern_string(self) -> str:
return jsii.get(self, "logPatternString")
class _JsonPatternProxy(JsonPattern):
pass
@jsii.implements(ILogGroup)
class LogGroup(aws_cdk.core.Resource, metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-logs.LogGroup"):
"""Define a CloudWatch Log Group."""
def __init__(self, scope: aws_cdk.core.Construct, id: str, *, log_group_name: typing.Optional[str]=None, removal_policy: typing.Optional[aws_cdk.core.RemovalPolicy]=None, retention: typing.Optional["RetentionDays"]=None) -> None:
"""
:param scope: -
:param id: -
:param props: -
:param log_group_name: Name of the log group. Default: Automatically generated
:param removal_policy: Determine the removal policy of this log group. Normally you want to retain the log group so you can diagnose issues from logs even after a deployment that no longer includes the log group. In that case, use the normal date-based retention policy to age out your logs. Default: RemovalPolicy.Retain
:param retention: How long, in days, the log contents will be retained. To retain all logs, set this value to RetentionDays.INFINITE. Default: RetentionDays.TWO_YEARS
"""
props = LogGroupProps(log_group_name=log_group_name, removal_policy=removal_policy, retention=retention)
jsii.create(LogGroup, self, [scope, id, props])
@jsii.member(jsii_name="fromLogGroupArn")
@classmethod
def from_log_group_arn(cls, scope: aws_cdk.core.Construct, id: str, log_group_arn: str) -> "ILogGroup":
"""Import an existing LogGroup.
:param scope: -
:param id: -
:param log_group_arn: -
"""
return jsii.sinvoke(cls, "fromLogGroupArn", [scope, id, log_group_arn])
@jsii.member(jsii_name="addMetricFilter")
def add_metric_filter(self, id: str, *, filter_pattern: "IFilterPattern", metric_name: str, metric_namespace: str, default_value: typing.Optional[jsii.Number]=None, metric_value: typing.Optional[str]=None) -> "MetricFilter":
"""Create a new Metric Filter on this Log Group.
:param id: Unique identifier for the construct in its parent.
:param props: Properties for creating the MetricFilter.
:param filter_pattern: Pattern to search for log events.
:param metric_name: The name of the metric to emit.
:param metric_namespace: The namespace of the metric to emit.
:param default_value: The value to emit if the pattern does not match a particular event. Default: No metric emitted.
:param metric_value: The value to emit for the metric. Can either be a literal number (typically "1"), or the name of a field in the structure to take the value from the matched event. If you are using a field value, the field value must have been matched using the pattern. If you want to specify a field from a matched JSON structure, use '$.fieldName', and make sure the field is in the pattern (if only as '$.fieldName = *'). If you want to specify a field from a matched space-delimited structure, use '$fieldName'. Default: "1"
"""
props = MetricFilterOptions(filter_pattern=filter_pattern, metric_name=metric_name, metric_namespace=metric_namespace, default_value=default_value, metric_value=metric_value)
return jsii.invoke(self, "addMetricFilter", [id, props])
@jsii.member(jsii_name="addStream")
def add_stream(self, id: str, *, log_stream_name: typing.Optional[str]=None) -> "LogStream":
"""Create a new Log Stream for this Log Group.
:param id: Unique identifier for the construct in its parent.
:param props: Properties for creating the LogStream.
:param log_stream_name: The name of the log stream to create. The name must be unique within the log group. Default: Automatically generated
"""
props = StreamOptions(log_stream_name=log_stream_name)
return jsii.invoke(self, "addStream", [id, props])
@jsii.member(jsii_name="addSubscriptionFilter")
def add_subscription_filter(self, id: str, *, destination: "ILogSubscriptionDestination", filter_pattern: "IFilterPattern") -> "SubscriptionFilter":
"""Create a new Subscription Filter on this Log Group.
:param id: Unique identifier for the construct in its parent.
:param props: Properties for creating the SubscriptionFilter.
:param destination: The destination to send the filtered events to. For example, a Kinesis stream or a Lambda function.
:param filter_pattern: Log events matching this pattern will be sent to the destination.
"""
props = SubscriptionFilterOptions(destination=destination, filter_pattern=filter_pattern)
return jsii.invoke(self, "addSubscriptionFilter", [id, props])
@jsii.member(jsii_name="extractMetric")
def extract_metric(self, json_field: str, metric_namespace: str, metric_name: str) -> aws_cdk.aws_cloudwatch.Metric:
"""Extract a metric from structured log events in the LogGroup.
Creates a MetricFilter on this LogGroup that will extract the value
of the indicated JSON field in all records where it occurs.
The metric will be available in CloudWatch Metrics under the
indicated namespace and name.
:param json_field: JSON field to extract (example: '$.myfield').
:param metric_namespace: Namespace to emit the metric under.
:param metric_name: Name to emit the metric under.
return
:return: A Metric object representing the extracted metric
"""
return jsii.invoke(self, "extractMetric", [json_field, metric_namespace, metric_name])
@jsii.member(jsii_name="grant")
def grant(self, grantee: aws_cdk.aws_iam.IGrantable, *actions: str) -> aws_cdk.aws_iam.Grant:
"""Give the indicated permissions on this log group and all streams.
:param grantee: -
:param actions: -
"""
return jsii.invoke(self, "grant", [grantee, *actions])
@jsii.member(jsii_name="grantWrite")
def grant_write(self, grantee: aws_cdk.aws_iam.IGrantable) -> aws_cdk.aws_iam.Grant:
"""Give permissions to write to create and write to streams in this log group.
:param grantee: -
"""
return jsii.invoke(self, "grantWrite", [grantee])
@property
@jsii.member(jsii_name="logGroupArn")
def log_group_arn(self) -> str:
"""The ARN of this log group."""
return jsii.get(self, "logGroupArn")
@property
@jsii.member(jsii_name="logGroupName")
def log_group_name(self) -> str:
"""The name of this log group."""
return jsii.get(self, "logGroupName")
@jsii.data_type(jsii_type="@aws-cdk/aws-logs.LogGroupProps", jsii_struct_bases=[], name_mapping={'log_group_name': 'logGroupName', 'removal_policy': 'removalPolicy', 'retention': 'retention'})
class LogGroupProps():
def __init__(self, *, log_group_name: typing.Optional[str]=None, removal_policy: typing.Optional[aws_cdk.core.RemovalPolicy]=None, retention: typing.Optional["RetentionDays"]=None):
"""Properties for a LogGroup.
:param log_group_name: Name of the log group. Default: Automatically generated
:param removal_policy: Determine the removal policy of this log group. Normally you want to retain the log group so you can diagnose issues from logs even after a deployment that no longer includes the log group. In that case, use the normal date-based retention policy to age out your logs. Default: RemovalPolicy.Retain
:param retention: How long, in days, the log contents will be retained. To retain all logs, set this value to RetentionDays.INFINITE. Default: RetentionDays.TWO_YEARS
"""
self._values = {
}
if log_group_name is not None: self._values["log_group_name"] = log_group_name
if removal_policy is not None: self._values["removal_policy"] = removal_policy
if retention is not None: self._values["retention"] = retention
@property
def log_group_name(self) -> typing.Optional[str]:
"""Name of the log group.
default
:default: Automatically generated
"""
return self._values.get('log_group_name')
@property
def removal_policy(self) -> typing.Optional[aws_cdk.core.RemovalPolicy]:
"""Determine the removal policy of this log group.
Normally you want to retain the log group so you can diagnose issues
from logs even after a deployment that no longer includes the log group.
In that case, use the normal date-based retention policy to age out your
logs.
default
:default: RemovalPolicy.Retain
"""
return self._values.get('removal_policy')
@property
def retention(self) -> typing.Optional["RetentionDays"]:
"""How long, in days, the log contents will be retained.
To retain all logs, set this value to RetentionDays.INFINITE.
default
:default: RetentionDays.TWO_YEARS
"""
return self._values.get('retention')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'LogGroupProps(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
@jsii.implements(ILogStream)
class LogStream(aws_cdk.core.Resource, metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-logs.LogStream"):
"""Define a Log Stream in a Log Group."""
def __init__(self, scope: aws_cdk.core.Construct, id: str, *, log_group: "ILogGroup", log_stream_name: typing.Optional[str]=None, removal_policy: typing.Optional[aws_cdk.core.RemovalPolicy]=None) -> None:
"""
:param scope: -
:param id: -
:param props: -
:param log_group: The log group to create a log stream for.
:param log_stream_name: The name of the log stream to create. The name must be unique within the log group. Default: Automatically generated
:param removal_policy: Determine what happens when the log stream resource is removed from the app. Normally you want to retain the log stream so you can diagnose issues from logs even after a deployment that no longer includes the log stream. The date-based retention policy of your log group will age out the logs after a certain time. Default: RemovalPolicy.Retain
"""
props = LogStreamProps(log_group=log_group, log_stream_name=log_stream_name, removal_policy=removal_policy)
jsii.create(LogStream, self, [scope, id, props])
@jsii.member(jsii_name="fromLogStreamName")
@classmethod
def from_log_stream_name(cls, scope: aws_cdk.core.Construct, id: str, log_stream_name: str) -> "ILogStream":
"""Import an existing LogGroup.
:param scope: -
:param id: -
:param log_stream_name: -
"""
return jsii.sinvoke(cls, "fromLogStreamName", [scope, id, log_stream_name])
@property
@jsii.member(jsii_name="logStreamName")
def log_stream_name(self) -> str:
"""The name of this log stream."""
return jsii.get(self, "logStreamName")
@jsii.data_type(jsii_type="@aws-cdk/aws-logs.LogStreamProps", jsii_struct_bases=[], name_mapping={'log_group': 'logGroup', 'log_stream_name': 'logStreamName', 'removal_policy': 'removalPolicy'})
class LogStreamProps():
def __init__(self, *, log_group: "ILogGroup", log_stream_name: typing.Optional[str]=None, removal_policy: typing.Optional[aws_cdk.core.RemovalPolicy]=None):
"""Properties for a LogStream.
:param log_group: The log group to create a log stream for.
:param log_stream_name: The name of the log stream to create. The name must be unique within the log group. Default: Automatically generated
:param removal_policy: Determine what happens when the log stream resource is removed from the app. Normally you want to retain the log stream so you can diagnose issues from logs even after a deployment that no longer includes the log stream. The date-based retention policy of your log group will age out the logs after a certain time. Default: RemovalPolicy.Retain
"""
self._values = {
'log_group': log_group,
}
if log_stream_name is not None: self._values["log_stream_name"] = log_stream_name
if removal_policy is not None: self._values["removal_policy"] = removal_policy
@property
def log_group(self) -> "ILogGroup":
"""The log group to create a log stream for."""
return self._values.get('log_group')
@property
def log_stream_name(self) -> typing.Optional[str]:
"""The name of the log stream to create.
The name must be unique within the log group.
default
:default: Automatically generated
"""
return self._values.get('log_stream_name')
@property
def removal_policy(self) -> typing.Optional[aws_cdk.core.RemovalPolicy]:
"""Determine what happens when the log stream resource is removed from the app.
Normally you want to retain the log stream so you can diagnose issues from
logs even after a deployment that no longer includes the log stream.
The date-based retention policy of your log group will age out the logs
after a certain time.
default
:default: RemovalPolicy.Retain
"""
return self._values.get('removal_policy')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'LogStreamProps(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
@jsii.data_type(jsii_type="@aws-cdk/aws-logs.LogSubscriptionDestinationConfig", jsii_struct_bases=[], name_mapping={'arn': 'arn', 'role': 'role'})
class LogSubscriptionDestinationConfig():
def __init__(self, *, arn: str, role: typing.Optional[aws_cdk.aws_iam.IRole]=None):
"""Properties returned by a Subscription destination.
:param arn: The ARN of the subscription's destination.
:param role: The role to assume to write log events to the destination. Default: No role assumed
"""
self._values = {
'arn': arn,
}
if role is not None: self._values["role"] = role
@property
def arn(self) -> str:
"""The ARN of the subscription's destination."""
return self._values.get('arn')
@property
def role(self) -> typing.Optional[aws_cdk.aws_iam.IRole]:
"""The role to assume to write log events to the destination.
default
:default: No role assumed
"""
return self._values.get('role')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'LogSubscriptionDestinationConfig(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
class MetricFilter(aws_cdk.core.Resource, metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-logs.MetricFilter"):
"""A filter that extracts information from CloudWatch Logs and emits to CloudWatch Metrics."""
def __init__(self, scope: aws_cdk.core.Construct, id: str, *, log_group: "ILogGroup", filter_pattern: "IFilterPattern", metric_name: str, metric_namespace: str, default_value: typing.Optional[jsii.Number]=None, metric_value: typing.Optional[str]=None) -> None:
"""
:param scope: -
:param id: -
:param props: -
:param log_group: The log group to create the filter on.
:param filter_pattern: Pattern to search for log events.
:param metric_name: The name of the metric to emit.
:param metric_namespace: The namespace of the metric to emit.
:param default_value: The value to emit if the pattern does not match a particular event. Default: No metric emitted.
:param metric_value: The value to emit for the metric. Can either be a literal number (typically "1"), or the name of a field in the structure to take the value from the matched event. If you are using a field value, the field value must have been matched using the pattern. If you want to specify a field from a matched JSON structure, use '$.fieldName', and make sure the field is in the pattern (if only as '$.fieldName = *'). If you want to specify a field from a matched space-delimited structure, use '$fieldName'. Default: "1"
"""
props = MetricFilterProps(log_group=log_group, filter_pattern=filter_pattern, metric_name=metric_name, metric_namespace=metric_namespace, default_value=default_value, metric_value=metric_value)
jsii.create(MetricFilter, self, [scope, id, props])
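# --- Illustrative sketch (editor's addition; not part of the generated bindings) ---
# Constructing the standalone MetricFilter above rather than going through
# LogGroup.add_metric_filter(). The namespace and the "$.eventType" field are
# placeholders; FilterPattern.exists is assumed from the helpers defined earlier.
def _example_metric_filter(scope: aws_cdk.core.Construct, group: "ILogGroup") -> "MetricFilter":
    return MetricFilter(
        scope, "ExampleMetricFilter",
        log_group=group,
        filter_pattern=FilterPattern.exists("$.eventType"),
        metric_name="EventCount",
        metric_namespace="ExampleNamespace",
        metric_value="1")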
@jsii.data_type(jsii_type="@aws-cdk/aws-logs.MetricFilterOptions", jsii_struct_bases=[], name_mapping={'filter_pattern': 'filterPattern', 'metric_name': 'metricName', 'metric_namespace': 'metricNamespace', 'default_value': 'defaultValue', 'metric_value': 'metricValue'})
class MetricFilterOptions():
def __init__(self, *, filter_pattern: "IFilterPattern", metric_name: str, metric_namespace: str, default_value: typing.Optional[jsii.Number]=None, metric_value: typing.Optional[str]=None):
"""Properties for a MetricFilter created from a LogGroup.
:param filter_pattern: Pattern to search for log events.
:param metric_name: The name of the metric to emit.
:param metric_namespace: The namespace of the metric to emit.
:param default_value: The value to emit if the pattern does not match a particular event. Default: No metric emitted.
:param metric_value: The value to emit for the metric. Can either be a literal number (typically "1"), or the name of a field in the structure to take the value from the matched event. If you are using a field value, the field value must have been matched using the pattern. If you want to specify a field from a matched JSON structure, use '$.fieldName', and make sure the field is in the pattern (if only as '$.fieldName = *'). If you want to specify a field from a matched space-delimited structure, use '$fieldName'. Default: "1"
"""
self._values = {
'filter_pattern': filter_pattern,
'metric_name': metric_name,
'metric_namespace': metric_namespace,
}
if default_value is not None: self._values["default_value"] = default_value
if metric_value is not None: self._values["metric_value"] = metric_value
@property
def filter_pattern(self) -> "IFilterPattern":
"""Pattern to search for log events."""
return self._values.get('filter_pattern')
@property
def metric_name(self) -> str:
"""The name of the metric to emit."""
return self._values.get('metric_name')
@property
def metric_namespace(self) -> str:
"""The namespace of the metric to emit."""
return self._values.get('metric_namespace')
@property
def default_value(self) -> typing.Optional[jsii.Number]:
"""The value to emit if the pattern does not match a particular event.
default
:default: No metric emitted.
"""
return self._values.get('default_value')
@property
def metric_value(self) -> typing.Optional[str]:
"""The value to emit for the metric.
Can either be a literal number (typically "1"), or the name of a field in the structure
to take the value from the matched event. If you are using a field value, the field
value must have been matched using the pattern.
If you want to specify a field from a matched JSON structure, use '$.fieldName',
and make sure the field is in the pattern (if only as '$.fieldName = *').
If you want to specify a field from a matched space-delimited structure,
use '$fieldName'.
default
:default: "1"
"""
return self._values.get('metric_value')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'MetricFilterOptions(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
@jsii.data_type(jsii_type="@aws-cdk/aws-logs.MetricFilterProps", jsii_struct_bases=[MetricFilterOptions], name_mapping={'filter_pattern': 'filterPattern', 'metric_name': 'metricName', 'metric_namespace': 'metricNamespace', 'default_value': 'defaultValue', 'metric_value': 'metricValue', 'log_group': 'logGroup'})
class MetricFilterProps(MetricFilterOptions):
def __init__(self, *, filter_pattern: "IFilterPattern", metric_name: str, metric_namespace: str, default_value: typing.Optional[jsii.Number]=None, metric_value: typing.Optional[str]=None, log_group: "ILogGroup"):
"""Properties for a MetricFilter.
:param filter_pattern: Pattern to search for log events.
:param metric_name: The name of the metric to emit.
:param metric_namespace: The namespace of the metric to emit.
:param default_value: The value to emit if the pattern does not match a particular event. Default: No metric emitted.
:param metric_value: The value to emit for the metric. Can either be a literal number (typically "1"), or the name of a field in the structure to take the value from the matched event. If you are using a field value, the field value must have been matched using the pattern. If you want to specify a field from a matched JSON structure, use '$.fieldName', and make sure the field is in the pattern (if only as '$.fieldName = *'). If you want to specify a field from a matched space-delimited structure, use '$fieldName'. Default: "1"
:param log_group: The log group to create the filter on.
"""
self._values = {
'filter_pattern': filter_pattern,
'metric_name': metric_name,
'metric_namespace': metric_namespace,
'log_group': log_group,
}
if default_value is not None: self._values["default_value"] = default_value
if metric_value is not None: self._values["metric_value"] = metric_value
@property
def filter_pattern(self) -> "IFilterPattern":
"""Pattern to search for log events."""
return self._values.get('filter_pattern')
@property
def metric_name(self) -> str:
"""The name of the metric to emit."""
return self._values.get('metric_name')
@property
def metric_namespace(self) -> str:
"""The namespace of the metric to emit."""
return self._values.get('metric_namespace')
@property
def default_value(self) -> typing.Optional[jsii.Number]:
"""The value to emit if the pattern does not match a particular event.
default
:default: No metric emitted.
"""
return self._values.get('default_value')
@property
def metric_value(self) -> typing.Optional[str]:
"""The value to emit for the metric.
Can either be a literal number (typically "1"), or the name of a field in the structure
to take the value from the matched event. If you are using a field value, the field
value must have been matched using the pattern.
If you want to specify a field from a matched JSON structure, use '$.fieldName',
and make sure the field is in the pattern (if only as '$.fieldName = *').
If you want to specify a field from a matched space-delimited structure,
use '$fieldName'.
default
:default: "1"
"""
return self._values.get('metric_value')
@property
def log_group(self) -> "ILogGroup":
"""The log group to create the filter on."""
return self._values.get('log_group')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'MetricFilterProps(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
@jsii.enum(jsii_type="@aws-cdk/aws-logs.RetentionDays")
class RetentionDays(enum.Enum):
"""How long, in days, the log contents will be retained."""
ONE_DAY = "ONE_DAY"
"""1 day."""
THREE_DAYS = "THREE_DAYS"
"""3 days."""
FIVE_DAYS = "FIVE_DAYS"
"""5 days."""
ONE_WEEK = "ONE_WEEK"
"""1 week."""
TWO_WEEKS = "TWO_WEEKS"
"""2 weeks."""
ONE_MONTH = "ONE_MONTH"
"""1 month."""
TWO_MONTHS = "TWO_MONTHS"
"""2 months."""
THREE_MONTHS = "THREE_MONTHS"
"""3 months."""
FOUR_MONTHS = "FOUR_MONTHS"
"""4 months."""
FIVE_MONTHS = "FIVE_MONTHS"
"""5 months."""
SIX_MONTHS = "SIX_MONTHS"
"""6 months."""
ONE_YEAR = "ONE_YEAR"
"""1 year."""
THIRTEEN_MONTHS = "THIRTEEN_MONTHS"
"""13 months."""
EIGHTEEN_MONTHS = "EIGHTEEN_MONTHS"
"""18 months."""
TWO_YEARS = "TWO_YEARS"
"""2 years."""
FIVE_YEARS = "FIVE_YEARS"
"""5 years."""
TEN_YEARS = "TEN_YEARS"
"""10 years."""
INFINITE = "INFINITE"
"""Retain logs forever."""
@jsii.implements(IFilterPattern)
class SpaceDelimitedTextPattern(metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-logs.SpaceDelimitedTextPattern"):
"""Space delimited text pattern."""
def __init__(self, columns: typing.List[str], restrictions: typing.Mapping[str,typing.List["ColumnRestriction"]]) -> None:
"""
:param columns: -
:param restrictions: -
"""
jsii.create(SpaceDelimitedTextPattern, self, [columns, restrictions])
@jsii.member(jsii_name="construct")
@classmethod
def construct(cls, columns: typing.List[str]) -> "SpaceDelimitedTextPattern":
"""Construct a new instance of a space delimited text pattern.
Since this class must be public, we can't rely on the user only creating it through
        the ``FilterPattern.spaceDelimited()`` factory function. We must therefore validate the
argument in the constructor. Since we're returning a copy on every mutation, and we
don't want to re-validate the same things on every construction, we provide a limited
set of mutator functions and only validate the new data every time.
:param columns: -
"""
return jsii.sinvoke(cls, "construct", [columns])
@jsii.member(jsii_name="whereNumber")
def where_number(self, column_name: str, comparison: str, value: jsii.Number) -> "SpaceDelimitedTextPattern":
"""Restrict where the pattern applies.
:param column_name: -
:param comparison: -
:param value: -
"""
return jsii.invoke(self, "whereNumber", [column_name, comparison, value])
@jsii.member(jsii_name="whereString")
def where_string(self, column_name: str, comparison: str, value: str) -> "SpaceDelimitedTextPattern":
"""Restrict where the pattern applies.
:param column_name: -
:param comparison: -
:param value: -
"""
return jsii.invoke(self, "whereString", [column_name, comparison, value])
@property
@jsii.member(jsii_name="logPatternString")
def log_pattern_string(self) -> str:
return jsii.get(self, "logPatternString")
@jsii.data_type(jsii_type="@aws-cdk/aws-logs.StreamOptions", jsii_struct_bases=[], name_mapping={'log_stream_name': 'logStreamName'})
class StreamOptions():
def __init__(self, *, log_stream_name: typing.Optional[str]=None):
"""Properties for a new LogStream created from a LogGroup.
:param log_stream_name: The name of the log stream to create. The name must be unique within the log group. Default: Automatically generated
"""
self._values = {
}
if log_stream_name is not None: self._values["log_stream_name"] = log_stream_name
@property
def log_stream_name(self) -> typing.Optional[str]:
"""The name of the log stream to create.
The name must be unique within the log group.
default
:default: Automatically generated
"""
return self._values.get('log_stream_name')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'StreamOptions(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
class SubscriptionFilter(aws_cdk.core.Resource, metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-logs.SubscriptionFilter"):
"""A new Subscription on a CloudWatch log group."""
def __init__(self, scope: aws_cdk.core.Construct, id: str, *, log_group: "ILogGroup", destination: "ILogSubscriptionDestination", filter_pattern: "IFilterPattern") -> None:
"""
:param scope: -
:param id: -
:param props: -
:param log_group: The log group to create the subscription on.
:param destination: The destination to send the filtered events to. For example, a Kinesis stream or a Lambda function.
:param filter_pattern: Log events matching this pattern will be sent to the destination.
"""
props = SubscriptionFilterProps(log_group=log_group, destination=destination, filter_pattern=filter_pattern)
jsii.create(SubscriptionFilter, self, [scope, id, props])
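# --- Illustrative sketch (editor's addition; not part of the generated bindings) ---
# Wiring a SubscriptionFilter by hand. The destination would normally be an
# integration object from @aws-cdk/aws-logs-destinations; here it is only typed
# against the ILogSubscriptionDestination interface defined above.
def _example_subscription_filter(scope: aws_cdk.core.Construct, group: "ILogGroup", destination: "ILogSubscriptionDestination") -> "SubscriptionFilter":
    return SubscriptionFilter(
        scope, "ExampleSubscription",
        log_group=group,
        destination=destination,
        filter_pattern=FilterPattern.all_events())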
@jsii.data_type(jsii_type="@aws-cdk/aws-logs.SubscriptionFilterOptions", jsii_struct_bases=[], name_mapping={'destination': 'destination', 'filter_pattern': 'filterPattern'})
class SubscriptionFilterOptions():
def __init__(self, *, destination: "ILogSubscriptionDestination", filter_pattern: "IFilterPattern"):
"""Properties for a new SubscriptionFilter created from a LogGroup.
:param destination: The destination to send the filtered events to. For example, a Kinesis stream or a Lambda function.
:param filter_pattern: Log events matching this pattern will be sent to the destination.
"""
self._values = {
'destination': destination,
'filter_pattern': filter_pattern,
}
@property
def destination(self) -> "ILogSubscriptionDestination":
"""The destination to send the filtered events to.
For example, a Kinesis stream or a Lambda function.
"""
return self._values.get('destination')
@property
def filter_pattern(self) -> "IFilterPattern":
"""Log events matching this pattern will be sent to the destination."""
return self._values.get('filter_pattern')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'SubscriptionFilterOptions(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
@jsii.data_type(jsii_type="@aws-cdk/aws-logs.SubscriptionFilterProps", jsii_struct_bases=[SubscriptionFilterOptions], name_mapping={'destination': 'destination', 'filter_pattern': 'filterPattern', 'log_group': 'logGroup'})
class SubscriptionFilterProps(SubscriptionFilterOptions):
def __init__(self, *, destination: "ILogSubscriptionDestination", filter_pattern: "IFilterPattern", log_group: "ILogGroup"):
"""Properties for a SubscriptionFilter.
:param destination: The destination to send the filtered events to. For example, a Kinesis stream or a Lambda function.
:param filter_pattern: Log events matching this pattern will be sent to the destination.
:param log_group: The log group to create the subscription on.
"""
self._values = {
'destination': destination,
'filter_pattern': filter_pattern,
'log_group': log_group,
}
@property
def destination(self) -> "ILogSubscriptionDestination":
"""The destination to send the filtered events to.
For example, a Kinesis stream or a Lambda function.
"""
return self._values.get('destination')
@property
def filter_pattern(self) -> "IFilterPattern":
"""Log events matching this pattern will be sent to the destination."""
return self._values.get('filter_pattern')
@property
def log_group(self) -> "ILogGroup":
"""The log group to create the subscription on."""
return self._values.get('log_group')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'SubscriptionFilterProps(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
__all__ = ["CfnDestination", "CfnDestinationProps", "CfnLogGroup", "CfnLogGroupProps", "CfnLogStream", "CfnLogStreamProps", "CfnMetricFilter", "CfnMetricFilterProps", "CfnSubscriptionFilter", "CfnSubscriptionFilterProps", "ColumnRestriction", "CrossAccountDestination", "CrossAccountDestinationProps", "FilterPattern", "IFilterPattern", "ILogGroup", "ILogStream", "ILogSubscriptionDestination", "JsonPattern", "LogGroup", "LogGroupProps", "LogStream", "LogStreamProps", "LogSubscriptionDestinationConfig", "MetricFilter", "MetricFilterOptions", "MetricFilterProps", "RetentionDays", "SpaceDelimitedTextPattern", "StreamOptions", "SubscriptionFilter", "SubscriptionFilterOptions", "SubscriptionFilterProps", "__jsii_assembly__"]
publication.publish()
| apache-2.0 | 9,123,543,856,416,222,000 | 43.01819 | 727 | 0.659719 | false |
simone-f/qat_script | gui/QatMenu.py | 1 | 7498 | #! /usr/bin/env jython
#
# Copyright 2014 Simone F. <[email protected]>
#
# This file is part of qat_script.
# qat_script is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
#java import
from javax.swing import JMenu, JMenuItem, JCheckBoxMenuItem, ImageIcon,\
JFileChooser
from javax.swing.filechooser import FileNameExtensionFilter
from java.awt.event import ActionListener
from java.io import File
#josm import
from org.openstreetmap.josm import Main
from org.openstreetmap.josm.tools import OpenBrowser, ImageProvider
from org.openstreetmap.josm.actions import DiskAccessAction
#local import
from gui.AboutDialog import AboutDialog
from tools.data.LocalFile.LocalFile import LocalFileTool
#### QatMenu ###########################################################
class QatMenuActionListener(ActionListener):
def __init__(self, app, itemType, tool=None, view=None, check=None):
self.app = app
self.itemType = itemType
self.tool = tool
self.view = view
self.check = check
self.strings = self.app.strings
def actionPerformed(self, event):
command = event.getActionCommand()
if self.itemType == "dialog":
#False positives dialog
if command == self.strings.getString("False_positives..."):
self.app.falsePositiveDlg.show()
#Preferences dialog
elif command == self.strings.getString("Preferences..."):
self.app.open_preferences("from menu")
#About dialog
elif command == self.strings.getString("About..."):
try:
self.app.aboutDlg
except AttributeError:
#build about dialog
self.app.aboutDlg = AboutDialog(
Main.parent,
self.strings.getString("about_title"),
True,
self.app)
self.app.aboutDlg.show()
#Web link of the tool
elif self.itemType == "link":
OpenBrowser.displayUrl(self.tool.uri)
elif self.itemType in ("check", "local file"):
#Open local GPX file with errors
if self.itemType == "local file":
fileNameExtensionFilter = FileNameExtensionFilter("files GPX (*.gpx)",
["gpx"])
chooser = DiskAccessAction.createAndOpenFileChooser(True,
False,
self.strings.getString("Open_a_GPX_file"),
fileNameExtensionFilter,
JFileChooser.FILES_ONLY,
None)
if chooser is None:
return
filePath = chooser.getSelectedFile()
#remove former loaded local file
for i, tool in enumerate(self.app.tools):
if filePath.getName() == tool.name:
self.app.dlg.toolsCombo.removeItemAt(i)
del self.app.tools[i]
#create a new local file tool
self.tool = LocalFileTool(self.app, filePath)
self.view = self.tool.views[0]
self.check = self.view.checks[0]
self.app.tools.append(self.tool)
#add tool to toggle dialog
self.app.dlg.add_data_to_models(self.tool)
selection = (self.tool, self.view, self.check)
self.app.on_selection_changed("menu", selection)
class QatMenu(JMenu):
"""A menu from which the user can select an error type, toggle the
qat dialog or about dialog.
"""
def __init__(self, app, menuTitle):
JMenu.__init__(self, menuTitle)
self.app = app
#quat dialog item
dialogItem = JCheckBoxMenuItem(self.app.dlg.toggleAction)
self.add(dialogItem)
self.addSeparator()
#tool submenu
for tool in self.app.tools:
if tool.name == "favourites":
self.addSeparator()
toolMenu = JMenu(tool.title)
toolMenu.setIcon(tool.bigIcon)
if tool.uri != "":
#Website link
iconFile = File.separator.join([self.app.SCRIPTDIR,
"images",
"icons",
"browser.png"])
urlItem = JMenuItem(tool.title)
urlItem.setIcon(ImageIcon(iconFile))
urlItem.addActionListener(QatMenuActionListener(self.app,
"link",
tool))
toolMenu.add(urlItem)
toolMenu.addSeparator()
#View submenu
for view in tool.views:
viewMenu = JMenu(view.title)
if tool.name == "favourites":
self.app.favouritesMenu = viewMenu
#Check item
for check in view.checks:
self.add_check_item(tool, view, check, viewMenu)
toolMenu.add(viewMenu)
self.add(toolMenu)
#Local file with errors
localFileItem = JMenuItem(self.app.strings.getString("Open_GPX"))
localFileItem.setIcon(ImageProvider.get("open"))
localFileItem.addActionListener(QatMenuActionListener(self.app, "local file"))
self.add(localFileItem)
self.addSeparator()
#False positive dialog
falsepositiveItem = JMenuItem(self.app.strings.getString("False_positives..."))
falsepositiveItem.addActionListener(QatMenuActionListener(self.app, "dialog"))
self.add(falsepositiveItem)
#Preferences dialog
preferencesItem = JMenuItem(self.app.strings.getString("Preferences..."))
preferencesItem.addActionListener(QatMenuActionListener(self.app, "dialog"))
self.add(preferencesItem)
#About dialog item
aboutItem = JMenuItem(self.app.strings.getString("About..."))
aboutItem.addActionListener(QatMenuActionListener(self.app, "dialog"))
self.add(aboutItem)
def add_check_item(self, tool, view, check, viewMenu):
checkItem = JMenuItem(check.title)
if check.icon is not None:
checkItem.setIcon(check.icon)
checkItem.addActionListener(QatMenuActionListener(self.app,
"check",
tool,
view,
check))
viewMenu.add(checkItem)
| gpl-2.0 | 6,789,611,966,408,416,000 | 40.655556 | 87 | 0.555882 | false |
habnabit/panglery | docs/conf.py | 1 | 7868 | # -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import vcversioner
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'panglery'
copyright = u'2013, Aaron Gallagher'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
version = release = vcversioner.find_version(version_file=None).version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'panglerydoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'panglery.tex', u'panglery Documentation',
u'Aaron Gallagher', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'panglery', u'panglery Documentation',
[u'Aaron Gallagher'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'panglery', u'panglery Documentation',
u'Aaron Gallagher', 'panglery', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit | 856,033,918,284,303,700 | 31.512397 | 81 | 0.706279 | false |
google-research/google-research | ravens/ravens/tasks/align_box_corner.py | 1 | 2639 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Aligning task."""
import os
import numpy as np
from ravens import utils
from ravens.tasks.task import Task
class AlignBoxCorner(Task):
"""Aligning task."""
def __init__(self):
super().__init__()
self.max_steps = 3
def reset(self, env):
super().reset(env)
# Generate randomly shaped box.
box_size = self.get_random_size(0.05, 0.15, 0.05, 0.15, 0.01, 0.06)
# Add corner.
dimx = (box_size[0] / 2 - 0.025 + 0.0025, box_size[0] / 2 + 0.0025)
dimy = (box_size[1] / 2 + 0.0025, box_size[1] / 2 - 0.025 + 0.0025)
corner_template = 'assets/corner/corner-template.urdf'
replace = {'DIMX': dimx, 'DIMY': dimy}
corner_urdf = self.fill_template(corner_template, replace)
corner_size = (box_size[0], box_size[1], 0)
corner_pose = self.get_random_pose(env, corner_size)
env.add_object(corner_urdf, corner_pose, 'fixed')
os.remove(corner_urdf)
# Add possible placing poses.
theta = utils.quatXYZW_to_eulerXYZ(corner_pose[1])[2]
fip_rot = utils.eulerXYZ_to_quatXYZW((0, 0, theta + np.pi))
pose1 = (corner_pose[0], fip_rot)
alt_x = (box_size[0] / 2) - (box_size[1] / 2)
alt_y = (box_size[1] / 2) - (box_size[0] / 2)
alt_pos = (alt_x, alt_y, 0)
alt_rot0 = utils.eulerXYZ_to_quatXYZW((0, 0, np.pi / 2))
alt_rot1 = utils.eulerXYZ_to_quatXYZW((0, 0, 3 * np.pi / 2))
pose2 = utils.multiply(corner_pose, (alt_pos, alt_rot0))
pose3 = utils.multiply(corner_pose, (alt_pos, alt_rot1))
# Add box.
box_template = 'assets/box/box-template.urdf'
box_urdf = self.fill_template(box_template, {'DIM': box_size})
box_pose = self.get_random_pose(env, box_size)
box_id = env.add_object(box_urdf, box_pose)
os.remove(box_urdf)
self.color_random_brown(box_id)
# Goal: box is aligned with corner (1 of 4 possible poses).
self.goals.append(([(box_id, (2 * np.pi, None))], np.int32([[1, 1, 1, 1]]),
[corner_pose, pose1, pose2, pose3],
False, True, 'pose', None, 1))
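# --- Illustrative sketch (editor's addition, comments only) ---
# In ravens, a task instance is typically paired with the package's Environment
# (defined elsewhere in the package; its constructor arguments are not shown here):
#   task = AlignBoxCorner()
#   env.set_task(task)
#   obs = env.reset()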
| apache-2.0 | -9,179,708,957,933,946,000 | 35.150685 | 79 | 0.638499 | false |
maemo-foss/maemo-multimedia-trace | disabled/test/tracetest.py | 1 | 4596 | ##########################################################################
# This file is part of libtrace
#
# Copyright (C) 2010 Nokia Corporation.
#
# This library is free software; you can redistribute
# it and/or modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation
# version 2.1 of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
# USA.
##########################################################################
#!/usr/bin/python
import sys
from _nsntrace import *
test = 1
tc = None
TRACE_FLAG1 = -1
TRACE_FLAG2 = -1
TRACE_FLAG3 = -1
####################
# fatal
####################
def fatal(ec, msg):
print 'fatal error: ' + msg
sys.exit(ec)
####################
# test_init
####################
def test_init():
global tc
global TRACE_FLAG1, TRACE_FLAG2, TRACE_FLAG3
c = { 'name': 'comp1',
'flags': { 'flag1': { 'description': 'blah 1...', 'flag': -1 },
'flag2': { 'description': 'blah 2...', 'flag': -1 },
'flag3': { 'description': 'blah 3...', 'flag': -1 }}}
try:
trace_init()
except:
fatal(1, "failed to create trace context for testing\n");
try:
tc = trace_open('tracetest')
except:
fatal(1, "failed to create trace context for testing\n")
trace_add_component(tc, c)
TRACE_FLAG1 = c['flags']['flag1']['flag'];
TRACE_FLAG2 = c['flags']['flag2']['flag'];
TRACE_FLAG3 = c['flags']['flag3']['flag'];
trace_set_destination(tc, TRACE_TO_STDOUT);
trace_set_header(tc, "----------\n%d %i: %W\nflag: %f, tags: { %T }\n");
####################
# test_cycle
####################
def test_cycle():
global tc, test, TRACE_FLAG1, TRACE_FLAG2, TRACE_FLAG3
for i in range(0, 16):
itags = { 'i': ("%d" % i), 'test': ("%d" % test) }
trace_write(tc, TRACE_FLAG1, itags, "test #%d, i is %d\n", test, i)
test += 1
for j in range(0, 16):
sum = i + j;
prod = i * j;
jtags = { 'i': ("%d" % i), 'j': ("%d" % j),
'sum': ("%d" % sum), 'product': ("%d" % prod) }
trace_write(tc, TRACE_FLAG2, jtags,
"test #%d, i = %d, j = %d\n", test, i, j)
test += 1
trace_write(tc, TRACE_FLAG3, jtags,
"test #%d, sum = %d, product = %d\n",
test, sum, prod)
test += 1
####################
# main test script
####################
#
# initialize tracing
#
test_init()
test = 1
#
# run a couple of different test cycles
#
# test built-in default settings
# tracing is disabled (by default) so nothing should be printed
test_cycle()
trace_enable(tc)
# trace flags are off (by default) so nothing should be printed
test_cycle()
# no filters installed so nothing should be printed
trace_on(tc, TRACE_FLAG1)
trace_on(tc, TRACE_FLAG3)
test_cycle()
# flag tests
# add a catch-all filter to make suppression effectively flag-based
trace_add_simple_filter(tc, "all")
trace_on(tc, TRACE_FLAG1)
trace_on(tc, TRACE_FLAG2)
trace_on(tc, TRACE_FLAG3)
test_cycle()
trace_on(tc, TRACE_FLAG1)
trace_off(tc, TRACE_FLAG2)
trace_off(tc, TRACE_FLAG3)
test_cycle()
trace_off(tc, TRACE_FLAG1)
trace_on(tc, TRACE_FLAG2)
trace_off(tc, TRACE_FLAG3)
test_cycle()
trace_off(tc, TRACE_FLAG1)
trace_off(tc, TRACE_FLAG2)
trace_on(tc, TRACE_FLAG3)
test_cycle()
trace_off(tc, TRACE_FLAG1)
trace_on(tc, TRACE_FLAG2)
trace_on(tc, TRACE_FLAG3)
test_cycle()
trace_on(tc, TRACE_FLAG1)
trace_off(tc, TRACE_FLAG2)
trace_on(tc, TRACE_FLAG3)
test_cycle()
trace_on(tc, TRACE_FLAG1)
trace_on(tc, TRACE_FLAG2)
trace_off(tc, TRACE_FLAG3)
test_cycle()
# filter tests
# turn on all flags to make suppression effectively filter-based
trace_on(tc, TRACE_FLAG1)
trace_on(tc, TRACE_FLAG2)
trace_on(tc, TRACE_FLAG3)
# i == 10 || j == 0 || j == 5
trace_reset_filters(tc)
trace_add_simple_filter(tc, "i=10")
trace_add_simple_filter(tc, "j=0")
trace_add_simple_filter(tc, "j=5")
test_cycle()
# i == 5 || (i == 10 && (j is odd))
trace_reset_filters(tc)
trace_add_simple_filter(tc, "i=5")
trace_add_regexp_filter(tc, "i=10 j=^(1|3|5|7|9)$")
test_cycle()
#
# clean up and exit
#
trace_close(tc)
trace_exit()
sys.exit(0)
#
# Local variables:
# c-indent-level: 4
# c-basic-offset: 4
# tab-width: 4
# End:
#
| lgpl-2.1 | 4,541,163,714,361,927,700 | 20.990431 | 74 | 0.601175 | false |
TeamReciprocity/reciprocity | reciprocity/recipe/urls.py | 1 | 1508 | from django.conf.urls import url
from .views import add_recipe, IngredientAutocomplete, Ingredient, edit_recipe
from django.contrib.auth.decorators import login_required
from django.views.generic.detail import DetailView
from .models import Recipe
from .views import (
add_recipe,
FavoriteRecipesView,
IngredientAutocomplete,
Ingredient,
MyRecipesListView,
RecipeDetailView,
vary_recipe
)
urlpatterns = [
url(r'^ingredient-autocomplete/$',
login_required(IngredientAutocomplete.as_view(model=Ingredient,
create_field='name')),
name='ingredient-autocomplete',),
url(r'^add/$', login_required(add_recipe), name='add-recipe'),
url(r'^view/(?P<pk>[0-9]+)/$',
RecipeDetailView.as_view(
model=Recipe,
template_name='recipe/view-recipe.html'
),
name='view-recipe'),
url(r'^edit/(?P<pk>[0-9]+)/$',
login_required(edit_recipe), name='edit-recipe'),
url(r'^vary/(?P<pk>[0-9]+)/$',
login_required(vary_recipe),
name='vary-recipe'),
url(r'^view/my_recipes/$',
login_required(MyRecipesListView.as_view(
model=Recipe,
template_name='recipe/my-recipes.html'
)),
name='my-recipes'),
url(r'^view/favorites/$',
login_required(FavoriteRecipesView.as_view(
model=Recipe,
template_name='recipe/favorites.html'
)),
name='favorites')
]
| mit | -4,985,277,270,709,134,000 | 32.511111 | 78 | 0.605438 | false |
stsouko/nmrdb | app/views.py | 1 | 11753 | # -*- coding: utf-8 -*-
#
# Copyright 2015 Ramil Nugmanov <[email protected]>
# This file is part of nmrdb.
#
# nmrdb is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
from flask import render_template, request, redirect, url_for, make_response
from app.localization import localization
from app import db, app
from app.forms import Registration, Login, Newlab, Newtask, Changelab, Changeava, ChangeChief, Changepwd, Newmsg, \
Banuser, Gettask
from app.logins import User, admin_required
from flask_login import login_user, login_required, logout_user, current_user
from flask_nav.elements import *
from app.navbar import top_nav, Pagination
loc = localization()
statuscode = dict(all=None, cmp=True, new=False)
taskuserlikekeys = db.gettaskuserlikekeys()
@app.route('/', methods=['GET', 'POST'])
@app.route('/index/', methods=['GET', 'POST'])
@app.route('/index/<int:page>', methods=['GET', 'POST'])
def index(page=1):
form = Newmsg()
if form.validate_on_submit():
msg = db.addmessage(title=form.title.data, message=form.message.data)
if msg:
return redirect(url_for('index', page=1))
if page < 1:
return redirect(url_for('index', page=1))
msg, pc = db.getmessages(page=page)
pag = Pagination(page, pc, pagesize=5)
if page != pag.page:
return redirect(url_for('index', page=pag.page))
return render_template('index.html', localize=loc, data=msg, form=form, paginator=pag,
user=current_user.get_role() if current_user.is_authenticated else None)
''' TASK
pages
'''
@app.route('/newtask', methods=['GET', 'POST'])
@login_required
def newtask():
form = Newtask()
if form.validate_on_submit():
tasktypes = dict.fromkeys([db.gettasktypes().get(x, 'h1') for x in form.tasktypes.data], True)
key = db.addtask(current_user.get_id(), structure=form.structure.data, title=form.taskname.data,
solvent=form.solvent.data, **tasktypes)
if key:
return render_template('newtaskcode.html', code=key, header=loc['taskcode'],
comments=loc['taskcodecomment'])
return render_template('newtask.html', form=form, header=loc['newtask'],
comments=loc['newtaskcomment'])
@app.route('/spectras/', methods=['GET', 'POST'])
@app.route('/spectras/<sfilter>', methods=['GET', 'POST'])
@login_required
def spectras(sfilter=None):
form = Gettask()
if form.validate_on_submit():
if form.task.data:
task = db.gettaskbykey(form.task.data) or db.gettaskbyspectra(form.task.data)
if task:
return redirect(url_for('showtask', task=task['id']))
return redirect(url_for('spectras', user=form.avatar.data, sfilter=form.filters.data))
ufilter = request.args.get('user', None)
sfilter = 'all' if sfilter not in ['all', 'cmp', 'new'] else sfilter
try:
page = int(request.args.get("page", 1))
except ValueError:
page = 1
if page < 1:
return redirect(url_for('spectras', sfilter=sfilter, user=ufilter, page=1))
user = avatar = None
if ufilter:
''' спектры отсортированные по аватарам.
'''
access = [x[2] for x in db.getavatars(current_user.get_id())]
if ufilter in access or current_user.get_role() == 'admin':
avatar = ufilter
else:
user = current_user.get_id()
elif current_user.get_role() != 'admin':
''' все доступные пользователю спектры от всех шар.
'''
user = current_user.get_id()
spectras, sc = db.gettasklist(user=user, avatar=avatar, status=statuscode.get(sfilter), page=page, pagesize=50)
pag = Pagination(page, sc, pagesize=50)
return render_template('spectras.html', localize=loc, form=form, data=spectras, paginator=pag, sfilter=sfilter,
top_nav=top_nav(sfilter=sfilter, ufilter=ufilter).render(renderer='myrenderer'))
@app.route('/download/<int:task>/<file>', methods=['GET'])
@login_required
def download(task, file):
if db.chktaskaccess(task, current_user.get_id()):
resp = make_response()
resp.headers.extend({'X-Accel-Redirect': '/protected/%s' % file,
'Content-Description': 'File Transfer',
'Content-Type': 'application/octet-stream'})
return resp
return redirect(url_for('index'))
@app.route('/showtask/<int:task>', methods=['GET', 'POST'])
@login_required
def showtask(task):
task = db.gettask(task, user=None if current_user.get_role() == 'admin' else current_user.get_id())
if task:
task['task'] = [(i, taskuserlikekeys.get(i), task['files'].get(i)) for i, j in task['task'].items() if j]
return render_template('showtask.html', localize=loc, task=task, user=current_user.get_role())
else:
return redirect(url_for('spectras', sfilter='all'))
''' COMMON
pages
'''
@app.route('/contacts', methods=['GET'])
def contacts():
return render_template('contacts.html')
@app.route('/user/', methods=['GET'])
@app.route('/user/<name>', methods=['GET'])
@login_required
def user(name=None):
if name:
user = db.getuser(name=name)
if user:
if current_user.get_login() == name:
user['current'] = True
return render_template('user.html', localize=loc, user=user)
return redirect(url_for('user', name=current_user.get_login()))
@app.route('/registration', methods=['GET', 'POST'])
def registration():
form = Registration()
if form.validate_on_submit():
if db.adduser(form.fullname.data, form.username.data, form.password.data, form.laboratory.data):
return redirect(url_for('login'))
return render_template('formpage.html', form=form, header=loc['registration'], comments=loc['toe'])
@app.route('/login', methods=['GET', 'POST'])
def login():
form = Login()
if form.validate_on_submit():
user = db.getuser(name=form.username.data)
if user and db.chkpwd(user['id'], form.password.data):
login_user(User(**user), remember=True)
return redirect(url_for('index'))
return render_template('formpage.html', form=form, header=loc['login'], comments='')
@app.route('/logout', methods=['GET'])
@login_required
def logout():
logout_user()
return redirect(url_for('login'))
@app.route('/changepos', methods=['GET', 'POST'])
@login_required
def changeava():
form = Changeava()
if form.validate_on_submit():
if db.changeava(current_user.get_id()):
return redirect(url_for('user', name=current_user.get_login()))
return render_template('formpage.html', form=form, header=loc['newava'], comments=loc['newavacomment'])
@app.route('/changelab', methods=['GET', 'POST'])
@login_required
def changelab():
form = Changelab()
if form.validate_on_submit():
if db.changelab(current_user.get_id(), form.laboratory.data):
return redirect(url_for('user', name=current_user.get_login()))
return render_template('formpage.html', form=form, header=loc['newlab'], comments=loc['changelabcomments'])
@app.route('/changepwd', methods=['GET', 'POST'])
@login_required
def changepwd():
form = Changepwd()
if form.validate_on_submit():
if db.changepasswd(current_user.get_id(), form.newpassword.data):
return redirect(url_for('user', name=current_user.get_login()))
return render_template('formpage.html', form=form, header=loc['newpwd'], comments='')
@app.route('/shareava', methods=['GET', 'POST'])
@login_required
def shareava():
form = ChangeChief()
if form.validate_on_submit():
if db.shareava(current_user.get_id(), db.getuser(name=form.name.data)['id']):
return redirect(url_for('user', name=current_user.get_login()))
return render_template('formpage.html', form=form, header=loc['setchief'], comments=loc['setchiefcomments'])
''' ADMIN SECTION
DANGEROUS code
'''
@app.route('/changerole', methods=['GET', 'POST'])
@login_required
@admin_required('admin')
def changerole():
form = Banuser()
if form.validate_on_submit():
users = db.getusersbypartname(form.username.data)
if users:
return render_template('changerolelist.html', data=users, localize=loc)
return render_template('formpage.html', form=form, header=loc['changerole'], comments='')
@app.route('/dochange/<int:user>', methods=['GET'])
@login_required
@admin_required('admin')
def dochange(user):
role = request.args.get('status')
if role:
db.changeuserrole(user, role)
return redirect(url_for('user', name=current_user.get_login()))
@app.route('/banuser', methods=['GET', 'POST'])
@login_required
@admin_required('admin')
def banuser():
form = Banuser()
if form.validate_on_submit():
users = db.getusersbypartname(form.username.data)
if users:
return render_template('banuserlist.html', data=users, localize=loc)
return render_template('formpage.html', form=form, header=loc['banuser'], comments='')
@app.route('/doban/<int:user>', methods=['GET'])
@login_required
@admin_required('admin')
def doban(user):
db.banuser(user)
return redirect(url_for('user', name=current_user.get_login()))
@app.route('/newlab', methods=['GET', 'POST'])
@login_required
@admin_required('admin')
def newlab():
form = Newlab()
if form.validate_on_submit():
db.addlab(form.labname.data)
return redirect(url_for('user', name=current_user.get_login()))
return render_template('formpage.html', form=form, header=loc['newlab'], comments='')
@app.route('/setstatus/<int:task>', methods=['GET'])
@login_required
@admin_required('admin')
def setstatus(task):
status = False if request.args.get('status') else True
db.settaskstatus(task, status=status)
return redirect(url_for('showtask', task=task))
@app.route('/addspectra/<int:task>', methods=['GET'])
@login_required
@admin_required('admin')
def addspectra(task):
stype = request.args.get('stype')
cname = request.args.get('customname') or '%s.%s.1' % (task, stype)
db.addspectras(task, cname, stype)
return redirect(url_for('showtask', task=task))
@app.route('/journal', methods=['GET'])
@login_required
@admin_required('admin')
def journal():
try:
stime = int(request.args.get('stime'))
except:
stime = 0
spectras = db.get_journal(stime=stime)
return render_template('journal.html', data=spectras, localize=loc)
@app.route('/stats', methods=['GET'])
@login_required
@admin_required('admin')
def stats():
try:
stime = int(request.args.get('stime'))
except:
stime = 0
spectras = []
for n, (k, v) in enumerate(db.getstatistics(stime=stime).items(), start=1):
v.update(dict(n=n, lab=k))
spectras.append(v)
return render_template('stats.html', data=spectras, localize=loc)
| agpl-3.0 | 2,158,832,724,887,687,400 | 33.255132 | 115 | 0.64866 | false |
sedthh/lara-hungarian-nlp | tests/test_parser.py | 1 | 19176 | # -*- coding: UTF-8 -*-
import pytest
import os.path, sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
from lara import nlp, parser
@pytest.mark.parametrize("intents,text,match", [
(
[
{
"alma" : [{"stem":"alma","wordclass":"noun"}],
"szed" : [{"stem":"szed","wordclass":"verb"}]
},
{
"piros" : [{"stem":"piros","wordclass":"adjective"}]
},
{
"zold" : [{"stem":"zöld","wordclass":"adjective"}]
}
],
[
"Már a zöld almákat is szedjem le, vagy cask a pirosakat?",
"Már a zöld almákat is szedjem le, vagy cask a pirosakat?",
"Már a zöld almákat is szedjem le, vagy cask a pirosakat?",
"Már a zöld almákat is szedjem le, vagy cask a pirosakat?"
],
[
{'alma': 1, 'szed': 2},
{'alma': 1, 'szed': 2, 'piros': 2},
{'alma': 1, 'szed': 2, 'piros': 2, 'zold': 2},
{'alma': 1, 'szed': 2, 'piros': 2, 'zold': 2}
]
),
])
def test_parser_intents_add(intents,text,match):
test = []
test.append(parser.Intents(intents[0]))
test.append(test[0]+parser.Intents(intents[1]))
test.append(test[1]+intents[2])
test.append(parser.Intents(str(test[2]),True))
for i in range(len(text)):
result = test[i].match(text[i])
assert match[i] == result
@pytest.mark.parametrize("intent,text,match", [
(
{
"alma" : [{"stem":"alma","wordclass":"noun"}],
"szed" : [{"stem":"szed","wordclass":"verb"}],
"piros" : [{"stem":"piros","wordclass":"adjective"}]
},
[
"Mikor szedjük le a pirosabb almákat?"
],
[
{'alma': 1, 'szed': 2, 'piros': 2}
]
),
(
{
"to_do" : [{"stem":"csinál","wordclass":"verb"}],
},
[
"Ő mit csinál a szobában?",
"Mit fogok még csinálni?",
"Mikor csináltad meg a szekrényt?",
"Megcsináltatták a berendezést.",
"Teljesen kicsinálva érzem magamat ettől a melegtől.",
"Csinálhatott volna mást is.",
"Visszacsinalnad az ekezeteket a billentyuzetemen, kerlek?",
"Szépen megcsiáltad a feladatot csak kihagytál egy karaktert!",
"Vigyázz, hogy el ne gépeld a csniálni igét!"
],
[
{'to_do': 2},
{'to_do': 2},
{'to_do': 2},
{'to_do': 2},
{'to_do': 2},
{'to_do': 2},
{'to_do': 1},
{'to_do': 1},
{'to_do': 1}
]
),
(
{
"palyaudvar" : [{"stem":"pályaudvar","wordclass":"noun","prefix":["busz"]}],
"auto" : [{"stem":"autó","wordclass":"noun","affix":["busz"]}],
"szinten_jo" : [{"stem":"pálya","wordclass":"noun","prefix":["busz"],"affix":["udvar"]}]
},
[
"Lassan beérünk az autóval a pályaudvarra.",
"Lassan beérünk az autóbusszal a buszpályaudvarra."
],
[
{'palyaudvar': 2, 'auto': 2, 'szinten_jo': 2},
{'palyaudvar': 2, 'auto': 1, 'szinten_jo': 2}
]
),
(
{
"enni" : [{"stem":"esz","wordclass":"verb","match_stem":False},{"stem":"en","wordclass":"verb","match_stem":False}]
},
[
"Tőmorfémák: esz, en.",
"Eszel valamit?",
"Azt nem lehet megenni."
],
[
{},
{'enni': 2},
{'enni': 2}
]
),
(
{
"jo_ido" : [{"stem":"jó","wordclass":"adjective","inc":[{"stem":"idő","wordclass":"noun","affix":["járás"]},{"stem":"meleg","wordclass":"adjective"}]}]
},
[
"Jó.",
"Meleg van.",
"Milyen az időjárás?",
"Jó meleg van.",
"Jó az idő.",
"Jó meleg az idő.",
"Jó meleg az időjárás."
],
[
{},
{},
{},
{'jo_ido': 2},
{'jo_ido': 2},
{'jo_ido': 4},
{'jo_ido': 4}
]
),
(
{
"jobb_ido" : [{"stem":"jó","wordclass":"adjective",
"inc":[{"stem":"idő","wordclass":"noun","affix":["járás"]},{"stem":"meleg","wordclass":"adjective"}],
"exc":[{"stem":"este","wordclass":"noun"}]}]
},
[
"Jó.",
"Jó meleg az időjárás.",
"Jó estét!",
"Jó meleg esténk van!"
],
[
{},
{'jobb_ido': 4},
{},
{}
]
),
(
{
"megszerel" : [{"stem":"szerel","wordclass":"verb"}],
"hibasan" : [{"stem":"alma","wordclass":"noun"}],
},
[
"Gyönyörű dolog a szerelem",
"Ezt is elfogadja találatként: Almainüdböz"
],
[
{'megszerel': 2},
{'hibasan': 2}
]
),
(
{
"float" : [{"stem":"a","score":.75},{"stem":"b","score":.6,"typo_score":1}],
},
[
"a b c"
],
[
{'float': 3.1},
]
),
])
def test_parser_intents_match(intent,text,match):
test = parser.Intents(intent)
for i in range(len(text)):
result = test.match(text[i])
assert match[i] == result
@pytest.mark.parametrize("intent,text,match", [
(
{
"change" : [{"stem":"szép","wordclass":"adjective"}],
"typo" : [{"stem":"görbe","wordclass":"adjective"}],
"fail" : [{"stem":"kék","wordclass":"adjective"}]
},
[
"Szebb sárga bögre göbre bögre."
],
[
['change','typo']
]
),
(
{
"capital" : [{"stem":"NAGY","wordclass":"adjective","ignorecase":False}],
"lower" : [{"stem":"kicsi","wordclass":"adjective","ignorecase":False}],
"any" : [{"stem":"VáLtAkOzÓ","wordclass":"adjective","ignorecase":True}],
"acr" : [{"stem":"KFT","ignorecase":False}]
},
[
"legesLEGNAGYobb kicsiNEK vÁlTaKoZó szöveg kft"
],
[
['capital','lower','any']
]
),
(
{
"szoto" : [{"stem":"töv","wordclass":"noun","match_stem":False,"prefix":["szó"]}],
"ragoz" : [{"stem":"ragozatlan","wordclass":"adjective","match_stem":False}],
"talal" : [{"stem":"talál","wordclass":"verb","match_stem":False}],
"talan" : [{"stem":"TALÁN","wordclass":"verb","match_stem":False,"ignorecase":False}]
},
[
"SZÓTÖVEK SZÓTÖVEK SZÓTÖVEK",
"Szótövet RAGOZATLANUL nem talál meg. TALÁN így?",
"Ebben semmi sincs"
],
[
['szoto'],
['szoto','ragoz'],
[]
]
)
])
def test_parser_intents_match_set(intent,text,match):
test = parser.Intents(intent)
for i in range(len(text)):
result = test.match_set(text[i])
assert set(match[i]) == result
@pytest.mark.parametrize("intent,text,best", [
(
{
"kave" : [{"stem":"kávé","wordclass":"noun","affix":["gép"]}],
"takarit" : [{"stem":"takarít","wordclass":"verb"}],
"sehol" : [{"stem":"sehol"}]
},
[
"Valakinek ki kellene takarítani a kávégépet. Tegnap is én takarítottam ki.",
"Kávé kávét kávénk kávém. Takarít.",
"Kávé kávét kávénk kávém. Takarít."
],
[
{'takarit': 4},
{'kave': 8, 'takarit': 2},
{'kave': 8, 'takarit': 2}
]
),
])
def test_parser_intents_match_best(intent,text,best):
test = parser.Intents(intent)
for i in range(len(text)):
result = test.match_best(text[i],i+1)
assert best[i] == result
@pytest.mark.parametrize("intent,text,order,preference", [
(
{
"alma" : [{"stem":"alma","wordclass":"noun"}],
"szed" : [{"stem":"szed","wordclass":"verb"}],
"körte" : [{"stem":"körte","wordclass":"noun"}]
},
[
"Mikor szedjük le a pirosabb almákat?",
"Mikor szedjük le a pirosabb almákat?",
"Mikor szedjük le a pirosabb almákat?",
"Mikor szedjük le a pirosabb almákat?"
],
[
["körte"],
["körte","szed"],
["körte","alma"],
["alma","szed"],
],
[
"szed",
"szed",
"alma",
"alma"
]
),
])
def test_parser_intents_match_order(intent,text,order,preference):
test = parser.Intents(intent)
for i in range(len(text)):
result = test.match_order(text[i],order[i])
assert preference[i] == result
@pytest.mark.parametrize("intent,text,best", [
(
{
"kave" : [{"stem":"kávé","wordclass":"noun","affix":["gép"]}],
"takarit" : [{"stem":"takarít","wordclass":"verb"}],
"sehol" : [{"stem":"sehol"}]
},
[
"Valakinek ki kellene takarítani a a tudod mit. Tegnap is én takarítottam ki.",
"Kávé kávét kávénk kávém. Takarít.",
],
[
{'kave': 0, 'takarit': 4, 'sehol': 0},
{'kave': 8, 'takarit': 2, 'sehol': 0},
]
),
])
def test_parser_intents_match_zeros(intent,text,best):
test = parser.Intents(intent)
for i in range(len(text)):
result = test.match(text[i],True)
assert best[i] == result
@pytest.mark.parametrize("intents,text,cleaned", [
(
[
{
"thanks" : [{"stem":"köszön","wordclass":"verb"}]
},
{
"thanks" : [{"stem":"köszön","wordclass":"verb","exc":[{"stem":"szép","wordclass":"adjective"}]}]
},
{
"thanks" : [{"stem":"köszön","wordclass":"verb","inc":[{"stem":"nagy","wordclass":"adjective"}]}]
},
{
"thanks" : [{"stem":"köszön","wordclass":"verb","inc":[{"stem":"kicsi","wordclass":"adjective"}]}]
},
{
"thanks" : [{"stem":"köszön","wordclass":"verb","inc":[{"stem":"nagy","wordclass":"adjective"}],"exc":[{"stem":"szép","wordclass":"adjective"}]}]
}
],
[
"Nagyon szépen köszönöm a teszteket!",
"Nagyon szépen köszönöm a teszteket!",
"Nagyon szépen köszönöm a teszteket!",
"Nagyon szépen köszönöm a teszteket!",
"Nagyon szépen köszönöm a teszteket!"
],
[
"Nagyon szépen a teszteket!",
"Nagyon szépen köszönöm a teszteket!",
"Nagyon szépen a teszteket!",
"Nagyon szépen köszönöm a teszteket!",
"Nagyon szépen köszönöm a teszteket!"
]
)
])
def test_parser_intents_clean(intents,text,cleaned):
for i in range(len(intents)):
test = parser.Intents(intents[i])
result = nlp.trim(test.clean(text[i]))
assert cleaned[i] == result
@pytest.mark.parametrize("info", [
(
{
"text" : "teszt szöveg"
}
),
(
{
"text" : "teszt szöveg https://www.youtube.com/watch?v=dQw4w9WgXcQ",
"urls" : ["https://www.youtube.com/watch?v=dQw4w9WgXcQ"],
"smileys" : ["=d"]
}
),
(
{
"text" : "@mention",
"mentions" : ["@mention"],
}
),
(
{
"text" : "@mention D:",
"mentions" : ["@mention"],
"smileys" : ["D:"],
}
),
(
{
"text" : ":DDDDdddd :(((8888 :3 http://",
"smileys" : [":DDDDdddd",":(((8888",":3"]
}
),
(
{
"text" : "$ߣł 🍹-😃🍔 :) ߤé$× asddasd",
"emojis" : ["🍹","😃","🍔"],
"smileys" : [":)"]
}
),
(
{
"text" : "[email protected] email",
"emails" : ["[email protected]"]
}
),
(
{
"text" : "[email protected] email @mention",
"emails" : ["[email protected]"],
"mentions" : ["@mention"]
}
)
])
def test_parser_extract(info):
test = parser.Extract(info['text'])
check = ['mentions','urls','smileys','emojis','emails']
for item in info:
if item!='text' and item not in check:
raise ValueError('Possible typo in test case:',item)
for item in check:
result = eval('test.'+item+'()')
if item in info:
assert set(info[item]) == set(result)
else:
assert not result
@pytest.mark.parametrize("info", [
(
{
"in" : "tízenkétmillióhatvanezerhetvenegy és hárommillió száz huszonkettő vagy még nullamilliárd de akkor már kettő kettő tizenkettő :) harmincnégy és nyolcvan illetve kilencvenezer az állás pedig egy-egy és végül egy kettő három",
"out" : "12060071 és 3000122 vagy még 0 de akkor már 2212 :) 34 és 80 illetve 90000 az állás pedig 1-1 és végül 1 2 3"
}
),
(
{
"in" : "harmincnégy lol első a második harmadik :D negyed végén ötödikén mit más csinálsz tízenkétmillióhatvanezerhetvenegy és hárommillió száz huszonkettő vagy még nullamilliárd de akkor már kettő kettő tizenkettő :) harmincnégy és nyolcvan illetve kilencvenezer az állás pedig egy-egy és végül egy kettő három",
"out" : "34 lol 1 a 2 3 :D negyed végén 5ödikén mit más csinálsz 12060071 és 3000122 vagy még 0 de akkor már 2212 :) 34 és 80 illetve 90000 az állás pedig 1-1 és végül 1 2 3"
}
),
(
{
"in" : "egymillió és százezer és tízezer és tízmilliótíz és százezerszáz",
"out" : "1000000 és 100000 és 10000 és 10000010 és 100100"
}
),
(
{
"in" : "mennyi egyszer egy és kétszer kettő?",
"out" : "mennyi 1szer 1 és 2szer 2?"
}
)
])
def test_parser_extract_convert_numbers(info):
test = parser.Extract(info['in'])
assert test.ntext==info['out']
@pytest.mark.parametrize("info", [
(
{
"text" : "120 a 5 100 forint 420 dollár 34.56 yen 300 300 és 20. 3 és 2.3.4 1",
"function" : "digits",
"result" : ['120', '5100', '420', '3456', '300300', '20', '3', '2341']
}
),
(
{
"text" : "120 a 5 100 forint 420 dollár 34.56 yen 300 300 és 20. 3 és 2.3.4 1",
"function" : "digits",
"args" : [3],
"result" : ['120', '420']
}
),
(
{
"text" : "1-2-0 és 420 meg 3.6.0",
"function" : "digits",
"args" : [3,False],
"result" : ['1-2-0', '420', '3.6.0']
}
),
(
{
"text" : "120 a 5 100 forint 420 dollár 34.56 yen 78,90 yen 300 300 és 20. 3 és 2.3.4 1 de -2 jó e és a -2.0",
"function" : "numbers",
"result" : [120.0, 5100.0, 420.0, 34.56, 78.90, 300300.0, 20.0, 3.0, 2.0, 3.4, 1.0, -2.0, -2.0]
}
),
(
{
"text" : "120 a 5 100 forint 420 dollár 34.56 yen 300 300 és 20. 3 és 2.3.4 1 de -2 jó e és a -2.0",
"function" : "numbers",
"args" : [False,False],
"result" : [120, 5100, 420, 300300, 20, 3, 1, -2]
}
),
(
{
"text" : "100 a 90% 1100% 123,45% 0.5 % és 0,4% valamint .7 %",
"function" : "percentages",
"result" : [0.90,11.0,1.2345,0.005,0.004,0.007]
}
),
(
{
"text" : "100 a 90% 1100% 123,45% 0.5 % és 0,4% valamint .7 %",
"function" : "percentages",
"args" : [False],
"result" : ["90%","1100%","123,45%","0.5 %","0,4%",".7 %"]
}
),
(
{
"text" : "#hashtag #YOLO",
"function" : "hashtags",
"result" : ["#hashtag","#yolo"]
}
),
(
{
"text" : "#hashtag #YOLO",
"function" : "hashtags",
"args" : [False],
"result" : ["#hashtag","#YOLO"]
}
),
(
{
"text" : "Hívj fel! A számom (06 30) 123/45 67!",
"function" : "phone_numbers",
"result" : ['+36 30 1234567']
}
),
(
{
"text" : "Hívj fel! A számom (0630) 123/45 67!",
"function" : "phone_numbers",
"args" : [False],
"result" : ['(0630) 123/45 67']
}
),
(
{
"text" : "5 000 YEN vagy 5 000€ vagy 5000 fontot 5000£",
"function" : "currencies",
"result" : ["5000.0 JPY","5000.0 EUR","5000.0 GBP","5000.0 GBP"],
}
),
(
{
"text" : "$5 000 vagy 5 000$ vagy 5000 dollár 5000.-",
"function" : "currencies",
"args" : [False],
"result" : ["$5 000","5 000$","5000 dollár","5000.-"],
}
),
(
{
"text" : "adj nekem $99,99-et meg 19 dollárt és 99 centet!",
"function" : "currencies",
"result" : ["99.99 USD", "19.99 USD"]
}
),
(
{
"text" : "adj nekem $99,99-et meg 19 dollárt és 99 centet!",
"function" : "currencies",
"args" : [False],
"result" : ["$99,99", "19 dollárt és 99 centet"]
}
),
(
{
"text" : "csak 1 000 000 van ide írva",
"function" : "currencies",
"result" : ["1000000.0 HUF"],
}
),
(
{
"text" : "találkozzunk háromnegyed 3 előtt 4 perccel, holnap!",
"function" : "times",
"args" : [False,False,0],
"result" : ["háromnegyed 3 előtt 4 perccel, holnap"]
}
),
(
{
"text" : "3 óra 4 perc",
"function" : "times",
"args" : [True,False,0],
"result" : ["03:04"]
}
),
(
{
"text" : "három óra négy perc",
"function" : "times",
"args" : [True,True,10],
"result" : ["15:04"]
}
),
(
{
"text" : "találkozzunk 10 perccel 9 előtt vagy 20 perccel 20 előtt vagy akár nekem 10 perccel 20 után is jó",
"function" : "times",
"args" : [True,False,10],
"result" : ["20:50","19:40","20:10"]
}
),
(
{
"text" : "10:30 simán, de reggel 9-től este 10-ig és holnap 4-kor vagy holnap délután 4-kor illetve 8-kor és holnap 8-kor",
"function" : "times",
"args" : [True,False,10],
"result" : ["10:30","09:00","22:00","16:00","16:00","20:00","20:00"]
}
),
(
{
"text" : "fél 3 után 2 perccel vagy háromnegyed 2 körül vagy fél 5 előtt vagy 5 előtt 2 perccel vagy fél 5 előtt 2 perccel vagy 2 perccel fél 5 előtt vagy fél 5 után vagy fél 5 után 2 perccel vagy 2 perccel fél 5 után",
"function" : "times",
"args" : [True,False,10],
"result" : ["14:32","13:45","16:30","16:58","16:28","16:28","16:30","16:32","16:32"]
}
),
(
{
"text" : "Hívj fel ezen a számon 2018 IV. huszadikán mondjuk délután nyolc perccel háromnegyed kettő előtt!",
"function" : "times",
"args" : [True,True,10],
"result" : ["13:37"]
}
),
(
{
"text" : "Hívj fel ezen a számon 2018 IV. huszadikán mondjuk délután nyolc perccel háromnegyed kettő előtt!",
"function" : "dates",
"args" : [True,True,'2018-04-01'],
"result" : ["2018-04-20"]
}
),
(
{
"text" : "18/01/09 vagy 18-01-09 vagy 2018. 01. 09. vagy 2018. 01. 09-én vagy 2018 VII 20. és így 2018 január 20-án",
"function" : "dates",
"args" : [False,True,'2018-04-01'],
"result" : ["18/01/09","18-01-09","2018. 01. 09","2018. 01. 09","2018 VII 20","2018 január 20-án"]
}
),
(
{
"text" : "találkozzunk 20-án valamikor vagy 21-én?",
"function" : "dates",
"args" : [True,True,'2018-04-01'],
"result" : ["2018-04-20","2018-04-21"]
}
),
(
{
"text" : "18/01/09 vagy 18-01-09 vagy 2019. 01. 09. vagy 2018. 01. 09-én vagy 2018 VII 20. és így 2018 január 20-án",
"function" : "dates",
"args" : [True,True,'2018-04-01'],
"result" : ["2018-01-09","2018-01-09","2019-01-09","2018-01-09","2018-07-20","2018-01-20"]
}
),
(
{
"text" : "3 óra múlva vagy 12 percen belül de akár 2 és fél évvel előbb is megtörténhet, hogy 5 órával vissza kell állítani, az órát, mert jöttömre kelet felől 1,5 hét múlva számítsatok",
"function" : "durations",
"args" : [True],
"result" : [10800.0, 720.0, -78840000.0, -18000.0, 907200.0]
}
),
(
{
"text" : "3 óra és 4 perc múlva valamint majd egyszer egy héttel rá",
"function" : "durations",
"args" : [False],
"result" : ['3 óra és 4 perc múlva', '1 7tel rá']
}
),
(
{
"text" : "3 óra és 4 perc múlva valamint majd egyszer 1 héttel rá",
"function" : "durations",
"args" : [True],
"result" : [11040.0, 604800.0]
}
),
(
{
"text" : "vagyis jövő kedden is tegnapelőtt vagyis múlt hét vasárnap azaz hétfőn",
"function" : "relative_dates",
"args" : [True,'2018-04-01'],
"result" : ['2018-04-03', '2018-03-30', '2018-03-25', '2018-03-26']
}
),
(
{
"text" : "vagyis jövő kedden is tegnapelőtt vagyis múlt hét vasárnap azaz hétfőn",
"function" : "relative_dates",
"args" : [False,'2018-04-01'],
"result" : ['jövő kedd', 'tegnapelőtt', 'múlt hét vasárnap', 'hétfő']
}
),
(
{
"text" : "délben mi van? akkor találkozzunk holnap 9 és 11 óra között vagy valamikor reggel 7-kor",
"function" : "timestamps",
"args" : ['2018-04-01 12:00'],
"result" : ['2018-04-01 12:00', '2018-04-02 21:00', '2018-04-02 23:00', '2018-04-02 07:00']
}
),
])
def test_parser_extract_parameter(info):
test = parser.Extract(info['text'])
if 'args' not in info or not info['args']:
result = eval('test.'+info['function']+'()')
else:
result = eval('test.'+info['function']+'('+str(info['args']).strip('[]')+')')
assert info['result'] == result
| mit | 4,608,900,468,915,946,500 | 24.629121 | 317 | 0.558396 | false |
fraoustin/mail2blog | mail2blog/main.py | 1 | 9106 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
save mail to file for ablog
"""
from __future__ import print_function # for pylint
import sys
import os
import os.path
import shutil
import datetime
import imaplib
import getpass
import email
import smtplib
try:
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
except: # for python3
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import myterm
import myterm.log
from myterm.parser import OptionParser, read_conf, find_confdir
import mail2blog
__version__ = mail2blog.__version__
class MyError(Exception):
"""
manage Error specific
"""
def __init__(self, value):
Exception.__init__(self)
self.value = value
def __str__(self):
return repr(self.value)
def create_logger(level='DEBUG', file='log.txt',
format="%(asctime)s - %(levelname)s - %(message)s"):
"""add logger"""
# create logger
logger = myterm.Logger(level=myterm.log.LEVELLOG[level])
logger.add_stream()
# Add the log message handler to the logger
if len(file):
logger.add_rotating_file(path=file, maxBytes=5012, form=format)
return logger
def send_mail(server, port, login, password, mfrom, mto, subject, body="",
ssl=True):
""" send mail by smtp """
msg = MIMEMultipart()
msg['From'] = mfrom
msg['To'] = mto
msg['Subject'] = subject
msg.attach(MIMEText(body, 'plain'))
server = smtplib.SMTP(server, port)
if ssl:
server.starttls()
server.login(login, password)
server.sendmail(mfrom, mto, msg.as_string())
server.quit()
def main():
""" run mail2blog """
parser = OptionParser(
version="%prog " + __version__, usage="usage: %prog [options] args")
parser.description = "mail to blog: save attachement in blog path and del mail"
parser.epilog = "by Frederic Aoustin"
parser.add_option("-l", "--level-log",
dest="level",
choices=[key for key in myterm.log.LEVELLOG.keys()],
help="level of log",
default="INFO",
type="choice")
parser.add_option("-d", "--path-log",
dest="logfile",
help="directory of log file",
default="",
type="string")
parser.add_option("-c", "--path-conf",
dest="conf",
help="path of conf.py",
default="",
type="string")
(options, args) = parser.parse_args()
try:
logger = create_logger(options.level, options.logfile)
if len(options.conf):
if os.path.isdir(options.conf):
options.conf = os.path.join(options.conf, "conf.py")
if os.path.isfile(options.conf):
logger.debug("read conf %s" % options.conf)
conf = read_conf(os.path.dirname(options.conf), os.path.basename(
os.path.splitext(options.conf)[0]))
else:
logger.error("%s does not a file" % options.conf)
raise MyError("not found %s" % options.conf)
else:
dirn, name, conf = find_confdir("conf")
options.conf = os.path.join(dirn, name)
logger.debug("find and read conf %s" % os.path.join(dirn, name))
# manage conf by conf
imap = getattr(conf, 'mail2blog_imap', 'imap.gmail.com')
mailbox = getattr(conf, 'mail2blog_mailbox', None)
mailboxpassword = getattr(conf, 'mail2blog_mailbox_password', None)
authorized = getattr(conf, 'mail2blog_user_authorized', [])
blogpath = os.path.dirname(options.conf)
bckpath = getattr(conf, 'mail2blog_bck_path', None)
forkpath = getattr(conf, 'mail2blog_fork_path', {})
smtp = getattr(conf, 'mail2blog_smtp', 'smtp.gmail.com')
smtpport = getattr(conf, 'mail2blog_smtp_port', 587)
smtpnotssl = getattr(conf, 'mail2blog_smtp_not_ssl', False)
smtplogin = getattr(conf, 'mail2blog_smtp_login', None)
smtppassword = getattr(conf, 'mail2blog_smtp_password', None)
smtpfrom = getattr(conf, 'mail2blog_smtp_from', None)
build = getattr(conf, 'mail2blog_build', False)
if not len(mailbox) or not len(imap):
logger.error("mailbox, imap are mandatory ")
sys.exit(1)
if not len(mailboxpassword):
mailboxpassword = getpass.getpass("password for mailbox:")
try:
runbuild = False
logger.info("connect to imap server")
try:
mailinbox = imaplib.IMAP4_SSL(imap)
logger.info(mailinbox.login(mailbox, mailboxpassword)[1])
except Exception as exp:
logger.critical(exp)
sys.exit(1)
mailinbox.select()
typ, data = mailinbox.uid('SEARCH', 'ALL')
msgs = data[0].split()
logger.info("Found {0} msgs".format(len(msgs)))
for uid in msgs:
typ, content = mailinbox.uid('FETCH', uid, '(RFC822)')
mail = email.message_from_string(content[0][1])
logger.info("From: {0}, Subject: {1}, Date: {2}\n".format(
mail["From"], mail["Subject"], mail["Date"]))
mailfrom = mail["From"]
if '<' in mailfrom:
mailfrom = mailfrom.split('<')[-1].split('>')[0]
if mailfrom in authorized:
logger.debug("From %s authorized" % mailfrom)
if mail.is_multipart():
for part in mail.walk():
if part.get_filename():
ext = os.path.splitext(
part.get_filename())[1].lower()
logger.debug(
"treat %s, extension : %s" % (part.get_filename(), ext))
if ext in forkpath.keys():
logger.debug(
"save %s" % part.get_filename())
if not os.path.exists(os.path.join(blogpath, forkpath[ext])):
os.makedirs(
os.path.join(blogpath, forkpath[ext]))
pathfile = os.path.join(
blogpath, forkpath[ext], part.get_filename())
if os.path.isfile(pathfile):
if len(bckpath):
logger.debug(
"save bkp %s" % part.get_filename())
if not os.path.exists(bckpath):
os.makedirs(bckpath)
qfile = os.path.join(
bckpath, '%s_%s' % (datetime.datetime.now(), part.get_filename()))
shutil.move(pathfile, qfile)
else:
os.remove(pathfile)
open(pathfile, 'wb').write(
part.get_payload(decode=True))
runbuild = True
else:
logger.debug("not save %s" %
part.get_filename())
if len(smtplogin) and len(smtp):
logger.info("send response by mail")
send_mail(smtp,
smtpport,
smtplogin,
smtppassword,
smtpfrom,
mailfrom,
'Add content in blog',
"Your mail %s add in blog" % mail[
"Subject"],
not smtpnotssl)
else:
logger.warning("From %s not authorized" % mailfrom)
logger.info("delete mail")
mailinbox.uid('STORE', uid, '+FLAGS', '(\Deleted)')
logger.info("disconnect to imap server")
mailinbox.close()
mailinbox.logout()
if runbuild and build:
import ablog.commands
ablog.commands.ablog_build()
except Exception as exp:
logger.critical(parser.error(exp))
raise exp
except Exception as exp:
print(parser.error(exp))
parser.print_help()
sys.exit(1)
if __name__ == '__main__':
main()
| gpl-2.0 | -1,194,266,685,859,457,000 | 40.20362 | 114 | 0.479025 | false |
litrin/pyPIDcontroller | test_PIDcontroller.py | 1 | 1459 | import unittest, random, time
from PID.datasets import SerialValue, ListValue
from PID.controllers import Controller
class TestSerialValue(unittest.TestCase):
def test_sv(self):
sv = SerialValue()
self.main_testcase(sv)
def test_lv(self):
lv = ListValue()
self.main_testcase(lv)
def main_testcase(self, v):
v.value(1)
v.value(2)
self.assertEqual(v.current, 2)
self.assertEqual(v.previous, 1)
self.assertEqual(v.delta, 2 - 1)
self.assertEqual(len(v), 2)
class TestController(unittest.TestCase):
def prepare_controller(self):
p = Controller()
p.set_PID(0.7, 0.1, 0.002)
return p
def test_controller(self):
controller = self.prepare_controller()
target = 0
controller.set_target(target)
feedback = 0.0
for i in range(1, 0xff):
time.sleep(0.001)
controller.update(feedback)
output = controller.get_output(True)
serial = SerialValue()
feedback += output
if i & 0x8 is 0x8:
target = random.random()
controller.set_target(target)
elif i & 0x8 > 0x2:
serial.value(feedback)
self.assertLessEqual(abs(serial.current - target),
abs(serial.previous - target))
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -8,957,620,833,714,989,000 | 24.596491 | 67 | 0.559287 | false |
pferreir/indico | indico/core/signals/rb.py | 1 | 1189 | # This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from blinker import Namespace
_signals = Namespace()
booking_created = _signals.signal('booking-created', """
Executed after a booking has been successfully created. The *sender*
is the new `Reservation` object.
""")
booking_state_changed = _signals.signal('booking-state-changed', """
Executed after a booking has been cancelled/rejected/accepted. The *sender*
is the `Reservation` object.
""")
booking_modified = _signals.signal('booking-modified', """
Executed after a booking has been modified. The *sender* is the `Reservation` object and
a dictionary of changed values is passed in the `changes` kwarg.
""")
booking_deleted = _signals.signal('booking-deleted', """
Executed after a booking has been deleted. The *sender* is the `Reservation` object.
""")
booking_occurrence_state_changed = _signals.signal('booking-occurrence-state-changed', """
Executed after the state of a booking occurrence changed.
The *sender* is the `ReservationOccurrence` object.
""")
| mit | -5,946,231,696,676,463,000 | 32.027778 | 90 | 0.745164 | false |
beiju/thekindleonthewall | src/python/web.py | 1 | 13234 | from flask import Flask, request, g, render_template, redirect, url_for, session, jsonify, json
from apscheduler.scheduler import Scheduler
from flask_oauth import OAuth
import requests
import arduino
import time
import pytz
import datetime
from dateutil.parser import parse
from settings import *
import logging
import threading
app = Flask(__name__, static_url_path='')
logging.basicConfig() # used by apscheduler
sched = Scheduler()
sched.start()
app.secret_key = GOOGLE_SECRET_KEY
oauth = OAuth()
google = oauth.remote_app('google',
base_url='https://www.google.com/accounts/',
authorize_url='https://accounts.google.com/o/oauth2/auth',
request_token_url=None,
request_token_params={'scope': 'https://www.googleapis.com/auth/calendar',
'response_type': 'code'},
access_token_url='https://accounts.google.com/o/oauth2/token',
access_token_method='POST',
access_token_params={'grant_type': 'authorization_code'},
consumer_key=GOOGLE_CLIENT_ID,
consumer_secret=GOOGLE_CLIENT_SECRET)
the_arduino = arduino.Arduino(SIGNATURE, FILENAME, BAUD, TIMEOUT)
cache = dict()
print "Flask imported"
if the_arduino.connected:
print "Starting up with arduino connected"
else:
print "Starting up without arduino connected"
##########
# Routes #
##########
@app.route('/')
def index():
print 'connected', the_arduino.connected #!
return app.send_static_file('index.html')
@app.route('/update')
def update():
lastUpdate = float(request.args.get('lastUpdate') or 0)
data = dict()
sources = [
('weather', forecast_upd),
('local_info', local_info_upd),
('calendar', calendar_upd)
]
for item in sources:
data[ item[0] ] = retrieve_cached(item[0], lastUpdate, item[1])
if data[ item[0] ] is None: del data[ item[0] ]
return jsonify(data)
@app.route('/run', methods=['POST'])
def run_command():
command = request.form.get('command', '')
print 'command', command
the_arduino.send_raw_command(command)
print 'command', command
return redirect(url_for('run'))
@app.route('/clear')
def clearCache():
cache.clear()
return redirect(url_for('index'))
@app.route('/login')
def login():
if cache and 'calendar' in cache and 'data' in cache['calendar']:
cache['calendar']['data']['err_code'] = "pending_authorization" # Avoid endless redirects
callback=url_for('authorized', _external=True)
return google.authorize(callback=callback)
@app.route(GOOGLE_REDIRECT_URI)
@google.authorized_handler
def authorized(response):
access_token = response['access_token']
session['access_token'] = access_token, ''
print access_token
if 'calendar' in cache:
del cache['calendar'] # Update the calendar the next time it's requested
return redirect(url_for('index'))
@google.tokengetter
def get_access_token():
return session.get('access_token')
####################
# Arduino Commands #
####################
@the_arduino.command('lights_status')
def upd_lights_status(args):
if 'local_info' not in cache:
cache['local_info'] = dict()
cache['local_info']['light1'] = (int(args[0]) & 0b001 == 0b001)
cache['local_info']['light2'] = (int(args[0]) & 0b010 == 0b010)
cache['local_info_upd'] = time.time()
print 'lights updated '
###########
# Updates #
###########
@sched.interval_schedule(seconds=1)
def refresh_arduino():
if the_arduino.connected:
the_arduino.refresh()
else:
the_arduino.open()
if the_arduino.connected:
print "Reconnected arduino"
@sched.interval_schedule(minutes=5)
def forecast_upd():
print "Updating forecast"
forecast = requests.get('https://api.forecast.io/forecast/'+FORECAST_API+'/'+FORECAST_LAT+','+FORECAST_LONG, params = {
'units': FORECAST_UNITS,
'exclude': 'flags'
})
data = forecast.json()
for key in ['minutely', 'hourly', 'daily']:
if key in data and 'data' in data[key]: del data[key]['data']
# forecast.load_forecast(FORECAST_LAT, FORECAST_LONG, units=FORECAST_UNITS)
#
# currently = forecast.get_currently()
# minutely = forecast.get_minutely()
# hourly = forecast.get_hourly()
# daily = forecast.get_daily()
# data = {
# 'current': {
# 'icon': currently.icon,
# 'description': currently.summary,
# 'temperature': {
# 'c': currently.temperature,
# 'f': currently.temperature * 9 / 5 + 32
# },
# 'wind': {
# 'speed': currently.windspeed,
# 'angle': currently.windbaring # Typo in the library
# }
# },
# 'next_hr': {
# 'icon': minutely.icon,
# 'description': minutely.summary
# },
# 'tomorrow': {
# 'icon': hourly.icon,
# 'description': hourly.summary
# },
# 'this_week': {
# 'icon': daily.icon,
# 'description': daily.summary
# }
# }
if 'weather' not in cache: cache['weather'] = dict()
cache['weather']['data'] = data
cache['weather']['last_update'] = time.time()
@sched.interval_schedule(minutes=1)
def local_info_upd():
print "Updating Local Info"
the_arduino.send_command('send_status', 'lights')
# the_arduino.send_command('send_status', 'temperature') # When there's a temp sensor on the arduino, enable this
@sched.interval_schedule(minutes=5)
def calendar_upd(access_token=False):
print "Updating Calendar"
caldata = dict()
try:
access_token = access_token or session.get('access_token')
if access_token is None:
fill_calendar_cache({
'error': "Google Calendar Not Authorized",
'err_code': "not_authorized",
'err_note': "Visit /login to authorize"
})
return False
access_token = access_token[0]
caldata['daka_hours'] = query_gcal(access_token, '0cto0462lqrpt673m51bf1ucuk%40group.calendar.google.com')
if caldata['daka_hours'] == True:
return False
caldata['spoon_hours'] = query_gcal(access_token, 'ieqe1kvtb6narapqoafv59umog%40group.calendar.google.com')
caldata['will'] = query_gcals(access_token, '488or1ai5vadl5psti3iq8ipgs%40group.calendar.google.com', # work
'beiju.i.am%40gmail.com', # personal
't7ijq9al3asosqh1jnp93hvgdk%40group.calendar.google.com') # class
caldata['ian'] = query_gcals(access_token, 'sodbfdhm4q7api4qvf5h5k7rlg%40group.calendar.google.com', # social
'36gqite9pam369c6mknuttgsqg%40group.calendar.google.com', # work
'achgl7e3m1pokdo8o1uqis70fk%40group.calendar.google.com', # ACM
'jnqo9lo8efm5ogj78pr176qstg%40group.calendar.google.com', # WPI Extracurricular
'a82i41iavlvd37d9fnrofklrms%40group.calendar.google.com', # WPI Schoolwork
'ianonavy%40gmail.com')
fill_calendar_cache(caldata)
except RuntimeError:
with app.test_request_context('/update'):
calendar_upd()
####################
# Helper Functions #
####################
def retrieve_cached(name, since, getter):
if name not in cache: cache[name] = dict()
if 'data' not in cache[name]:
if 'last_request' not in cache[name] or cache[name]['last_request'] + RETRY_REQUEST_TIMEOUT < time.time():
# This is for google calendar, which tries its hardest to make my code break
if name is 'calendar':
args = [session.get('access_token')]
else:
args = []
# Force the function to run asynchronously
thr = threading.Thread(target=getter, args=args)
thr.start()
cache[name]['last_request'] = time.time()
if 'data' in cache[name] and ('last_update' not in cache[name] or cache[name]['last_update'] > since):
return cache[name]['data']
return None
def query_gcals(access_token, *calIDs):
data = dict()
for calID in calIDs:
currdata = query_gcal(access_token, calID)
if currdata == False:
continue
if 'current' in currdata and currdata['current'] and 'current' not in data:
data['current'] = currdata['current']
if 'next' in currdata and currdata['next'] and ('next' not in data or parse(currdata['next']['start_time']) < parse(data['next']['start_time'])):
data['next'] = currdata['next']
if 'current' in data and 'end_time' in data['current'] and 'start_time' in data['next'] and \
abs(parse(data['current']['end_time']) - parse(data['next']['start_time'])) < time.timedelta(minutes=5):
data['next']['continuation'] = True
return data; # at this point the data won't ever change
else:
data['next']['continuation'] = False
return data
def query_gcal(access_token, calID):
from urllib2 import Request, urlopen, URLError
url = 'https://www.googleapis.com/calendar/v3/calendars/'
url+= calID+'/events'
url+= '?maxResults=7'
url+= '&orderBy=startTime'
url+= '&singleEvents=true'
url+= '&timeMin='+datetime.date.today().isoformat()+'T00:00:00Z'
url+= '&key='+GOOGLE_SECRET_KEY
req = Request(url, None, {'Authorization': 'OAuth '+access_token})
try:
res = urlopen(req)
except URLError, e:
if e.code == 401:
# Unauthorized - bad token
session.pop('access_token', None)
return True
if e.code == 403:
return {
'error': "403 Forbidden",
'err_code': "api_403",
'err_note': "This error is often caused by sending an API request from an IP address not included in https://code.google.com/apis/console",
'url': url
}
res = urlopen(req)
try:
items = json.loads(res.read())['items']
except KeyError, e:
return False
data = dict()
now = datetime.datetime.now(pytz.utc)
for item in items:
if 'dateTime' not in item['start']: #all-day event
startTime = pytz.utc.localize(parse(item['start']['date']))
endTime = pytz.utc.localize(parse(item['end']['date'])+datetime.timedelta(days=1,seconds=-1))
else:
startTime = parse(item['start']['dateTime'])
endTime = parse(item['end']['dateTime'])
if 'current' not in data: # Look for the current event
if startTime < now and endTime > now:
data['current'] = {
'exists': True,
'start_time': startTime.isoformat(),
'end_time': endTime.isoformat(),
'duration': time_btwn(startTime, endTime),
'remaining_time': time_btwn(now, endTime),
}
if 'location' in item:
data['current']['event_loc'] = item['location']
if 'summary' in item:
data['current']['event_name'] = item['summary']
if 'next' not in data:
if startTime > now: # The first event for which startTime is after now is 'next', since events are ordered by startTime
data['next'] = {
'exists': True,
'start_time': startTime.isoformat(),
'end_time': endTime.isoformat(),
'duration': time_btwn(startTime, endTime),
'time_until': time_btwn(startTime, now),
'continuation': 'current' in data and (abs(startTime - parse(data['current']['end_time'])) < datetime.timedelta(minutes=5))
}
if 'location' in item:
data['next']['event_loc'] = item['location']
if 'summary' in item:
data['next']['event_name'] = item['summary']
if 'current' not in data:
data['current'] = {} # Do this to clear cached result
return data
def time_btwn(datetime1, datetime2):
hours, remainder = divmod((datetime2 - datetime1).seconds, 3600)
minutes, seconds = divmod(remainder, 60)
strval = ''
if hours == 1:
strval += '1 hour '
elif hours != 0:
strval += str(hours)+' hours '
if minutes == 1:
strval += '1 minute '
elif minutes != 0:
strval += str(minutes)+' minutes '
return strval.strip()
def fill_calendar_cache(data):
cache['calendar']['data'] = data
cache['calendar']['last_update'] = time.time()
########
# Init #
########
if __name__ == "__main__":
app.run('0.0.0.0', 5000, debug=True, use_reloader=False) # Reloader doesn't play nice with apscheduler
| mit | -4,555,791,068,157,134,000 | 35.15847 | 155 | 0.570198 | false |
jeffdougherty/dca | InGameWindow.py | 1 | 20284 | from tkintertable import *
from helperfunctions import connect_to_db, close_connection, create_names_dict, parse_comma_separated_numbers
from helperclasses import DataTable
from ttk import Combobox, Checkbutton
from DamageRules import shell_bomb_hit
class GameWindow(Frame):
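    """In-game window: ship status table, hit resolution and damage control buttons, turn tracking, and a game log console."""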
def __init__(self, parent, game_id):
self.parent = parent
#Set up the window
self.width = 1280
self.height = 800
self.center_window()
self.damage_type_string = StringVar()
self.damage_type_string.set('Bomb')
self.torpedo_aspect_string = StringVar()
self.torpedo_aspect_string.set('Other')
self.armor_pen_string = StringVar()
self.armor_pen_string.set('Yes')
self.torpedo_depth_string = StringVar()
self.torpedo_depth_string.set('Shallow')
self.debug_frame_armed = IntVar()
self.debug_frame_armed.set(0)
self.verbose_mode = BooleanVar()
self.verbose_mode.set(True)
Frame.__init__(self, parent, background='white')
self.pack(fill=BOTH, expand=1)
#Create the frames for holding the UI
self.tables_frame = Frame(master=self)
self.tables_frame.grid(row=1, column=1)
controls_frame = Frame(master=self)
controls_frame.grid(row=1, column=2)
nav_controls_frame = Frame(master=self)
nav_controls_frame.grid(row=2, column=1)
self.console_frame = Frame(master=self)
self.console_frame.grid(row=3, column=1)
# Need the game turn before we proceed any further
self.game_id = game_id
cursor = connect_to_db()
cmd_string = """SELECT * FROM Game WHERE [Game ID] = ?;"""
cursor.execute(cmd_string, (self.game_id,))
game_data = cursor.fetchone()
col_headings = [description[0] for description in cursor.description]
self.turn_index = col_headings.index('Game Turn')
#print("Turn_index = " + str(self.turn_index))
print("Game data: " + str(game_data))
self.game_turn = game_data[self.turn_index]
scenario_index = col_headings.index('Scenario Key')
self.scenario_key = game_data[scenario_index]
close_connection(cursor)
self.load_game_log()
self.draw_controls(controls_frame)
self.draw_tables(self.tables_frame)
self.draw_nav_controls(nav_controls_frame)
self.draw_console(self.console_frame)
def center_window(self):
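        """Center the window on the screen using the configured width and height."""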
w, h = self.width, self.height
self.sw = self.parent.winfo_screenwidth()
self.sh = self.parent.winfo_screenheight()
x = (self.sw - self.width) / 2
y = (self.sh - self.height) / 2
self.parent.geometry('%dx%d+%d+%d' % (w, h, x, y))
def draw_controls(self, parent):
turn_frame = Frame(parent)
turn_frame.pack(side='top')
turn_label = Label(turn_frame, text='Game Turn')
turn_label.pack(side='top')
prev_turn_button = Button(turn_frame, text='<<<', command=lambda: self.prev_turn())
prev_turn_button.pack(side='left')
self.turn_readout = Entry(turn_frame, width=2)
self.turn_readout.insert(0, self.game_turn)
self.turn_readout.config(state='readonly')
self.turn_readout.pack(side='left')
next_turn_button = Button(turn_frame, text='>>>', command=lambda: self.next_turn())
next_turn_button.pack(side='left')
spacer_frame = Frame(parent)
spacer_frame.pack(side='top')
spacer_label = Label(spacer_frame, text='')
spacer_label.pack(side='left')
self.draw_hit_controls(parent)
def draw_tables(self, parent):
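        """Build the ships DataTable for this scenario, fill in the calculated Side Name and Critical Hits columns, and hide the bookkeeping columns."""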
        game_ship_table_column_names_list = ['Ship Name', 'Scenario Side', 'Ship Type', 'Size Class', 'Annex A Key',
                                             'UWP Port Dmg', 'UWP Stbd Dmg', 'Critical Hits', 'Damage Pts Start',
                                             'Side Name', 'Speed', 'Speed Damaged', 'Damage Pts',
                                             '25% Threshold Crossed', '10% Threshold Crossed', 'Crit Engineering',
                                             'Crit Flood Magazines', 'Extra DC', 'Game ID', 'Formation ID',
                                             'Formation Ship Key']
        ship_table_column_types_dict = {'Ship Name': 'text', 'Scenario Side': 'text', 'Critical Hits': 'text',
                                        'Side Name': 'text', 'default': 'number'}
        self.shipsTable = DataTable(parent, scenario_key=self.scenario_key,
                                    column_types_dict=ship_table_column_types_dict,
                                    table_name='Game Ship Formation Ship',
                                    column_names_list=game_ship_table_column_names_list, sig_figs=3,
                                    column_title_alias_dict={'Speed Damaged': 'Max Speed',
                                                             'Damage Pts': 'Damage Pts Left'})
#Need to move columns, for that we need to address shipsTable's tableModel directly
ships_table_model = self.shipsTable.get_model()
ships_table_canvas = self.shipsTable.get_table()
#ships_table_model.moveColumn(1, 3)
ships_table_model.moveColumn(ships_table_model.getColumnIndex('Critical Hits'), ships_table_model.getColumnCount() - 1) #Puts "Critical Hits" in last place
this_side_names_dict = create_names_dict(self.scenario_key)
#Now we need to go through the data
for this_record in ships_table_model.data.values():
this_record['Side Name'] = this_side_names_dict[int(this_record['Scenario Side'])]
this_record['Critical Hits'] = ''
#fill in calculated columns
ships_table_canvas.redrawTable()
ships_table_model.setSortOrder(columnName='Scenario Side')
        self.shipsTable.hide_column('Damage Pts Start')
self.shipsTable.hide_column('Annex A Key')
self.shipsTable.hide_column('Scenario Side')
self.shipsTable.hide_column('Formation ID')
self.shipsTable.hide_column('Formation Ship Key')
self.shipsTable.hide_column('Speed')
self.shipsTable.hide_column('25% Threshold Crossed')
self.shipsTable.hide_column('10% Threshold Crossed')
self.shipsTable.hide_column('Crit Engineering')
self.shipsTable.hide_column('Crit Flood Magazines')
self.shipsTable.hide_column('Extra DC')
self.shipsTable.hide_column('Game ID')
ships_table_canvas.redrawVisible()
#Need to store the columns and their indexes for later reference.
self.ships_table_index_dict = {}
for i in range(ships_table_model.getColumnCount()):
self.ships_table_index_dict[ships_table_model.getColumnName(i)] = i
def draw_nav_controls(self, parent):
quit_button = Button(parent, text="Quit", command=self.quit)
#quit_button.pack(side='right') #Do we want a quit button on every sub-window?!?
close_button = Button(parent, text="Close", command=lambda: self.close_window())
close_button.pack(side='left')
def draw_console(self, parent):
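        """Create the read-only log console and load the existing 'Game Log' entries for this game."""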
self.log_console = Text(master=parent, relief=RIDGE, bd=4)
self.log_console.pack(side='top')
#Initial load of game log events
cursor = connect_to_db()
cursor.execute("""SELECT * FROM 'Game Log' WHERE [Game ID]=?;""",(self.game_id,))
for record in cursor.fetchall():
this_record = str(record[1]) + " Turn: " + str(record[2]) + " " + record[3] + "\n"
self.log_console.insert(END, this_record)
self.log_console.config(state=DISABLED)
close_connection(cursor)
def draw_hit_controls(self, parent):
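        """Draw the damage control buttons plus the hit entry controls (damage type, DP, armor penetration, torpedo depth and aspect)."""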
dc_label_frame = Frame(parent)
dc_label_frame.pack(side='top')
dc_label = Label(dc_label_frame, text='Damage Control')
dc_label.pack(side='top')
dc_buttons_frame = Frame(parent)
dc_buttons_frame.pack(side='top')
flood_button = Button(dc_buttons_frame, text='Flood Magazines', command=lambda: self.flood_this_magazine())
flood_button.pack(side='left')
extra_dc_button = Button(dc_buttons_frame, text='Toggle Extra DC', command=lambda: self.toggle_extra_dc())
extra_dc_button.pack(side='right')
hit_label_frame = Frame(parent)
hit_label_frame.pack(side='top')
panel_label = Label(hit_label_frame, text='Hit Controls')
panel_label.pack(side='top')
hit_type_frame = Frame(parent)
hit_type_frame.pack(side='top')
hit_type_label = Label(hit_type_frame, text="Type")
hit_type_label.pack(side='left')
self.damage_type_picker = Combobox(hit_type_frame, values=['Bomb','Shell', 'Torpedo/Mine'],
textvariable=self.damage_type_string, state='readonly', width=9)
self.damage_type_picker.pack(side='left')
self.damage_type_picker.bind("<<ComboboxSelected>>", lambda a: self.set_hit_panels())
hit_dp_frame = Frame(parent)
hit_dp_frame.pack(side='top')
self.hit_dp_amount = Entry(hit_type_frame, width=3)
self.hit_dp_amount.pack(side='left')
hit_dp_label = Label(hit_type_frame, text="DP")
hit_dp_label.pack(side='left')
self.bomb_shell_frame = Frame(parent)
self.bomb_shell_frame.pack(side='top')
armor_pen_label = Label(self.bomb_shell_frame, text="Armor Penetrated?")
armor_pen_label.pack(side="left")
self.armor_pen_picker = Combobox(self.bomb_shell_frame, values=['Yes', 'No'], state='readonly', textvariable = self.armor_pen_string, width=5)
self.armor_pen_picker.pack(side='left')
#Add something in for very small caliber hits
self.torpedo_frame = Frame(parent)
self.torpedo_frame.pack(side='top')
depth_label = Label(self.torpedo_frame, text='Run Depth')
depth_label.pack(side='left')
self.depth_picker = Combobox(self.torpedo_frame, values=['Shallow', 'Deep'], state='disabled', textvariable=self.torpedo_depth_string, width=8)
self.depth_picker.pack(side='left')
aspect_label = Label(self.torpedo_frame, text="Hit Aspect")
aspect_label.pack(side='left')
self.aspect_picker = Combobox(self.torpedo_frame, values=['Bow', 'Stern', 'Other'], state='disabled', textvariable=self.torpedo_aspect_string, width=5)
self.aspect_picker.pack(side='left')
execute_button_frame = Frame(parent)
execute_button_frame.pack(side='top')
execute_button = Button(execute_button_frame, text='Apply', command=lambda: self.apply_this_hit())
execute_button.pack(side='left')
self.draw_debug_controls(parent) #!!! Can remove this when debug functionality no longer desired
def set_hit_panels(self):
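        """Enable whichever of the bomb/shell or torpedo comboboxes matches the selected damage type and disable the other set."""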
new_val = self.damage_type_picker.get()
assert new_val == 'Shell' or new_val == 'Torpedo/Mine' or new_val == 'Bomb'
if new_val == 'Shell' or new_val == 'Bomb':
bomb_shell_val = 'readonly'
torpedo_val = 'disabled'
elif new_val == 'Torpedo/Mine':
bomb_shell_val = 'disabled'
torpedo_val = 'readonly'
for this_widget in self.bomb_shell_frame.winfo_children():
if this_widget.widgetName == 'ttk::combobox':
this_widget.config(state=bomb_shell_val)
for this_widget in self.torpedo_frame.winfo_children():
if this_widget.widgetName == 'ttk::combobox':
this_widget.config(state=torpedo_val)
def draw_debug_controls(self, parent):
verbose_mode_frame = Frame(parent)
verbose_mode_frame.pack(side='top')
verbose_mode_button = Checkbutton(verbose_mode_frame, text="VERBOSE", command = lambda: None, variable=self.verbose_mode)
verbose_mode_button.pack(side='top')
debug_arm_frame = Frame(parent)
debug_arm_frame.pack(side='top')
debug_arm_button = Checkbutton(debug_arm_frame, text="DEBUG", command = lambda: self.toggle_debug_frame(), variable=self.debug_frame_armed)
debug_arm_button.pack(side='top')
dice_entry_frame = Frame(parent)
dice_entry_frame.pack(side='top')
d6_label = Label(dice_entry_frame, text="D6")
d6_label.pack(side='left')
self.d6_entry = Entry(dice_entry_frame, width=2)
self.d6_entry.pack(side='left')
self.d6_entry.config(state='disabled')
d100_label = Label(dice_entry_frame, text="D100 Rolls")
d100_label.pack(side='left')
self.d100_entry = Entry(dice_entry_frame, width=20)
self.d100_entry.pack(side='left')
self.d100_entry.config(state='disabled')
def toggle_debug_frame(self):
new_val = self.debug_frame_armed.get()
if new_val == 1:
self.d6_entry.config(state='normal')
self.d100_entry.config(state='normal')
else:
self.d6_entry.delete(0, END)
self.d6_entry.config(state='disabled')
self.d100_entry.delete(0, END)
self.d100_entry.config(state='disabled')
def toggle_verbose(self):
print("Verbose mode is now " + str(self.verbose_mode.get()))
pass
def close_window(self):
self.destroy()
self.parent.destroy()
def prev_turn(self):
self.write_game_log("Game turn moved back to " + str(self.game_turn - 1))
self.game_turn -= 1
self.update_turn_readout()
def next_turn(self):
self.write_game_log("Game turn advanced to " + str(self.game_turn + 1))
self.game_turn += 1
self.update_turn_readout()
self.apply_fire_flooding_tac_turn()
def update_turn_readout(self):
self.turn_readout.config(state='normal')
self.turn_readout.delete(0, END)
self.turn_readout.insert(0, self.game_turn)
self.turn_readout.config(state='readonly')
cursor, conn = connect_to_db(returnConnection=True)
cursor.execute("""UPDATE Game SET [Game Turn] = ? WHERE [Game ID] = ?;""", (self.game_turn, self.game_id,))
conn.commit()
close_connection(cursor)
def load_game_log(self):
cursor = connect_to_db()
cursor.execute("""SELECT * FROM 'Game Log' WHERE [Game ID] = ?;""", (self.game_id,))
data_dump = cursor.fetchall()
if len(data_dump) == 0: #No logs yet for this game
self.log_sequence = 1
else:
last_record = data_dump[-1]
col_headings = [description[0] for description in cursor.description]
sequence_index = col_headings.index('Log Sequence')
            self.log_sequence = last_record[sequence_index] + 1
#May add something here to input existing entries into the console once that exists
close_connection(cursor)
def write_game_log(self, message):
#Do something here to make the message appear in the console, once that exists
#Do something else here to write the message to the MySQL DB
#First put it on the console
self.log_console.config(state=NORMAL)
this_message = str(self.log_sequence) + " Turn: " + str(self.game_turn) + " " + message + "\n"
self.log_console.insert(END, this_message)
self.log_console.config(state=DISABLED)
self.log_console.see(END)
self.log_console.update()
cursor, conn = connect_to_db(returnConnection=True)
cursor.execute("""INSERT INTO 'Game Log' VALUES (?,?,?,?);""", (self.game_id, self.log_sequence, self.game_turn, message,))
conn.commit()
close_connection(cursor)
self.log_sequence += 1
def generate_crits_statement(self, this_record):
#this_record is a ship record from the database. Renders into readable string form
#Need to be able to read from multiple databases. Need to write those as we go along
pass
def apply_this_hit(self, target = None, dp = None, hit_type = None): #Will usually be None, but occasionally we'll need to send in hits from fire/flooding
#Note that when these values are set to None they're automatically initialized from where those values will *usually* be.
if target == None:
target = self.shipsTable.get_currentRecord()
if dp == None:
dp = self.hit_dp_amount.get()
if hit_type == None:
hit_type = self.damage_type_picker.get()
debug = self.debug_frame_armed.get()
if debug == 1:
d6 = int(self.d6_entry.get())
d100_list = parse_comma_separated_numbers(self.d100_entry.get())
else:
d6 = None
d100_list = None
if hit_type == 'Shell' or hit_type == 'Bomb':
if self.armor_pen_picker.get() == 'Yes':
armor_pen = True
#default is no armor pen
else:
armor_pen = False
            self.write_game_log(target['Ship Name'] + " takes " + str(dp) + " DP from " + hit_type + " hit. ")
critical_hit_result = shell_bomb_hit(target, int(dp), hit_type, armor_pen, d6, d100_list, self.debug_frame_armed.get(), self.verbose_mode.get())
if critical_hit_result == 'Unsupported Ship':
tkMessageBox.showinfo('Unsupported Ship', 'Critical Hits for this ship are not yet supported by Damage Control Assistant')
else:
self.write_game_log(critical_hit_result)
        if hit_type == 'Torpedo/Mine':
#!!! Finish me!
depth = self.depth_picker.get()
aspect = self.aspect_picker.get()
#torpedo_hit(target, dp, depth, aspect)
if hit_type == 'Fire' or hit_type == 'Flood':
#!!! Finish me!
armor_pen = True
def flood_this_magazine(self):
target = self.shipsTable.get_currentRecord()
if target['Crit Flood Magazines'] == 0:
cursor, conn = connect_to_db(returnConnection=True)
cursor.execute("""UPDATE 'Game Ship Formation Ship' SET [Crit Flood Magazines]=1 WHERE [Game ID]=? AND [Scenario Side]=? AND [Formation ID]=? AND [Formation Ship Key]=?;""",(self.game_id, target['Scenario Side'], target['Formation ID'], target['Formation Ship Key'], ))
self.shipsTable.update()
conn.commit()
close_connection(cursor)
def toggle_extra_dc(self):
target = self.shipsTable.get_currentRecord()
cursor, conn = connect_to_db(returnConnection=True)
if target['Extra DC'] == 0:
new_val = 1
else:
new_val = 0
cursor.execute("""UPDATE 'Game Ship Formation Ship' SET [Extra DC]=? WHERE [Game ID]=? AND [Scenario Side]=? AND [Formation ID]=? AND [Formation Ship Key]=?;""",(new_val, self.game_id, target['Scenario Side'], target['Formation ID'], target['Formation Ship Key'], ))
conn.commit()
close_connection(cursor)
def apply_fire_flooding_tac_turn(self):
cursor, conn = connect_to_db(returnConnection=True)
cursor.execute("""SELECT * FROM 'Game Ship Fire/Flood' WHERE [Game ID] = ? AND [Turns Remaining] > 0;""")
fire_flood_data = cursor.fetchall
fire_flood_columns = [description[0] for description in cursor.description]
for i in xrange(len(fire_flood_data)):
this_turns_remaining = fire_flood_data[i][fire_flood_columns.index('Turns Remaining')]
this_turns_remaining -= 1
if this_turns_remaining == 0:
this_value = fire_flood_data[i][fire_flood_columns.index('Value')]
this_type = fire_flood_data[i][fire_flood_columns.index('Type')]
cursor.execute("""SELECT * FROM 'Game Ship Formation Ship' WHERE [Game ID] = ? AND [Scenario Side] = ? AND [Formation ID] = ? AND [Formation Ship Key] = ?;""", (fire_flood_data[i][fire_flood_columns.index('Game ID')], fire_flood_data[i][fire_flood_columns.index('Scenario Side')], fire_flood_data[i][fire_flood_columns.index('Formation ID')], fire_flood_data[i][fire_flood_columns.index('Formation Ship Key')],))
target_data = cursor.fetchone()
self.apply_this_hit(target_data, dp = this_value, hit_type = this_type)
cursor.execute("""UPDATE 'Game Ship Fire/Flood' SET [Turns Remaining] = ? WHERE [Game ID] = ? AND [Scenario Side] = ? AND [Formation ID] = ? AND [Formation Ship Key] = ?;""", (this_turns_remaining, fire_flood_data[i][fire_flood_columns.index('Game ID')], fire_flood_data[i][fire_flood_columns.index('Scenario Side')], fire_flood_data[i][fire_flood_columns.index('Formation ID')], fire_flood_data[i][fire_flood_columns.index('Formation Ship Key')],))
conn.commit()
| lgpl-3.0 | -3,432,203,865,741,450,000 | 51.413437 | 461 | 0.62532 | false |
au9ustine/elrond | elrond/aliyun-oss-upload.py | 1 | 1825 | from __future__ import print_function
from __future__ import division
import oss2
import sys
import datetime
import math
access_key_id = '<access_key_id>'
access_key_secret = '<access_key_secret>'
bucket_name = '<bucket_name>'
bucket_endpoint = 'http://oss-cn-shanghai.aliyuncs.com' # endpoint name
genesis = None
last_consumed = 0
last_right_now = None
def login():
auth = oss2.Auth(access_key_id, access_key_secret)
	bucket = oss2.Bucket(auth, bucket_endpoint, bucket_name, connect_timeout=8*3600.0) # seconds; otherwise large uploads always hit the default timeout
return bucket
def resumable_upload(bucket, object_key, local_file_path):
# get a file path (string)
oss2.resumable_upload(bucket, object_key, local_file_path, progress_callback=percentage, num_threads=4)
def percentage(consumed_bytes, total_bytes):
	# TODO: estimation is not accurate when num_threads > 1 because the module-level counters are not protected by a lock
global last_consumed
global genesis
global last_right_now
right_now = datetime.datetime.now()
if last_right_now is None:
last_right_now = genesis
delta = right_now - last_right_now
speed = 0
if delta.seconds > 0:
speed = (consumed_bytes-last_consumed) / 1024 / delta.seconds
if total_bytes:
rate = 100 * (float(consumed_bytes) / float(total_bytes))
print('uploaded: {0} bytes, total: {1} bytes, speed: {2} KiB/s, estimated: {3:.2f}% '.format(consumed_bytes, total_bytes, speed, rate))
last_consumed = consumed_bytes
last_right_now = right_now
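# Illustrative sketch (not part of the original script): serialise updates to
# the module-level counters with a lock, since the TODO above notes the
# estimate drifts when num_threads > 1. The wrapper name is made up.
import threading
_progress_lock = threading.Lock()
def percentage_locked(consumed_bytes, total_bytes):
	# take the lock so concurrent part uploads do not interleave their updates
	with _progress_lock:
		percentage(consumed_bytes, total_bytes)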
if __name__ == '__main__':
bucket = login()
file_path = '<local_file_path>'
genesis = datetime.datetime.now()
print(genesis)
resumable_upload(bucket, file_path, file_path)
print(datetime.datetime.now())
| mit | 1,349,357,877,743,282,700 | 35.244898 | 143 | 0.665205 | false |
Countbasic/repository.mediabox.storage | plugin.video.burningseries/searchlist.py | 1 | 1661 | # -*- coding: utf-8 -*-
import xbmc,xbmcaddon,xbmcgui
import sys, os, random, json, zlib
import urllib, urllib2, cookielib
import string, re, base64
import contact
SEP = os.sep
addonInfo = xbmcaddon.Addon ('plugin.video.burningseries')
dataPath = xbmc.translatePath(addonInfo.getAddonInfo('profile'))
addonPath = addonInfo.getAddonInfo('path')
urlHost = "http://bs.to/api/"
urlPics = "http://s.bs.to/img/cover/"
def getUrl(url):
try:
req = urllib2.Request(urlHost+url)
req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
req.add_header('Accept-encoding', 'gzip')
token_data = contact.do_token(url)
req.add_header('BS-Token',token_data)
response = urllib2.urlopen(req)
if response.info().get('Content-Encoding') == 'gzip':
d = zlib.decompressobj(16+zlib.MAX_WBITS)
return d.decompress(response.read())
else:
return response.read()
response.close()
except:
return False
pDialog = xbmcgui.DialogProgressBG()
pDialog.create('searchlist','refreshing')
data = getUrl("series")
jsonContent = json.loads(data)
items = len(jsonContent)
pos = 0
print "[bs] items search "
print items
w = open(dataPath+SEP+"searchList.data","wb")
w.write('')
w.close()
pDialog.update(pos)
out = u""
for d in jsonContent:
pos = pos + 1
out = d['series'].strip().encode('utf-8')+"|".encode('utf-8')+d['id'].encode('utf-8')+'\n'.encode('utf-8')
val = int(100*pos/items)
pDialog.update(val)
with open (dataPath+SEP+"searchList.data", 'a') as f:
f.write (out)
print "[bs] pos val"
print out
pDialog.close();
xbmc.executebuiltin("Notification(searchList,success,2000)")
| gpl-2.0 | -6,174,330,035,613,720,000 | 24.553846 | 124 | 0.694762 | false |
TODOTomorrow/qxpacker | qxpacker/TarContainer.py | 1 | 2442 | from qxpacker.Container import Container , ContainerFileType
from qxpacker.DdContainer import DdContainer
from qxpacker.EchoContainer import EchoContainer
import os , tempfile
import tarfile
# OPTIONS:
# name : possible values : description
#------------------------------------------------------------
# compress : gzip none : Set compression type
# loader : dd echo : Select bootloader (dd by default)
class TarContainer(Container):
compression=''
bootloader = DdContainer()
def shell_module_required(self):
return []
def __init__(self, ctx = None):
self.compression = ''
self.bootloader = DdContainer()
Container.__init__(self, ctx)
def _control_code_flush(self, shellctx):
shellctx.constant("CONTAINER_TYPE","tar")
def _data_flush(self, to, callnext, data_extraction_fname = "extract_data", after_extraction = "", control_code_flush=True):
targz_tmpfile = tempfile.mktemp()
tf = tarfile.open(targz_tmpfile, 'w:' + self.compression)
for f in self.search(recurse_datalist = True):
absname = os.path.abspath(f.name)
dirname=os.path.dirname(absname)
filename=os.path.basename(absname)
tf.add(absname , arcname = f.target_path)
tf.close()
to.write("tarcontainer_extract() { \n")
to.write("tar -xf")
if self.compression == "gz":
to.write("z")
to.write(" $TMPFILENAME\n");
if after_extraction != "":
to.write("\t%s $@" % after_extraction)
to.write(" }\n")
self.bootloader.add(targz_tmpfile, tpath = "$TMPFILENAME")
self.bootloader.flush(to, callnext, data_extraction_fname = data_extraction_fname, after_extraction = "tarcontainer_extract", control_code_flush = False)
if os.path.isfile(targz_tmpfile): os.remove(targz_tmpfile)
def set_opt(self,opt,val):
if opt == 'compress':
if val == 'none': self.compression = ''
elif val == 'gzip': self.compression = 'gz'
else: raise Exception('Bad option value ' + opt + ' = ' + val)
elif opt == 'loader':
if val == 'dd': self.bootloader = DdContainer()
elif val == 'echo': self.bootloader = EchoContainer()
else: raise Exception('Bad option value ' + opt + ' = ' + val)
| mit | 3,619,203,423,904,631,000 | 38.387097 | 162 | 0.576167 | false |
captiosus/treadmill | treadmill/runtime/linux/image/fs.py | 1 | 1629 | """The base implementation for fs plugins."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import abc
import logging
import six
from treadmill import appcfg
from treadmill import plugin_manager
_LOGGER = logging.getLogger(__name__)
_FS_PLUGIN_NAMESPACE = 'treadmill.image.{0}.fs'
@six.add_metaclass(abc.ABCMeta)
class FilesystemPluginBase(object):
"""The base class of filesystem plugins for the image.
:param tm_env:
The Treadmill application environment
:type tm_env:
`appenv.AppEnvironment`
"""
__slots__ = (
'tm_env',
)
def __init__(self, tm_env):
self.tm_env = tm_env
@abc.abstractmethod
def init(self):
"""Initializes the plugin."""
pass
@abc.abstractmethod
def configure(self, container_dir, app):
"""Configures the filesystem plugin.
:param ``str`` container_dir:
            Container base directory.
:param ``object`` app:
Container manifest object.
"""
pass
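# Illustrative only: a minimal concrete plugin showing the shape of a
# FilesystemPluginBase implementation. The class name and log messages are
# hypothetical and not part of Treadmill.
class ExampleFilesystemPlugin(FilesystemPluginBase):
    """No-op plugin that only logs what it is asked to do."""

    __slots__ = ()

    def init(self):
        _LOGGER.info('example fs plugin initialized')

    def configure(self, container_dir, app):
        _LOGGER.info('example fs plugin configuring %s', container_dir)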
def init_plugins(tm_env):
"""Inits all plugins."""
for app_type in appcfg.AppType:
namespace = _FS_PLUGIN_NAMESPACE.format(app_type.value)
for plugin in plugin_manager.load_all(namespace):
plugin(tm_env).init()
def configure_plugins(tm_env, container_dir, app):
"""Configures all plugins."""
namespace = _FS_PLUGIN_NAMESPACE.format(app.type)
for plugin in plugin_manager.load_all(namespace):
plugin(tm_env).configure(container_dir, app)
| apache-2.0 | -4,066,293,365,900,030,000 | 23.313433 | 63 | 0.644567 | false |
vanceza/short-programs | budget_summary.py | 1 | 1511 | #!/usr/bin/python3
import datetime, re, sys
if len(sys.argv) > 1:
fp = sys.argv[1]
else:
current_date=str(datetime.date.today())
fp = "/home/zachary/blog2.za3k.com/_posts/{}-weekly-review.md".format(current_date)
with open(fp, "r") as f:
lines = list(line for line in f)
budget_start = re.compile("^\\| Date")
budget_end = re.compile("^$")
start_line, end_line = None, None
for i, line in enumerate(lines):
if start_line is None and budget_start.match(line):
start_line = i
if end_line is None and start_line is not None and budget_end.match(line):
end_line = i
budget = lines[start_line:end_line]
lines = []
for line in budget[2:]:
date, place, amount, category, thing = [x.strip() for x in line.split("|")[1:]]
lines.append((float(amount), category))
print("{: <12} {:.2f}".format("Total:", sum(amount for (amount, category) in lines)))
print("{: <12} {:.2f}".format("Total (no rent):", sum(amount for (amount, category) in lines if category != "Rent")))
categories = sorted(set(category for (amount, category) in lines))
print()
OTHER = ("Food", "Grocery", "Luxury")
for category in categories:
if category not in OTHER:
print("{: <12} {:.2f}".format(category+":", sum(amount for (amount, c) in lines if category == c)))
print("{: <12} {:.2f}".format("Other"+":", sum(amount for (amount, c) in lines if c in OTHER)))
for category in OTHER:
print(" {: <12} {:.2f}".format(category+":", sum(amount for (amount, c) in lines if category == c)))
| cc0-1.0 | 8,097,837,572,993,923,000 | 42.171429 | 117 | 0.636664 | false |
twitter/pants | tests/python/pants_test/engine/legacy/test_address_mapper.py | 1 | 6949 | # coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from builtins import object, str
import mock
from pants.base.specs import SiblingAddresses, SingleAddress
from pants.build_graph.address import Address, BuildFileAddress
from pants.build_graph.address_mapper import AddressMapper
from pants.engine.legacy.address_mapper import LegacyAddressMapper
from pants.engine.nodes import Throw
from pants.util.contextutil import temporary_dir
from pants.util.dirutil import safe_file_dump, safe_mkdir
from pants_test.test_base import TestBase
class LegacyAddressMapperTest(TestBase):
def create_build_files(self):
# Create BUILD files
# build_root:
# BUILD
# BUILD.other
# dir_a:
# BUILD
# BUILD.other
# subdir:
# BUILD
# dir_b:
# BUILD
dir_a = os.path.join(self.build_root, 'dir_a')
dir_b = os.path.join(self.build_root, 'dir_b')
dir_a_subdir = os.path.join(dir_a, 'subdir')
safe_mkdir(dir_a)
safe_mkdir(dir_b)
safe_mkdir(dir_a_subdir)
safe_file_dump(os.path.join(self.build_root, 'BUILD'), 'target(name="a")\ntarget(name="b")')
safe_file_dump(os.path.join(self.build_root, 'BUILD.other'), 'target(name="c")')
safe_file_dump(os.path.join(dir_a, 'BUILD'), 'target(name="a")\ntarget(name="b")')
safe_file_dump(os.path.join(dir_a, 'BUILD.other'), 'target(name="c")')
safe_file_dump(os.path.join(dir_b, 'BUILD'), 'target(name="a")')
safe_file_dump(os.path.join(dir_a_subdir, 'BUILD'), 'target(name="a")')
def test_is_valid_single_address(self):
self.create_build_files()
mapper = self.address_mapper
self.assertFalse(mapper.is_valid_single_address(SingleAddress('dir_a', 'foo')))
self.assertTrue(mapper.is_valid_single_address(SingleAddress('dir_a', 'a')))
with self.assertRaises(TypeError):
mapper.is_valid_single_address('foo')
def test_scan_build_files(self):
self.create_build_files()
mapper = self.address_mapper
build_files = mapper.scan_build_files('')
self.assertEqual(build_files,
{'BUILD', 'BUILD.other',
'dir_a/BUILD', 'dir_a/BUILD.other',
'dir_b/BUILD', 'dir_a/subdir/BUILD'})
build_files = mapper.scan_build_files('dir_a/subdir')
self.assertEqual(build_files, {'dir_a/subdir/BUILD'})
def test_scan_build_files_edge_cases(self):
self.create_build_files()
mapper = self.address_mapper
# A non-existent dir.
build_files = mapper.scan_build_files('foo')
self.assertEqual(build_files, set())
# A dir with no BUILD files.
safe_mkdir(os.path.join(self.build_root, 'empty'))
build_files = mapper.scan_build_files('empty')
self.assertEqual(build_files, set())
def test_is_declaring_file(self):
scheduler = mock.Mock()
mapper = LegacyAddressMapper(scheduler, '')
self.assertTrue(mapper.is_declaring_file(Address('path', 'name'), 'path/BUILD'))
self.assertTrue(mapper.is_declaring_file(Address('path', 'name'), 'path/BUILD.suffix'))
self.assertFalse(mapper.is_declaring_file(Address('path', 'name'), 'path/not_a_build_file'))
self.assertFalse(mapper.is_declaring_file(Address('path', 'name'), 'differing-path/BUILD'))
self.assertFalse(mapper.is_declaring_file(
BuildFileAddress(target_name='name', rel_path='path/BUILD.new'),
'path/BUILD'))
self.assertTrue(mapper.is_declaring_file(
BuildFileAddress(target_name='name', rel_path='path/BUILD'),
'path/BUILD'))
def test_addresses_in_spec_path(self):
self.create_build_files()
mapper = self.address_mapper
addresses = mapper.addresses_in_spec_path('dir_a')
self.assertEqual(addresses,
{Address('dir_a', 'a'), Address('dir_a', 'b'), Address('dir_a', 'c')})
def test_addresses_in_spec_path_no_dir(self):
self.create_build_files()
mapper = self.address_mapper
with self.assertRaises(AddressMapper.BuildFileScanError) as cm:
mapper.addresses_in_spec_path('foo')
self.assertIn('does not match any targets.', str(cm.exception))
def test_addresses_in_spec_path_no_build_files(self):
self.create_build_files()
safe_mkdir(os.path.join(self.build_root, 'foo'))
mapper = self.address_mapper
with self.assertRaises(AddressMapper.BuildFileScanError) as cm:
mapper.addresses_in_spec_path('foo')
self.assertIn('does not match any targets.', str(cm.exception))
def test_scan_specs(self):
self.create_build_files()
mapper = self.address_mapper
addresses = mapper.scan_specs([SingleAddress('dir_a', 'a'), SiblingAddresses('')])
self.assertEqual(addresses,
{Address('', 'a'), Address('', 'b'), Address('', 'c'), Address('dir_a', 'a')})
def test_scan_specs_bad_spec(self):
self.create_build_files()
mapper = self.address_mapper
with self.assertRaises(AddressMapper.BuildFileScanError) as cm:
mapper.scan_specs([SingleAddress('dir_a', 'd')])
self.assertIn('does not match any targets.', str(cm.exception))
def test_scan_addresses(self):
self.create_build_files()
mapper = self.address_mapper
addresses = mapper.scan_addresses()
self.assertEqual(addresses,
{Address('', 'a'), Address('', 'b'), Address('', 'c'),
Address('dir_a', 'a'), Address('dir_a', 'b'), Address('dir_a', 'c'),
Address('dir_b', 'a'), Address('dir_a/subdir', 'a')})
def test_scan_addresses_with_root_specified(self):
self.create_build_files()
mapper = self.address_mapper
addresses = mapper.scan_addresses(os.path.join(self.build_root, 'dir_a'))
self.assertEqual(addresses,
{Address('dir_a', 'a'), Address('dir_a', 'b'), Address('dir_a', 'c'),
Address('dir_a/subdir', 'a')})
def test_scan_addresses_bad_dir(self):
# scan_addresses() should not raise an error.
self.create_build_files()
mapper = self.address_mapper
addresses = mapper.scan_addresses(os.path.join(self.build_root, 'foo'))
self.assertEqual(addresses, set())
def test_other_throw_is_fail(self):
# scan_addresses() should raise an error if the scheduler returns an error it can't ignore.
class ThrowReturningScheduler(object):
def execution_request(self, *args):
pass
def execute(self, *args):
return [], [(('some-thing', None), Throw(Exception('just an exception')))]
with temporary_dir() as build_root:
mapper = LegacyAddressMapper(ThrowReturningScheduler(), build_root)
with self.assertRaises(LegacyAddressMapper.BuildFileScanError) as cm:
mapper.scan_addresses(os.path.join(build_root, 'foo'))
self.assertIn('just an exception', str(cm.exception))
| apache-2.0 | 1,835,209,349,135,756,500 | 38.482955 | 100 | 0.659088 | false |
scriptharness/python-scriptharness | scriptharness/os.py | 1 | 1164 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Wrapping python os and related functions.
Args:
LOGGER_NAME (str): the default logging.Logger name
"""
from __future__ import absolute_import, division, print_function, \
unicode_literals
import logging
import os
LOGGER_NAME = "scriptharness.commands.os"
def makedirs(path, level=logging.INFO, context=None):
"""os.makedirs() wrapper.
Args:
path (str): path to the directory
level (Optional[int]): the logging level to log with. Defaults to
logging.INFO.
"""
if context:
logger = context.logger
else:
logger = logging.getLogger(LOGGER_NAME)
logger.log(level, "Creating directory %s", path)
if not os.path.exists(path):
os.makedirs(path)
logger.log(level, "Done.")
else:
logger.log(level, "Already exists.")
def make_parent_dir(path, **kwargs):
"""Create the parent of path if it doesn't exist.
Args:
path (str): path to the file.
**kwargs: These are passed to makedirs().
"""
dirname = os.path.dirname(path)
if dirname:
makedirs(dirname, **kwargs)
| mpl-2.0 | -1,072,963,174,791,318,500 | 24.304348 | 72 | 0.62457 | false |
ikn/boom | game/engine/sched.py | 1 | 33054 | """Event scheduler and interpolation."""
from time import time
from bisect import bisect
from math import cos, atan, exp
from random import randrange, expovariate
from functools import partial
from pygame.time import wait
from .conf import conf
from .util import ir
def _match_in_nest (obj, x):
"""Check if every object in a data structure is equal to some given object.
_match_in_nest(obj, x)
obj: data structure to look in: an arbitrarily nested list of lists.
x: object to compare to (not a list or tuple).
"""
if isinstance(obj, (tuple, list)):
        return all(_match_in_nest(o, x) for o in obj)
else:
return obj == x
def call_in_nest (f, *args):
"""Collapse a number of similar data structures into one.
Used in ``interp_*`` functions.
call_in_nest(f, *args) -> result
:arg f: a function to call with elements of ``args``.
:arg args: each argument is a data structure of nested lists with a similar
format.
:return: a new structure in the same format as the given arguments with each
non-list object the result of calling ``f`` with the corresponding
objects from each arg.
For example::
>>> f = lambda n, c: str(n) + c
>>> arg1 = [1, 2, 3, [4, 5], []]
>>> arg2 = ['a', 'b', 'c', ['d', 'e'], []]
>>> call_in_nest(f, arg1, arg2)
['1a', '2b', '3c', ['4d', '5e'], []]
One argument may have a list where others do not. In this case, those that do
not have the object in that place passed to ``f`` for each object in the
(possibly further nested) list in the argument that does. For example::
>>> call_in_nest(f, [1, 2, [3, 4]], [1, 2, 3], 1)
[f(1, 1, 1), f(2, 2, 1), [f(3, 3, 1), f(4, 3, 1)]]
However, in arguments with lists, all lists must be the same length.
"""
# Rect is a sequence but isn't recognised as collections.Sequence, so test
# this way
is_list = [(hasattr(arg, '__len__') and hasattr(arg, '__getitem__') and
not isinstance(arg, basestring))
for arg in args]
if any(is_list):
n = len(args[is_list.index(True)])
# listify non-list args (assume all lists are the same length)
args = (arg if this_is_list else [arg] * n
for this_is_list, arg in zip(is_list, args))
return [call_in_nest(f, *inner_args) for inner_args in zip(*args)]
else:
return f(*args)
def _cmp_structure (x, y):
"""Find whether the (nested list) structure of two objects is the same."""
is_list = isinstance(x, (tuple, list))
if is_list != isinstance(y, (tuple, list)):
# one is a list, one isn't
return False
elif is_list:
# both are lists: check length and contents
return len(x) == len(y) and \
all(_cmp_structure(xi, yi) for xi, yi in zip(x, y))
else:
# neither is a list
return True
def interp_linear (*waypoints):
"""Linear interpolation for :meth:`Scheduler.interp`.
interp_linear(*waypoints) -> f
:arg waypoints: each is ``(v, t)`` to set the value to ``v`` at time ``t``.
``t`` can be omitted for any but the last waypoint: the first
is ``0``, and other gaps are filled in with equal spacing.
``v`` is like the arguments taken by :func:`call_in_nest`, and
we interpolate for each number in the nested list structure of
``v``. Some objects in the ``v`` structures may be
non-numbers, in which case they will not be varied (maybe your
function takes another argument you don't want to vary);
objects may be ``None`` to always use the initial value in that
position.
:return: a function for which ``f(t) = v`` for every waypoint ``(t, v)``, with
intermediate values linearly interpolated between waypoints.
"""
# fill in missing times
vs = []
ts = []
last = waypoints[-1]
for w in waypoints:
if w is last or _cmp_structure(w, last):
vs.append(w[0])
ts.append(w[1])
else:
vs.append(w)
ts.append(None)
ts[0] = 0
# get groups with time = None
groups = []
group = None
for i, (v, t) in enumerate(zip(vs, ts)):
if t is None:
if group is None:
group = [i]
groups.append(group)
else:
if group is not None:
group.append(i)
group = None
# and assign times within those groups
for i0, i1 in groups:
t0 = ts[i0 - 1]
dt = float(ts[i1] - t0) / (i1 - (i0 - 1))
for i in xrange(i0, i1):
ts[i] = t0 + dt * (i - (i0 - 1))
interp_val = lambda r, v1, v2, v0: (r * (v2 - v1) + v1) \
if isinstance(v2, (int, float)) else v0
def val_gen ():
t = yield
while True:
# get waypoints we're between
i = bisect(ts, t)
if i == 0:
# before start
t = yield vs[0]
elif i == len(ts):
# past end: use final value, then end
last_val = lambda vl, v0: vl if isinstance(vl, (int, float)) \
else v0
t = yield call_in_nest(last_val, vs[-1], vs[0])
yield None
else:
v1 = vs[i - 1]
v2 = vs[i]
t1 = ts[i - 1]
t2 = ts[i]
# get ratio of the way between waypoints
r = 1 if t2 == t1 else (t - t1) / (t2 - t1) # t is always float
t = yield call_in_nest(interp_val, r, v1, v2, vs[0])
# start the generator; get_val is its send method
g = val_gen()
g.next()
return g.send
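# Illustrative usage (not part of the module): interpolate a 2D position
# through three waypoints; the coordinates and times are arbitrary.
def _example_interp_linear():
    get_pos = interp_linear(((0, 0), 0), ((100, 0), 1), ((100, 50), 2))
    # get_pos(.5) -> [50.0, 0.0]; get_pos(1.5) -> [100.0, 25.0];
    # past t=2 it returns the final value once, then None.
    return get_pos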
def interp_target (v0, target, damp, freq = 0, speed = 0, threshold = 0):
"""Move towards a target.
interp_target(v0, target, damp, freq = 0, speed = 0, threshold = 0) -> f
:arg v0: the initial value (a structure of numbers like arguments to
:func:`call_in_nest`). Elements which are not numbers are ignored.
:arg target: the target value (has the same form as ``v0``).
:arg damp: rate we move towards the target (``> 0``).
:arg freq: if ``damp`` is small, oscillation around ``target`` can occur, and
this controls the frequency. If ``0``, there is no oscillation.
:arg speed: if ``freq`` is non-zero, this is the initial 'speed', in the same
form as ``v0``.
:arg threshold: stop when within this distance of ``target``; in the same form
as ``v0``. If ``None``, never stop. If varying more than one
number, only stop when every number is within its threshold.
:return: a function that returns position given the current time.
"""
if v0 == target: # nothing to do
return lambda t: None
    def get_phase (v0, target, speed):
if freq == 0 or not isinstance(v0, (int, float)) or v0 == target:
return 0
else:
return atan(-(float(speed) / (v0 - target) + damp) / freq)
phase = call_in_nest(get_phase, v0, target, speed)
def get_amplitude (v0, target, phase):
if isinstance(v0, (int, float)):
return (v0 - target) / cos(phase) # cos(atan(x)) is never 0
amplitude = call_in_nest(get_amplitude, v0, target, phase)
def get_val (t):
def interp_val (v0, target, amplitude, phase, threshold):
if not isinstance(v0, (int, float)):
return v0
# amplitude is None if non-number
if amplitude is None or v0 == target:
if threshold is not None:
return None
return v0
else:
dist = amplitude * exp(-damp * t)
if threshold is not None and abs(dist) <= threshold:
return None
return dist * cos(freq * t + phase) + target
rtn = call_in_nest(interp_val, v0, target, amplitude, phase, threshold)
if _match_in_nest(rtn, None):
# all done
rtn = None
return rtn
return get_val
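# Illustrative usage (not part of the module): approach a scalar target with
# no oscillation; the numbers are arbitrary.
def _example_interp_target():
    get_val = interp_target(0., 100., damp=5., threshold=.5)
    # get_val(t) returns 100 - 100 * exp(-5 * t), and None once the value is
    # within 0.5 of the target, which ends the interpolation.
    return get_val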
def interp_shake (centre, amplitude = 1, threshold = 0, signed = True):
"""Shake randomly.
interp_shake(centre, amplitude = 1, threshold = 0, signed = True) -> f
:arg centre: the value to shake about; a nested list (a structure of numbers
like arguments to :func:`call_in_nest`). Elements which are not
numbers are ignored.
:arg amplitude: a number to multiply the value by. This can be a function that
takes the elapsed time in seconds to vary in time. Has the
same form as ``centre`` (return value does, if a function).
:arg threshold: stop when ``amplitude`` is this small; in the same form as
``centre``. If ``None``, never stop. If varying more than one
number, only stop when every number is within its threshold.
:arg signed: whether to shake around ``centre``. If ``False``, values are
always greater than ``centre`` (note that ``amplitude`` may be
signed).
:return: a function that returns position given the current time.
"""
def get_val (t):
def interp_val (centre, amplitude, threshold):
if not isinstance(centre, (int, float)):
return centre
if threshold is not None and abs(amplitude) <= threshold:
return None
val = amplitude * expovariate(1)
if signed:
val *= 2 * randrange(2) - 1
return centre + val
a = amplitude(t) if callable(amplitude) else amplitude
rtn = call_in_nest(interp_val, centre, a, threshold)
if _match_in_nest(rtn, None):
# all done
rtn = None
return rtn
return get_val
def interp_round (get_val, do_round = True):
"""Round the output of an existing interpolation function to integers.
interp_round(get_val, round_val = True) -> f
:arg get_val: the existing function.
:arg do_round: determines which values to round. This is in the form of the
values ``get_val`` returns, a structure of lists and booleans
corresponding to each number (see :func:`call_in_nest`).
:return: the ``get_val`` wrapper that rounds the returned value.
"""
def round_val (do, v):
return ir(v) if isinstance(v, (int, float)) and do else v
def round_get_val (t):
return call_in_nest(round_val, do_round, get_val(t))
return round_get_val
def interp_repeat (get_val, period = None, t_min = 0, t_start = None):
"""Repeat an existing interpolation function.
interp_repeat(get_val[, period], t_min = 0, t_start = t_min) -> f
:arg get_val: an existing interpolation function, as taken by
:meth:`Scheduler.interp`.
Times passed to the returned function are looped around to fit in the range
[``t_min``, ``t_min + period``), starting at ``t_start``, and the result is
passed to ``get_val``.
If ``period`` is not given, repeats end at the end of ``get_val``. Note that
this will not be entirely accurate, and you're probably better off specifying a
value if you can easily do so.
:return: the ``get_val`` wrapper that repeats ``get_val`` over the given
period.
"""
if t_start is None:
t_start = t_min
def val_gen ():
pd = period
val = None
t = yield
while True:
# transform time and get the corresponding value
            t = t_start - t_min + t
            if pd is not None:
                t %= pd
            t += t_min
# else still in the first period (and want the whole thing)
new_val = get_val(t)
# if we got a value, yield it
if new_val is not None:
val = new_val
elif pd is None:
# else get_val has ended: we know the period size now
pd = t - t_min
# else yield the previous value (which may be None: if get_val
#: returns None on the first call, we want to yield None)
t = yield val
# start the generator
g = val_gen()
g.next()
return g.send
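# Illustrative usage (not part of the module): a 0 -> 10 ramp over one second
# that restarts from 0 every second; the values are arbitrary.
def _example_interp_repeat():
    ramp = interp_repeat(interp_linear(0, (10, 1)), period=1)
    # ramp(.25) -> 2.5 and ramp(1.25) -> 2.5 again.
    return ramp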
def interp_oscillate (get_val, t_max = None, t_min = 0, t_start = None):
"""Repeat a linear oscillation over an existing interpolation function.
interp_oscillate(get_val[, t_max], t_min = 0, t_start = t_min) -> f
:arg get_val: an existing interpolation function, as taken by
:meth:`Scheduler.interp`.
Times passed to the returned function are looped and reversed to fit in the
range [``t_min``, ``t_max``), starting at ``t_start``. If ``t_start`` is in
the range [``t_max``, ``2 * t_max - t_min``), it is mapped to the 'return
journey' of the oscillation.
If ``t_max`` is not given, it is taken to be the end of ``get_val``. Note that
this will not be entirely accurate, and you're probably better off specifying a
value if you can easily do so.
:return: the ``get_val`` wrapper that oscillates ``get_val`` over the given
range.
"""
if t_start is None:
t_start = t_min
if t_max is not None:
period = t_max - t_min
else:
period = None
def val_gen ():
pd = period
val = None
t = yield
while True:
# transform time and get the corresponding value
            t = t_start - t_min + t
            if pd is not None:
                t %= 2 * pd
                if t >= pd:
                    t = 2 * pd - t
            t += t_min
# else still in the first period (and want the whole thing)
new_val = get_val(t)
# if we got a value, yield it
if new_val is not None:
val = new_val
elif pd is None:
# else get_val has ended: we know the period size now
pd = t - t_min
# else yield the previous value (which may be None: if get_val
#: returns None on the first call, we want to yield None)
t = yield val
# start the generator
g = val_gen()
g.next()
return g.send
class Timer (object):
"""Frame-based timer.
Timer(fps=60)
:arg fps: frames per second to aim for.
"""
def __init__ (self, fps=60):
#: The current length of a frame in seconds.
self.frame = None
#: The current average frame time in seconds (like
#: :attr:`current_fps`).
self.current_frame_time = None
self.fps = fps
#: The amount of time in seconds that has elapsed since the start of
#: the current call to :meth:`run`, if any.
self.t = 0
#: How many seconds the last frame took to run (including calling the
#: ``cb`` argument to :meth:`run` and any sleeping to make up a full
#: frame).
self.elapsed = None
@property
def fps (self):
"""The target FPS. Set this directly."""
return self._fps
@fps.setter
def fps (self, fps):
self._fps = int(round(fps))
self.current_frame_time = self.frame = 1. / fps
@property
def current_fps (self):
"""The current framerate, an average based on
:data:`conf.FPS_AVERAGE_RATIO`.
If this is less than :attr:`fps`, then the timer isn't running at full speed
because of slow calls to the ``cb`` argument to :meth:`run`.
"""
return 1 / self.current_frame_time
def run (self, cb, *args, **kwargs):
"""Run indefinitely or for a specified amount of time.
run(cb, *args[, seconds][, frames]) -> remain
:arg cb: a function to call every frame.
:arg args: extra arguments to pass to cb.
:arg seconds: the number of seconds to run for; can be a float. Accounts for
changes to :attr:`fps`.
:arg frames: the number of frames to run for; can be a float. Ignored if
``seconds`` is passed.
If neither ``seconds`` nor ``frames`` is given, run forever (until :meth:`stop`
is called). Time passed is based on the number of frames that have passed, so
it does not necessarily reflect real time.
:return: the number of seconds/frames left until the timer has been running for
the requested amount of time (or ``None``, if neither were given).
This may be less than ``0`` if ``cb`` took a long time to run.
"""
r = conf.FPS_AVERAGE_RATIO
self.t = 0
self._stopped = False
seconds = kwargs.get('seconds')
frames = kwargs.get('frames')
if seconds is not None:
seconds = max(seconds, 0)
elif frames is not None:
frames = max(frames, 0)
# main loop
t0 = time()
while True:
# call the callback
frame = self.frame
cb(*args)
t_gone = time() - t0
# return if necessary
if self._stopped:
if seconds is not None:
return seconds - t_gone
elif frames is not None:
return frames - t_gone / frame
else:
return None
# check how long to wait until the end of the frame by aiming for a
# rolling frame average equal to the target frame time
frame_t = (1 - r) * self.current_frame_time + r * t_gone
t_left = (frame - frame_t) / r
# reduce wait if we would go over the requested running time
if seconds is not None:
t_left = min(seconds, t_left)
elif frames is not None:
t_left = min(frames * frame, t_left)
# wait
if t_left > 0:
wait(int(1000 * t_left))
t_gone += t_left
frame_t += r * t_left
# update some attributes
t0 += t_gone
self.elapsed = t_gone
self.current_frame_time = frame_t
self.t += t_gone
# return if necessary
if seconds is not None:
seconds -= t_gone
if seconds <= 0:
return seconds
elif frames is not None:
frames -= t_gone / frame
if frames <= 0:
return frames
def stop (self):
"""Stop the current call to :meth:`run`, if any."""
self._stopped = True
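# Illustrative usage (not part of the module): run a Timer for two seconds,
# appending to a list once per frame; the callback is arbitrary.
def _example_timer():
    ticks = []
    timer = Timer(fps=30)
    timer.run(ticks.append, None, seconds=2)
    return len(ticks)  # roughly 60 at 30fps over 2 seconds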
class Scheduler (Timer):
"""Frame-based event scheduler.
Scheduler(fps = 60)
:arg fps: frames per second to aim for.
"""
def __init__ (self, fps = 60):
Timer.__init__(self, fps)
self._cbs = {}
self._max_id = 0
def run (self, seconds = None, frames = None):
"""Start the scheduler.
run([seconds][, frames]) -> remain
Arguments and return value are as for :meth:`Timer.run`.
"""
return Timer.run(self, self._update, seconds = seconds,
frames = frames)
def add_timeout (self, cb, seconds=None, frames=None, repeat_seconds=None,
repeat_frames=None):
"""Call a function after a delay.
add_timeout(cb[, seconds][, frames][, repeat_seconds][, repeat_frames])
-> ident
:arg cb: the function to call.
:arg seconds: how long to wait before calling, in seconds (respects changes to
:attr:`Timer.fps`). If passed, ``frames`` is ignored.
:arg frames: how long to wait before calling, in frames (same number of frames
even if :attr:`Timer.fps` changes).
:arg repeat_seconds: how long to wait between calls, in seconds; time is
determined as for ``seconds``. If passed,
``repeat_frames`` is ignored; if neither is passed, the
initial time delay is used between calls.
:arg repeat_frames: how long to wait between calls, in frames (like
``repeat_seconds``).
:return: a timeout identifier to pass to :meth:`rm_timeout`. This is
guaranteed to be unique over time.
Times can be floats, in which case part-frames are carried over, and time
between calls is actually an average over a large enough number of frames.
``cb`` can return a boolean true object to repeat the timeout; otherwise it
will not be called again.
"""
if seconds is not None:
frames = None
elif frames is None:
raise TypeError('expected \'seconds\' or \'frames\' argument')
if repeat_seconds is not None:
repeat_frames = None
elif repeat_frames is None:
repeat_seconds = seconds
repeat_frames = frames
self._cbs[self._max_id] = [seconds, frames, repeat_seconds,
repeat_frames, True, cb]
self._max_id += 1
# ID is key in self._cbs
return self._max_id - 1
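    # Illustrative usage (not part of the API): the callback names below are
    # hypothetical.
    #
    #     sched = Scheduler(fps=60)
    #     sched.add_timeout(save_game, seconds=.5)                 # fires once
    #     sched.add_timeout(poll_input, frames=1, repeat_frames=2)
    #     sched.run(seconds=5)
    #
    # A callback that returns a true value is rescheduled after its repeat
    # interval; otherwise it is dropped.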
def rm_timeout (self, *ids):
"""Remove the timeouts with the given identifiers.
Missing IDs are ignored.
"""
cbs = self._cbs
for i in ids:
if i in cbs:
del cbs[i]
def pause_timeout (self, *ids):
"""Pause the timeouts with the given identifiers."""
cbs = self._cbs
for i in ids:
if i in cbs:
cbs[i][4] = False
def unpause_timeout (self, *ids):
"""Continue the paused timeouts with the given identifiers."""
cbs = self._cbs
for i in ids:
if i in cbs:
cbs[i][4] = True
def _update (self):
"""Handle callbacks this frame."""
cbs = self._cbs
frame = self.frame
# cbs might add/remove cbs, so use items instead of iteritems
for i, data in cbs.items():
if i not in cbs:
# removed since we called .items()
continue
if data[0] is not None:
remain = 0
dt = frame
else:
remain = 1
dt = 1
if data[4]:
data[remain] -= dt
if data[remain] <= 0:
# call callback
if data[5]():
# add on delay
total = data[2] is None
data[total] += data[total + 2]
elif i in cbs: # else removed in above call
del cbs[i]
# else paused
def interp (self, get_val, set_val, t_max = None, bounds = None,
end = None, round_val = False, multi_arg = False,
resolution = None):
"""Vary a value over time.
interp(get_val, set_val[, t_max][, bounds][, end], round_val = False,
multi_arg = False[, resolution]) -> timeout_id
:arg get_val: a function called with the elapsed time in seconds to obtain the
current value. If this function returns ``None``, the
interpolation will be canceled. The ``interp_*`` functions in
this module can be used to construct such functions.
:arg set_val: a function called with the current value to set it. This may
also be an ``(obj, attr)`` tuple to do ``obj.attr = val``.
:arg t_max: if time becomes larger than this, cancel the interpolation.
:arg bounds: a function that takes the value returned from ``get_val`` and
checks if it is outside of some boundaries, and returns the
boundary value ``bdy`` if so (else None). If the value falls out
of bounds, ``set_val`` is called with ``bdy`` and the
interpolation is canceled.
:arg end: used to do some cleanup when the interpolation is canceled (when
``get_val`` returns ``None`` or ``t_max``, ``val_min`` or ``val_max``
comes into effect, but not when the ``rm_timeout`` method is called
with ``timeout_id``). This can be a final value to pass to
``set_val``, or a function to call without arguments. If the
function returns a (non-``None``) value, ``set_val`` is called with
it.
:arg round_val: whether to round the value(s) (see :func:`interp_round` for
details).
:arg multi_arg: whether values should be interpreted as lists of arguments to
pass to ``set_val`` instead of a single argument.
:arg resolution: 'framerate' to update the value at. If not given, the value
is set every frame it changes; if given, this sets an upper
limit on the number of times per second the value may updated.
The current value of :attr:`fps <Timer.fps>` (which may change
over the interpolation) also puts an upper limit on the rate.
:return: an identifier that can be passed to :meth:`rm_timeout` to remove the
callback that continues the interpolation. In this case ``end`` is not
respected.
"""
if round_val:
get_val = interp_round(get_val, round_val)
if not callable(set_val):
obj, attr = set_val
set_val = lambda val: setattr(obj, attr, val)
def timeout_cb ():
if resolution is not None:
update_frame = 1. / resolution
t = 0
dt = 0
last_v = None
done = False
while True:
frame = self.frame
t += frame
dt += frame
if resolution is None or dt >= update_frame:
if resolution is not None:
dt -= update_frame
# perform an update
v = get_val(t)
if v is None:
done = True
# check bounds
elif t_max is not None and t > t_max:
done = True
else:
if bounds is not None:
bdy = bounds(v)
if bdy is not None:
done = True
v = bdy
if v != last_v:
set_val(*v) if multi_arg else set_val(v)
last_v = v
if done:
# canceling for some reason
if callable(end):
v = end()
else:
v = end
# set final value if want to
if v is not None and v != last_v:
set_val(*v) if multi_arg else set_val(v)
yield False
# just in case we get called again (should never happen)
return
else:
yield True
else:
yield True
return self.add_timeout(timeout_cb().next, frames=1)
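    # Illustrative usage (not part of the API): ease a sprite towards a target
    # position; `sprite` is a hypothetical object with a `pos` attribute.
    #
    #     get_pos = interp_target(sprite.pos, (200, 150), damp=10, threshold=1)
    #     sched.interp(get_pos, (sprite, 'pos'), round_val=True)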
def interp_simple (self, obj, attr, target, t, end_cb = None,
round_val = False):
"""A simple version of :meth:`interp`.
Varies an object's attribute linearly from its current value to a target value
in a set amount of time.
interp_simple(obj, attr, target, t[, end_cb], round_val = False) -> timeout_id
:arg obj: vary an attribute of this object.
:arg attr: the attribute name of ``obj`` to vary.
:arg target: a target value, in the same form as the current value in the given
attribute (see :func:`call_in_nest`).
:arg t: the amount of time to take to reach the target value, in seconds.
:arg end_cb: a function to call when the target value has been reached.
:arg round_val: whether to round the value(s) (see :func:`interp_round` for
details).
:return: an identifier that can be passed to :meth:`rm_timeout` to remove the
callback that continues the interpolation. In this case ``end_cb`` is
not called.
"""
get_val = interp_linear(getattr(obj, attr), (target, t))
return self.interp(get_val, (obj, attr), end = end_cb,
round_val = round_val)
def _interp_locked (self, interp_fn, *args, **kwargs):
# HACK: Python 2 closures aren't great
timeout_id = [None]
def interp (*args, **kwargs):
if timeout_id[0] is not None:
self.rm_timeout(timeout_id[0])
timeout_id[0] = interp_fn(*args, **kwargs)
return timeout_id[0]
return partial(interp, *args, **kwargs)
def interp_locked (self, *args, **kwargs):
"""Generate a :meth:`interp` wrapper that allows only one running
interpolation.
With each successive call, the current interpolation is aborted and a new one
started.
The wrapper is partially applied using the positional and keyword arguments
passed to this function. Typical usage is as follows::
# create the wrapper that knows how to set values
interp = scheduler.interp_locked(set_val=set_val)
[...]
# call it at some point with an interpolation function
interp(get_val)
[...]
# call it again later with a different interpolation function
interp(get_val2)
# only one interpolation is running
"""
return self._interp_locked(self.interp, *args, **kwargs)
def interp_simple_locked (self, *args, **kwargs):
"""Like :meth:`interp_locked`, but wraps :meth:`interp_simple`."""
return self._interp_locked(self.interp_simple, *args, **kwargs)
def counter (self, t, autoreset=False):
"""Create and return a :class:`Counter` that uses this instance for
timing.
counter(t, autoreset=False) -> new_counter
Arguments are as taken by :class:`Counter`.
"""
return Counter(self, t, autoreset)
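# Illustrative usage (not part of the module): a three-second respawn
# countdown; `respawn_player` is a hypothetical callback.
def _example_counter(scheduler, respawn_player):
    countdown = scheduler.counter(3).cb(respawn_player)
    countdown.reset()  # start counting down; the callback fires after 3s
    return countdown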
class Counter (object):
"""A simple way of counting down to an event.
Counter(scheduler, t, autoreset=False)
:arg scheduler: :class:`Scheduler` instance to use for timing.
:arg t: how long a countdown lasts, in seconds.
:arg autoreset: whether to reset and count down from the beginning again when
the countdown ends. This is only useful with :attr:`cbs` (the
finished state never becomes ``True``).
An instance is boolean ``True`` if the countdown has finished, else ``False``.
The initial state is finished---use :meth:`reset` to start the countdown.
See also :meth:`Scheduler.counter`.
"""
def __init__ (self, scheduler, t, autoreset=False):
self._scheduler = scheduler
self._t = t
#: As passed to the constructor.
self.autoreset = autoreset
#: ``set`` of functions to call when the countdown ends.
self.cbs = set()
self._timer_id = None
self._finished = True
@property
def t (self):
"""How long a countdown lasts, in seconds.
Changing this resets the countdown (if running).
"""
return self._t
@t.setter
def t (self, t):
self._t = t
if self._timer_id is not None:
self.reset()
def __nonzero__ (self):
return self._finished
def _end_cb (self):
# called when the timeout ends
if not self.autoreset:
self._timer_id = None
self._finished = True
for cb in self.cbs:
cb()
return self.autoreset
def reset (self):
"""Start counting down from the beginning again.
reset() -> self
Starts counting down even if the countdown wasn't already running.
"""
if self._timer_id is not None:
self._scheduler.rm_timeout(self._timer_id)
self._finished = False
self._timer_id = self._scheduler.add_timeout(self._end_cb, self.t)
return self
def cancel (self):
"""Stop counting down and set the finished state to ``False``.
cancel() -> self
"""
if self._timer_id is not None:
self._scheduler.rm_timeout(self._timer_id)
self._timer_id = None
self._finished = False
return self
def finish (self):
"""Stop counting down and set the finished state to ``True``.
finish() -> self
"""
self.cancel()
self._finished = True
return self
def cb (self, *cbs):
"""Add any number of callbacks to :attr:`cbs`.
cb(*cbs) -> self
Callbacks take no arguments.
"""
self.cbs.update(cbs)
return self
def rm_cbs (self, *cbs):
"""Remove any number of callbacks from :attr:`cbs`.
rm_cbs(*cbs) -> self
Missing items are ignored.
"""
self.cbs.difference_update(cbs)
return self
def pause (self):
"""Pause the counter, if running.
pause() -> self
"""
if self._timer_id is not None:
self._scheduler.pause_timeout(self._timer_id)
return self
def unpause (self):
"""Unpause the counter, if paused.
unpause() -> self
"""
if self._timer_id is not None:
self._scheduler.unpause_timeout(self._timer_id)
return self
| gpl-3.0 | -880,562,481,323,498,800 | 33.288382 | 80 | 0.559569 | false |
methoxid/micropystat | stmhal/build-MSTAT01/pins_af.py | 1 | 2478 | PINS_AF = (
('A1', (1, 'TIM2_CH2'), (2, 'TIM5_CH2'), (7, 'USART2_RTS'), (8, 'UART4_RX'), ),
('CS_DAC', (1, 'TIM2_CH3'), (2, 'TIM5_CH3'), (3, 'TIM9_CH1'), (7, 'USART2_TX'), ),
('MX30', (1, 'TIM2_CH4'), (2, 'TIM5_CH4'), (3, 'TIM9_CH2'), (7, 'USART2_RX'), ),
('A4', (5, 'SPI1_NSS'), (6, 'SPI3_NSS'), (7, 'USART2_CK'), ),
('A5', (1, 'TIM2_CH1'), (1, 'TIM2_ETR'), (3, 'TIM8_CH1N'), (5, 'SPI1_SCK'), ),
('MX31', (1, 'TIM1_BKIN'), (2, 'TIM3_CH1'), (3, 'TIM8_BKIN'), (5, 'SPI1_MISO'), (9, 'TIM13_CH1'), ),
('MX20', (1, 'TIM1_CH1N'), (2, 'TIM3_CH2'), (3, 'TIM8_CH1N'), (5, 'SPI1_MOSI'), (9, 'TIM14_CH1'), ),
('B6', (2, 'TIM4_CH1'), (4, 'I2C1_SCL'), (7, 'USART1_TX'), ),
('B7', (2, 'TIM4_CH2'), (4, 'I2C1_SDA'), (7, 'USART1_RX'), ),
('C4', ),
('C5', ),
('C6_PWM', (2, 'TIM3_CH1'), (3, 'TIM8_CH1'), (8, 'USART6_TX'), ),
('PWR_ANALOG', (2, 'TIM3_CH2'), (3, 'TIM8_CH2'), (8, 'USART6_RX'), ),
('B3', (1, 'TIM2_CH2'), (5, 'SPI1_SCK'), (6, 'SPI3_SCK'), ),
('C0', ),
('C1', ),
('C2', (5, 'SPI2_MISO'), ),
('CS_ADC', (5, 'SPI2_MOSI'), ),
('U3RX', (6, 'SPI3_MISO'), (7, 'USART3_RX'), (8, 'UART4_RX'), ),
('C13', ),
('A10', (1, 'TIM1_CH3'), (7, 'USART1_RX'), ),
('A9', (1, 'TIM1_CH2'), (7, 'USART1_TX'), ),
('B8', (2, 'TIM4_CH3'), (3, 'TIM10_CH1'), (4, 'I2C1_SCL'), ),
('B9', (2, 'TIM4_CH4'), (3, 'TIM11_CH1'), (4, 'I2C1_SDA'), (5, 'SPI2_NSS'), ),
('B12_NSS', (1, 'TIM1_BKIN'), (5, 'SPI2_NSS'), (7, 'USART3_CK'), ),
('SCK', (1, 'TIM1_CH1N'), (5, 'SPI2_SCK'), (7, 'USART3_CTS'), ),
('MISO', (1, 'TIM1_CH2N'), (3, 'TIM8_CH2N'), (5, 'SPI2_MISO'), (7, 'USART3_RTS'), (9, 'TIM12_CH1'), ),
('MOSI', (1, 'TIM1_CH3N'), (3, 'TIM8_CH3N'), (5, 'SPI2_MOSI'), (9, 'TIM12_CH2'), ),
('I2C_SCL', (1, 'TIM2_CH3'), (4, 'I2C2_SCL'), (5, 'SPI2_SCK'), (7, 'USART3_TX'), ),
('I2C_SDA', (1, 'TIM2_CH4'), (4, 'I2C2_SDA'), (7, 'USART3_RX'), ),
('B0', (1, 'TIM1_CH2N'), (2, 'TIM3_CH3'), (3, 'TIM8_CH2N'), ),
('MX21', (1, 'TIM1_CH3N'), (2, 'TIM3_CH4'), (3, 'TIM8_CH3N'), ),
('CS_I2C', (2, 'TIM3_CH1'), (5, 'SPI1_MISO'), (6, 'SPI3_MISO'), ),
('A13', ),
('A14', ),
('A15', (1, 'TIM2_CH1'), (1, 'TIM2_ETR'), (5, 'SPI1_NSS'), (6, 'SPI3_NSS'), ),
('SW', (1, 'TIM2_CH1'), (1, 'TIM2_ETR'), (2, 'TIM5_CH1'), (3, 'TIM8_ETR'), (7, 'USART2_CTS'), (8, 'UART4_TX'), ),
('A8', (1, 'TIM1_CH1'), (4, 'I2C3_SCL'), (7, 'USART1_CK'), ),
('B5', (2, 'TIM3_CH2'), (5, 'SPI1_MOSI'), (6, 'SPI3_MOSI'), ),
('MX11', (1, 'TIM1_CH1N'), ),
('MX10', (1, 'TIM1_CH3N'), ),
)
| mit | -662,254,176,372,448,000 | 56.627907 | 115 | 0.432203 | false |
jsfenfen/senate_disbursements | 112_sdoc4/read_pages.py | 1 | 11367 | import re, csv
header_end = re.compile("\s+START\s+END\s+")
five_data_re = re.compile("\s*([\w\d]+)\s+(\d\d\/\d\d\/\d\d\d\d)\s+(.*?)\s+(\d\d\/\d\d\/\d\d\d\d)\s+(\d\d\/\d\d\/\d\d\d\d)\s*(.+?)\s+([\d\.\-\,]+)\s*\Z")
five_data_missing_date = re.compile("\s*([\w\d]+)\s+(\d\d\/\d\d\/\d\d\d\d)\s+(.*?)\s{10,}(.*?)\s+([\d\.\-\,]+)\s*\Z")
three_data_re = re.compile("\s+(\w[\w\,\s\.\-\']+?)\s{10,}(\w.*?)\s{4,}([\d\.\-\,]+)\s*")
top_matter_end_re = re.compile("\s+DOCUMENT\s+NO\.\s+DATE\s+PAYEE")
funding_year_re = re.compile("\s*Funding\s+Year\s+(\d+)")
blank_line_re = re.compile("\s+\Z")
## many page numbers appear to be missing in this one -- there are spaces inserted.
page_number_re = re.compile("\s+\w\s{0,1}\-\s{0,1}\d+")
page_number_alt_re = re.compile("\s+\w\s{0,1}\-\s{0,1}\d\\s{0,1}-\s{0,1}\d+")
continuation_with_amount_re = re.compile("\s*(.+?)\s{10,}([\d\.\-\,]+)\s+\Z")
travel_re = re.compile("\s+TRAVEL\s+AND\s+TRANSPORTATION\s+OF\s+PERSONS\s+")
it_re = re.compile("\s+INTERDEPARTMENTAL\s+TRANSPORTATION\s+")
ocs_re = re.compile("\s+OTHER\s+CONTRACTUAL\s+SERVICES\s+")
acq_re = re.compile("\s+ACQUISITION\s+OF\s+ASSETS\s+")
prsnl_re = re.compile("\s+PERSONNEL\s+BENEFITS\s+")
netpayroll_re = re.compile("\s+NET\s+PAYROLL\s+EXPENSES\s+")
persnl_comp_re = re.compile("\s+PERSONNEL COMP. FULL-TIME PERMANENT\s+")
other_personal_comp = re.compile("\s+OTHER PERSONNEL COMPENSATION\s+")
def is_subtotal(line):
if travel_re.match(line):
return True
if it_re.match(line):
return True
if ocs_re.match(line):
return True
if acq_re.match(line):
return True
if prsnl_re.match(line):
return True
if netpayroll_re.match(line):
return True
if persnl_comp_re.match(line):
return True
if other_personal_comp.match(line):
return True
return False
def compute_break_position(top_matter):
return None
for whole_line in top_matter:
if top_matter_end_re.match(whole_line):
break
        if blank_line_re.match(whole_line):
continue
return None
def process_top_matter(page_num, top_matter):
#top_matter_top_left_column_delimiter = compute_break_position(top_matter)
top_matter_top_left_column_delimiter = 48
#return None
expense_description = ''
for whole_line in top_matter:
if top_matter_end_re.match(whole_line):
break
line = whole_line[:top_matter_top_left_column_delimiter]
if blank_line_re.match(line):
continue
result = funding_year_re.match(line)
line_stripped = line.strip()
if line_stripped:
expense_description += ' ' + line_stripped + ' '
expense_description = re.sub( '\s+', ' ', expense_description ).strip()
return expense_description
# some carryover lines have amounts in them, and some don't -- that is, they are just extensions of the text field. See, e.g. p. 1672.
def test_carryover_line(line_offset, line):
# are the first n characters of the line empty ?
line_start = line[:line_offset]
if blank_line_re.match(line_start):
line_end = line[line_offset:]
if not blank_line_re.match(line_end):
#print "***possible continuation: %s" % (line_end)
return True
return False
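# Illustrative only: the sample lines below are invented (not taken from a
# real disbursements page) and show which kind of carryover line the amount
# regex picks up.
def _example_carryover_lines():
    with_amount = "          BOSTON AND RETURN              123.45   "
    text_only = "          BOSTON AND RETURN"
    assert continuation_with_amount_re.match(with_amount) is not None
    assert continuation_with_amount_re.match(text_only) is None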
def process_data_lines(page_num, data_lines):
missing_data = []
return_data = []
return_data_index = 0
# these are lines that describe prior lines--typically the travel associated with a per diem or a transportation line. They aren't processed in this step, but instead just recorded in the one_part_continuation_register, and processed after that.
one_part_continuation_register = []
last_line_data_index = None
for data_line in data_lines:
#print "handling %s %s" % (last_line_data_index, data_line)
if blank_line_re.match(data_line):
# don't reset last line data index--sometimes the page number appears in the middle of a page.
continue
if is_subtotal(data_line):
last_line_data_index = None
#assert False
continue
found_data = five_data_re.match(data_line)
if found_data:
#print found_data.groups()
if found_data:
return_data.append(['five data line', False, page_num] + list(found_data.groups()))
return_data_index += 1
#print "index of text description is: " + str(found_data.start(6))
last_line_data_index = str(found_data.start(6))
#print "Five data---last line data index: %s %s" % (last_line_data_index, found_data.groups())
# we need this to figure out if the next line is a continuation or a sub-header type thing.
else:
#pass
found_data2 = three_data_re.match(data_line)
found_data_missing_date = five_data_missing_date.match(data_line)
if found_data2:
results = list(found_data2.groups())
result_formatted = ['three data line', False, page_num, '', '', results[0], '', '', results[1], results[2]]
return_data.append(result_formatted)
return_data_index += 1
last_line_data_index = None
elif (found_data_missing_date):
#print "found missing date line"
results = list(found_data_missing_date.groups())
result_formatted = ['missing date line', False, page_num, results[0], results[1], results[2], '', '', results[3], results[4]]
return_data.append(result_formatted)
return_data_index += 1
last_line_data_index = None
else:
is_page_num = page_number_re.match(data_line)
is_page_num_alt = page_number_alt_re.match(data_line)
if is_page_num or is_page_num_alt:
continue
if last_line_data_index:
#print "running carryover test with n=%s" % (last_line_data_index)
carryover_found = test_carryover_line(int(last_line_data_index), data_line)
if carryover_found:
continuation_data = continuation_with_amount_re.match(data_line)
if continuation_data:
#print "two part continuation found: '" + continuation_data.group(1) + "'-'" + continuation_data.group(2) + "'"
# it's a two part continuation--probably per diem/travel. So add same data as for the first line.
previous_result = return_data[return_data_index-1]
result_formatted = ['continuation_data', True, previous_result[2], previous_result[3], previous_result[4], previous_result[5], previous_result[6], previous_result[7], continuation_data.group(1), continuation_data.group(2)]
return_data.append(result_formatted)
return_data_index += 1
else:
description = data_line.strip()
#print "one part continuation found: '" + description +"'"
register_data = {'array_index':return_data_index, 'data':description}
one_part_continuation_register.append(register_data)
## annoyingly, these descriptions themselves can span over multiple lines.
## e.g. p. 1557:
# WASHINGTON DC TO CHARLESTON, COLUMBIA, CHARLESTON, COLUMBIA, LEXINGTON,
# CLINTON, SPARTANBURG, GREENVILLE, COLUMBIA, AIKEN, COLUMBIA, CHARLESTON AND RETURN
# RETURN
## append it to previous rows.
else:
print "missing <" + data_line + ">"
missing_data.append({'data':data_line, 'offset':return_data_index,'page_num':page_num })
#if one_part_continuation_register:
#print "one_part_continuation_register: %s" % (one_part_continuation_register)
return {'data':return_data, 'register':one_part_continuation_register, 'missing_data':missing_data}
def find_header_index(line_array):
matches = 0
header_index = None
for index, line in enumerate(line_array):
r = header_end.search(line)
if r:
#print "match: %s: %s" % (index, line)
matches += 1
header_index = index
    # fail unless the header appears exactly once per page.
assert matches == 1
return header_index
start_page = 17
end_page = 2306
#start_page = 1945
#end_page = 2109
page_file_unfilled = "pages/layout_%s.txt"
header_index_hash = {}
csvfile = open("senate_data.csv", 'wb')
datawriter = csv.writer(csvfile)
current_description = None
description = None
missing_data_file = open("missing_data.txt", 'w')
pages_to_process = range(start_page, end_page+1)
for page in pages_to_process:
#print "Processing page %s" % page
filename = page_file_unfilled % (page)
fh = open(filename, 'r')
page_array = []
for line in fh:
page_array.append(line)
header_index = find_header_index(page_array)
# keep stats on where we find the index.
try:
header_index_hash[header_index] += 1
except KeyError:
header_index_hash[header_index] = 1
# This is based on research...
if header_index > 6:
top_matter = page_array[:header_index+1]
description = process_top_matter(page, top_matter)
current_description = description
data_lines = page_array[header_index+1:]
data_found = process_data_lines(page, data_lines)
# get the data lines, and the run-on lines.
data_lines = data_found['data']
one_line_continuation_register = data_found['register']
# run through the continuation lines and append them to the right places.
for cl in one_line_continuation_register:
all_related_lines_found = False
current_line_position = cl['array_index']-1
while all_related_lines_found == False:
data_lines[current_line_position][8] = data_lines[current_line_position][8] + " + " + cl['data']
if data_lines[current_line_position][0] != 'continuation_data':
all_related_lines_found = True
else:
# it's a continuation line, so append this to the previous line too.
current_line_position -= 1
for data in data_lines:
datawriter.writerow([current_description] + data)
if data_found['missing_data']:
missing_data_file.write(str(data_found['missing_data']) + "\n")
for k,v in sorted(header_index_hash.items()):
print k,v
"""
header index frequency:
3 1229
21 114
22 11
23 31
24 32
25 11
26 7
27 5
29 11
30 53
31 175
32 192
33 16
34 31
35 40
"""
| bsd-2-clause | 923,991,653,169,949,300 | 37.144295 | 250 | 0.568664 | false |
Jan-zou/LeetCode | python/Stack/32_longest_valid_parentheses.py | 1 | 1080 | # !/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Description:
Given a string containing just the characters '(' and ')', find the length of the longest valid (well-formed) parentheses substring.
For "(()", the longest valid parentheses substring is "()", which has length = 2.
Another example is ")()())", where the longest valid parentheses substring is "()()", which has length = 4.
Tags: Dynamic Programming, String
'''
class Solution(object):
def longestValidParentheses(self, s):
"""
:type s: str
:rtype: int
"""
stack = []
longest, last = 0, -1
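        # stack holds indices of unmatched '('; last is the index of the most
        # recent unmatched ')' (or -1), i.e. the left boundary of the current run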
for i in xrange(len(s)):
if s[i] == '(':
stack.append(i)
elif not stack:
last = i
else:
stack.pop()
if not stack:
longest = max(longest, i-last)
else:
longest = max(longest, i-stack[-1])
return longest
if __name__ == '__main__':
print Solution().longestValidParentheses(')()())')
| mit | 6,137,508,830,261,811,000 | 27.421053 | 136 | 0.516667 | false |
dramatis/dramatis | lib/dramatis/actor/behavior.py | 1 | 1127 | from __future__ import absolute_import
from logging import warning
import dramatis
from dramatis.actor.name import Name as _Name
from dramatis import Runtime
import dramatis.runtime as runtime
from dramatis.actor.interface import Interface as _Interface
class Metaclass(type):
    def __init__(cls, name, bases, dict):
        super(Metaclass, cls).__init__(name, bases, dict)
def __call__( cls, *args, **kwds ):
# See also actor comments ...
interface = dramatis.Actor.Interface( None )
behavior = cls.__new__( cls, *args, **kwds )
class actor_class ( behavior.__class__ ):
@property
def actor( cls ):
return interface
behavior.__class__ = actor_class
behavior.__init__( *args, **kwds )
return behavior
class Behavior(object):
"""Class used as the base of actor behavior classes.
Creating an actor behavior does not create an actor but creates an
object that an actor can "become" while still providing the actor
method hook or value-add operations."""
__metaclass__ = Metaclass
| mit | 1,966,169,237,736,875,000 | 31.2 | 70 | 0.640639 | false |
antoviaque/mail | lmtpd/lmtpd.py | 1 | 1531 | #!/usr/bin/python
# Imports ###########################################################
import asyncore
import daemon
from pymongo import Connection
from smtpd import SMTPChannel, SMTPServer
from config import settings
from mail import Mail
# Classes ###########################################################
class LMTPServer(SMTPServer):
def __init__(self):
localaddr = (settings['lmtpd_host'], settings['lmtpd_port'])
SMTPServer.__init__(self, localaddr, None)
self.db_connect()
def db_connect(self):
self.mongo = Connection('{0}:{1}'.format(settings['db_host'], settings['db_port']))
self.db = self.mongo[settings['db_name']]
if settings['db_user'] and settings['db_password']:
self.db.authenticate(settings['db_user'], settings['db_password'])
def process_message(self, peer, mailfrom, rcpttos, payload):
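        # wrap the raw message and store it as a document in the mails collection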
mail = Mail(peer, mailfrom, rcpttos, payload)
self.db.mails.insert(mail.to_python())
# TODO: Threads - cf http://www.jwz.org/doc/threading.html
def handle_accept(self):
conn, addr = self.accept()
LMTPChannel(self, conn, addr)
class LMTPChannel(SMTPChannel):
# LMTP "LHLO" command is routed to the SMTP/ESMTP command
def smtp_LHLO(self, arg):
self.smtp_HELO(arg)
# Main ##############################################################
def start():
with daemon.DaemonContext():
LMTPServer()
asyncore.loop()
if __name__ == '__main__':
start()
| agpl-3.0 | 3,794,836,578,577,613,000 | 26.836364 | 91 | 0.558459 | false |
odbelix/mnTool | ModulosDevice/Configuration/configOption.py | 1 | 3098 | #!/usr/bin/env python
########################################################################
# RecopilaInformationSNMP.py: discovers devices on a network with the nmap
# command, gathers device information with the snmp tool and Cisco OIDs, and
# updates that information in the database
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
########################################################################
# David Medina Ortiz <[email protected]>
########################################################################
import commands
import sys
import ConfigParser
#create option configuration file
def CreateConfiguration():
cfg = ConfigParser.ConfigParser()
    cfg.read(["/etc/mn_Tools/db_configuration.cfg"])  # read configuration params from the file
print "Configuration Section Data Base?\n<1>Si\n<x>No"
config_db = raw_input(">> ")
#evaluate if wish set section connection
if config_db == "1":
print "Set configuration Host?\n<1>Si\n<x>No"
configure_host = raw_input(">>")
#set configuration host
if configure_host == "1":
print "Input new host"
host = raw_input(">> ")
cfg.set("connection", "host", host)
print "Set configuration name data base?\n<1>Si\n<x>No"
configurate_db = raw_input(">> ")
#set configuration db name
if configurate_db == "1":
print "Input new db name"
db_name = raw_input(">> ")
cfg.set("connection", "data_base", db_name)
print "Set configuration user data base?\n<1>Si\n<x>No"
configuration_user = raw_input(">> ")
#set configuration user db
if configuration_user == "1":
print "Input new name for user db"
user_db = raw_input(">> ")
cfg.set("connection", "user", user_db)
        print "Set configuration password user data base?\n<1>Si\n<x>No"
configuration_pass = raw_input(">> ")
#set configuration password
if configuration_pass == "1":
print "Input new pass for user"
pass_user = raw_input(">> ")
cfg.set("connection", "password", pass_user)
print "Configuration SNMP option?\n<1>Si\n<x>No"
config_snmp = raw_input(">> ")
if config_snmp == "1":
print "Configurate comunity?\n<1>Si\n<x>No"
config_comunity = raw_input(">> ")
#set configuration of comunity
if config_comunity == "1":
print "Input new comunity"
comunity = raw_input(">> ")
cfg.set("SNMP", "comunity", comunity)
file_configuration = open("/etc/mn_Tools/db_configuration.cfg", "w")#create file configuration
cfg.write(file_configuration)
| gpl-2.0 | 4,724,921,019,083,620,000 | 32.673913 | 95 | 0.653325 | false |
gdietz/OpenMEE | common_wizard_pages/ui_bootstrap_page.py | 1 | 4515 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'bootstrap_page.ui'
#
# Created: Wed Oct 16 14:56:05 2013
# by: PyQt4 UI code generator 4.10.2
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_BootstrapPage(object):
def setupUi(self, BootstrapPage):
BootstrapPage.setObjectName(_fromUtf8("BootstrapPage"))
BootstrapPage.resize(371, 293)
self.verticalLayout_2 = QtGui.QVBoxLayout(BootstrapPage)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.label = QtGui.QLabel(BootstrapPage)
self.label.setObjectName(_fromUtf8("label"))
self.horizontalLayout.addWidget(self.label)
self.replicates_spinBox = QtGui.QSpinBox(BootstrapPage)
self.replicates_spinBox.setMinimum(10)
self.replicates_spinBox.setMaximum(100000)
self.replicates_spinBox.setProperty("value", 1000)
self.replicates_spinBox.setObjectName(_fromUtf8("replicates_spinBox"))
self.horizontalLayout.addWidget(self.replicates_spinBox)
self.verticalLayout_2.addLayout(self.horizontalLayout)
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.label_2 = QtGui.QLabel(BootstrapPage)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.horizontalLayout_2.addWidget(self.label_2)
self.plot_path_le = QtGui.QLineEdit(BootstrapPage)
self.plot_path_le.setObjectName(_fromUtf8("plot_path_le"))
self.horizontalLayout_2.addWidget(self.plot_path_le)
self.verticalLayout_2.addLayout(self.horizontalLayout_2)
self.horizontalLayout_3 = QtGui.QHBoxLayout()
self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
self.label_3 = QtGui.QLabel(BootstrapPage)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.horizontalLayout_3.addWidget(self.label_3)
self.plot_title_le = QtGui.QLineEdit(BootstrapPage)
self.plot_title_le.setObjectName(_fromUtf8("plot_title_le"))
self.horizontalLayout_3.addWidget(self.plot_title_le)
self.verticalLayout_2.addLayout(self.horizontalLayout_3)
self.horizontalLayout_4 = QtGui.QHBoxLayout()
self.horizontalLayout_4.setObjectName(_fromUtf8("horizontalLayout_4"))
self.label_4 = QtGui.QLabel(BootstrapPage)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.horizontalLayout_4.addWidget(self.label_4)
self.xlab_le = QtGui.QLineEdit(BootstrapPage)
self.xlab_le.setObjectName(_fromUtf8("xlab_le"))
self.horizontalLayout_4.addWidget(self.xlab_le)
self.verticalLayout_2.addLayout(self.horizontalLayout_4)
spacerItem = QtGui.QSpacerItem(20, 131, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout_2.addItem(spacerItem)
self.retranslateUi(BootstrapPage)
QtCore.QMetaObject.connectSlotsByName(BootstrapPage)
def retranslateUi(self, BootstrapPage):
BootstrapPage.setWindowTitle(_translate("BootstrapPage", "WizardPage", None))
BootstrapPage.setTitle(_translate("BootstrapPage", "Bootstrap Parameters", None))
BootstrapPage.setSubTitle(_translate("BootstrapPage", "Bootstrap parameters", None))
self.label.setText(_translate("BootstrapPage", "# bootstrap replicates:", None))
self.label_2.setText(_translate("BootstrapPage", "Plot Path:", None))
self.plot_path_le.setText(_translate("BootstrapPage", "./r_tmp/bootstrap.png", None))
self.label_3.setText(_translate("BootstrapPage", "Plot Title:", None))
self.plot_title_le.setText(_translate("BootstrapPage", "Bootstrap Histogram", None))
self.label_4.setText(_translate("BootstrapPage", "horizontal label:", None))
self.xlab_le.setText(_translate("BootstrapPage", "Effect Size", None))
| gpl-3.0 | -7,719,177,139,544,901,000 | 50.306818 | 103 | 0.709856 | false |
sistason/pa3 | src/pa3_recognizer/hw_watchdog/main.py | 1 | 2092 | import urequests
import network
import machine
import ntptime
import utime
class Watchdog:
NETWORK = ('pa-debug', 'ahchaiSaph6oomieN3oo')
def __init__(self):
self.downtime = 0
self.relay_powercut = machine.Pin(14, machine.Pin.OUT)
self.relay_powercut.off()
network.WLAN(network.AP_IF).active(False)
self.sta_if = network.WLAN(network.STA_IF)
self.sta_if.active(True)
self.connect_to_AP()
def run(self):
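        # check once a minute; after 60 consecutive failures (about an hour)
        # power-cycle the Pi and then reset this board as well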
while self.downtime < 60:
utime.sleep(60)
if self.check_connectivity():
if self.check_pa_data():
self.downtime = 0
self.update_ntp()
continue
else:
self.connect_to_AP()
self.downtime += 1
self.reset_pi()
self.reset_self()
def check_pa_data(self):
ret = urequests.get("http://pa.freitagsrunde.org/api/02")
if ret.status_code == 200:
timestamp = ret.json().get('entries', {}).get('date')
if timestamp is not None and type(timestamp) is int:
return utime.time()-timestamp < 3600
def update_ntp(self):
try:
ntptime.settime()
except OSError:
pass
def reset_pi(self):
self.relay_powercut.on()
utime.sleep(2)
self.relay_powercut.off()
def reset_self(self):
machine.reset()
def check_connectivity(self):
if self.sta_if.isconnected():
ret = urequests.get("http://pa.freitagsrunde.org")
return ret.status_code == 200
def connect_to_AP(self):
if self.sta_if.isconnected():
return True
self.sta_if.connect(*self.NETWORK)
if __name__ == '__main__':
watchdog = None
try:
watchdog = Watchdog()
watchdog.run()
except:
# Be very sure the Pi is on when the watchdog fails
if watchdog is not None:
watchdog.relay_powercut.on()
else:
machine.Pin(14, machine.Pin.OUT).on()
| gpl-3.0 | -5,140,031,006,544,024,000 | 24.82716 | 65 | 0.545889 | false |
jantman/gw2copilot | setup.py | 1 | 4102 | """
setup.py
The latest version of this package is available at:
<https://github.com/jantman/gw2copilot>
################################################################################
Copyright 2016 Jason Antman <[email protected]> <http://www.jasonantman.com>
This file is part of gw2copilot.
gw2copilot is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
gw2copilot is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with gw2copilot. If not, see <http://www.gnu.org/licenses/>.
The Copyright and Authors attributions contained herein may not be removed or
otherwise altered, except to add the Author attribution of a contributor to
this work. (Additional Terms pursuant to Section 7b of the AGPL v3)
################################################################################
While not legally required, I sincerely request that anyone who finds
bugs please submit them at <https://github.com/jantman/gw2copilot> or
to me via email, and that you send any contributions or improvements
either as a pull request on GitHub, or to me via email.
################################################################################
AUTHORS:
Jason Antman <[email protected]> <http://www.jasonantman.com>
################################################################################
"""
from setuptools import setup, find_packages
from gw2copilot.version import VERSION, PROJECT_URL
with open('README.rst') as file:
long_description = file.read()
# NOTE: when adding dependencies, be sure to add them to
# requirements_docs.txt as well.
requires = [
'requests',
'twisted>=16.0.0,<17.0.0',
'psutil>=4.4.0,<5.0',
'klein>=15.0.0,<16.0.0',
'Jinja2>=2.8.0, <2.9.0',
'autobahn>=0.16.0,<0.17.0',
'pillow>=3.4.0,<4.0.0',
'slimit>=0.8.0,<0.9.0',
'versionfinder>=0.1.0,<0.2.0'
]
classifiers = [
'Development Status :: 2 - Pre-Alpha',
'Environment :: Console',
'Environment :: Web Environment',
'Framework :: Twisted',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',
'Natural Language :: English',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 2 :: Only',
# Programming Language :: Python :: 3
# Programming Language :: Python :: 3.0
# Programming Language :: Python :: 3.1
# Programming Language :: Python :: 3.2
# Programming Language :: Python :: 3.3
# Programming Language :: Python :: 3.4
# Programming Language :: Python :: 3.5
# Programming Language :: Python :: 3.6
# Programming Language :: Python :: 3 :: Only
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Games/Entertainment'
]
setup(
name='gw2copilot',
version=VERSION,
author='Jason Antman',
author_email='[email protected]',
packages=find_packages(),
package_data={
'gw2copilot': [
'templates/*.html',
'static/*.*'
]
},
url=PROJECT_URL,
description='gw2copilot is a browser-based "helper" for Guild Wars '
'2, to automate manual tasks that players currently perform '
'out of the game.',
long_description=long_description,
keywords="gw2 guildwars arenanet mumble mumblelink",
classifiers=classifiers,
install_requires=requires,
entry_points="""
[console_scripts]
gw2copilot = gw2copilot.runner:console_entry_point
""",
)
| agpl-3.0 | -4,623,683,879,201,383,000 | 36.290909 | 89 | 0.623111 | false |
fedora-infra/hrf | hrf/hrf.py | 1 | 4008 | from flask import Flask, Response, request, jsonify
import fedmsg.config
import fedmsg.meta
import json
import datetime
import pretty
import requests
from pytz import timezone, UnknownTimeZoneError
meta_config = fedmsg.config.load_config([], None)
fedmsg.meta.make_processors(**meta_config)
app = Flask(__name__)
app.debug = True
def _timestamp(message, user_timezone):
'''Return a dict containing the timestamp in a bunch of formats.'''
ts = message['timestamp']
ts_obj = datetime.datetime.fromtimestamp(ts)
utc = timezone('UTC')
utc_obj = utc.normalize(utc.localize(ts_obj))
localized = utc_obj.astimezone(timezone(user_timezone))
return {
'ago': pretty.date(ts_obj),
'iso': localized.isoformat(),
'usadate': localized.strftime("%m/%d/%Y"),
'fulldate': localized.strftime("%A, %B %d, %Y"),
'time': localized.strftime("%H:%M"),
'epoch': str(ts),
}
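# maps URL endpoint names to the corresponding fedmsg.meta msg2* helpers;
# 'timestamp' and 'all' are handled as special cases in route()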
meta_methods = {
#'avatars': fedmsg.meta.msg2avatars,
#'emails': fedmsg.meta.msg2emails,
'icon': fedmsg.meta.msg2icon,
'link': fedmsg.meta.msg2link,
'objects': fedmsg.meta.msg2objects,
'packages': fedmsg.meta.msg2packages,
'repr': fedmsg.meta.msg2repr,
'secondary_icon': fedmsg.meta.msg2secondary_icon,
'subtitle': fedmsg.meta.msg2subtitle,
'title': fedmsg.meta.msg2title,
'usernames': fedmsg.meta.msg2usernames,
'timestamp': _timestamp,
'all': str,
}
@app.route("/")
def usage():
methods = '\n'.join([
'/' + name
for name in sorted(meta_methods.keys())
])
return Response(
"""Welcome to hrf - the Human Readable frontend to Fedmsg.
To use hrf, simply POST a list of fedmsg messages from datagrepper to any of the
endpoints below.
The names of the endpoints reflect the names of the fedmsg.meta API
methods. For example POSTing to /title will return fedmsg.meta.msg2title()
and POSTing to /repr will return fedmsg.meta.msg2repr().
If you 'GET' instead, we will query datagrepper on your behalf, sending it your
querystring.
Available endpoints:
%s
""" % methods,
mimetype='text/plain')
@app.route("/<api_method>", methods=['POST', 'GET'])
def route(api_method):
parsed = {}
if request.method == 'GET':
qs = request.query_string
r = requests.get('https://apps.fedoraproject.org/datagrepper/raw/?' + qs)
if r.status_code != 200:
return jsonify({"error": "Datagrepper returned non-200 response code."}), 400
else:
parsed = r.json()['raw_messages']
else:
parsed = json.loads(request.data)
user_timezone = request.args.get('timezone', 'UTC')
# Sigh.
if isinstance(parsed, dict):
parsed = [parsed]
results = []
try:
if api_method not in meta_methods:
return jsonify({"error": "That method was invalid."}), 404
for message in parsed:
if api_method == 'all':
# Return a JSON dict of all HR responses
values = {}
for name in meta_methods.keys():
if name == 'all':
continue
elif name == 'timestamp':
result = meta_methods[name](message, user_timezone)
else:
result = meta_methods[name](message)
if isinstance(result, set):
result = list(result)
values[name] = result
results.append(values)
elif api_method == 'timestamp':
method = meta_methods[api_method]
results.append(method(message, user_timezone))
else:
method = meta_methods[api_method]
results.append(method(message))
except UnknownTimeZoneError as e:
return jsonify({"error": "Invalid timezone parameter."}), 400
else:
return jsonify({'results': results})
if __name__ == "__main__":
app.run()
| lgpl-2.1 | 776,196,798,319,913,500 | 29.830769 | 89 | 0.598303 | false |
juancarlospaco/webutil | main.py | 1 | 17750 | # -*- coding: utf-8 -*-
# PEP8:NO, LINT:OK, PY3:OK
#############################################################################
## This file may be used under the terms of the GNU General Public
## License version 2.0 or 3.0 as published by the Free Software Foundation
## and appearing in the file LICENSE.GPL included in the packaging of
## this file. Please review the following information to ensure GNU
## General Public Licensing requirements will be met:
## http:#www.fsf.org/licensing/licenses/info/GPLv2.html and
## http:#www.gnu.org/copyleft/gpl.html.
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#############################################################################
# metadata
" Ninja Web Util "
__version__ = ' 0.8 '
__license__ = ' GPL '
__author__ = ' juancarlospaco '
__email__ = ' [email protected] '
__url__ = ''
__date__ = ' 15/08/2013 '
__prj__ = ' webutil '
__docformat__ = 'html'
__source__ = ''
__full_licence__ = ''
# imports
from os import path
from sip import setapi
try:
from urllib.request import urlopen
except ImportError:
from urllib import urlopen # lint:ok
from PyQt4.QtGui import (QLabel, QCompleter, QDirModel, QPushButton, QWidget,
QFileDialog, QDockWidget, QVBoxLayout, QCursor, QLineEdit, QIcon, QGroupBox,
QCheckBox, QGraphicsDropShadowEffect, QGraphicsBlurEffect, QColor, QComboBox,
QApplication, QMessageBox, QScrollArea, QProgressBar)
from PyQt4.QtCore import Qt, QDir
try:
from PyKDE4.kdeui import KTextEdit as QPlainTextEdit
except ImportError:
from PyQt4.QtGui import QPlainTextEdit # lint:ok
from ninja_ide.gui.explorer.explorer_container import ExplorerContainer
from ninja_ide.core import plugin
from css_minifica import *
from html_minifica import *
from js_minifica import *
# API 2
(setapi(a, 2) for a in ("QDate", "QDateTime", "QString", "QTime", "QUrl",
"QTextStream", "QVariant"))
# constans
HELPMSG = '''
<h3>Ninja Web Util</h3>
This is an HTML5/CSS3/JS Optimizer Non-Obfuscating Compressor tool for Ninja.
<ul>
<li>Compresses better on average than YUI Compressor
<li>The only tool to remove optional HTML tags
<li>The only tool to compress HTML tags
<li>The only tool to compress Percentage/Pixel CSS values
<li>Does Not Obfuscate JS (it's a feature or a bug, you decide)
</ul>
<br><br>
''' + ''.join((__doc__, __version__, __license__, 'by', __author__, __email__))
SAMPLE_TEXT = '''
/* -----------------------------------------------------------------------------
####################TEST SAMPLE, THIS COMMENT WILL BE REMOVED###################
----------------------------------------------------------------------------- */
.chun.li {
color: rgb(255, 255, 255);
width: 100%;
height: 1000px;
font-weight: normal;
backgroud: url("example.com/img.gif");
color: #00ff00;
line-height: 0.5;
border: 0px solid yellow;
} ;;
empty.selector.will.be.removed {}
/*--------------------------------------------------------------------------- */
'''
###############################################################################
class Main(plugin.Plugin):
" Main Class "
def initialize(self, *args, **kwargs):
" Init Main Class "
ec = ExplorerContainer()
super(Main, self).initialize(*args, **kwargs)
self.editor_s = self.locator.get_service('editor')
# directory auto completer
self.completer = QCompleter(self)
self.dirs = QDirModel(self)
self.dirs.setFilter(QDir.AllEntries | QDir.NoDotAndDotDot)
self.completer.setModel(self.dirs)
self.completer.setCaseSensitivity(Qt.CaseInsensitive)
self.completer.setCompletionMode(QCompleter.PopupCompletion)
self.group0 = QGroupBox()
self.group0.setTitle(' Source ')
self.source = QComboBox()
self.source.addItems(['Clipboard', 'Local File', 'Remote URL', 'Ninja'])
self.source.currentIndexChanged.connect(self.on_source_changed)
self.infile = QLineEdit(path.expanduser("~"))
self.infile.setPlaceholderText(' /full/path/to/file.html ')
self.infile.setCompleter(self.completer)
self.open = QPushButton(QIcon.fromTheme("folder-open"), 'Open')
self.open.setCursor(QCursor(Qt.PointingHandCursor))
self.open.clicked.connect(lambda: self.infile.setText(str(
QFileDialog.getOpenFileName(self.dock, "Open a File to read from",
path.expanduser("~"), ';;'.join(['{}(*.{})'.format(e.upper(), e)
for e in ['css', 'html', 'js', 'txt', '*']])))))
self.inurl = QLineEdit('http://www.')
self.inurl.setPlaceholderText('http://www.full/url/to/remote/file.html')
self.output = QPlainTextEdit(SAMPLE_TEXT)
vboxg0 = QVBoxLayout(self.group0)
for each_widget in (self.source, self.infile, self.open, self.inurl,
self.output, ):
vboxg0.addWidget(each_widget)
[a.hide() for a in iter((self.infile, self.open, self.inurl))]
self.group1 = QGroupBox()
self.group1.setTitle(' CSS3 ')
self.group1.setCheckable(True)
self.group1.setGraphicsEffect(QGraphicsBlurEffect(self))
self.group1.graphicsEffect().setEnabled(False)
self.group1.toggled.connect(self.toggle_css_group)
self.ckcss1 = QCheckBox('Remove unnecessary Comments')
self.ckcss2 = QCheckBox('Remove unnecessary Whitespace characters')
self.ckcss3 = QCheckBox('Remove unnecessary Semicolons')
self.ckcss4 = QCheckBox('Remove unnecessary Empty rules')
self.ckcss5 = QCheckBox('Condense and Convert Colors from RGB to HEX')
self.ckcss6 = QCheckBox('Condense all Zero units')
self.ckcss7 = QCheckBox('Condense Multidimensional Zero units')
self.ckcss8 = QCheckBox('Condense Floating point numbers')
self.ckcss9 = QCheckBox('Condense HEX Colors')
self.ckcss10 = QCheckBox('Condense multiple adjacent Whitespace chars')
self.ckcss11 = QCheckBox('Condense multiple adjacent semicolon chars')
        self.ckcss12 = QCheckBox('Wrap the lines to 80 characters')
self.ckcss13 = QCheckBox('Condense Font Weight values')
self.ckcss14 = QCheckBox('Condense the 17 Standard Named Colors values')
self.ckcss15 = QCheckBox('Condense the 124 Extra Named Colors values')
self.ckcss16 = QCheckBox('Condense all Percentages values when posible')
self.ckcss17 = QCheckBox('Condense all Pixels values when posible')
self.ckcss18 = QCheckBox('Remove unnecessary quotes from url()')
self.ckcss19 = QCheckBox('Add standard Encoding Declaration if missing')
vboxg1 = QVBoxLayout(self.group1)
for each_widget in (self.ckcss1, self.ckcss2, self.ckcss3, self.ckcss4,
self.ckcss5, self.ckcss6, self.ckcss7, self.ckcss8, self.ckcss9,
self.ckcss10, self.ckcss11, self.ckcss12, self.ckcss13,
self.ckcss14, self.ckcss15, self.ckcss16, self.ckcss17,
self.ckcss18, self.ckcss19):
vboxg1.addWidget(each_widget)
each_widget.setToolTip(each_widget.text())
self.group2 = QGroupBox()
self.group2.setTitle(' HTML5 ')
self.group2.setCheckable(True)
self.group2.setGraphicsEffect(QGraphicsBlurEffect(self))
self.group2.graphicsEffect().setEnabled(False)
self.group2.toggled.connect(self.toggle_html_group)
self.ckhtml0 = QCheckBox('Condense Style and Script HTML Tags')
self.ckhtml1 = QCheckBox('Condense DOCTYPE to new HTML5 Tags')
self.ckhtml2 = QCheckBox('Condense Href and Src to protocol agnostic')
self.ckhtml4 = QCheckBox('Remove unnecessary Tags but keep HTML valid')
self.help1 = QLabel('''<a href=
"https://developers.google.com/speed/articles/optimizing-html">
<small><center>Help about Unneeded Unnecessary HTML tags ?</a>''')
self.help1.setTextInteractionFlags(Qt.LinksAccessibleByMouse)
self.help1.setOpenExternalLinks(True)
vboxg2 = QVBoxLayout(self.group2)
for each_widget in (self.ckhtml0, self.ckhtml1, self.ckhtml2,
self.ckhtml4, self.help1, ):
vboxg2.addWidget(each_widget)
each_widget.setToolTip(each_widget.text())
self.group3 = QGroupBox()
self.group3.setTitle(' Javascript ')
self.ckjs0 = QCheckBox('Condense and Compress Javascript')
self.ckjs1 = QCheckBox('Condense $(document).ready(function(){ });')
vboxg2 = QVBoxLayout(self.group3)
for each_widget in (self.ckjs0, self.ckjs1):
vboxg2.addWidget(each_widget)
each_widget.setToolTip(each_widget.text())
self.group4 = QGroupBox()
self.group4.setTitle(' General ')
self.chckbx1 = QCheckBox('Lower case ALL the text')
self.chckbx2 = QCheckBox('Remove Spaces, Tabs, New Lines, Empty Lines')
self.befor, self.after = QProgressBar(), QProgressBar()
self.befor.setFormat("%v Chars")
self.after.setFormat("%v Chars")
vboxg4 = QVBoxLayout(self.group4)
for each_widget in (self.chckbx1, self.chckbx2,
QLabel('<b>Before:'), self.befor, QLabel('<b>After:'), self.after):
vboxg4.addWidget(each_widget)
each_widget.setToolTip(each_widget.text())
[a.setChecked(True) for a in iter((self.ckcss1, self.ckcss2,
self.ckcss3, self.ckcss4, self.ckcss5, self.ckcss6, self.ckcss7,
self.ckcss8, self.ckcss9, self.ckcss10, self.ckcss11, self.ckcss12,
self.ckcss13, self.ckcss14, self.ckcss15, self.ckcss16,
self.ckcss17, self.ckcss18, self.ckcss19, self.ckjs1, self.ckhtml0,
self.ckhtml1, self.ckhtml2, self.ckhtml4, self.chckbx1,
self.chckbx2))]
self.button = QPushButton(QIcon.fromTheme("face-cool"), 'Process Text')
self.button.setCursor(QCursor(Qt.PointingHandCursor))
self.button.setMinimumSize(100, 50)
self.button.clicked.connect(self.run)
def must_glow(widget_list):
' apply an glow effect to the widget '
for glow, each_widget in enumerate(widget_list):
try:
if each_widget.graphicsEffect() is None:
glow = QGraphicsDropShadowEffect(self)
glow.setOffset(0)
glow.setBlurRadius(99)
glow.setColor(QColor(99, 255, 255))
each_widget.setGraphicsEffect(glow)
glow.setEnabled(True)
except:
pass
must_glow((self.button, ))
class TransientWidget(QWidget):
' persistant widget thingy '
def __init__(self, widget_list):
' init sub class '
super(TransientWidget, self).__init__()
vbox = QVBoxLayout(self)
for each_widget in widget_list:
vbox.addWidget(each_widget)
tw = TransientWidget((QLabel('<b>HTML5/CSS3/JS Optimizer Compressor'),
self.group0, self.group1, self.group2, self.group3, self.group4,
self.button, ))
self.scrollable = QScrollArea()
self.scrollable.setWidgetResizable(True)
self.scrollable.setWidget(tw)
self.dock = QDockWidget()
self.dock.setWindowTitle(__doc__)
self.dock.setStyleSheet('QDockWidget::title{text-align: center;}')
self.dock.setMinimumWidth(350)
self.dock.setWidget(self.scrollable)
ec.addTab(self.dock, "Web")
QPushButton(QIcon.fromTheme("help-about"), 'About', self.dock
).clicked.connect(lambda: QMessageBox.information(self.dock, __doc__,
HELPMSG))
def run(self):
' run the string replacing '
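        # read the text from the selected source, apply every enabled pass in
        # sequence, update the before/after character counters and show the result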
if self.source.currentText() == 'Local File':
with open(path.abspath(str(self.infile.text()).strip()), 'r') as f:
txt = f.read()
elif self.source.currentText() == 'Remote URL':
txt = urlopen(str(self.inurl.text()).strip()).read()
elif self.source.currentText() == 'Clipboard':
            txt = str(self.output.toPlainText()) if str(self.output.toPlainText()) != '' else str(QApplication.clipboard().text())
else:
txt = self.editor_s.get_text()
self.output.clear()
self.befor.setMaximum(len(txt) + 10)
self.after.setMaximum(len(txt) + 10)
self.befor.setValue(len(txt))
txt = txt.lower() if self.chckbx1.isChecked() is True else txt
txt = condense_style(txt) if self.ckhtml0.isChecked() is True else txt
txt = condense_script(txt) if self.ckhtml0.isChecked() is True else txt
txt = condense_doctype(txt) if self.ckhtml1.isChecked() is True else txt
        txt = condense_href_src(txt) if self.ckhtml2.isChecked() is True else txt
txt = clean_unneeded_tags(txt) if self.ckhtml4.isChecked() is True else txt
txt = condense_doc_ready(txt) if self.ckjs1.isChecked() is True else txt
txt = jsmin(txt) if self.ckjs0.isChecked() is True else txt
txt = remove_comments(txt) if self.ckcss1.isChecked() is True else txt
txt = condense_whitespace(txt) if self.ckcss10.isChecked() is True else txt
txt = remove_empty_rules(txt) if self.ckcss4.isChecked() is True else txt
txt = remove_unnecessary_whitespace(txt) if self.ckcss2.isChecked() is True else txt
txt = remove_unnecessary_semicolons(txt) if self.ckcss3.isChecked() is True else txt
txt = condense_zero_units(txt) if self.ckcss6.isChecked() is True else txt
txt = condense_multidimensional_zeros(txt) if self.ckcss7.isChecked() is True else txt
txt = condense_floating_points(txt) if self.ckcss8.isChecked() is True else txt
txt = normalize_rgb_colors_to_hex(txt) if self.ckcss5.isChecked() is True else txt
txt = condense_hex_colors(txt) if self.ckcss9.isChecked() is True else txt
txt = wrap_css_lines(txt, 80) if self.ckcss12.isChecked() is True else txt
txt = condense_semicolons(txt) if self.ckcss11.isChecked() is True else txt
txt = condense_font_weight(txt) if self.ckcss13.isChecked() is True else txt
txt = condense_std_named_colors(txt) if self.ckcss14.isChecked() is True else txt
# txt = condense_xtra_named_colors(txt) if self.ckcss14.isChecked() is True else txt # FIXME
txt = condense_percentage_values(txt) if self.ckcss16.isChecked() is True else txt
txt = condense_pixel_values(txt) if self.ckcss17.isChecked() is True else txt
txt = remove_url_quotes(txt) if self.ckcss18.isChecked() is True else txt
txt = add_encoding(txt) if self.ckcss19.isChecked() is True else txt
txt = " ".join(txt.strip().split()) if self.chckbx2.isChecked() is True else txt
self.after.setValue(len(txt))
self.output.setPlainText(txt)
self.output.show()
self.output.setFocus()
self.output.selectAll()
def on_source_changed(self):
' do something when the desired source has changed '
if self.source.currentText() == 'Local File':
self.open.show()
self.infile.show()
self.inurl.hide()
self.output.hide()
elif self.source.currentText() == 'Remote URL':
self.inurl.show()
self.open.hide()
self.infile.hide()
self.output.hide()
elif self.source.currentText() == 'Clipboard':
self.output.show()
self.open.hide()
self.infile.hide()
self.inurl.hide()
self.output.setText(QApplication.clipboard().text())
else:
self.output.show()
self.open.hide()
self.infile.hide()
self.inurl.hide()
self.output.setText(self.editor_s.get_text())
def toggle_css_group(self):
' toggle on or off the css checkboxes '
if self.group1.isChecked() is True:
[a.setChecked(True) for a in iter((self.ckcss1, self.ckcss2,
self.ckcss3, self.ckcss4, self.ckcss5, self.ckcss6, self.ckcss7,
self.ckcss8, self.ckcss9, self.ckcss10, self.ckcss11, self.ckcss12,
self.ckcss13, self.ckcss14, self.ckcss15, self.ckcss16,
self.ckcss17, self.ckcss18, self.ckcss19))]
self.group1.graphicsEffect().setEnabled(False)
else:
[a.setChecked(False) for a in iter((self.ckcss1, self.ckcss2,
self.ckcss3, self.ckcss4, self.ckcss5, self.ckcss6, self.ckcss7,
self.ckcss8, self.ckcss9, self.ckcss10, self.ckcss11, self.ckcss12,
self.ckcss13, self.ckcss14, self.ckcss15, self.ckcss16,
self.ckcss17, self.ckcss18, self.ckcss19))]
self.group1.graphicsEffect().setEnabled(True)
def toggle_html_group(self):
' toggle on or off the css checkboxes '
if self.group2.isChecked() is True:
[a.setChecked(True) for a in iter((self.ckhtml0, self.ckhtml1,
self.ckhtml2, self.ckhtml4))]
self.group2.graphicsEffect().setEnabled(False)
else:
[a.setChecked(False) for a in iter((self.ckhtml0, self.ckhtml1,
self.ckhtml2, self.ckhtml4))]
self.group2.graphicsEffect().setEnabled(True)
###############################################################################
if __name__ == "__main__":
print(__doc__)
| gpl-3.0 | -5,969,184,844,555,903,000 | 45.103896 | 134 | 0.614817 | false |
michael42/androidcrypt.py | androidcrypt.py | 1 | 20341 | #!/usr/bin/python
"""
androidcrypt.py allows access to Android's encrypted partitions from a
recovery image.
Copyright (C) 2012 Michael Zugelder
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
import subprocess
import sys
import os
import uu
import getpass
import binascii
from io import BytesIO
import cryptfooter
import aes
from pbkdf2 import pbkdf2_bin
# constants from vold/cryptfs.h
HASH_COUNT = 2000
KEY_LEN_BYTES = 16
IV_LEN_BYTES = 16
ADB_LINE_ENDINGS = None
FOOTER = None
MASTER_KEY = None
def main():
if not check_adb(): return
if not check_recovery(): return
if not check_dmcrypt_support(): return
if not check_dmsetup(): return
fstab_entries = get_fstab_entries()
if not fstab_entries: return
encrypted_filesystems = get_encrypted_filesystems(fstab_entries)
if not encrypted_filesystems: return
for fstab_entry in encrypted_filesystems:
if not setup_mapping(fstab_entry): return
if not mount_dmcrypt(fstab_entry): return
def check_adb():
print_progress('Checking if adb is available... ')
try:
version = subprocess.check_output(['adb', 'version'])
except OSError as e:
print_error(str(e) + '\n'
'Please make sure you have the Android SDK installed '
'and correctly set up the $PATH environment variable.')
return
print_info('found {}'.format(version.strip()))
return True
def check_recovery():
print_progress('Looking for a device in recovery mode... ')
adb_devices = subprocess.check_output(['adb', 'devices'])
devices = adb_devices.splitlines()[1:-1]
if len(devices) == 0:
print_error("No android devices found. Check 'adb devices' output.")
return
elif len(devices) > 1:
print_error('More than one device connected. This is not supported '
'yet, please connect only a single device.')
return
device = devices[0]
devid, _, state = device.partition('\t')
if state != 'recovery':
print_error(("Device '{}' is in '{}' state, please reboot into "
"recovery.").format(devid, state))
return
print_info('found {}'.format(devid))
return True
def check_dmcrypt_support():
if not check_kernel_config(): return
if not check_proc_crypto(): return
return True
def check_kernel_config():
required = ['CONFIG_DM_CRYPT', 'CONFIG_CRYPTO_AES',
'CONFIG_CRYPTO_CBC', 'CONFIG_CRYPTO_SHA256']
print_progress('Getting kernel config... ')
try:
config = adb_shell('zcat /proc/config.gz')
print_info('ok')
except AdbShellException:
print_info('could not load config, skipping checks')
return True
config_lines = config.splitlines()
def contains_config_line(config_lines, config):
for line in config_lines:
if line.startswith(req + '='):
return True
return False
for req in required:
print_progress('Checking the kernel for {}... '.format(req))
if contains_config_line(config_lines, req):
print_info('ok')
else:
            print_error("The recovery kernel doesn't support the necessary "
"crypto features. You could try to boot an updated "
"version of the recovery with fastboot.")
return
return True
def check_proc_crypto():
print_progress('Getting /proc/crypto... ')
try:
crypto = adb_shell('grep name /proc/crypto | cut -d: -f2')
print_info('ok')
except AdbShellException as e:
print_error('could not get crypto support data: ' + e.output)
return
names = [ name.strip() for name in crypto.splitlines() ]
required_names = ['aes', 'sha256']
for required in required_names:
print_progress("Checking if '{}' is available... ".format(required))
if required in names:
print_info('ok')
else:
print_error('Required crypto mode not detected.')
return
# It seems that /proc/crypto does not initially supply the complete list
# of all supported crypto modes. Directly after booting, there is no
# 'cbc(aes)', but after setting up the mapping with dmsetup, it is
# listed, despite not loading any new kernel modules.
print_progress("Checking if 'cbc(aes)' is available... ")
if 'cbc(aes)' in names:
print_info('ok')
else:
# try it nevertheless
print_info('ignoring')
return True
def check_dmsetup(auto_install = True):
print_progress('Checking if dmsetup exists... ')
try:
adb_shell('[ -f "$(which dmsetup)" ]')
print_info('binary found')
except:
if auto_install:
print_info('not found')
return install_dmsetup()
else:
print_error("Binary was copied but still doesn't exist")
return
if not chmod_dmsetup(): return
print_progress('Checking dmsetup version... ')
try:
version = adb_shell('dmsetup --version')
except AdbShellException as e:
print_error(str(e))
return
lines = version.splitlines()
try:
library = lines[0].split(':')[1].strip()
driver = lines[1].split(':')[1].strip()
print_info('lib: {}, driver: {}'.format(library, driver))
return True
except Exception as e:
print_error(str(e) + '\n'
'Output was:\n' + version)
def install_dmsetup():
print_progress('Installing dmsetup binary... ')
try:
pushed = subprocess.check_output(
['adb', 'push', 'dmsetup', '/sbin/'],
stderr=subprocess.STDOUT)
print_info(pushed.strip())
return check_dmsetup(auto_install = False)
except subprocess.CalledProcessError as e:
print_error('adb push reported the following error:')
print(e.output)
return
def chmod_dmsetup():
    """
Makes dmsetup executable, this is necessary when the source file was not
executable to begin with, as on Windows systems or when the executable
bit got lost.
"""
print_progress('Checking dmsetup permissions... ')
try:
adb_shell('[ -x "$(which dmsetup)" ] || chmod +x "$(which dmsetup)"')
except AdbShellException as e:
print_error('Could not make dmsetup executable, reason:\n' + str(e))
return
print_info('success')
return True
class FstabEntry():
def __init__(self, line):
try:
fields = line.split()
self.block_dev = fields[0]
self.block_dev_name = os.path.basename(self.block_dev)
self.mount_point = fields[1]
self.fs = fields[2]
except:
raise Exception('Malformed fstab line: ' + line)
def __str__(self):
return '{} -> {} ({})' \
.format(self.block_dev_name, self.mount_point, self.fs)
def get_fstab_entries():
print_progress('Getting partition config... ')
fstab = adb_shell('cat /etc/fstab')
try:
fstab_entries = [FstabEntry(line) for line in fstab.splitlines()]
except Exception as e:
print_error(e)
return
name_set = set([e.block_dev_name for e in fstab_entries])
if len(name_set) < len(fstab_entries):
print_error('There are duplicate block device names.')
return
print_info('found {} partitions:'.format(len(fstab_entries)))
encrypted, ignored = filter_encrypted_fstab_entries(fstab_entries)
for entry in ignored:
print(' {}, ignoring'.format(entry))
for entry in encrypted:
print(' {}, potentially encrypted'.format(entry))
return encrypted
def filter_encrypted_fstab_entries(fstab):
names = ['userdata', 'media']
mount_points = ['/data', '/sdcard']
interesting = [e for e in fstab
if e.block_dev_name in names or e.mount_point in mount_points]
ignored = [e for e in fstab if not e in interesting]
return interesting, ignored
def get_encrypted_filesystems(fstab_entries):
encrypted_filesystems = []
for fstab_entry in fstab_entries:
encrypted = check_if_encrypted(fstab_entry)
if encrypted == True:
encrypted_filesystems.append(fstab_entry)
elif encrypted == False:
pass
else:
# an error from check_if_encrypted
return
return encrypted_filesystems
def check_if_encrypted(fstab_entry):
print_progress('Trying to mount {}... '.format(fstab_entry.mount_point))
mounts = adb_shell('cat /proc/mounts')
mount_entries = [ line.split() for line in mounts.splitlines() ]
mount_entry = [ e for e in mount_entries
if e[1] == fstab_entry.mount_point ]
if mount_entry:
print_info('already mounted')
return False
try:
mount(fstab_entry.block_dev, fstab_entry.mount_point)
print_info('success -> not encrypted')
return False
except Exception as e:
if str(e) == "Invalid argument":
print_info("error -> probably encrypted")
return True
else:
print_error(e)
return None # unknown error, don't continue
def setup_mapping(fstab_entry):
global FOOTER # for crypt_type_name
global MASTER_KEY
if not FOOTER:
FOOTER = block_dev_get_crypto_footer(fstab_entry.block_dev)
if FOOTER == None: return
if not FOOTER:
print_info('not found, looking in /efs')
if not setup_efs(): return
FOOTER = load_footer_from_efs()
if not FOOTER: return
if not MASTER_KEY:
MASTER_KEY = decrypt_master_key(FOOTER.encrypted_master_key,
FOOTER.salt)
return dmsetup_create(fstab_entry.block_dev, fstab_entry.block_dev_name,
FOOTER.crypt_type_name, MASTER_KEY)
def decrypt_master_key(encrypted_master_key, salt):
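    # the master key in the crypto footer is AES-128-CBC encrypted with a
    # key/IV derived from the user passphrase and the footer salt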
passphrase = getpass.getpass("Passphrase: ")
key, iv = get_key_and_iv(passphrase, salt)
return aes128_cbc_decrypt(encrypted_master_key, key, iv)
def dmsetup_create(source_device, target_name, crypto_algorithm, master_key):
print_progress('Calling dmcrypt to set up device... ')
size = block_dev_get_size_in_512_bytes(source_device)
keystr = binascii.hexlify(master_key)
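    # dm-crypt table: <start> <size> crypt <cipher> <key hex> <iv offset> <device> <offset>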
table = '0 %d crypt %s %s 0 %s 0' \
% (size, crypto_algorithm, keystr, source_device)
cmd = "dmsetup create {} --table '{}'".format(target_name, table)
try:
adb_shell(cmd)
except AdbShellException as e:
print_error('Error calling dmsetup, output was:\n' + e.output)
return
print_info('success')
return True
def get_key_and_iv(passphrase, salt):
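    # derive 16 key bytes plus 16 IV bytes from the passphrase with PBKDF2,
    # using the HASH_COUNT/KEY_LEN/IV_LEN constants from vold/cryptfs.h above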
keyiv = pbkdf2_bin(passphrase, salt,
iterations = HASH_COUNT, keylen = 32)
key = keyiv[0:KEY_LEN_BYTES]
iv = keyiv[KEY_LEN_BYTES:IV_LEN_BYTES+KEY_LEN_BYTES]
return key, iv
def aes128_cbc_decrypt(data, key, iv):
moo = aes.AESModeOfOperation()
cbc = moo.modeOfOperation["CBC"]
aes128 = moo.aes.keySize["SIZE_128"]
def str_to_list(s): return map(ord, s)
data = str_to_list(data)
key = str_to_list(key)
iv = str_to_list(iv)
return moo.decrypt(data, 16, cbc, key, aes128, iv)
def load_footer_from_efs():
print_progress('Loading footer file from /efs... ')
footer_files = adb_shell("find /efs -name '*_footer'").splitlines()
if len(footer_files) == 0:
print_error('No footers found.')
return
elif len(footer_files) > 1:
print_error('Multiple footers ({}) found, not yet supported.' \
.format(footer_files))
return
footer_file = footer_files[0]
footer_text = adb_shell('cat {} | uuencode -'.format(footer_file))
footer_bytes = BytesIO()
uu.decode(BytesIO(footer_text), footer_bytes)
footer_bytes.seek(0)
try:
footer = cryptfooter.CryptFooter(footer_bytes)
except cryptfooter.ValidationException as e:
print_error(e.message)
print_info('success')
return footer
def block_dev_get_crypto_footer(block_dev):
"""
Looks for a crypto footer at the end of a block device and returns the
footer object if there is one.
If there is not footer, False is returned.
If there were any errors, None is returned
"""
shortname = os.path.basename(block_dev)
print_progress('Checking if {} has a crypto footer... '.format(shortname))
size = block_dev_get_size_in_512_bytes(block_dev)
if not size: return
if size*512 < 16*1024:
        print_error('Size of {} is just {} bytes.'.format(shortname, size*512))
return
# FIXME busybox seems to be compiled without large file support and fails
# to supply sane data at the end of partitions larger than 2 GiB.
skip = size - 16*1024/512
footer_text = adb_shell(('dd if={} bs=512 count=32 skip={} 2>/dev/null'
'| uuencode -')
.format(block_dev, skip))
footer_bytes = BytesIO()
uu.decode(BytesIO(footer_text), footer_bytes)
footer_bytes.seek(0)
try:
return cryptfooter.CryptFooter(footer_bytes)
except cryptfooter.ValidationException as e:
return False
def block_dev_get_size_in_512_bytes(block_dev):
# block_dev is probably symlink, but the real device name is needed to
# get the size from /sys/block
real_dev = follow_symlink(block_dev)
# remove the /dev prefix
shortname = os.path.basename(real_dev)
# now just use a crude hack to get around the annoying partition naming
# (sda1 vs. mmcblk0[p]1)
try:
return long(adb_shell('cat < $(find /sys|grep {}/size)'.format(shortname)))
except AdbShellException as e:
print_error(('Could not get the size of {}.\n'
'Error output:\n' + e.output).format(shortname))
def follow_symlink(link):
return adb_shell("readlink -f '{}'".format(link))
def setup_efs():
print_progress('Checking /efs mount point... ')
try:
adb_shell('[ -d /efs ] || mkdir /efs')
except AdbShellException as e:
print_error('Could not create /efs directory.\n' + e.output)
return
mtab = adb_shell('cat /proc/mounts')
mtab_entries = [line.split() for line in mtab.splitlines()]
efs_entry = [ e for e in mtab_entries if e[1] == "/efs" ]
if efs_entry:
print_info('is already mounted')
return True
else:
print_info('not mounted')
dev = get_efs_block_device()
if not dev: return
return mount_efs(dev)
def mount_efs(name):
print_progress('Trying to mount /efs... ')
if name.startswith('/'):
block_dev = name
elif name.startswith('mtd'):
block_dev = '/dev/block/mtdblock{}'.format(name[3:])
else:
print_error("could not get device path from name '{}'".format(name))
return
print_progress('from {}... '.format(block_dev))
try:
mount(block_dev, '/efs', options = 'ro')
except Exception as e:
print_error(e)
return
print_info('success')
return True
def get_efs_block_device():
blk_dev = scan_etc_fstab()
if blk_dev: return blk_dev
blk_dev = scan_etc_recovery_fstab()
if blk_dev: return blk_dev
blk_dev = scan_proc_mtd()
if blk_dev: return blk_dev
print_error('Could not find the device that is mounted to /efs.')
def scan_proc_mtd():
print_progress('Looking into /proc/mtd... ')
mtd = adb_shell('cat /proc/mtd')
mtd_entries = [ line.split() for line in mtd.splitlines() ]
efs_entry = [ e[0].rstrip(':') for e in mtd_entries if e[3] == '"efs"' ]
if efs_entry:
efs_entry = efs_entry[0]
print_info('found it: {}'.format(efs_entry))
return efs_entry
else:
print_info('not listed')
#def scan_sys_devices():
# print_progress('Brute force /sys/devices search... ')
# name = adb_shell("find /sys/devices -name 'name'|xargs grep -l ^efs$"
# "; true") # always exit code 123?
def scan_etc_fstab():
print_progress('Looking into /etc/fstab... ')
fstab = adb_shell('cat /etc/fstab')
fstab_entries = [line.split() for line in fstab.splitlines()
if not line.strip().startswith('#')
if not len(line.strip()) == 0]
efs_entry = [ e for e in fstab_entries if e[1] == "/efs" ]
if efs_entry:
dev = efs_entry[0][0]
print_info('found it: {}'.format(dev))
return dev
else:
print_info('not listed')
def scan_etc_recovery_fstab():
print_progress('Looking into /etc/recovery.fstab... ')
rfstab = adb_shell('cat /etc/recovery.fstab')
rfstab_entries = [line.split() for line in rfstab.splitlines()
if not line.strip().startswith('#')
if not len(line.strip()) == 0]
efs_entry = [ e for e in rfstab_entries if e[0] == "/efs" ]
if efs_entry:
dev = efs_entry[0][2]
print_info('found it: {}'.format(dev))
return dev
else:
print_info('not listed')
def mount_dmcrypt(fstab_entry):
name = fstab_entry.block_dev_name
mount_point = fstab_entry.mount_point
print_progress('Mounting {} on {}... '.format(name, mount_point))
try:
mount('/dev/mapper/{}'.format(name), fstab_entry.mount_point)
except Exception as e:
print_error('Could not mount decrypted device. This most likely '
'means you got the passphrase wrong.\n'
'Output: '+ str(e))
return
print_info('SUCCESS')
return True
def mount(block_dev, mount_point, options = None, fstype = None):
flag_options = '-o {}'.format(options) if options else ''
flag_type = '-t {}'.format(fstype) if fstype else ''
try:
adb_shell('mount {} {} {} {}' \
.format(flag_options, flag_type, block_dev, mount_point))
return True
except AdbShellException as e:
# mount: mounting /... on /... failed: Device or resource busy
parts = e.output.split(':')
raise Exception(parts[2].strip())
class AdbShellException(Exception):
def __init__(self, exit_code, output):
self.exit_code = exit_code
self.output = output
def __str__(self):
return "exit code={}, output={}" \
.format(self.exit_code, repr(self.output));
def adb_shell_init():
"""
Print an empty string to see what adb outputs, because it varies between
operating systems. On Windows, the line delimiter seems to be '\r\r\n'.
"""
global ADB_LINE_ENDINGS
if ADB_LINE_ENDINGS:
raise Exception('adb shell already initialized')
else:
ADB_LINE_ENDINGS = subprocess.check_output(['adb', 'shell', 'echo'])
def adb_shell(cmd):
if not ADB_LINE_ENDINGS: adb_shell_init()
cmd = '({}); RET=$?; echo; echo $RET'.format(cmd)
raw = subprocess.check_output(['adb', 'shell', cmd])
lines = raw.split(ADB_LINE_ENDINGS)
exit_code = int(lines[-2])
output = '\n'.join(lines[:-3])
if exit_code == 0:
return output
else:
raise AdbShellException(exit_code, output)
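# --- Illustrative sketch (not part of the original script) -------------------
# How adb_shell() recovers the exit code: the command is wrapped so the device
# echoes a blank line and then $?, and the raw output is split on the line
# ending probed by adb_shell_init(). The sample output below is hypothetical.
def _adb_shell_parsing_example():
    raw = 'mtd0: 00200000 00040000 "efs"\r\r\n\r\r\n0\r\r\n'
    lines = raw.split('\r\r\n')      # ADB_LINE_ENDINGS as probed on Windows
    exit_code = int(lines[-2])       # -> 0
    output = '\n'.join(lines[:-3])   # -> 'mtd0: 00200000 00040000 "efs"'
    return exit_code, output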
def print_progress(action):
sys.stdout.write(str(action))
sys.stdout.flush()
def print_error(error):
sys.stderr.write('error\n')
sys.stderr.write(str(error) + '\n')
sys.stderr.flush()
def print_info(status):
sys.stdout.write(str(status) + '\n')
sys.stdout.flush()
if __name__ == '__main__':
main()
| gpl-2.0 | -4,168,781,029,270,006,000 | 29.726586 | 83 | 0.610049 | false |
arcosta/sci-synergy | code/web/scisynergy_flask/quiz_controller.py | 1 | 1310 | """
Created on 01/10/2018
@author: aurelio
"""
from flask import render_template, request, session, redirect, url_for
from scisynergy_flask import app
from .models import Researcher
from flask.helpers import make_response
def insert_answer(userid, idx, form):
pass
@app.route('/questionario', methods=['GET', 'POST'])
@app.route('/quiz', methods=['GET', 'POST'])
def quiz():
if request.method == 'POST':
for i in request.form.keys():
# g.db.insert_answer(1, i, request.form[i])
userid = request.cookies.get('userid')
if userid is None:
userid = 1
insert_answer(userid, i, request.form[i])
return render_template('thanks.html')
else:
userid = request.cookies.get('userid')
if userid is not None:
r = Researcher().find(userid)
return render_template('quiz.html', user=r)
else:
return render_template('quiz.html')
@app.route('/startquiz')
def startquiz():
idx = request.args.get('id')
r = Researcher().find(idx)
if r is not None:
resp = make_response(render_template('index.html', name=r.name))
resp.set_cookie('userid', str(r.userid))
return resp
else:
return render_template('index.html', name=None)
| mit | -6,686,533,588,751,310,000 | 25.734694 | 72 | 0.607634 | false |
arthurlogilab/taurus | tests/modules/test_GrinderExecutor.py | 1 | 3707 | '''
Created on Mar 23, 2015
@author: Coeurl
'''
import shutil
import os
from tests import setup_test_logging, BZTestCase, __dir__
from bzt.modules.grinder import GrinderExecutor
from tests.mocks import EngineEmul
from bzt.utils import BetterDict
setup_test_logging()
class TestGrinderExecutor(BZTestCase):
def test_install_Grinder(self):
path = os.path.abspath(__dir__() + "/../../build/tmp/grinder-taurus/lib/grinder.jar")
shutil.rmtree(os.path.dirname(os.path.dirname(path)), ignore_errors=True)
grinder_link = GrinderExecutor.DOWNLOAD_LINK
grinder_version = GrinderExecutor.VERSION
GrinderExecutor.DOWNLOAD_LINK = "file:///" + __dir__() + "/../data/grinder-{version}_{version}-binary.zip"
GrinderExecutor.VERSION = "3.11"
self.assertFalse(os.path.exists(path))
obj = GrinderExecutor()
obj.engine = EngineEmul()
obj.settings.merge({"path": path})
obj.execution = BetterDict()
obj.execution.merge({"scenario": {
"script": "tests/grinder/helloworld.py",
"properties-file": "tests/grinder/grinder.properties",
"properties": {"grinder.useConsole": "false"}}})
obj.prepare()
self.assertTrue(os.path.exists(path))
obj.prepare()
GrinderExecutor.DOWNLOAD_LINK = grinder_link
GrinderExecutor.VERSION = grinder_version
def test_grinder_widget(self):
obj = GrinderExecutor()
obj.engine = EngineEmul()
obj.execution.merge({"scenario": {"script": "tests/grinder/helloworld.py"}})
obj.prepare()
obj.get_widget()
self.assertEqual(obj.widget.script_name.text, "Script: helloworld.py")
def test_resource_files_collection_remote(self):
obj = GrinderExecutor()
obj.engine = EngineEmul()
obj.execution.merge({"scenario": {"script": "tests/grinder/helloworld.py",
"properties-file": "tests/grinder/grinder.properties"}})
res_files = obj.resource_files()
artifacts = os.listdir(obj.engine.artifacts_dir)
self.assertEqual(len(res_files), 2)
self.assertEqual(len(artifacts), 2)
def test_resource_files_collection_local(self):
obj = GrinderExecutor()
obj.engine = EngineEmul()
obj.execution.merge({"scenario": {"script": "tests/grinder/helloworld.py",
"properties-file": "tests/grinder/grinder.properties"}})
obj.prepare()
artifacts = os.listdir(obj.engine.artifacts_dir)
self.assertEqual(len(artifacts), 2)
def test_resource_files_collection_invalid(self):
obj = GrinderExecutor()
obj.engine = EngineEmul()
obj.execution.merge({"scenario": {"script": "tests/grinder/helloworld.py",
"properties-file": "tests/grinder/grinder_invalid.properties"}})
res_files = obj.resource_files()
artifacts = os.listdir(obj.engine.artifacts_dir)
self.assertEqual(len(res_files), 2)
self.assertEqual(len(artifacts), 2)
self.assertIn("helloworld.py", open(os.path.join(obj.engine.artifacts_dir,
"grinder_invalid.properties")).read())
def test_resource_files_collection_noscript(self):
obj = GrinderExecutor()
obj.engine = EngineEmul()
obj.execution.merge({"scenario": {"properties-file": "tests/grinder/grinder.properties"}})
res_files = obj.resource_files()
artifacts = os.listdir(obj.engine.artifacts_dir)
self.assertEqual(len(res_files), 2)
self.assertEqual(len(artifacts), 2)
| apache-2.0 | 302,198,232,061,096,640 | 38.860215 | 114 | 0.622336 | false |
Royce/GammaJS | support/web/cwf/urls/section.py | 1 | 8769 | from django.conf.urls.defaults import include, patterns
from django.views.generic.simple import redirect_to
class Section(object):
def __init__(self, name, obj=None, target=None, redirectTo=None,
match=None, values=None, new=None, valuesAsSet=True, compareFunc=None,
needsAuth=False, perms=None, display=True, alias=None,
parent=None, package=None, root=False, active=True, sortByAlias=True,
extraContext=None, condition=None):
self.contents = []
self.contentsDict = {}
self.url = '/'
#set everything passed in to a self.xxx attribute
import inspect
args, _, _, _ = inspect.getargvalues(inspect.currentframe())
for arg in args:
setattr(self, arg, locals()[arg])
if not self.alias:
self.alias = self.name.capitalize()
if hasattr(self, 'setup'):
self.setup()
def rootAncestor(self):
if self.parent:
return self.parent.rootAncestor()
else:
return self
########################
### UTILITY
########################
def show(self):
parentShow = True
if self.parent:
parentShow = self.parent.show()
if parentShow:
if self.condition:
if callable(self.condition):
return self.condition()
else:
return self.condition
else:
return True
else:
return False
def appear(self):
return self.display and self.show()
def getSects(self, section):
if callable(section):
for sect in section():
if sect:
yield sect
elif type(section) in (list, tuple):
for sect in section:
if sect:
yield sect
else:
if section:
yield section
########################
### MENU STUFF
########################
def getChildrenMenu(self, *args, **kwargs):
if any(part for part in self.contents):
return self.childrenMenuGen(*args, **kwargs)
else:
return None
def childrenMenuGen(self, request, path, used):
for part in self.contents:
if type(part) is tuple:
part, _ = part
for p in part.getMenu(request, path, used):
yield p
def getMenu(self, request, path, used):
selected = False
resultUsed = used
if self.values:
#determine values
valuesToUse = list(value for value in self.values(used.split('/')))
else:
valuesToUse = None
if valuesToUse and any(value for value in valuesToUse):
if self.valuesAsSet:
valuesToUse = set(valuesToUse)
def getValues(values):
if self.new:
values = [self.new(path, value) for value in values]
else:
values = [(value, value) for value in values]
return values
if self.compareFunc:
if self.sortByAlias:
valuesToUse = getValues(valuesToUse)
valuesToUse = sorted(valuesToUse, self.compareFunc)
else:
valuesToUse = sorted(valuesToUse, self.compareFunc)
valuesToUse = getValues(valuesToUse)
else:
valuesToUse = getValues(valuesToUse)
#for all values, create items in the menu
for alias, match in valuesToUse:
url = '%s/%s' % (used, match)
args = [alias, url, self]
#determine if this item has been selected
if len(path) != 0:
if unicode(match).lower() == path[0].lower():
selected = True
resultUsed += '/%s' % path[0]
else:
selected = False
args += [selected]
#If there is a chance of children, add the generator function, otherwise add nothing
if any(part for part in self):
args += [self.getChildrenMenu(request, path[1:], resultUsed)]
else:
args += [None]
yield args
else:
if not self.values:
for p in self.singleFillMenu(request, path, used):
yield p
if len(path) != 0:
url = '%s/%s' % (used, path[0])
if not self.parent:
gen = self.getChildrenMenu(request, path[1:], url)
if gen:
for p in gen:
yield p
def singleFillMenu(self, request, path, used):
url = '%s/%s' % (used, self.name)
selected = False
if hasattr(self, 'base'):
args = [self.base.alias, url, self.base]
if len(path) != 0:
if unicode(self.name) == path[0]:
selected = True
#Make sure the base item isn't selected unnecessarily
if not self.parent:
if path[-1] == '' and len(path) > 2:
selected = False
elif path[-1] != '' and len(path) > 1:
selected = False
args += [selected]
if self.parent:
args += [self.getChildrenMenu(request, path[1:], url)]
else:
args += [[]]
else:
if len(path) != 0:
if unicode(self.name) == path[0]:
selected = True
args = [self.alias, url, self, selected]
if selected:
args += [self.getChildrenMenu(request, path[1:], url)]
else:
args += [[]]
yield args
########################
### URL PATTERNS
########################
def getPatterns(self, includesOnly=False):
l = []
for part in self.getPatternList():
l.append(part)
return patterns('', *l)
def getPattern(self, name, includeAs=None):
l = []
for p in self.contentsDict[name].getInclude(includeAs):
l.append(p)
return patterns('', *l)
def getPatternList(self, isBase=False):
if self.redirectTo:
yield ('^%s$' % self.url, redirect_to, {'url' : str(self.redirectTo)})
else:
if hasattr(self, 'base'):
if hasattr(self.base, 'getInclude'):
for p in self.base.getInclude(base=True):
yield p
else:
for p in self.base.getPatternList(isBase=True):
yield p
for part in self.contents:
if type(part) is tuple:
part, includeAs = part
if hasattr(part, 'getInclude'):
for p in part.getInclude(includeAs):
yield p
else:
for p in part.getPatternList():
yield p
########################
### SPECIAL
########################
def __iter__(self):
if hasattr(self, 'base'):
if hasattr(self.base, 'part'):
for s in self.base:
yield s
else:
yield self.base
for sect in self.contents:
if type(sect) is tuple:
section, _ = sect
else:
section = sect
if hasattr(section, 'part'):
for s in section:
yield s
else:
yield section
def __getitem__(self, key):
return self.contentsDict[key]
def __unicode__(self):
return "<CWF Section %s : %s : %s>" % (self.name, self.alias, self.url)
def __repr__(self):
return unicode(self)
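# --- Illustrative sketch (not part of the original module) -------------------
# Rough idea of how a Section tree might be assembled and plugged into a
# urlconf; the section names and the wiring below are hypothetical.
#
#   news = Section('news', alias='News')
#   site = Section('base', root=True)
#   site.contents.append(news)
#   site.contentsDict['news'] = news
#   urlpatterns = site.getPatterns()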
| mit | 8,235,032,723,141,327,000 | 31.120879 | 100 | 0.430608 | false |
TheEndarkenedOne/PrussianBlue | src/PrussianBlue/ccproto.py | 1 | 13774 | #!/usr/bin/env python
#
# This file is part of PrussianBlue.
# Copyright 2016, William Ewing. All rights reserved.
#
# PrussianBlue is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PrussianBlue is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PrussianBlue. If not, see <http://www.gnu.org/licenses/>.
#
"""CyanChat command encapsulation, encoding, and decoding."""
import re
class ProtocolError(Exception):
""""""
pass
class ChatUser(object):
""""""
def __init__(self, user_name, user_type):
""""""
self.__name = user_name
self.__type = user_type
def __repr__(self):
return "ChatUser({!r}, {!r})".format(self.name, self.type)
@property
def name(self):
""""""
return self.__name
@property
def type(self):
""""""
return self.__type
def cc_encode(self, proto_ver=1):
""""""
if not self.name:
return u''
return u'%d%s' % (self.type, self.name)
@classmethod
def cc_decode(cls, message):
""""""
        # Handle empty userstring.
if not message:
return cls(None, None)
# Decode userstring.
try:
type = int(message[0], 10)
except:
raise ProtocolError("Userstring must begin with a decimial digit.")
user = message[1:]
return cls(user, type)
class ChatUserAddr(ChatUser):
""""""
def __init__(self, user_name, user_type, user_addr):
""""""
ChatUser.__init__(self, user_name, user_type)
self.__addr = user_addr
def __repr__(self):
return "ChatUserAddr({!r}, {!r}, {!r})".format(
self.name, self.type, self.addr)
@property
def addr(self):
""""""
return self.__addr
def cc_encode(self, proto_ver=1):
""""""
return u'{}{},{}'.format(self.type, self.name, self.addr)
@classmethod
def cc_decode(cls, message):
""""""
try:
user_part, addr = message.split(u',', 1)
except:
raise ProtocolError("Malformed [user,address] string.")
user = ChatUser.cc_decode(user_part)
return cls(user.name, user.type, addr)
class ChatMesg(object):
""""""
def __init__(self, message_text, message_type):
""""""
self.__text = message_text
self.__type = message_type
def __repr__(self):
return "ChatMesg({!r}, {!r})".format(self.__text, self.__type)
@property
def text(self):
""""""
return self.__text
@property
def type(self):
""""""
return self.__type
def cc_encode(self, proto_ver=1):
""""""
type = 1 if proto_ver == 0 else self.type
return u'^{}{}'.format(type, self.text)
@classmethod
def cc_decode(cls, message):
""""""
if not message or not message[0] == '^':
raise ProtocolError("Messages not prefixed by a carat (^).")
try:
type, text = int(message[1], 10), message[2:]
except (IndexError, ValueError):
raise ProtocolError("Messages not prefixed by a numeric type id.")
return cls(text, type)
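# Illustrative round-trip (not part of the original module; values are made up):
#   ChatMesg('hello', 1).cc_encode()     -> u'^1hello'
#   ChatMesg.cc_decode(u'^1hello').text  -> u'hello'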
class ChatCommand(object):
""""""
def __init__(self, command_id):
""""""
self.__command_id = command_id
def __repr__(self):
""""""
return "<ChatCommand({})>".format(
','.join('{!r}'.format(arg) for arg in self._args()))
def _args(self):
""""""
return [self.command_id]
@property
def command_id(self):
""""""
return self.__command_id
def cc_encode(self, proto_ver=1):
""""""
def encode_arg(arg):
has_encode = hasattr(arg, "cc_encode")
if has_encode:
return arg.cc_encode(proto_ver)
else:
return unicode(arg)
args = self._args()
command_args = [u'{:02}'.format(args[0])] + \
[encode_arg(arg) for arg in args[1:]]
return u'|'.join(command_args)
@staticmethod
def cc_decode(commands, message, proto_ver=1):
""""""
CMD_REGEX = re.compile(r'^(\d{2})(?:\|(.+))?$')
try:
cmd, args = CMD_REGEX.match(message).groups()
cmd = int(cmd, 10)
except AttributeError:
raise ProtocolError("Malformed command prefix.")
try:
cmd_class = commands[cmd]
except KeyError:
raise ProtocolError("Unkown command type: %d." % cmd)
return cmd_class.from_args(args, proto_ver)
@classmethod
def from_args(cls, args, proto_ver=1):
""""""
if args:
raise ProtocolError("%s does not accept arguments." %
(cls.__name__))
return cls()
class ClientSetName(ChatCommand):
""""""
CMD_ID = 10
CMD_NAME = "C_NAME"
def __init__(self, name):
""""""
ChatCommand.__init__(self, self.CMD_ID)
self.__name = name
def _args(self):
return [self.command_id, self.name]
@property
def name(self):
""""""
return self.__name
@classmethod
def from_args(cls, args, proto_ver=1):
""""""
if not args:
raise ProtocolError("%s requires exactly one argument." % \
(cls.__name__))
return cls(args)
class ClientClearName(ChatCommand):
""""""
CMD_ID = 15
CMD_NAME = "C_NAME_REMOVE"
def __init__(self):
""""""
ChatCommand.__init__(self, self.CMD_ID)
class ClientWhisper(ChatCommand):
""""""
CMD_ID = 20
CMD_NAME = "C_SND_MSG"
def __init__(self, user, message):
""""""
ChatCommand.__init__(self, self.CMD_ID)
self.__user = user
self.__message = message
def _args(self):
return [self.command_id, self.user, self.message]
@property
def user(self):
""""""
return self.__user
@property
def message(self):
""""""
return self.__message
@classmethod
def from_args(cls, args, proto_ver=1):
""""""
try:
user, mesg = args.split('|', 1)
except (ValueError, AttributeError):
raise ProtocolError("%s requires exactly two arguments." %
(cls.__name__))
user = ChatUser.cc_decode(user)
mesg = ChatMesg.cc_decode(mesg)
return cls(user, mesg)
class ClientBroadcast(ChatCommand):
""""""
CMD_ID = 30
CMD_NAME = "C_SND_ALL"
def __init__(self, message):
""""""
ChatCommand.__init__(self, self.CMD_ID)
self.__message = message
def _args(self):
return [self.command_id, self.message]
@property
def message(self):
""""""
return self.__message
@classmethod
def from_args(cls, args, proto_ver=1):
""""""
if not args:
raise ProtocolError("%s takes exactly one argument." %
(cls.__name__))
mesg = ChatMesg.cc_decode(args)
return cls(mesg)
class ClientAnnounce(ChatCommand):
""""""
CMD_ID = 40
CMD_NAME = "C_ANNOUNCE_MSG"
def __init__(self, protocol):
""""""
ChatCommand.__init__(self, self.CMD_ID)
self.__protocol = protocol
def _args(self):
if self.protocol == 0:
return [self.command_id]
return [self.command_id, self.protocol]
@property
def protocol(self):
""""""
return self.__protocol
@classmethod
def from_args(cls, args, proto_ver=1):
if not args:
return cls(0)
protocol = 0
try:
if args:
assert args.isdigit()
protocol = int(args, 10)
except:
raise ProtocolError("Protocol version must be a base-10 integer.")
return cls(protocol)
class ClientIgnore(ChatCommand):
""""""
CMD_ID = 70
CMD_NAME = "C_IGNORE"
def __init__(self, name):
""""""
ChatCommand.__init__(self, self.CMD_ID)
self.__name = name
def _args(self):
return [self.command_id, self.name]
@property
def name(self):
""""""
return self.__name
@classmethod
def from_args(cls, args, proto_ver=1):
if not args:
raise ProtocolError("%s requires exactly one argument." % \
(cls.__name__))
return cls(args)
CLIENT_COMMAND_SET = {
ClientSetName,
ClientClearName,
ClientWhisper,
ClientBroadcast,
ClientAnnounce,
ClientIgnore
}
class ServerRejectName(ChatCommand):
""""""
CMD_ID = 10
CMD_NAME = "S_NAME_NOSET"
def __init__(self):
""""""
ChatCommand.__init__(self, self.CMD_ID)
class ServerAcceptName(ChatCommand):
""""""
CMD_ID = 11
CMD_NAME = "S_NAME_SET"
def __init__(self):
""""""
ChatCommand.__init__(self, self.CMD_ID)
class ServerWhisper(ChatCommand):
""""""
CMD_ID = 21
CMD_NAME = "S_SEND_MSG"
def __init__(self, user, message):
""""""
ChatCommand.__init__(self, self.CMD_ID)
self.__user = user
self.__message = message
def _args(self):
return [self.command_id, self.user, self.message]
@property
def user(self):
""""""
return self.__user
@property
def message(self):
""""""
return self.__message
@classmethod
def from_args(cls, args, proto_ver=1):
try:
user, mesg = args.split('|', 1)
except:
raise ProtocolError("% requires exactly two arguments." % \
(cls.__name__))
user = ChatUser.cc_decode(user)
mesg = ChatMesg.cc_decode(mesg)
return cls(user, mesg)
class ServerBroadcast(ChatCommand):
""""""
CMD_ID = 31
CMD_NAME = "S_SEND_ALL"
def __init__(self, user, message):
""""""
ChatCommand.__init__(self, self.CMD_ID)
self.__user = user
self.__message = message
def _args(self):
return [self.command_id, self.user, self.message]
@property
def user(self):
""""""
return self.__user
@property
def message(self):
""""""
return self.__message
@classmethod
def from_args(cls, args, proto_ver=1):
try:
user, mesg = args.split('|', 1)
except:
raise ProtocolError("%r require exactly two arguments." % \
(cls.__name__))
user = ChatUser.cc_decode(user)
mesg = ChatMesg.cc_decode(mesg)
return cls(user, mesg)
class ServerUserList(ChatCommand):
""""""
CMD_ID = 35
CMD_NAME = "S_WHO_LIST"
def __init__(self, users):
""""""
ChatCommand.__init__(self, self.CMD_ID)
self.__users = users
def _args(self):
return [self.command_id] + self.users
@property
def users(self):
""""""
return self.__users
@classmethod
def from_args(cls, args, proto_ver=1):
if not args:
raise ProtocolError("%r takes at least one argument." % \
(cls.__name__))
users = [ChatUserAddr.cc_decode(arg) for arg in args.split('|')]
return cls(users)
class ServerLobbyMessage(ChatCommand):
""""""
CMD_ID = 40
CMD_NAME = "S_LOBBY_MSG"
def __init__(self, messages):
""""""
ChatCommand.__init__(self, self.CMD_ID)
self.__messages = messages
def _args(self):
return [self.command_id] + self.messages
@property
def messages(self):
""""""
return self.__messages
def cc_encode(self, proto_ver=1):
""""""
return u"{:02}|{:1}{}".format(self.command_id, proto_ver,
u"|".join(self.messages))
@classmethod
def from_args(cls, args, proto_ver=1):
try:
vers, msgs = args[0], args[1:]
except:
raise ProtocolError("Malformed lobby announcements message.")
        messages = [ChatMesg.cc_decode(arg) for arg in msgs.split('|')]
return cls(messages)
class ServerIgonre(ChatCommand):
""""""
CMD_ID = 70
CMD_NAME = "S_IGNORE"
def __init__(self, name):
""""""
ChatCommand.__init__(self, self.CMD_ID)
self.__name = name
def _args(self):
return [self.command_id, self.name]
@property
def name(self):
""""""
return self.__name
@classmethod
def from_args(cls, args, proto_ver=1):
if not args:
raise ProtocolError('%r takes exactly one argument.' % \
(cls.__name__))
return cls(args)
SERVER_COMMAND_SET = {
ServerRejectName,
ServerAcceptName,
ServerWhisper,
ServerBroadcast,
ServerUserList,
ServerLobbyMessage,
ServerIgonre
}
CLIENT_COMMANDS = {cmd.CMD_ID: cmd for cmd in CLIENT_COMMAND_SET}
SERVER_COMMANDS = {cmd.CMD_ID: cmd for cmd in SERVER_COMMAND_SET}
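# --- Illustrative sketch (not part of the original module) -------------------
# Decoding a raw client line with the command table above; the wire string is
# hypothetical.
def _decode_example():
    cmd = ChatCommand.cc_decode(CLIENT_COMMANDS, u'30|^1hello everyone')
    # cmd is a ClientBroadcast whose .message is a ChatMesg('hello everyone', 1)
    return cmd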
| agpl-3.0 | -4,088,800,119,861,771,300 | 23.207381 | 79 | 0.524031 | false |
alsur/django-admin-auto-tests | admin_auto_tests/test_model.py | 1 | 6404 | from unittest import SkipTest
import factory
from django.contrib.auth import get_user_model
try:
from django.core.urlresolvers import reverse
except:
from django.urls import reverse
from django.forms import model_to_dict
from django.test import TestCase
from admin_auto_tests.utils import test_base_class
def create_factory(model_class, **kwargs):
class Factory(factory.django.DjangoModelFactory):
class Meta:
model = model_class
return Factory
class AdminTestMixIn(object):
field_values = None
model = None
def create(self, commit=True, model=None, follow_fk=True, generate_fk=True, field_values=None):
model = model or self.model
field_values = field_values or self.field_values
instance = create_factory(model, follow_fk=follow_fk, generate_fk=generate_fk,
field_values=field_values)()
return instance
def create_user(self, is_staff=False, is_superuser=False, is_active=True):
return self.create(model=get_user_model(), field_values=dict(
is_staff=is_staff, is_superuser=is_superuser, is_active=is_active
))
def setUp(self):
super(AdminTestMixIn, self).setUp()
self.client.force_login(self.create_user(is_staff=True, is_superuser=True))
class ModelAdminTestMixIn(AdminTestMixIn):
add_status_code = 200
changelist_status_code = 200
change_status_code = 200
delete_status_code = 200
form_data_exclude_fields = ()
form_data_update = {}
skip_add = False
skip_create = False
skip_change = False
skip_delete = False
def get_form_data_update(self):
return dict(self.form_data_update)
def get_add_url(self):
return reverse('admin:{model._meta.app_label}_{model._meta.model_name}_add'.format(model=self.model))
def get_changelist_url(self):
return reverse('admin:{model._meta.app_label}_{model._meta.model_name}_changelist'.format(model=self.model))
def get_change_url(self, instance=None):
instance = instance or self.create()
return reverse('admin:{model._meta.app_label}_{model._meta.model_name}_change'.format(model=self.model),
args=(instance.pk,))
def get_delete_url(self, instance=None):
instance = instance or self.create()
return reverse('admin:{model._meta.app_label}_{model._meta.model_name}_delete'.format(model=self.model),
args=(instance.pk,))
def create_instance_data(self):
instance = self.create(False)
return {x: y for x, y in filter(lambda x: x[1], model_to_dict(instance).items())
if not x in self.form_data_exclude_fields}
def create_form_instance_data(self, response, instance_data=None):
fields = {key: value.initial for key, value in
response.context_data['adminform'].form.fields.items() if value.initial is not None}
for formset in response.context_data['inline_admin_formsets']:
formset = list(formset)[0].formset
for field in formset.forms[0].visible_fields() + formset.empty_form.visible_fields():
if field.value() is not None:
fields[field.html_name] = field.value()
# fields[field.html_name] = field.value() if field.value() is not None else ''
for key, value in formset.management_form.initial.items():
fields['{}-{}'.format(formset.prefix, key)] = value
fields.update(instance_data or self.create_instance_data())
return fields
def test_changelist_view(self):
response = self.client.get(self.get_changelist_url())
self.assertEqual(response.status_code, self.changelist_status_code)
def test_add_view(self):
if self.add_status_code != 200 or self.skip_add:
raise SkipTest('Required status code != 200' if self.add_status_code != 200 else 'Skip add is enabled')
response = self.client.get(self.get_add_url())
self.assertEqual(response.status_code, self.add_status_code)
def test_add(self):
if self.add_status_code != 200 or self.skip_add:
raise SkipTest('Required status code != 200' if self.add_status_code != 200 else 'Skip add is enabled')
response = self.client.get(self.get_add_url())
instance_data = self.create_instance_data()
data = self.create_form_instance_data(response, instance_data)
data['_continue'] = ''
data.update(self.get_form_data_update())
print(data)
response = self.client.post(self.get_add_url(), data, follow=True)
self.assertEqual(response.status_code, self.add_status_code)
if response.context_data.get('errors'):
self.assertEqual(len(response.context_data['errors']), 0,
' * '.join(['{}: {}'.format(x, ', '.join(y))
for x, y in response.context_data['adminform'].form.errors.items()]))
if 'original' not in response.context_data:
self.fail('Instance is not created.')
def test_change_view(self):
if self.skip_change:
raise SkipTest('Skip change is enabled')
response = self.client.get(self.get_change_url())
self.assertEqual(response.status_code, self.change_status_code)
def test_change(self):
if self.change_status_code != 200 or self.skip_change:
raise SkipTest('Required status code != 200' if self.change_status_code != 200
else 'Skip change is enabled')
instance = self.create()
response = self.client.get(self.get_change_url(instance))
new_data = self.create_form_instance_data(response)
response = self.client.post(self.get_change_url(instance), new_data, follow=True)
self.assertEqual(response.status_code, self.change_status_code)
def test_delete_view(self):
if self.skip_delete:
raise SkipTest('Skip delete is enabled')
response = self.client.get(self.get_delete_url())
self.assertEqual(response.status_code, self.delete_status_code)
class AdminTestCase(AdminTestMixIn, TestCase):
pass
AdminTestCase = test_base_class(AdminTestCase)
class ModelAdminTestCase(ModelAdminTestMixIn, TestCase):
pass
ModelAdminTestCase = test_base_class(ModelAdminTestCase)
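# --- Illustrative usage (not part of the original module) --------------------
# A concrete admin test would subclass ModelAdminTestCase and point it at a
# registered model; "Article" here is a hypothetical model.
#
#   class ArticleAdminTest(ModelAdminTestCase):
#       model = Article
#       form_data_exclude_fields = ('slug',)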
| mit | 8,385,466,556,995,353,000 | 40.584416 | 116 | 0.645378 | false |
jonasrothfuss/DeepEpisodicMemory | data_prep/model_input.py | 1 | 5217 | """Convert data to TFRecords file format with example protos. An Example is a mostly-normalized data format for
storing data for training and inference. """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from tensorflow.python.platform import gfile
from settings import FLAGS
def read_and_decode(filename_queue):
"""Creates one image sequence"""
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
image_seq = []
video_id = None
for imageCount in range(FLAGS.num_images):
path = 'blob' + '/' + str(imageCount)
feature_dict = {
path: tf.FixedLenFeature([], tf.string),
'height': tf.FixedLenFeature([], tf.int64),
'width': tf.FixedLenFeature([], tf.int64),
'depth': tf.FixedLenFeature([], tf.int64),
'id': tf.FixedLenFeature([], tf.string),
'metadata': tf.FixedLenFeature([], tf.string)
}
features = tf.parse_single_example(
serialized_example,
features=feature_dict)
image_buffer = tf.reshape(features[path], shape=[])
image = tf.decode_raw(image_buffer, tf.uint8)
image = tf.reshape(image, tf.pack([FLAGS.height, FLAGS.width, FLAGS.num_depth]))
image = tf.reshape(image, [1, FLAGS.height, FLAGS.width, FLAGS.num_depth])
image_seq.append(image)
if features:
video_id = features['id']
image_seq = tf.concat(0, image_seq)
return image_seq, video_id, features
def create_batch(directory, mode, batch_size, num_epochs, overall_images_count, standardize=True):
""" If mode equals 'train": Reads input data num_epochs times and creates batch
If mode equals 'valid': Creates one large batch with all validation tensors.
batch_size will be ignored and num_epochs will be set to 1 in this case.
If mode equals 'test': #TODO
:arg
;param directory: path to directory where train/valid tfrecord files are stored
;param modus: for differentiating data (train|valid|test)
;param batch_size: number of batches that will be created
;param num_epochs: number of times to read the input data, or 0/None for endless
:returns
A batch array of shape(s, i, h, w, c) where:
s: batch size
i: length of image sequence
h: height of image
w: width of image
c: depth of image
"""
path = os.path.abspath(directory)
if mode == 'train':
data_filter = FLAGS.train_files
elif mode == 'valid':
data_filter = FLAGS.valid_files
elif mode == 'test':
data_filter = FLAGS.test_files
filenames = gfile.Glob(os.path.join(path, data_filter))
if not filenames:
raise RuntimeError('No data files found.')
with tf.name_scope('input'):
filename_queue = tf.train.string_input_producer(
filenames, num_epochs=num_epochs)
# sharing the same file even when multiple reader threads used
image_seq_tensor, video_id, features = read_and_decode(filename_queue)
if mode == 'valid' or mode == 'test':
if not batch_size:
batch_size = get_number_of_records(filenames)
assert batch_size > 0
image_seq_batch, video_id_batch, metadata_batch = tf.train.batch(
[image_seq_tensor, video_id, features['metadata']], batch_size=batch_size, num_threads=FLAGS.num_threads,
capacity=100 * batch_size)
# -- training -- get shuffled batches
else:
# Shuffle the examples and collect them into batch_size batches.
# (Internally uses a RandomShuffleQueue.)
# We run this in two threads to avoid being a bottleneck.
image_seq_batch, video_id_batch = tf.train.shuffle_batch(
[image_seq_tensor, video_id], batch_size=batch_size, num_threads=FLAGS.num_threads,
capacity=60*8* batch_size,
# Ensures a minimum amount of shuffling of examples.
min_after_dequeue=10*8*batch_size)
metadata_batch = None
return image_seq_batch, video_id_batch, metadata_batch
def get_number_of_records(filenames):
"""Iterates through the tfrecords files given by the list 'filenames'
and returns the number of available videos
:param filenames a list with absolute paths to the .tfrecords files
:return number of found videos (int)
"""
filename_queue_val = tf.train.string_input_producer(
filenames, num_epochs=1)
image_seq_tensor_val = read_and_decode(filename_queue_val)
num_examples = 0
# create new session to determine batch_size for validation/test data
with tf.Session() as sess_valid:
init_op = tf.group(tf.local_variables_initializer())
sess_valid.run(init_op)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
try:
while True:
sess_valid.run([image_seq_tensor_val])
num_examples += 1
except tf.errors.OutOfRangeError as e:
coord.request_stop(e)
finally:
coord.request_stop()
coord.join(threads)
return num_examples
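# --- Illustrative usage (not part of the original module) --------------------
# Sketch of how a training batch produced by create_batch() could be fetched
# once; the tfrecords directory is hypothetical and error handling is omitted.
def _fetch_one_batch_example():
  batch, video_ids, _ = create_batch('/tmp/tfrecords', 'train', batch_size=8,
                                     num_epochs=1, overall_images_count=None)
  with tf.Session() as sess:
    sess.run(tf.group(tf.local_variables_initializer()))
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    images, ids = sess.run([batch, video_ids])
    coord.request_stop()
    coord.join(threads)
  return images, ids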
| mit | -3,275,228,045,268,129,300 | 33.098039 | 119 | 0.648074 | false |
mbedmicro/pyOCD | src/analyzer/generate_python.py | 1 | 1298 | # pyOCD debugger
# Copyright (c) 2006-2015 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
from struct import unpack
INPUT_FILENAME = sys.argv[1]
OUTPUT_FILENAME = sys.argv[2]
with open(INPUT_FILENAME, "rb") as f:
data = f.read()
words = len(data) // 4
if len(data) % 4 != 0:
print("Warning: input length not word aligned")
str = "<L%i" % words
print("Data length %i" % len(data))
data = unpack("<%iL" % words, data)
str = "analyzer = (\n "
count = 0
for val in data:
if count % 8 == 7:
str += "0x{:08x},\n ".format(val)
else:
str += "0x{:08x}, ".format(val)
count += 1
str += "\n )"
data = str
with open(OUTPUT_FILENAME, "w") as f:
f.write(data)
| apache-2.0 | -3,642,950,460,871,087,000 | 27.844444 | 74 | 0.670262 | false |
gloaec/trifle | src/trifle/anyconfig/parser.py | 1 | 3640 | #
# Copyright (C) 2011 - 2013 Satoru SATOH <ssato @ redhat.com>
# License: MIT
#
"""Misc parsers"""
import re
INT_PATTERN = re.compile(r"^(\d|([1-9]\d+))$")
BOOL_PATTERN = re.compile(r"^(true|false)$", re.I)
STR_PATTERN = re.compile(r"^['\"](.*)['\"]$")
def parse_single(s):
"""
Very simple parser to parse expressions represent some single values.
:param s: a string to parse
:return: Int | Bool | String
>>> parse_single(None)
''
>>> parse_single("0")
0
>>> parse_single("123")
123
>>> parse_single("True")
True
>>> parse_single("a string")
'a string'
>>> parse_single("0.1")
'0.1'
>>> parse_single(" a string contains extra whitespaces ")
'a string contains extra whitespaces'
"""
def matched(pat, s):
return pat.match(s) is not None
if s is None:
return ''
s = s.strip()
if not s:
return ''
if matched(BOOL_PATTERN, s):
        # bool(s) is True for any non-empty string, including "false"
        return s.lower() == "true"
if matched(INT_PATTERN, s):
return int(s)
if matched(STR_PATTERN, s):
return s[1:-1]
return s
def parse_list(s, sep=","):
"""
Simple parser to parse expressions reprensent some list values.
:param s: a string to parse
:param sep: Char to separate items of list
:return: [Int | Bool | String]
>>> parse_list("")
[]
>>> parse_list("1")
[1]
>>> parse_list("a,b")
['a', 'b']
>>> parse_list("1,2")
[1, 2]
>>> parse_list("a,b,")
['a', 'b']
"""
return [parse_single(x) for x in s.split(sep) if x]
def parse_attrlist_0(s, avs_sep=":", vs_sep=",", as_sep=";"):
"""
Simple parser to parse expressions in the form of
[ATTR1:VAL0,VAL1,...;ATTR2:VAL0,VAL2,..].
:param s: input string
:param avs_sep: char to separate attribute and values
:param vs_sep: char to separate values
:param as_sep: char to separate attributes
:return: a list of tuples of (key, value | [value])
where key = (Int | String | ...),
value = (Int | Bool | String | ...) | [Int | Bool | String | ...]
>>> parse_attrlist_0("a:1")
[('a', 1)]
>>> parse_attrlist_0("a:1;b:xyz")
[('a', 1), ('b', 'xyz')]
>>> parse_attrlist_0("requires:bash,zsh")
[('requires', ['bash', 'zsh'])]
>>> parse_attrlist_0("obsoletes:sysdata;conflicts:sysdata-old")
[('obsoletes', 'sysdata'), ('conflicts', 'sysdata-old')]
"""
def attr_and_values(s):
for rel in parse_list(s, as_sep):
if avs_sep not in rel or rel.endswith(avs_sep):
continue
(_attr, _values) = parse_list(rel, avs_sep)
if vs_sep in str(_values):
_values = parse_list(_values, vs_sep)
if _values:
yield (_attr, _values)
return [(a, vs) for a, vs in attr_and_values(s)]
def parse_attrlist(s, avs_sep=":", vs_sep=",", as_sep=";"):
"""
Simple parser to parse expressions in the form of
[ATTR1:VAL0,VAL1,...;ATTR2:VAL0,VAL2,..].
:param s: input string
:param avs_sep: char to separate attribute and values
:param vs_sep: char to separate values
:param as_sep: char to separate attributes
>>> parse_attrlist("requires:bash,zsh")
{'requires': ['bash', 'zsh']}
"""
return dict(parse_attrlist_0(s, avs_sep, vs_sep, as_sep))
def parse(s, lsep=",", avsep=":", vssep=",", avssep=";"):
"""Generic parser"""
if avsep in s:
return parse_attrlist(s, avsep, vssep, avssep)
elif lsep in s:
return parse_list(s, lsep)
else:
return parse_single(s)
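# Illustrative behaviour of the generic parse() helper (not covered by the
# doctests above):
#   parse("requires:bash,zsh")  -> {'requires': ['bash', 'zsh']}
#   parse("a,b,1")              -> ['a', 'b', 1]
#   parse("10")                 -> 10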
# vim:sw=4:ts=4:et:
| gpl-3.0 | -2,545,860,632,176,178,000 | 23.931507 | 79 | 0.547802 | false |
AnnieJumpCannon/RAVE | article/figures/plot_kordopatis_calibration_sample.py | 1 | 3739 |
"""
Plot giant abundances w.r.t. GES.
"""
import numpy as np
import matplotlib.pyplot as plt
from collections import OrderedDict
from matplotlib.ticker import MaxNLocator
from matplotlib.colors import LogNorm
from mpl_toolkits.axes_grid1 import make_axes_locatable
try:
rave_cannon_dr1, kordopatis_comparisons
except NameError: # Do you know who I am? That's Jeff Vader!
from rave_io import get_cannon_dr1, get_kordopatis_comparisons
rave_cannon_dr1 = get_cannon_dr1()
kordopatis_comparisons = get_kordopatis_comparisons()
from astropy.table import join
data_table = join(rave_cannon_dr1, kordopatis_comparisons, keys=("Name", ))
else:
print("Warning: Using pre-loaded data.")
ok = data_table["QC"]# * (data_table["R"] > 10)
latex_labels = {
"TEFF_2": r"$T_{\rm eff}$ $[{\rm K}]$ $({\rm Literature})$",
"TEFF_1": r"$T_{\rm eff}$ $[{\rm K}]$ $({\rm \it{RAVE}}{\rm -on})$",
"LOGG_1": r"$\log{g}$ $({\rm \it{RAVE}}{\rm -on})$",
"LOGG_2": r"$\log{g}$ $({\rm Literature})$",
"FE_H": r"$[{\rm Fe/H}]$ $({\rm \it{RAVE}}{\rm -on})$",
"FEH": r"$[{\rm Fe/H}]$ $({\rm Literature})$"
}
cannon_labels = ("TEFF_1", "LOGG_1", "FE_H")
literature_labels = ("TEFF_2", "LOGG_2", "FEH")
limits = {
"TEFF_1": [3500, 7500],
"LOGG_1": [0, 5.5],
"FE_H": [-3.5, 0.75]
}
kwds = dict(cmap="plasma", vmin=np.nanmin(data_table["snr"]), vmax=np.nanmax(data_table["snr"]))
K = len(cannon_labels)
factor = 3.5
lbdim = 0.25 * factor
tdim = 0.1 * factor
rdim = 0.2 * factor
whspace = 0.05
yspace = factor
xspace = factor * K + factor * (K - 1) * whspace + lbdim * (K - 1)
xdim = lbdim + xspace + rdim
ydim = lbdim + yspace + tdim
fig, axes = plt.subplots(1, K, figsize=(xdim, ydim))
fig.subplots_adjust(
left=lbdim/xdim, bottom=lbdim/ydim, right=(xspace + lbdim)/xdim,
top=(yspace + lbdim)/ydim, wspace=whspace, hspace=whspace)
for i, (ax, cannon_label, literature_label) \
in enumerate(zip(axes, cannon_labels, literature_labels)):
x = data_table[literature_label]
y = data_table[cannon_label]
c = data_table["snr"]
#xerr = data_table["e_{}".format(literature_label)]
yerr = data_table["E_{}".format(cannon_label).strip("_1")]
ax.errorbar(x[ok], y[ok], yerr=yerr[ok], fmt=None, ecolor="#666666",
zorder=-1)
scat = ax.scatter(x[ok], y[ok], c=c[ok], s=50, **kwds)
_ = ax.scatter([-999], [-9999], c=[0], **kwds)
for ax, cannon_label, literature_label in zip(axes, cannon_labels, literature_labels):
lims = limits[cannon_label]
ax.plot(lims, lims, c="#666666", zorder=-1, linestyle=":")
ax.set_xlim(lims)
ax.set_ylim(lims)
ax.xaxis.set_major_locator(MaxNLocator(6))
ax.yaxis.set_major_locator(MaxNLocator(6))
ax.set_xlabel(latex_labels[literature_label])
ax.set_ylabel(latex_labels[cannon_label])
axes[0].set_xticks([4000, 5000, 6000, 7000])
axes[0].set_yticks([4000, 5000, 6000, 7000])
axes[-1].set_xticks([-3.5, -2.5, -1.5, -0.5, 0.5])
axes[-1].set_yticks([-3.5, -2.5, -1.5, -0.5, 0.5])
fig.tight_layout()
cbar = plt.colorbar(_,
cax=fig.add_axes([0.93, fig.subplotpars.bottom, 0.02, fig.subplotpars.top - fig.subplotpars.bottom]))
cbar.set_label(r"${\rm S/N}$ ${\rm RAVE}$ $[{\rm pixel}^{-1}]$")
fig.subplots_adjust(right=0.90)
fig.savefig("kordopatis-calibration.pdf", dpi=300)
fig.savefig("kordopatis-calibration.png")
for ref in set(data_table["REF"]):
for cannon_label, literature_label in zip(cannon_labels, literature_labels):
match = data_table["REF"] == ref
x = data_table[cannon_label][match]
y = data_table[literature_label][match]
diff = y - x
print(ref, np.isfinite(diff).sum(), cannon_label, np.nanmean(diff), np.nanstd(diff))
| mit | 6,726,250,664,844,613,000 | 27.112782 | 105 | 0.625836 | false |
pombredanne/product-definition-center | pdc/apps/osbs/views.py | 1 | 2944 | #
# Copyright (c) 2015 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
from rest_framework import mixins
from rest_framework import viewsets
from pdc.apps.common import viewsets as common_viewsets
from pdc.apps.auth.permissions import APIPermission
from . import filters
from . import models
from . import serializers
class OSBSViewSet(common_viewsets.StrictQueryParamMixin,
common_viewsets.ChangeSetUpdateModelMixin,
mixins.ListModelMixin,
common_viewsets.MultiLookupFieldMixin,
mixins.RetrieveModelMixin,
viewsets.GenericViewSet):
"""
## Metadata for OpenShift Build Service
This viewset provides a list of all components relevant to OSBS. This
connection is realized through the `has_osbs` flag on [release component
types]($URL:releasecomponenttype-list$). The components appear in this API
automatically when they are assigned the proper type. Records here can only
be changed, they can't be created or deleted.
Currently there is just one flag tracked here:
* `autorebuild`: This flag indicates whether the component should be
automatically rebuilt when its dependencies change. If the value in PDC
is `null`, it indicates that the client should use its default value.
"""
queryset = models.OSBSRecord.objects.filter(component__type__has_osbs=True).order_by('component__id')
serializer_class = serializers.OSBSSerializer
filter_class = filters.OSBSFilter
permission_classes = (APIPermission,)
lookup_fields = (('component__release__release_id', r'[^/]+'),
('component__name', r'[^/]+'))
def retrieve(self, request, **kwargs):
"""
__Method__: `GET`
__URL__: $LINK:osbs-detail:release_id}/{component_name$
__Response__:
%(SERIALIZER)s
"""
return super(OSBSViewSet, self).retrieve(request, **kwargs)
def list(self, request, **kwargs):
"""
__Method__: `GET`
__URL__: $LINK:osbs-list$
__Query params__:
%(FILTERS)s
__Response__:
%(SERIALIZER)s
"""
return super(OSBSViewSet, self).list(request, **kwargs)
def update(self, request, **kwargs):
"""
__Method__: `PUT`
__URL__: $LINK:osbs-detail:release_id}/{component_name$
__Data__:
%(WRITABLE_SERIALIZER)s
__Response__:
%(SERIALIZER)s
"""
return super(OSBSViewSet, self).update(request, **kwargs)
def partial_update(self, request, **kwargs):
"""
__Method__: `PATCH`
__URL__: $LINK:osbs-detail:release_id}/{component_name$
__Data__:
%(WRITABLE_SERIALIZER)s
__Response__:
%(SERIALIZER)s
"""
return super(OSBSViewSet, self).partial_update(request, **kwargs)
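# Illustrative request (not part of the original module; the URL prefix,
# release id and component name are hypothetical):
#   PATCH /rest_api/v1/osbs/rhel-7.4/python-requests/
#   {"autorebuild": true}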
| mit | -6,829,052,093,177,501,000 | 27.582524 | 105 | 0.619905 | false |
persandstrom/home-assistant | homeassistant/components/shell_command.py | 2 | 3372 | """
Exposes regular shell commands as services.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/shell_command/
"""
import asyncio
import logging
import shlex
import voluptuous as vol
from homeassistant.exceptions import TemplateError
from homeassistant.core import ServiceCall
from homeassistant.helpers import config_validation as cv, template
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
DOMAIN = 'shell_command'
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
cv.slug: cv.string,
}),
}, extra=vol.ALLOW_EXTRA)
@asyncio.coroutine
def async_setup(hass: HomeAssistantType, config: ConfigType) -> bool:
"""Set up the shell_command component."""
conf = config.get(DOMAIN, {})
cache = {}
@asyncio.coroutine
def async_service_handler(service: ServiceCall) -> None:
"""Execute a shell command service."""
cmd = conf[service.service]
if cmd in cache:
prog, args, args_compiled = cache[cmd]
elif ' ' not in cmd:
prog = cmd
args = None
args_compiled = None
cache[cmd] = prog, args, args_compiled
else:
prog, args = cmd.split(' ', 1)
args_compiled = template.Template(args, hass)
cache[cmd] = prog, args, args_compiled
if args_compiled:
try:
rendered_args = args_compiled.async_render(service.data)
except TemplateError as ex:
_LOGGER.exception("Error rendering command template: %s", ex)
return
else:
rendered_args = None
if rendered_args == args:
# No template used. default behavior
# pylint: disable=no-member
create_process = asyncio.subprocess.create_subprocess_shell(
cmd,
loop=hass.loop,
stdin=None,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
)
else:
# Template used. Break into list and use create_subprocess_exec
# (which uses shell=False) for security
shlexed_cmd = [prog] + shlex.split(rendered_args)
# pylint: disable=no-member
create_process = asyncio.subprocess.create_subprocess_exec(
*shlexed_cmd,
loop=hass.loop,
stdin=None,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
)
process = yield from create_process
stdout_data, stderr_data = yield from process.communicate()
if stdout_data:
_LOGGER.debug("Stdout of command: `%s`, return code: %s:\n%s",
cmd, process.returncode, stdout_data)
if stderr_data:
_LOGGER.debug("Stderr of command: `%s`, return code: %s:\n%s",
cmd, process.returncode, stderr_data)
if process.returncode != 0:
_LOGGER.exception("Error running command: `%s`, return code: %s",
cmd, process.returncode)
for name in conf.keys():
hass.services.async_register(DOMAIN, name, async_service_handler)
return True
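# --- Illustrative configuration (not part of this module) --------------------
# Example configuration.yaml entries; the command strings are made up.
#
#   shell_command:
#     restart_dns: ssh pi@192.168.1.2 'sudo pihole restartdns'
#     set_volume: 'amixer set Master {{ volume }}%'
#
# The second command contains a template, so it is run via
# create_subprocess_exec (shell=False); the first is run through the shell.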
| apache-2.0 | -705,549,576,785,525,800 | 31.737864 | 77 | 0.588078 | false |
erudit/eruditorg | tests/unit/core/subscription/test_import_restrictions.py | 1 | 8836 | import datetime
import pytest
from django.core.management import call_command
from django.contrib.auth import get_user_model
from erudit.models import Organisation
from erudit.test.factories import OrganisationFactory
from core.accounts.models import LegacyAccountProfile
from core.subscription.models import (
JournalAccessSubscription,
JournalAccessSubscriptionPeriod,
InstitutionReferer,
)
from core.subscription.restriction.models import Revueabonne
from core.subscription.test.factories import (
JournalAccessSubscriptionFactory,
InstitutionIPAddressRange,
)
from core.subscription.restriction.test.factories import (
AbonneFactory,
RevueFactory,
RevueabonneFactory,
IpabonneFactory,
)
from core.subscription.management.commands import import_restrictions
from core.accounts.test.factories import LegacyAccountProfileFactory
from erudit.test.factories import JournalFactory
@pytest.mark.django_db
def test_import_subscriber():
# Verify that we properly import subscriber information
# Setup
abonne1 = AbonneFactory.create()
subscription_qs = Revueabonne.objects
command = import_restrictions.Command()
command.import_restriction_subscriber(abonne1, subscription_qs)
# Run & check
accprofile = LegacyAccountProfile.objects.get(legacy_id=abonne1.abonneid)
org = accprofile.organisation
assert org.name == abonne1.abonne
assert org.account_id == str(abonne1.abonneid)
assert org.sushi_requester_id == abonne1.requesterid
assert accprofile.user.username == "restriction-{}".format(abonne1.abonneid)
assert accprofile.user.email == abonne1.courriel
assert accprofile.legacy_id == str(abonne1.abonneid)
@pytest.mark.django_db
def test_import_journal():
# Verify that journal subscriptions are properly imported
# Setup
journal1 = JournalFactory.create()
abonne1 = AbonneFactory.create()
revue1 = RevueFactory.create(titrerevabr=journal1.code)
RevueabonneFactory.create(
abonneid=abonne1.abonneid, revueid=revue1.revueid, anneeabonnement=2018
)
subscription_qs = Revueabonne.objects
command = import_restrictions.Command()
command.import_restriction_subscriber(abonne1, subscription_qs)
# Run & check
assert JournalAccessSubscription.objects.count() == 1
sub = JournalAccessSubscription.objects.first()
assert sub.get_subscription_type() == JournalAccessSubscription.TYPE_INSTITUTIONAL
assert sub.journals.filter(pk=journal1.pk).exists()
assert sub.organisation.name == abonne1.abonne
@pytest.mark.django_db
def test_assign_user_to_existing_organisation():
"""If an organisation has the LegacyOrganisationId of the restriction user to import,
add the created user to this organisation"""
org = OrganisationFactory()
journal1 = JournalFactory.create()
abonne1 = AbonneFactory.create()
abonne1.abonneid = org.account_id
revue1 = RevueFactory.create(titrerevabr=journal1.code)
RevueabonneFactory.create(
abonneid=abonne1.abonneid, revueid=revue1.revueid, anneeabonnement=2018
)
subscription_qs = Revueabonne.objects
command = import_restrictions.Command()
command.import_restriction_subscriber(abonne1, subscription_qs)
# test that no new organisation has been created
assert Organisation.objects.count() == 1
assert Organisation.objects.first().members.count() == 1
@pytest.mark.django_db
def test_import_can_rename_organisation():
organisation = OrganisationFactory()
organisation.name = "old name"
organisation.save()
journal1 = JournalFactory.create()
abonne1 = AbonneFactory.create()
abonne1.abonne = "new name"
abonne1.abonneid = organisation.account_id
revue1 = RevueFactory.create(titrerevabr=journal1.code)
RevueabonneFactory.create(
abonneid=abonne1.abonneid, revueid=revue1.revueid, anneeabonnement=2018
)
subscription_qs = Revueabonne.objects
command = import_restrictions.Command()
command.import_restriction_subscriber(abonne1, subscription_qs)
assert Organisation.objects.count() == 1
organisation = Organisation.objects.first()
assert organisation.name == "new name"
@pytest.mark.django_db
def test_user_email_is_updated_when_updated_at_the_source():
profile = LegacyAccountProfileFactory()
abonne = AbonneFactory()
profile.organisation = OrganisationFactory()
profile.legacy_id = abonne.abonneid
abonne.courriel = "[email protected]"
profile.save()
journal = JournalFactory()
revue1 = RevueFactory.create(titrerevabr=journal.code)
RevueabonneFactory.create(
abonneid=abonne.abonneid, revueid=revue1.revueid, anneeabonnement=2018
)
subscription_qs = Revueabonne.objects
command = import_restrictions.Command()
command.import_restriction_subscriber(abonne, subscription_qs)
profile = LegacyAccountProfile.objects.first()
assert profile.user.email == abonne.courriel
@pytest.mark.django_db
def test_import_deletions():
# Verify that subscription deletions are properly imported, that is, that deletions propagate.
# Setup
journal1 = JournalFactory.create()
abonne1 = AbonneFactory.create()
revue1 = RevueFactory.create(titrerevabr=journal1.code)
sub1 = RevueabonneFactory.create(
abonneid=abonne1.abonneid,
revueid=revue1.revueid,
anneeabonnement=datetime.datetime.now().year,
)
assert JournalAccessSubscriptionPeriod.objects.count() == 0
call_command("import_restrictions", *[], **{})
subscription = JournalAccessSubscription.objects.first()
assert subscription.journals.count() == 1
assert subscription.journalaccesssubscriptionperiod_set.count() == 1
sub1.delete()
call_command("import_restrictions", *[], **{})
# Run & check
assert JournalAccessSubscription.objects.count() == 1
assert JournalAccessSubscription.objects.first().journals.count() == 0
@pytest.mark.django_db
def test_delete_existing_subscriptions():
journal = JournalFactory()
organisation = OrganisationFactory()
JournalAccessSubscriptionFactory(valid=True, type="individual")
LegacyAccountProfileFactory(legacy_id=1179, organisation=organisation)
subscription = JournalAccessSubscriptionFactory(
organisation=organisation,
valid=True,
)
subscription.journals.add(journal)
subscription.save()
call_command("import_restrictions", *[], **{})
assert subscription.journals.count() == 0
@pytest.mark.django_db
def test_import_deletion_will_not_modify_individual_subscriptions():
individual_subscription = JournalAccessSubscriptionFactory(valid=True, type="individual")
assert individual_subscription.journals.count() == 1
call_command("import_restrictions", *[], **{})
assert individual_subscription.journals.count() == 1
@pytest.mark.django_db
def test_existing_organisation_is_renamed_properly():
abonne1 = AbonneFactory.create()
abonne1.save()
revue1 = RevueFactory.create(titrerevabr=JournalFactory())
RevueabonneFactory.create(abonneid=abonne1.abonneid, revueid=revue1.revueid)
call_command("import_restrictions", *[], **{})
assert Organisation.objects.filter(name=abonne1.abonne).count() == 1
abonne1.abonne = "new name"
abonne1.save()
call_command("import_restrictions", *[], **{})
assert Organisation.objects.filter(name=abonne1.abonne).count() == 1
@pytest.mark.django_db
def test_can_skip_subscribers_with_no_email():
journal = JournalFactory()
abonne1 = AbonneFactory.create(courriel="")
abonne1.save()
IpabonneFactory.create(abonneid=abonne1.pk)
revue1 = RevueFactory.create(titrerevabr=journal.code)
RevueabonneFactory.create(abonneid=abonne1.abonneid, revueid=revue1.revueid)
call_command("import_restrictions", *[], **{"dry_run": False})
assert LegacyAccountProfile.objects.count() == 0
assert JournalAccessSubscriptionPeriod.objects.count() == 0
@pytest.mark.django_db
def test_dry_run_mode_does_not_create_anything():
journal = JournalFactory()
abonne1 = AbonneFactory.create(referer="http://www.erudit.org/")
abonne1.save()
IpabonneFactory.create(abonneid=abonne1.pk)
revue1 = RevueFactory.create(titrerevabr=journal.code)
RevueabonneFactory.create(abonneid=abonne1.abonneid, revueid=revue1.revueid)
call_command("import_restrictions", *[], **{"dry_run": True})
assert get_user_model().objects.count() == 0
assert InstitutionReferer.objects.count() == 0
assert LegacyAccountProfile.objects.count() == 0
assert JournalAccessSubscriptionPeriod.objects.count() == 0
assert Organisation.objects.count() == 0
assert JournalAccessSubscription.objects.count() == 0
assert InstitutionIPAddressRange.objects.count() == 0
| gpl-3.0 | -2,459,048,570,420,822,000 | 33.248062 | 98 | 0.741512 | false |
Barrog/C4-Datapack | data/jscript/quests/235_MimirsElixir/__init__.py | 1 | 5604 | # Mimir's Elixir version 0.1
# by Fulminus
print "importing quests:",
import sys
from net.sf.l2j.gameserver.model.quest import State
from net.sf.l2j.gameserver.model.quest import QuestState
from net.sf.l2j.gameserver.model.quest.jython import QuestJython as JQuest
#Quest info
QUEST_NUMBER,QUEST_NAME,QUEST_DESCRIPTION = 235,"MimirsElixir","Mimir's Elixir"
DROP_RATE = 45
#prerequisites:
STAR_OF_DESTINY = 5011
MINLEVEL = 75
#Quest items
PURE_SILVER = 6320
TRUE_GOLD = 6321
SAGES_STONE = 6322
BLOOD_FIRE = 6318
MIMIRS_ELIXIR = 6319
SCROLL_ENCHANT_WEAPON_A = 729
#Messages
default = "<html><head><body>I have nothing to say to you.</body></html>"
#NPCs
LADD,JOAN=7721,7718
#Mobs, cond, Drop
DROPLIST = {
965: [3,SAGES_STONE], #Chimera Piece
1090: [6,BLOOD_FIRE] #Bloody Guardian
}
class Quest (JQuest) :
def __init__(self,id,name,descr): JQuest.__init__(self,id,name,descr)
def onEvent (self,event,st) :
if event == "1" :
st.setState(PROGRESS)
st.set("cond","1")
htmltext = "7166-02a.htm"
elif event == "7718_1" :
st.set("cond","3")
htmltext = "7718-01a.htm"
return htmltext
def onTalk (self,npc,st):
htmltext = default
id = st.getState()
npcId = npc.getNpcId()
cond = st.getInt("cond")
if npcId == LADD :
if id == CREATED :
st.set("cond","0")
if st.getPlayer().getLevel() < MINLEVEL :
st.exitQuest(1)
htmltext = "7166-01.htm" #not qualified
elif not st.getQuestItemsCount(STAR_OF_DESTINY) :
st.exitQuest(1)
htmltext = "7166-01a.htm" #not qualified
elif int(st.get("cond"))==0 :
htmltext = "7166-02.htm" # Successful start: Bring me Pure silver from Reagents quest
elif id == COMPLETED :
htmltext = "<html><head><body>You have already completed this quest.</body></html>"
# was asked to get pure silver but has not done so yet. Repeat: get pure silver
elif cond==1 and not st.getQuestItemsCount(PURE_SILVER) :
htmltext = "7166-03.htm" # Bring me Pure silver from Reagents quest
# got the pure silver and came back. Ask for TrueGold.
elif cond==1 and st.getQuestItemsCount(PURE_SILVER) :
st.set("cond","2")
htmltext = "7166-04.htm" # Bring me True Gold from Joan
elif 1<cond<5 :
htmltext = "7166-05.htm" # Where is my GOLD?! Bring to me first.
# got the true gold...look for Blood fire
elif cond==5 :
st.set("cond","6")
htmltext = "7166-06.htm" # find Blood Fire from "bloody guardians"
# still looking for blood fire?
elif cond==6 :
htmltext = "7166-07.htm" # find Blood Fire from "bloody guardians"
# Ah, you got the blood fire! Time to mix them up!
elif cond==7 and st.getQuestItemsCount(PURE_SILVER) and st.getQuestItemsCount(TRUE_GOLD):
htmltext = "7166-08.htm" # what are you standing there for? Go to the cauldron and mix them...
# you idiot, how did you lose your quest items?
elif cond==7 :
htmltext = "7166-09.htm" # Well...you already know what to do...go get the 3 items...
st.set("cond","3") # start over...yay...
# cond for this quest is set to 8 from Supplier or Reagents, when you create Mimir's Elixir.
# Finally, all is done...time to learn how to use the Elixir...
elif cond==8 :
htmltext = "7166-10.htm" # here's what you do...
#st.takeItems(MIMIRS_ELIXIR,-1) #disabled for compatibility with the way java expects things at Grand Master.
st.giveItems(SCROLL_ENCHANT_WEAPON_A,1)
st.setState(COMPLETED)
st.unset("cond")
elif npcId == JOAN :
# first time talking to Joan: You ask for True Gold, she sends you for Sage's stone
if cond==2 :
htmltext = "7718-01.htm" # You want True Gold? Please get the sage's stone. Kill Chimera!
      # Why are you back already?  You don't have the stone.
elif cond==3 :
htmltext = "7718-02.htm" # you haven't gotten the sage's stone yet?
# aha! Here is the sage's stone! Cool, now we can make true gold
elif cond==4 :
st.takeItems(SAGES_STONE,-1)
st.giveItems(TRUE_GOLD,1)
st.set("cond","5")
htmltext = "7718-03.htm" # here you go...take the gold. Now go back to ladd.
elif cond>=5 :
htmltext = "7718-04.htm" # Go back to ladd already!
return htmltext
def onKill (self,npc,st) :
npcId = npc.getNpcId()
drop = st.getRandom(100)
cond = int(st.get("cond"))
dropcond = DROPLIST[npcId][0]
      if cond == dropcond and drop < DROP_RATE :
if st.getQuestItemsCount(DROPLIST[npcId][1]) == 0 :
st.giveItems(DROPLIST[npcId][1],1)
st.playSound("ItemSound.quest_itemget")
st.set("cond",str(cond+1))
return
# Quest class and state definition
QUEST = Quest(QUEST_NUMBER, str(QUEST_NUMBER)+"_"+QUEST_NAME, QUEST_DESCRIPTION)
CREATED = State('Start', QUEST)
PROGRESS = State('Progress', QUEST)
COMPLETED = State('Completed', QUEST)
QUEST.setInitialState(CREATED)
# Quest NPC starter initialization
QUEST.addStartNpc(LADD)
# Quest initialization
CREATED.addTalkId(LADD)
PROGRESS.addTalkId(LADD)
PROGRESS.addTalkId(JOAN)
for i in DROPLIST.keys():
PROGRESS.addKillId(i)
print str(QUEST_NUMBER)+": "+QUEST_DESCRIPTION
| gpl-2.0 | -8,237,252,240,355,856,000 | 37.383562 | 122 | 0.607602 | false |
zycdragonball/tensorflow | tensorflow/python/profiler/tfprof_logger.py | 1 | 6882 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Logging tensorflow::tfprof::OpLog.
OpLog is used to add extra model information for offline analysis by tfprof.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import six
from tensorflow.core.profiler import tfprof_log_pb2
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import gfile
TRAINABLE_VARIABLES = '_trainable_variables'
REGISTERED_FLOP_STATS = 'flops'
def _fill_missing_graph_shape(graph, run_meta):
"""Fill Tensor shapes in 'graph' with run time shape from 'run_meta'."""
for dev_stat in run_meta.step_stats.dev_stats:
for node_stat in dev_stat.node_stats:
if not node_stat.output:
continue
try:
op = graph.get_operation_by_name(node_stat.node_name)
except KeyError as e:
        # Graph doesn't contain the node_stat, usually RecvTensor.
continue
if len(node_stat.output) != len(op.outputs):
# For example, conditional op has only 1 output at run time.
continue
for (i, node_stat_out) in enumerate(node_stat.output):
if op.outputs[i].get_shape().is_fully_defined():
continue
node_stat_dims = node_stat_out.tensor_description.shape.dim
node_stat_shape = tensor_shape.TensorShape(
[d.size for d in node_stat_dims])
try:
op.outputs[i].set_shape(op.outputs[i].get_shape().merge_with(
node_stat_shape))
except ValueError as e:
sys.stderr.write('Node %s incompatible shapes: %s.\n' %
(node_stat.node_name, e))
return graph
def _get_logged_ops(graph, run_meta=None, add_trace=True,
add_trainable_var=True):
"""Extract trainable model parameters and FLOPs for ops from a Graph.
Args:
graph: tf.Graph.
run_meta: RunMetadata proto used to complete shape information.
add_trace: Whether to add op trace information.
add_trainable_var: Whether to assign tf.trainable_variables() op type
'_trainable_variables'.
Returns:
logged_ops: dict mapping from op_name to OpLogEntry.
"""
if run_meta:
graph = _fill_missing_graph_shape(graph, run_meta)
op_missing_shape = 0
logged_ops = {}
# TODO(xpan): Work with Profiler more efficiently.
for op in graph.get_operations():
try:
stats = ops.get_stats_for_node_def(
graph, op.node_def, REGISTERED_FLOP_STATS)
except ValueError:
# Catch Exception When shape is incomplete. Skip it.
op_missing_shape += 1
stats = None
entry = tfprof_log_pb2.OpLogEntry()
entry.name = op.name
add_entry = False
if stats and stats.value:
entry.float_ops = int(stats.value)
add_entry = True
if add_trace:
for tb in op.traceback:
trace = entry.code_def.traces.add()
trace.file = tb[0] if tb[0] else 'none'
trace.lineno = tb[1] if tb[1] else -1
trace.function = tb[2] if tb[2] else 'none'
trace.line = tb[3] if tb[3] else 'none'
add_entry = True
if add_entry:
logged_ops[entry.name] = entry
if add_trainable_var:
for v in graph.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES):
if v.op.name not in logged_ops:
entry = tfprof_log_pb2.OpLogEntry()
entry.name = v.op.name
entry.types.append(TRAINABLE_VARIABLES)
logged_ops[entry.name] = entry
else:
logged_ops[v.op.name].types.append(TRAINABLE_VARIABLES)
if op_missing_shape > 0 and not run_meta:
sys.stderr.write('%d ops no flops stats due to incomplete shapes.\n' %
op_missing_shape)
return logged_ops
def _merge_default_with_oplog(graph, op_log=None, run_meta=None,
add_trace=True, add_trainable_var=True):
"""Merge the tfprof default extra info with caller's op_log.
Args:
graph: tf.Graph.
op_log: OpLog proto.
run_meta: RunMetadata proto used to complete shape information.
add_trace: Whether to add op trace information.
add_trainable_var: Whether to assign tf.trainable_variables() op type
'_trainable_variables'.
Returns:
tmp_op_log: Merged OpLog proto.
"""
tmp_op_log = tfprof_log_pb2.OpLog()
logged_ops = _get_logged_ops(
graph, run_meta, add_trace=add_trace, add_trainable_var=add_trainable_var)
if not op_log:
tmp_op_log.log_entries.extend(logged_ops.values())
else:
all_ops = dict()
for entry in op_log.log_entries:
all_ops[entry.name] = entry
for op_name, entry in six.iteritems(logged_ops):
if op_name in all_ops:
all_ops[op_name].types.extend(entry.types)
if entry.float_ops > 0 and all_ops[op_name].float_ops == 0:
all_ops[op_name].float_ops = entry.float_ops
if entry.code_def.traces and not all_ops[op_name].code_def.traces:
all_ops[op_name].code_def.MergeFrom(entry.code_def)
else:
all_ops[op_name] = entry
tmp_op_log.log_entries.extend(all_ops.values())
return tmp_op_log
def write_op_log(graph, log_dir, op_log=None, run_meta=None, add_trace=True):
"""Log provided 'op_log', and add additional model information below.
The API also assigns ops in tf.trainable_variables() an op type called
'_trainable_variables'.
The API also logs 'flops' statistics for ops with op.RegisterStatistics()
defined. flops calculation depends on Tensor shapes defined in 'graph',
which might not be complete, 'run_meta', if provided, completes the shape
information with best effort.
Args:
graph: tf.Graph.
log_dir: directory to write the log file.
op_log: (Optional) OpLog proto to be written. If not provided, an new
one is created.
run_meta: (Optional) RunMetadata proto that helps flops computation using
run time shape information.
add_trace: Whether to add op trace information. Used to support "code" view.
"""
op_log = _merge_default_with_oplog(graph, op_log, run_meta, add_trace)
with gfile.Open(os.path.join(log_dir, 'tfprof_log'), 'w') as log:
log.write(op_log.SerializeToString())
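# Minimal usage sketch (hypothetical paths; assumes a built graph and, optionally,
# RunMetadata collected from a profiled session.run call):
#   graph = tf.get_default_graph()
#   write_op_log(graph, log_dir='/tmp/tfprof', run_meta=None, add_trace=True)
# The resulting 'tfprof_log' file can then be consumed by the tfprof tooling.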
| apache-2.0 | 3,209,182,756,314,235,400 | 35.802139 | 80 | 0.661 | false |
juliogonzalez/ebs-tools | lib/dateutils.py | 1 | 2873 | # ebs-tools, a set of tools to manage EBS volumes and snapshots
#
# Copyright (C) 2014 Julio Gonzalez Gil <[email protected]>
#
# This file is part of ebs-tools (http://github.com/juliogonzalez/ebs-tools)
#
# ebs-tools is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ebs-tools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ebs-tools. If not, see <http://www.gnu.org/licenses/>.
from datetime import datetime, timedelta
def timedelta_months(date, nmonths):
""" Substract a number of days from a given date and then get then
first day of that month
Needed as timedelta function from standard python doesn't support
months and we don't want to rely on more third party modules
Args:
date: A datetime.date object with the initial date
nmonths: The number of months to substract
Returns:
A datetime.date object with the first day of the month for then
substraction
"""
first_day = datetime(date.year, date.month, 1)
for i in range(0, nmonths):
first_day = datetime(first_day.year, first_day.month, 1)
first_day = first_day - timedelta(days=1)
first_day = datetime(first_day.year, first_day.month, 1)
return(first_day)
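# Example (hypothetical values): subtracting 2 months from 2014-03-15
#   timedelta_months(datetime(2014, 3, 15), 2) -> datetime(2014, 1, 1)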
def timedelta_to_strf(date, days=0, seconds=0, microseconds=0,
milliseconds=0, minutes=0, hours=0, weeks=0):
""" Perform a timedelta and return result as string (UTC)
Args:
date: A datetime.date object with the initial date
        days: An integer with the number of days to subtract
        seconds: An integer with the number of seconds to subtract
        microseconds: An integer with the number of microseconds
            to subtract
        milliseconds: An integer with the number of milliseconds
            to subtract
        minutes: An integer with the number of minutes to subtract
        hours: An integer with the number of hours to subtract
        weeks: An integer with the number of weeks to subtract
Returns:
A string with the result, as UTC
"""
diff = date - timedelta(days, seconds, microseconds,
milliseconds, minutes, hours, weeks)
if diff.utcoffset() is None:
return diff.strftime('%Y-%m-%dT%H:%M:%S.000Z')
else:
return diff.strftime('%Y-%m-%dT%H:%M:%S.%z')
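# Example (hypothetical value): one day before a naive datetime, rendered as UTC
#   timedelta_to_strf(datetime(2014, 3, 15, 12, 0, 0), days=1)
#   -> '2014-03-14T12:00:00.000Z'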
| gpl-3.0 | -6,829,628,543,422,717,000 | 40.637681 | 76 | 0.661678 | false |
nesl/sos-2x | modules/unit_test/modules/kernel/post_raw/source_trick/reciever/source_trick_reciever.py | 1 | 3085 | import sys
import os
import pysos
import signal
# these two variables should be changed depending on the test driver's PID
# and the type of message it will be sending. If you are using the generic_test.c
# then it is likely these two values can stay the same
TEST_MODULE = 0x81
MSG_TEST_DATA= 33
ALARM_LEN = 60
START_DATA = 100
FINAL_DATA = 200
TEST_FAIL = 155
TEST_PASS = 255
# variables holding new and old sensor values
# this can be replaced with whatever you want since this is specific to
# what the test driver expects for data
oldstate = {}
state = {}
# a signal handler that will go off for an alarm
# it is highly suggested that you use this since it is the easiest way to test if your
# node has entered panic mode via the script
def panic_handler(signum, frame):
print "it is highly likely that your node has entered panic mode"
print "please reset the node"
sys.exit(1)
# message handler for messages of type MSG_DATA_READY
def generic_test(msg):
""" Small example of test driver usage. It simulates a virtual
dice and shows which side of the dice is up.
"""
global oldstate
global state
print "message recieved"
signal.alarm(ALARM_LEN)
    # unpack the values we are expecting: in this case a node id, the node state,
    # and a data value
(node_id, node_state, data) = pysos.unpack("<BBB", msg['data'])
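    # "<BBB" unpacks three unsigned single-byte fields, so node_id, node_state
    # and data are each expected to be in the range 0-255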
if node_id not in state.keys():
state[node_id] = 0
oldstate[node_id] = 0
# these are some simple calculations to test the sensor value we have gotten
# this is the part which you need to fill in in order to verify that the function is working
if (node_state == START_DATA):
print "initialization began correctly"
if (node_state == 0):
state[node_id] = data
if (node_state == TEST_FAIL):
print >> sys.stderr, "the test for item %d has failed" %data
if (node_state == TEST_PASS):
print "the test for item %d has passed" %data
if (node_state == 1 and state[node_id] != data):
print >> sys.stderr, " a message was lost somewhere on node %d before count %d" %(node_id,data)
if (node_state == FINAL_DATA):
print "finalization worked correctly"
if __name__ == "__main__":
# here we set up a connection to sossrv using the pysos module
# and begin listening for messages
# we also register our function above with the server so that it is called
# when the appropriate message type is recieved
srv = pysos.sossrv()
srv.register_trigger(generic_test, sid=TEST_MODULE, type=MSG_TEST_DATA)
# register the signal handler and begin an alarm that will wait for 60 seconds before going off
# other times for the alarm might be good, use your own judgement based on your test
signal.signal(signal.SIGALRM, panic_handler)
signal.alarm(ALARM_LEN)
# we do this since the test_suite application has information regarding the amount of time
# each test should be run. after the amount of time specified in test.lst, test_suite will
# end this script and move to another test
while(1):
continue
| bsd-3-clause | -7,762,477,223,178,849,000 | 35.72619 | 99 | 0.706969 | false |
MitchTalmadge/Emoji-Tools | src/main/resources/PythonScripts/fontTools/ttLib/woff2_test.py | 2 | 27241 | from __future__ import print_function, division, absolute_import, unicode_literals
from fontTools.misc.py23 import *
from fontTools import ttLib
from .woff2 import (WOFF2Reader, woff2DirectorySize, woff2DirectoryFormat,
woff2FlagsSize, woff2UnknownTagSize, woff2Base128MaxSize, WOFF2DirectoryEntry,
getKnownTagIndex, packBase128, base128Size, woff2UnknownTagIndex,
WOFF2FlavorData, woff2TransformedTableTags, WOFF2GlyfTable, WOFF2LocaTable,
WOFF2Writer)
import unittest
from fontTools.misc import sstruct
import os
import random
import copy
from collections import OrderedDict
haveBrotli = False
try:
import brotli
haveBrotli = True
except ImportError:
pass
# Python 3 renamed 'assertRaisesRegexp' to 'assertRaisesRegex', and fires
# deprecation warnings if a program uses the old name.
if not hasattr(unittest.TestCase, 'assertRaisesRegex'):
unittest.TestCase.assertRaisesRegex = unittest.TestCase.assertRaisesRegexp
current_dir = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
data_dir = os.path.join(current_dir, 'testdata')
TTX = os.path.join(data_dir, 'TestTTF-Regular.ttx')
OTX = os.path.join(data_dir, 'TestOTF-Regular.otx')
METADATA = os.path.join(data_dir, 'test_woff2_metadata.xml')
TT_WOFF2 = BytesIO()
CFF_WOFF2 = BytesIO()
def setUpModule():
if not haveBrotli:
raise unittest.SkipTest("No module named brotli")
assert os.path.exists(TTX)
assert os.path.exists(OTX)
# import TT-flavoured test font and save it as WOFF2
ttf = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False)
ttf.importXML(TTX)
ttf.flavor = "woff2"
ttf.save(TT_WOFF2, reorderTables=None)
# import CFF-flavoured test font and save it as WOFF2
otf = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False)
otf.importXML(OTX)
otf.flavor = "woff2"
otf.save(CFF_WOFF2, reorderTables=None)
class WOFF2ReaderTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.file = BytesIO(CFF_WOFF2.getvalue())
cls.font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False)
cls.font.importXML(OTX)
def setUp(self):
self.file.seek(0)
def test_bad_signature(self):
with self.assertRaisesRegex(ttLib.TTLibError, 'bad signature'):
WOFF2Reader(BytesIO(b"wOFF"))
def test_not_enough_data_header(self):
incomplete_header = self.file.read(woff2DirectorySize - 1)
with self.assertRaisesRegex(ttLib.TTLibError, 'not enough data'):
WOFF2Reader(BytesIO(incomplete_header))
def test_incorrect_compressed_size(self):
data = self.file.read(woff2DirectorySize)
header = sstruct.unpack(woff2DirectoryFormat, data)
header['totalCompressedSize'] = 0
data = sstruct.pack(woff2DirectoryFormat, header)
with self.assertRaises(brotli.error):
WOFF2Reader(BytesIO(data + self.file.read()))
def test_incorrect_uncompressed_size(self):
decompress_backup = brotli.decompress
brotli.decompress = lambda data: b"" # return empty byte string
with self.assertRaisesRegex(ttLib.TTLibError, 'unexpected size for decompressed'):
WOFF2Reader(self.file)
brotli.decompress = decompress_backup
def test_incorrect_file_size(self):
data = self.file.read(woff2DirectorySize)
header = sstruct.unpack(woff2DirectoryFormat, data)
header['length'] -= 1
data = sstruct.pack(woff2DirectoryFormat, header)
with self.assertRaisesRegex(
ttLib.TTLibError, "doesn't match the actual file size"):
WOFF2Reader(BytesIO(data + self.file.read()))
def test_num_tables(self):
tags = [t for t in self.font.keys() if t not in ('GlyphOrder', 'DSIG')]
data = self.file.read(woff2DirectorySize)
header = sstruct.unpack(woff2DirectoryFormat, data)
self.assertEqual(header['numTables'], len(tags))
def test_table_tags(self):
tags = set([t for t in self.font.keys() if t not in ('GlyphOrder', 'DSIG')])
reader = WOFF2Reader(self.file)
self.assertEqual(set(reader.keys()), tags)
def test_get_normal_tables(self):
woff2Reader = WOFF2Reader(self.file)
specialTags = woff2TransformedTableTags + ('head', 'GlyphOrder', 'DSIG')
for tag in [t for t in self.font.keys() if t not in specialTags]:
origData = self.font.getTableData(tag)
decompressedData = woff2Reader[tag]
self.assertEqual(origData, decompressedData)
def test_reconstruct_unknown(self):
reader = WOFF2Reader(self.file)
with self.assertRaisesRegex(ttLib.TTLibError, 'transform for table .* unknown'):
reader.reconstructTable('ZZZZ')
class WOFF2ReaderTTFTest(WOFF2ReaderTest):
""" Tests specific to TT-flavored fonts. """
@classmethod
def setUpClass(cls):
cls.file = BytesIO(TT_WOFF2.getvalue())
cls.font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False)
cls.font.importXML(TTX)
def setUp(self):
self.file.seek(0)
def test_reconstruct_glyf(self):
woff2Reader = WOFF2Reader(self.file)
reconstructedData = woff2Reader['glyf']
self.assertEqual(self.font.getTableData('glyf'), reconstructedData)
def test_reconstruct_loca(self):
woff2Reader = WOFF2Reader(self.file)
reconstructedData = woff2Reader['loca']
self.assertEqual(self.font.getTableData('loca'), reconstructedData)
self.assertTrue(hasattr(woff2Reader.tables['glyf'], 'data'))
def test_reconstruct_loca_not_match_orig_size(self):
reader = WOFF2Reader(self.file)
reader.tables['loca'].origLength -= 1
with self.assertRaisesRegex(
ttLib.TTLibError, "'loca' table doesn't match original size"):
reader.reconstructTable('loca')
def normalise_table(font, tag, padding=4):
""" Return normalised table data. Keep 'font' instance unmodified. """
assert tag in ('glyf', 'loca', 'head')
assert tag in font
if tag == 'head':
origHeadFlags = font['head'].flags
font['head'].flags |= (1 << 11)
tableData = font['head'].compile(font)
if font.sfntVersion in ("\x00\x01\x00\x00", "true"):
assert {'glyf', 'loca', 'head'}.issubset(font.keys())
origIndexFormat = font['head'].indexToLocFormat
if hasattr(font['loca'], 'locations'):
origLocations = font['loca'].locations[:]
else:
origLocations = []
glyfTable = ttLib.newTable('glyf')
glyfTable.decompile(font.getTableData('glyf'), font)
glyfTable.padding = padding
if tag == 'glyf':
tableData = glyfTable.compile(font)
elif tag == 'loca':
glyfTable.compile(font)
tableData = font['loca'].compile(font)
if tag == 'head':
glyfTable.compile(font)
font['loca'].compile(font)
tableData = font['head'].compile(font)
font['head'].indexToLocFormat = origIndexFormat
font['loca'].set(origLocations)
if tag == 'head':
font['head'].flags = origHeadFlags
return tableData
def normalise_font(font, padding=4):
""" Return normalised font data. Keep 'font' instance unmodified. """
# drop DSIG but keep a copy
DSIG_copy = copy.deepcopy(font['DSIG'])
del font['DSIG']
	# override TTFont attributes
origFlavor = font.flavor
origRecalcBBoxes = font.recalcBBoxes
origRecalcTimestamp = font.recalcTimestamp
origLazy = font.lazy
font.flavor = None
font.recalcBBoxes = False
font.recalcTimestamp = False
font.lazy = True
# save font to temporary stream
infile = BytesIO()
font.save(infile)
infile.seek(0)
# reorder tables alphabetically
outfile = BytesIO()
reader = ttLib.sfnt.SFNTReader(infile)
writer = ttLib.sfnt.SFNTWriter(
outfile, len(reader.tables), reader.sfntVersion, reader.flavor, reader.flavorData)
for tag in sorted(reader.keys()):
if tag in woff2TransformedTableTags + ('head',):
writer[tag] = normalise_table(font, tag, padding)
else:
writer[tag] = reader[tag]
writer.close()
# restore font attributes
font['DSIG'] = DSIG_copy
font.flavor = origFlavor
font.recalcBBoxes = origRecalcBBoxes
font.recalcTimestamp = origRecalcTimestamp
font.lazy = origLazy
return outfile.getvalue()
class WOFF2DirectoryEntryTest(unittest.TestCase):
def setUp(self):
self.entry = WOFF2DirectoryEntry()
def test_not_enough_data_table_flags(self):
with self.assertRaisesRegex(ttLib.TTLibError, "can't read table 'flags'"):
self.entry.fromString(b"")
def test_not_enough_data_table_tag(self):
incompleteData = bytearray([0x3F, 0, 0, 0])
with self.assertRaisesRegex(ttLib.TTLibError, "can't read table 'tag'"):
self.entry.fromString(bytes(incompleteData))
def test_table_reserved_flags(self):
with self.assertRaisesRegex(ttLib.TTLibError, "bits 6-7 are reserved"):
self.entry.fromString(bytechr(0xC0))
def test_loca_zero_transformLength(self):
data = bytechr(getKnownTagIndex('loca')) # flags
data += packBase128(random.randint(1, 100)) # origLength
data += packBase128(1) # non-zero transformLength
with self.assertRaisesRegex(
ttLib.TTLibError, "transformLength of the 'loca' table must be 0"):
self.entry.fromString(data)
def test_fromFile(self):
unknownTag = Tag('ZZZZ')
data = bytechr(getKnownTagIndex(unknownTag))
data += unknownTag.tobytes()
data += packBase128(random.randint(1, 100))
expectedPos = len(data)
f = BytesIO(data + b'\0'*100)
self.entry.fromFile(f)
self.assertEqual(f.tell(), expectedPos)
def test_transformed_toString(self):
self.entry.tag = Tag('glyf')
self.entry.flags = getKnownTagIndex(self.entry.tag)
self.entry.origLength = random.randint(101, 200)
self.entry.length = random.randint(1, 100)
expectedSize = (woff2FlagsSize + base128Size(self.entry.origLength) +
base128Size(self.entry.length))
data = self.entry.toString()
self.assertEqual(len(data), expectedSize)
def test_known_toString(self):
self.entry.tag = Tag('head')
self.entry.flags = getKnownTagIndex(self.entry.tag)
self.entry.origLength = 54
expectedSize = (woff2FlagsSize + base128Size(self.entry.origLength))
data = self.entry.toString()
self.assertEqual(len(data), expectedSize)
def test_unknown_toString(self):
self.entry.tag = Tag('ZZZZ')
self.entry.flags = woff2UnknownTagIndex
self.entry.origLength = random.randint(1, 100)
expectedSize = (woff2FlagsSize + woff2UnknownTagSize +
base128Size(self.entry.origLength))
data = self.entry.toString()
self.assertEqual(len(data), expectedSize)
class DummyReader(WOFF2Reader):
def __init__(self, file, checkChecksums=1, fontNumber=-1):
self.file = file
for attr in ('majorVersion', 'minorVersion', 'metaOffset', 'metaLength',
'metaOrigLength', 'privLength', 'privOffset'):
setattr(self, attr, 0)
class WOFF2FlavorDataTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
assert os.path.exists(METADATA)
with open(METADATA, 'rb') as f:
cls.xml_metadata = f.read()
cls.compressed_metadata = brotli.compress(cls.xml_metadata, mode=brotli.MODE_TEXT)
# make random byte strings; font data must be 4-byte aligned
cls.fontdata = bytes(bytearray(random.sample(range(0, 256), 80)))
cls.privData = bytes(bytearray(random.sample(range(0, 256), 20)))
def setUp(self):
self.file = BytesIO(self.fontdata)
self.file.seek(0, 2)
def test_get_metaData_no_privData(self):
self.file.write(self.compressed_metadata)
reader = DummyReader(self.file)
reader.metaOffset = len(self.fontdata)
reader.metaLength = len(self.compressed_metadata)
reader.metaOrigLength = len(self.xml_metadata)
flavorData = WOFF2FlavorData(reader)
self.assertEqual(self.xml_metadata, flavorData.metaData)
def test_get_privData_no_metaData(self):
self.file.write(self.privData)
reader = DummyReader(self.file)
reader.privOffset = len(self.fontdata)
reader.privLength = len(self.privData)
flavorData = WOFF2FlavorData(reader)
self.assertEqual(self.privData, flavorData.privData)
def test_get_metaData_and_privData(self):
self.file.write(self.compressed_metadata + self.privData)
reader = DummyReader(self.file)
reader.metaOffset = len(self.fontdata)
reader.metaLength = len(self.compressed_metadata)
reader.metaOrigLength = len(self.xml_metadata)
reader.privOffset = reader.metaOffset + reader.metaLength
reader.privLength = len(self.privData)
flavorData = WOFF2FlavorData(reader)
self.assertEqual(self.xml_metadata, flavorData.metaData)
self.assertEqual(self.privData, flavorData.privData)
def test_get_major_minorVersion(self):
reader = DummyReader(self.file)
reader.majorVersion = reader.minorVersion = 1
flavorData = WOFF2FlavorData(reader)
self.assertEqual(flavorData.majorVersion, 1)
self.assertEqual(flavorData.minorVersion, 1)
class WOFF2WriterTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False, flavor="woff2")
cls.font.importXML(OTX)
cls.tags = [t for t in cls.font.keys() if t != 'GlyphOrder']
cls.numTables = len(cls.tags)
cls.file = BytesIO(CFF_WOFF2.getvalue())
cls.file.seek(0, 2)
cls.length = (cls.file.tell() + 3) & ~3
cls.setUpFlavorData()
@classmethod
def setUpFlavorData(cls):
assert os.path.exists(METADATA)
with open(METADATA, 'rb') as f:
cls.xml_metadata = f.read()
cls.compressed_metadata = brotli.compress(cls.xml_metadata, mode=brotli.MODE_TEXT)
cls.privData = bytes(bytearray(random.sample(range(0, 256), 20)))
def setUp(self):
self.file.seek(0)
self.writer = WOFF2Writer(BytesIO(), self.numTables, self.font.sfntVersion)
def test_DSIG_dropped(self):
self.writer['DSIG'] = b"\0"
self.assertEqual(len(self.writer.tables), 0)
self.assertEqual(self.writer.numTables, self.numTables-1)
def test_no_rewrite_table(self):
self.writer['ZZZZ'] = b"\0"
with self.assertRaisesRegex(ttLib.TTLibError, "cannot rewrite"):
self.writer['ZZZZ'] = b"\0"
def test_num_tables(self):
self.writer['ABCD'] = b"\0"
with self.assertRaisesRegex(ttLib.TTLibError, "wrong number of tables"):
self.writer.close()
def test_required_tables(self):
font = ttLib.TTFont(flavor="woff2")
with self.assertRaisesRegex(ttLib.TTLibError, "missing required table"):
font.save(BytesIO())
def test_head_transform_flag(self):
headData = self.font.getTableData('head')
origFlags = byteord(headData[16])
woff2font = ttLib.TTFont(self.file)
newHeadData = woff2font.getTableData('head')
modifiedFlags = byteord(newHeadData[16])
self.assertNotEqual(origFlags, modifiedFlags)
restoredFlags = modifiedFlags & ~0x08 # turn off bit 11
self.assertEqual(origFlags, restoredFlags)
def test_tables_sorted_alphabetically(self):
expected = sorted([t for t in self.tags if t != 'DSIG'])
woff2font = ttLib.TTFont(self.file)
self.assertEqual(expected, list(woff2font.reader.keys()))
def test_checksums(self):
normFile = BytesIO(normalise_font(self.font, padding=4))
normFile.seek(0)
normFont = ttLib.TTFont(normFile, checkChecksums=2)
w2font = ttLib.TTFont(self.file)
# force reconstructing glyf table using 4-byte padding
w2font.reader.padding = 4
for tag in [t for t in self.tags if t != 'DSIG']:
w2data = w2font.reader[tag]
normData = normFont.reader[tag]
if tag == "head":
w2data = w2data[:8] + b'\0\0\0\0' + w2data[12:]
normData = normData[:8] + b'\0\0\0\0' + normData[12:]
w2CheckSum = ttLib.sfnt.calcChecksum(w2data)
normCheckSum = ttLib.sfnt.calcChecksum(normData)
self.assertEqual(w2CheckSum, normCheckSum)
normCheckSumAdjustment = normFont['head'].checkSumAdjustment
self.assertEqual(normCheckSumAdjustment, w2font['head'].checkSumAdjustment)
def test_calcSFNTChecksumsLengthsAndOffsets(self):
normFont = ttLib.TTFont(BytesIO(normalise_font(self.font, padding=4)))
for tag in self.tags:
self.writer[tag] = self.font.getTableData(tag)
self.writer._normaliseGlyfAndLoca(padding=4)
self.writer._setHeadTransformFlag()
self.writer.tables = OrderedDict(sorted(self.writer.tables.items()))
self.writer._calcSFNTChecksumsLengthsAndOffsets()
for tag, entry in normFont.reader.tables.items():
self.assertEqual(entry.offset, self.writer.tables[tag].origOffset)
self.assertEqual(entry.length, self.writer.tables[tag].origLength)
self.assertEqual(entry.checkSum, self.writer.tables[tag].checkSum)
def test_bad_sfntVersion(self):
for i in range(self.numTables):
self.writer[bytechr(65 + i)*4] = b"\0"
self.writer.sfntVersion = 'ZZZZ'
with self.assertRaisesRegex(ttLib.TTLibError, "bad sfntVersion"):
self.writer.close()
def test_calcTotalSize_no_flavorData(self):
expected = self.length
self.writer.file = BytesIO()
for tag in self.tags:
self.writer[tag] = self.font.getTableData(tag)
self.writer.close()
self.assertEqual(expected, self.writer.length)
self.assertEqual(expected, self.writer.file.tell())
def test_calcTotalSize_with_metaData(self):
expected = self.length + len(self.compressed_metadata)
flavorData = self.writer.flavorData = WOFF2FlavorData()
flavorData.metaData = self.xml_metadata
self.writer.file = BytesIO()
for tag in self.tags:
self.writer[tag] = self.font.getTableData(tag)
self.writer.close()
self.assertEqual(expected, self.writer.length)
self.assertEqual(expected, self.writer.file.tell())
def test_calcTotalSize_with_privData(self):
expected = self.length + len(self.privData)
flavorData = self.writer.flavorData = WOFF2FlavorData()
flavorData.privData = self.privData
self.writer.file = BytesIO()
for tag in self.tags:
self.writer[tag] = self.font.getTableData(tag)
self.writer.close()
self.assertEqual(expected, self.writer.length)
self.assertEqual(expected, self.writer.file.tell())
def test_calcTotalSize_with_metaData_and_privData(self):
metaDataLength = (len(self.compressed_metadata) + 3) & ~3
expected = self.length + metaDataLength + len(self.privData)
flavorData = self.writer.flavorData = WOFF2FlavorData()
flavorData.metaData = self.xml_metadata
flavorData.privData = self.privData
self.writer.file = BytesIO()
for tag in self.tags:
self.writer[tag] = self.font.getTableData(tag)
self.writer.close()
self.assertEqual(expected, self.writer.length)
self.assertEqual(expected, self.writer.file.tell())
def test_getVersion(self):
# no version
self.assertEqual((0, 0), self.writer._getVersion())
# version from head.fontRevision
fontRevision = self.font['head'].fontRevision
versionTuple = tuple(int(i) for i in str(fontRevision).split("."))
entry = self.writer.tables['head'] = ttLib.newTable('head')
entry.data = self.font.getTableData('head')
self.assertEqual(versionTuple, self.writer._getVersion())
# version from writer.flavorData
flavorData = self.writer.flavorData = WOFF2FlavorData()
flavorData.majorVersion, flavorData.minorVersion = (10, 11)
self.assertEqual((10, 11), self.writer._getVersion())
class WOFF2WriterTTFTest(WOFF2WriterTest):
@classmethod
def setUpClass(cls):
cls.font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False, flavor="woff2")
cls.font.importXML(TTX)
cls.tags = [t for t in cls.font.keys() if t != 'GlyphOrder']
cls.numTables = len(cls.tags)
cls.file = BytesIO(TT_WOFF2.getvalue())
cls.file.seek(0, 2)
cls.length = (cls.file.tell() + 3) & ~3
cls.setUpFlavorData()
def test_normaliseGlyfAndLoca(self):
normTables = {}
for tag in ('head', 'loca', 'glyf'):
normTables[tag] = normalise_table(self.font, tag, padding=4)
for tag in self.tags:
tableData = self.font.getTableData(tag)
self.writer[tag] = tableData
if tag in normTables:
self.assertNotEqual(tableData, normTables[tag])
self.writer._normaliseGlyfAndLoca(padding=4)
self.writer._setHeadTransformFlag()
for tag in normTables:
self.assertEqual(self.writer.tables[tag].data, normTables[tag])
class WOFF2LocaTableTest(unittest.TestCase):
def setUp(self):
self.font = font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False)
font['head'] = ttLib.newTable('head')
font['loca'] = WOFF2LocaTable()
font['glyf'] = WOFF2GlyfTable()
def test_compile_short_loca(self):
locaTable = self.font['loca']
locaTable.set(list(range(0, 0x20000, 2)))
self.font['glyf'].indexFormat = 0
locaData = locaTable.compile(self.font)
self.assertEqual(len(locaData), 0x20000)
def test_compile_short_loca_overflow(self):
locaTable = self.font['loca']
locaTable.set(list(range(0x20000 + 1)))
self.font['glyf'].indexFormat = 0
with self.assertRaisesRegex(
ttLib.TTLibError, "indexFormat is 0 but local offsets > 0x20000"):
locaTable.compile(self.font)
def test_compile_short_loca_not_multiples_of_2(self):
locaTable = self.font['loca']
locaTable.set([1, 3, 5, 7])
self.font['glyf'].indexFormat = 0
with self.assertRaisesRegex(ttLib.TTLibError, "offsets not multiples of 2"):
locaTable.compile(self.font)
def test_compile_long_loca(self):
locaTable = self.font['loca']
locaTable.set(list(range(0x20001)))
self.font['glyf'].indexFormat = 1
locaData = locaTable.compile(self.font)
self.assertEqual(len(locaData), 0x20001 * 4)
def test_compile_set_indexToLocFormat_0(self):
locaTable = self.font['loca']
# offsets are all multiples of 2 and max length is < 0x10000
locaTable.set(list(range(0, 0x20000, 2)))
locaTable.compile(self.font)
newIndexFormat = self.font['head'].indexToLocFormat
self.assertEqual(0, newIndexFormat)
def test_compile_set_indexToLocFormat_1(self):
locaTable = self.font['loca']
# offsets are not multiples of 2
locaTable.set(list(range(10)))
locaTable.compile(self.font)
newIndexFormat = self.font['head'].indexToLocFormat
self.assertEqual(1, newIndexFormat)
# max length is >= 0x10000
locaTable.set(list(range(0, 0x20000 + 1, 2)))
locaTable.compile(self.font)
newIndexFormat = self.font['head'].indexToLocFormat
self.assertEqual(1, newIndexFormat)
class WOFF2GlyfTableTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False)
font.importXML(TTX)
cls.tables = {}
cls.transformedTags = ('maxp', 'head', 'loca', 'glyf')
for tag in reversed(cls.transformedTags): # compile in inverse order
cls.tables[tag] = font.getTableData(tag)
infile = BytesIO(TT_WOFF2.getvalue())
reader = WOFF2Reader(infile)
cls.transformedGlyfData = reader.tables['glyf'].loadData(
reader.transformBuffer)
cls.glyphOrder = ['.notdef'] + ["glyph%.5d" % i for i in range(1, font['maxp'].numGlyphs)]
def setUp(self):
self.font = font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False)
font.setGlyphOrder(self.glyphOrder)
font['head'] = ttLib.newTable('head')
font['maxp'] = ttLib.newTable('maxp')
font['loca'] = WOFF2LocaTable()
font['glyf'] = WOFF2GlyfTable()
for tag in self.transformedTags:
font[tag].decompile(self.tables[tag], font)
def test_reconstruct_glyf_padded_4(self):
glyfTable = WOFF2GlyfTable()
glyfTable.reconstruct(self.transformedGlyfData, self.font)
glyfTable.padding = 4
data = glyfTable.compile(self.font)
normGlyfData = normalise_table(self.font, 'glyf', glyfTable.padding)
self.assertEqual(normGlyfData, data)
def test_reconstruct_glyf_padded_2(self):
glyfTable = WOFF2GlyfTable()
glyfTable.reconstruct(self.transformedGlyfData, self.font)
glyfTable.padding = 2
data = glyfTable.compile(self.font)
normGlyfData = normalise_table(self.font, 'glyf', glyfTable.padding)
self.assertEqual(normGlyfData, data)
def test_reconstruct_glyf_unpadded(self):
glyfTable = WOFF2GlyfTable()
glyfTable.reconstruct(self.transformedGlyfData, self.font)
data = glyfTable.compile(self.font)
self.assertEqual(self.tables['glyf'], data)
def test_reconstruct_glyf_incorrect_glyphOrder(self):
glyfTable = WOFF2GlyfTable()
badGlyphOrder = self.font.getGlyphOrder()[:-1]
self.font.setGlyphOrder(badGlyphOrder)
with self.assertRaisesRegex(ttLib.TTLibError, "incorrect glyphOrder"):
glyfTable.reconstruct(self.transformedGlyfData, self.font)
def test_reconstruct_glyf_missing_glyphOrder(self):
glyfTable = WOFF2GlyfTable()
del self.font.glyphOrder
numGlyphs = self.font['maxp'].numGlyphs
del self.font['maxp']
glyfTable.reconstruct(self.transformedGlyfData, self.font)
expected = [".notdef"]
expected.extend(["glyph%.5d" % i for i in range(1, numGlyphs)])
self.assertEqual(expected, glyfTable.glyphOrder)
def test_reconstruct_loca_padded_4(self):
locaTable = self.font['loca'] = WOFF2LocaTable()
glyfTable = self.font['glyf'] = WOFF2GlyfTable()
glyfTable.reconstruct(self.transformedGlyfData, self.font)
glyfTable.padding = 4
glyfTable.compile(self.font)
data = locaTable.compile(self.font)
normLocaData = normalise_table(self.font, 'loca', glyfTable.padding)
self.assertEqual(normLocaData, data)
def test_reconstruct_loca_padded_2(self):
locaTable = self.font['loca'] = WOFF2LocaTable()
glyfTable = self.font['glyf'] = WOFF2GlyfTable()
glyfTable.reconstruct(self.transformedGlyfData, self.font)
glyfTable.padding = 2
glyfTable.compile(self.font)
data = locaTable.compile(self.font)
normLocaData = normalise_table(self.font, 'loca', glyfTable.padding)
self.assertEqual(normLocaData, data)
def test_reconstruct_loca_unpadded(self):
locaTable = self.font['loca'] = WOFF2LocaTable()
glyfTable = self.font['glyf'] = WOFF2GlyfTable()
glyfTable.reconstruct(self.transformedGlyfData, self.font)
glyfTable.compile(self.font)
data = locaTable.compile(self.font)
self.assertEqual(self.tables['loca'], data)
def test_reconstruct_glyf_header_not_enough_data(self):
with self.assertRaisesRegex(ttLib.TTLibError, "not enough 'glyf' data"):
WOFF2GlyfTable().reconstruct(b"", self.font)
def test_reconstruct_glyf_table_incorrect_size(self):
msg = "incorrect size of transformed 'glyf'"
with self.assertRaisesRegex(ttLib.TTLibError, msg):
WOFF2GlyfTable().reconstruct(self.transformedGlyfData + b"\x00", self.font)
with self.assertRaisesRegex(ttLib.TTLibError, msg):
WOFF2GlyfTable().reconstruct(self.transformedGlyfData[:-1], self.font)
def test_transform_glyf(self):
glyfTable = self.font['glyf']
data = glyfTable.transform(self.font)
self.assertEqual(self.transformedGlyfData, data)
def test_transform_glyf_incorrect_glyphOrder(self):
glyfTable = self.font['glyf']
badGlyphOrder = self.font.getGlyphOrder()[:-1]
del glyfTable.glyphOrder
self.font.setGlyphOrder(badGlyphOrder)
with self.assertRaisesRegex(ttLib.TTLibError, "incorrect glyphOrder"):
glyfTable.transform(self.font)
glyfTable.glyphOrder = badGlyphOrder
with self.assertRaisesRegex(ttLib.TTLibError, "incorrect glyphOrder"):
glyfTable.transform(self.font)
def test_transform_glyf_missing_glyphOrder(self):
glyfTable = self.font['glyf']
del glyfTable.glyphOrder
del self.font.glyphOrder
numGlyphs = self.font['maxp'].numGlyphs
del self.font['maxp']
glyfTable.transform(self.font)
expected = [".notdef"]
expected.extend(["glyph%.5d" % i for i in range(1, numGlyphs)])
self.assertEqual(expected, glyfTable.glyphOrder)
def test_roundtrip_glyf_reconstruct_and_transform(self):
glyfTable = WOFF2GlyfTable()
glyfTable.reconstruct(self.transformedGlyfData, self.font)
data = glyfTable.transform(self.font)
self.assertEqual(self.transformedGlyfData, data)
def test_roundtrip_glyf_transform_and_reconstruct(self):
glyfTable = self.font['glyf']
transformedData = glyfTable.transform(self.font)
newGlyfTable = WOFF2GlyfTable()
newGlyfTable.reconstruct(transformedData, self.font)
newGlyfTable.padding = 4
reconstructedData = newGlyfTable.compile(self.font)
normGlyfData = normalise_table(self.font, 'glyf', newGlyfTable.padding)
self.assertEqual(normGlyfData, reconstructedData)
if __name__ == "__main__":
unittest.main()
| gpl-3.0 | 2,429,157,727,839,692,300 | 35.369826 | 92 | 0.740869 | false |
FrederichRiver/neutrino | applications/old_code/wasserschlange.py | 1 | 5137 | #!/usr/bin/python3
import torch
import torch.nn as nn
import torch.optim as optim
from mysql.libmysql8_dev import MySQLBase
"""
1.create network
2.create training set
3.training
4.test
5.running
"""
from events import EventStockPrice
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from data_feature import financeData, ma, ma26, MACD
import pywt
input_size = 4
hidden_size = 4*input_size
seq_len = 10
batch_size = 1
def wavelet_nr(df):
    """Denoise a data series with a db4 wavelet by zeroing the two finest
    levels of detail coefficients.
    :df: input series
    :returns: the reconstructed (denoised) series
    """
    db4 = pywt.Wavelet('db4')
    if df is not None:
        coeffs = pywt.wavedec(df, db4)
        coeffs[-1] = coeffs[-1] * 0
        coeffs[-2] = coeffs[-2] * 0
        meta = pywt.waverec(coeffs, db4)
        return meta
class LSTM101(nn.Module):
def __init__(self, *args, **kwargs):
super(LSTM101, self).__init__()
self.lstm = nn.LSTM(input_size,
hidden_size,
batch_first=True)
self.lstm2 = nn.LSTM(hidden_size,
hidden_size,
batch_first=True)
self.lstm3 = nn.LSTM(hidden_size,
hidden_size,
batch_first=True)
w1 = torch.zeros(1, batch_size, hidden_size)
h1 = torch.zeros(1, batch_size, hidden_size)
w2 = torch.zeros(1, batch_size, hidden_size)
h2 = torch.zeros(1, batch_size, hidden_size)
w3 = torch.zeros(1, batch_size, hidden_size)
h3 = torch.zeros(1, batch_size, hidden_size)
c1 = torch.zeros(1, batch_size, hidden_size)
c2 = torch.zeros(1, batch_size, hidden_size)
c3 = torch.zeros(1, batch_size, hidden_size)
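        # h*/c* are the initial hidden and cell states fed to each nn.LSTM layer;
        # nn.LSTM expects them shaped (num_layers, batch, hidden_size), which
        # here is (1, batch_size, hidden_size)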
nn.init.xavier_uniform_(w1, 1)
nn.init.xavier_uniform_(h1, 1)
nn.init.xavier_uniform_(w2, 1)
nn.init.xavier_uniform_(h2, 1)
nn.init.xavier_uniform_(w3, 1)
nn.init.xavier_uniform_(h3, 1)
self.predict = nn.Linear(hidden_size, 1)
self.hidden = (h1, c1)
self.hidden2 = (h2, c2)
self.hidden3 = (h3, c3)
def forward(self, x):
_out, _ = self.lstm(x, self.hidden)
#_out2, _ = self.lstm2(_out, self.hidden2)
#_out3, _ = self.lstm3(_out2, self.hidden3)
_out4 = self.predict(_out)
return _out4
class trainSet2(Dataset):
def __init__(self, x):
self.data = []
self.label = []
self.seq_len = seq_len
self._run(x)
def __getitem__(self, i):
return self.data[i], self.label[i]
def __len__(self):
return len(self.data)
def _run(self, x):
n = int(len(x)/(self.seq_len+1))
for i in range(n):
t = i * self.seq_len
self.data.append(x[t:t+self.seq_len-1, :4])
self.label.append(x[t+1:t+self.seq_len, -1])
# print(self.data[0],self.label[0])
return self.data, self.label
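# Sketch of the windowing above (with the defaults seq_len = 10 and 4 features):
# every sample pairs a (seq_len - 1, 4) = (9, 4) block of feature rows with a
# (9,) vector taken from the last column ('result', i.e. the shifted close price).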
def get_stock_data(stock_code):
"""TODO: Docstring for get_stock_data.
:returns: TODO
"""
n = input_size
fd = financeData()
prices = fd._get_stock_data(stock_code,
'close_price, open_price, high_price, low_price')
prices = ma(prices, 7)
prices = ma26(prices)
prices = MACD(prices)
prices['result'] = prices['close_price'].shift(-1)
# print(prices.head)
return prices
def model_create(first=True):
if first:
model = LSTM101(input_size, hidden_size, batch_first=True)
else:
model = torch.load('lstm.pkl')
return model
def data_get():
"""TODO: Docstring for data_get.
:returns: TODO
"""
prices = get_stock_data('SH600001')
prices = prices.as_matrix()
t = torch.from_numpy(prices).float()
return t
def training(model, t):
import time
for param in model.parameters():
param.requires_grad = True
model.train(mode=True)
loss_function = nn.MSELoss()
train_set = trainSet2(t)
train_data = DataLoader(train_set,
batch_size=batch_size,
shuffle=True,
drop_last=True)
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
EPOCH = 20
for epoch in range(EPOCH):
for step, (x, y) in enumerate(train_data):
result = model(x)
            optimizer.zero_grad()
loss = loss_function(torch.squeeze(result), torch.squeeze(y))
loss.backward()
optimizer.step()
print("EPOCH:{0}".format(epoch), loss,
time.strftime('%H:%M:%S', time.localtime()))
torch.save(model, 'lstm.pkl')
torch.save(model, 'lstm.pkl')
return model
def testing(model, t):
"""TODO: Docstring for testing.
:returns: TODO
"""
    model.eval()
test_set = trainSet2(t)
test_data = DataLoader(test_set, batch_size)
for x, y in test_data:
result = model(x)
print(result)
print(y)
break
if __name__ == '__main__':
model = model_create(first=True)
t = data_get()
training(model, t)
testing(model, t)
| bsd-3-clause | 2,432,283,490,311,748,600 | 26.470588 | 81 | 0.559081 | false |
srkiyengar/NewGripper | src/screen_print.py | 1 | 1424 | __author__ = 'srkiyengar'
import pygame
#Acknowledgement - code modified from http://www.pygame.org/docs/ sample
# Define some colors
BLACK = ( 0, 0, 0)
WHITE = ( 255, 255, 255)
RED = (255, 0, 0)
# This is a simple class that will help us print to the screen
# It has nothing to do with the joysticks, just outputting the
# information on a screen (pygame.display)
class TextPrint:
def __init__(self):
self.reset()
self.font = pygame.font.Font(None, 20)
def Screenprint(self, screen, textString):
textBitmap = self.font.render(textString, True, BLACK)
screen.blit(textBitmap, [self.x, self.y])
self.y += self.line_height
def reset(self):
self.x = 10
self.y = 10
self.line_height = 15
def indent(self):
self.x += 10
def unindent(self):
self.x -= 10
def Yspace(self):
self.y += 10
class CounterPrint:
def __init__(self):
self.reset()
self.font = pygame.font.Font(None, 100)
def Screenprint(self, screen, textString):
textBitmap = self.font.render(textString, True, RED)
screen.blit(textBitmap, [self.x, self.y])
def reset(self):
self.x = 10
self.y = 350
self.line_height = 65
def indent(self):
self.x += 10
def unindent(self):
self.x -= 10
def Yspace(self):
self.y += 100
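# Minimal usage sketch (assumes pygame.init() has been called and 'screen' is a
# pygame display surface):
#   printer = TextPrint()
#   printer.reset()
#   printer.Screenprint(screen, "Hello")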
| mit | 209,540,489,297,353,800 | 20.253731 | 72 | 0.582163 | false |
cvium/Flexget | flexget/plugins/input/rss.py | 1 | 21300 | from __future__ import unicode_literals, division, absolute_import
import os
import logging
import urlparse
import xml.sax
import posixpath
import httplib
from datetime import datetime
import dateutil.parser
import feedparser
from requests import RequestException
from flexget import plugin
from flexget.config_schema import one_or_more
from flexget.entry import Entry
from flexget.event import event
from flexget.utils.cached_input import cached
from flexget.utils.tools import decode_html
from flexget.utils.pathscrub import pathscrub
log = logging.getLogger('rss')
feedparser.registerDateHandler(lambda date_string: dateutil.parser.parse(date_string).timetuple())
def fp_field_name(name):
"""Translates literal field name to the sanitized one feedparser will use."""
return name.replace(':', '_').lower()
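# e.g. fp_field_name('dc:creator') -> 'dc_creator', mirroring the attribute
# names feedparser generates for namespaced elements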
class InputRSS(object):
"""
Parses RSS feed.
    Hassle-free configuration for public rss feeds::
rss: <url>
Configuration with basic http authentication::
rss:
url: <url>
username: <name>
password: <password>
Advanced usages:
You may wish to clean up the entry by stripping out all non-ascii characters.
This can be done by setting ascii value to yes.
Example::
rss:
url: <url>
ascii: yes
    In case the RSS feed uses some nonstandard field for urls and automatic detection fails,
    you can configure the plugin to use the url from any feedparser entry attribute.
Example::
rss:
url: <url>
link: guid
If you want to keep information in another rss field attached to the flexget entry,
you can use the other_fields option.
Example::
rss:
url: <url>
other_fields: [date]
    You can disable a few possibly annoying warnings by setting silent value to
yes on feeds where there are frequently invalid items.
Example::
rss:
url: <url>
silent: yes
You can group all the links of an item, to make the download plugin tolerant
to broken urls: it will try to download each url until one works.
Links are enclosures plus item fields given by the link value, in that order.
The value to set is "group_links".
Example::
rss:
url: <url>
group_links: yes
"""
schema = {
'type': ['string', 'object'],
# Simple form, just url or file
'anyOf': [{'format': 'url'}, {'format': 'file'}],
# Advanced form, with options
'properties': {
'url': {'type': 'string', 'anyOf': [{'format': 'url'}, {'format': 'file'}]},
'username': {'type': 'string'},
'password': {'type': 'string'},
'title': {'type': 'string'},
'link': one_or_more({'type': 'string'}),
'silent': {'type': 'boolean', 'default': False},
'ascii': {'type': 'boolean', 'default': False},
'filename': {'type': 'boolean'},
'group_links': {'type': 'boolean', 'default': False},
'all_entries': {'type': 'boolean', 'default': True},
'other_fields': {'type': 'array', 'items': {
# Items can be a string, or a dict with a string value
'type': ['string', 'object'], 'additionalProperties': {'type': 'string'}
}}
},
'required': ['url'],
'additionalProperties': False
}
def build_config(self, config):
"""Set default values to config"""
if isinstance(config, basestring):
config = {'url': config}
else:
# Make a copy so that original config is not modified
config = dict(config)
# set the default link value to 'auto'
config.setdefault('link', 'auto')
        # Convert any field names from the config to the format feedparser will use for 'link', 'title' and 'other_fields'
if config['link'] != 'auto':
if not isinstance(config['link'], list):
config['link'] = [config['link']]
config['link'] = map(fp_field_name, config['link'])
config.setdefault('title', 'title')
config['title'] = fp_field_name(config['title'])
if config.get('other_fields'):
other_fields = []
for item in config['other_fields']:
if isinstance(item, basestring):
key, val = item, item
else:
key, val = item.items()[0]
other_fields.append({fp_field_name(key): val.lower()})
config['other_fields'] = other_fields
# set default value for group_links as deactivated
config.setdefault('group_links', False)
# set default for all_entries
config.setdefault('all_entries', True)
return config
def process_invalid_content(self, task, data, url):
"""If feedparser reports error, save the received data and log error."""
if data is None:
log.critical('Received empty page - no content')
return
ext = 'xml'
if b'<html>' in data.lower():
log.critical('Received content is HTML page, not an RSS feed')
ext = 'html'
if b'login' in data.lower() or b'username' in data.lower():
log.critical('Received content looks a bit like login page')
if b'error' in data.lower():
log.critical('Received content looks a bit like error page')
received = os.path.join(task.manager.config_base, 'received')
if not os.path.isdir(received):
os.mkdir(received)
filename = task.name
sourcename = urlparse.urlparse(url).netloc
if sourcename:
filename += '-' + sourcename
filename = pathscrub(filename, filename=True)
filepath = os.path.join(received, '%s.%s' % (filename, ext))
with open(filepath, 'w') as f:
f.write(data)
log.critical('I have saved the invalid content to %s for you to view', filepath)
def add_enclosure_info(self, entry, enclosure, filename=True, multiple=False):
"""Stores information from an rss enclosure into an Entry."""
entry['url'] = enclosure['href']
# get optional meta-data
if 'length' in enclosure:
try:
entry['size'] = int(enclosure['length'])
except:
entry['size'] = 0
if 'type' in enclosure:
entry['type'] = enclosure['type']
# TODO: better and perhaps join/in download plugin?
# Parse filename from enclosure url
basename = posixpath.basename(urlparse.urlsplit(entry['url']).path)
# If enclosure has size OR there are multiple enclosures use filename from url
if (entry.get('size') or multiple and basename) and filename:
entry['filename'] = basename
log.trace('filename `%s` from enclosure', entry['filename'])
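    # A feedparser enclosure is typically a dict along the lines of
    # {'href': 'http://example.com/file.torrent', 'length': '12345',
    #  'type': 'application/x-bittorrent'} (the values here are illustrative)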
@cached('rss')
@plugin.internet(log)
def on_task_input(self, task, config):
config = self.build_config(config)
log.debug('Requesting task `%s` url `%s`', task.name, config['url'])
# Used to identify which etag/modified to use
url_hash = str(hash(config['url']))
# set etag and last modified headers if config has not changed since
# last run and if caching wasn't disabled with --no-cache argument.
all_entries = (config['all_entries'] or task.config_modified or
task.options.nocache or task.options.retry)
headers = {}
if not all_entries:
etag = task.simple_persistence.get('%s_etag' % url_hash, None)
if etag:
log.debug('Sending etag %s for task %s', etag, task.name)
headers['If-None-Match'] = etag
modified = task.simple_persistence.get('%s_modified' % url_hash, None)
if modified:
if not isinstance(modified, basestring):
log.debug('Invalid date was stored for last modified time.')
else:
headers['If-Modified-Since'] = modified
log.debug('Sending last-modified %s for task %s', headers['If-Modified-Since'], task.name)
# Get the feed content
if config['url'].startswith(('http', 'https', 'ftp', 'file')):
# Get feed using requests library
auth = None
if 'username' in config and 'password' in config:
auth = (config['username'], config['password'])
try:
# Use the raw response so feedparser can read the headers and status values
response = task.requests.get(config['url'], timeout=60, headers=headers, raise_status=False, auth=auth)
content = response.content
except RequestException as e:
raise plugin.PluginError('Unable to download the RSS for task %s (%s): %s' %
(task.name, config['url'], e))
if config.get('ascii'):
# convert content to ascii (cleanup), can also help with parsing problems on malformed feeds
content = response.text.encode('ascii', 'ignore')
# status checks
status = response.status_code
if status == 304:
log.verbose('%s hasn\'t changed since last run. Not creating entries.', config['url'])
# Let details plugin know that it is ok if this feed doesn't produce any entries
task.no_entries_ok = True
return []
elif status == 401:
raise plugin.PluginError('Authentication needed for task %s (%s): %s' %
(task.name, config['url'], response.headers['www-authenticate']), log)
elif status == 404:
raise plugin.PluginError('RSS Feed %s (%s) not found' % (task.name, config['url']), log)
elif status == 500:
raise plugin.PluginError('Internal server exception on task %s (%s)' % (task.name, config['url']), log)
elif status != 200:
raise plugin.PluginError('HTTP error %s received from %s' % (status, config['url']), log)
# update etag and last modified
if not config['all_entries']:
etag = response.headers.get('etag')
if etag:
task.simple_persistence['%s_etag' % url_hash] = etag
log.debug('etag %s saved for task %s', etag, task.name)
if response.headers.get('last-modified'):
modified = response.headers['last-modified']
task.simple_persistence['%s_modified' % url_hash] = modified
log.debug('last modified %s saved for task %s', modified, task.name)
else:
# This is a file, open it
with open(config['url'], 'rb') as f:
content = f.read()
if config.get('ascii'):
# Just assuming utf-8 file in this case
content = content.decode('utf-8', 'ignore').encode('ascii', 'ignore')
if not content:
            log.error('No data received for rss feed.')
return
try:
rss = feedparser.parse(content)
except LookupError as e:
raise plugin.PluginError('Unable to parse the RSS (from %s): %s' % (config['url'], e))
# check for bozo
ex = rss.get('bozo_exception', False)
if ex or rss.get('bozo'):
if rss.entries:
msg = 'Bozo error %s while parsing feed, but entries were produced, ignoring the error.' % type(ex)
if config.get('silent', False):
log.debug(msg)
else:
log.verbose(msg)
else:
if isinstance(ex, feedparser.NonXMLContentType):
# see: http://www.feedparser.org/docs/character-encoding.html#advanced.encoding.nonxml
log.debug('ignoring feedparser.NonXMLContentType')
elif isinstance(ex, feedparser.CharacterEncodingOverride):
# see: ticket 88
log.debug('ignoring feedparser.CharacterEncodingOverride')
elif isinstance(ex, UnicodeEncodeError):
raise plugin.PluginError('Feed has UnicodeEncodeError while parsing...')
elif isinstance(ex, (xml.sax._exceptions.SAXParseException, xml.sax._exceptions.SAXException)):
                    # save invalid data for review, this is a bit ugly but users seem to be really confused when
                    # html pages (login pages) are received
self.process_invalid_content(task, content, config['url'])
if task.options.debug:
log.error('bozo error parsing rss: %s' % ex)
raise plugin.PluginError('Received invalid RSS content from task %s (%s)' % (task.name,
config['url']))
elif isinstance(ex, httplib.BadStatusLine) or isinstance(ex, IOError):
raise ex # let the @internet decorator handle
else:
# all other bozo errors
self.process_invalid_content(task, content, config['url'])
raise plugin.PluginError('Unhandled bozo_exception. Type: %s (task: %s)' %
(ex.__class__.__name__, task.name), log)
log.debug('encoding %s', rss.encoding)
last_entry_id = ''
if not all_entries:
# Test to make sure entries are in descending order
if rss.entries and rss.entries[0].get('published_parsed') and rss.entries[-1].get('published_parsed'):
if rss.entries[0]['published_parsed'] < rss.entries[-1]['published_parsed']:
# Sort them if they are not
rss.entries.sort(key=lambda x: x['published_parsed'], reverse=True)
last_entry_id = task.simple_persistence.get('%s_last_entry' % url_hash)
# new entries to be created
entries = []
# Dict with fields to grab mapping from rss field name to FlexGet field name
fields = {'guid': 'guid',
'author': 'author',
'description': 'description',
'infohash': 'torrent_info_hash'}
# extend the dict of fields to grab with other_fields list in config
for field_map in config.get('other_fields', []):
fields.update(field_map)
# field name for url can be configured by setting link.
# default value is auto but for example guid is used in some feeds
ignored = 0
for entry in rss.entries:
# Check if title field is overridden in config
title_field = config.get('title', 'title')
# ignore entries without title
if not entry.get(title_field):
log.debug('skipping entry without title')
ignored += 1
continue
# Set the title from the source field
entry.title = entry[title_field]
# Check we haven't already processed this entry in a previous run
if last_entry_id == entry.title + entry.get('guid', ''):
log.verbose('Not processing entries from last run.')
# Let details plugin know that it is ok if this task doesn't produce any entries
task.no_entries_ok = True
break
# remove annoying zero width spaces
entry.title = entry.title.replace(u'\u200B', u'')
# helper
# TODO: confusing? refactor into class member ...
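            # add_entry copies the title, any mapped RSS fields, the publish date and basic-auth
            # credentials onto the given Entry, then appends it to `entries`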
def add_entry(ea):
ea['title'] = entry.title
# fields dict may be modified during this loop, so loop over a copy (fields.items())
for rss_field, flexget_field in fields.items():
if rss_field in entry:
if not isinstance(getattr(entry, rss_field), basestring):
# Error if this field is not a string
log.error('Cannot grab non text field `%s` from rss.', rss_field)
# Remove field from list of fields to avoid repeated error
del fields[rss_field]
continue
if not getattr(entry, rss_field):
log.debug('Not grabbing blank field %s from rss for %s.', rss_field, ea['title'])
continue
try:
ea[flexget_field] = decode_html(entry[rss_field])
if rss_field in config.get('other_fields', []):
# Print a debug message for custom added fields
log.debug('Field `%s` set to `%s` for `%s`', rss_field, ea[rss_field], ea['title'])
except UnicodeDecodeError:
log.warning('Failed to decode entry `%s` field `%s`', ea['title'], rss_field)
# Also grab pubdate if available
if hasattr(entry, 'published_parsed') and entry.published_parsed:
ea['rss_pubdate'] = datetime(*entry.published_parsed[:6])
# store basic auth info
if 'username' in config and 'password' in config:
ea['download_auth'] = (config['username'], config['password'])
entries.append(ea)
# create from enclosures if present
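            # each enclosure normally becomes its own Entry; with `group_links` the enclosure urls
            # are merged into the main entry's `urls` list instead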
enclosures = entry.get('enclosures', [])
if len(enclosures) > 1 and not config.get('group_links'):
# There is more than 1 enclosure, create an Entry for each of them
log.debug('adding %i entries from enclosures', len(enclosures))
for enclosure in enclosures:
if 'href' not in enclosure:
log.debug('RSS-entry `%s` enclosure does not have URL', entry.title)
continue
# There is a valid url for this enclosure, create an Entry for it
ee = Entry()
self.add_enclosure_info(ee, enclosure, config.get('filename', True), True)
add_entry(ee)
# If we created entries for enclosures, we should not create an Entry for the main rss item
continue
# create flexget entry
e = Entry()
if not isinstance(config.get('link'), list):
# If the link field is not a list, search for first valid url
if config['link'] == 'auto':
# Auto mode, check for a single enclosure url first
if len(entry.get('enclosures', [])) == 1 and entry['enclosures'][0].get('href'):
self.add_enclosure_info(e, entry['enclosures'][0], config.get('filename', True))
else:
# If there is no enclosure url, check link, then guid field for urls
for field in ['link', 'guid']:
if entry.get(field):
e['url'] = entry[field]
break
else:
if entry.get(config['link']):
e['url'] = entry[config['link']]
else:
# If link was passed as a list, we create a list of urls
for field in config['link']:
if entry.get(field):
e.setdefault('url', entry[field])
if entry[field] not in e.setdefault('urls', []):
e['urls'].append(entry[field])
if config.get('group_links'):
# Append a list of urls from enclosures to the urls field if group_links is enabled
e.setdefault('urls', [e['url']]).extend(
[enc.href for enc in entry.get('enclosures', []) if enc.get('href') not in e['urls']])
if not e.get('url'):
log.debug('%s does not have link (%s) or enclosure', entry.title, config['link'])
ignored += 1
continue
add_entry(e)
# Save last spot in rss
if rss.entries:
log.debug('Saving location in rss feed.')
try:
task.simple_persistence['%s_last_entry' % url_hash] = (rss.entries[0].title +
rss.entries[0].get('guid', ''))
except AttributeError:
log.debug('rss feed location saving skipped: no title information in first entry')
if ignored:
if not config.get('silent'):
log.warning('Skipped %s RSS-entries without required information (title, link or enclosures)', ignored)
return entries
@event('plugin.register')
def register_plugin():
plugin.register(InputRSS, 'rss', api_ver=2)
| mit | -5,759,711,510,930,233,000 | 43.747899 | 119 | 0.546432 | false |
open-aerospace/barrowman | docs/conf.py | 1 | 8416 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# barrowman documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import barrowman
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'barrowman'
copyright = u'2016, Nathan Bergey et al.'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = barrowman.__version__
# The full version, including alpha/beta/rc tags.
release = barrowman.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'theme'
html_theme_path = ['.'] # make sphinx search for themes in current dir
html_add_permalinks = ""
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'barrowmandoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'barrowman.tex',
u'barrowman Documentation',
u'Nathan Bergey et al.', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'barrowman',
u'barrowman Documentation',
[u'Nathan Bergey et al.'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'barrowman',
u'barrowman Documentation',
u'Nathan Bergey et al.',
'barrowman',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| gpl-3.0 | 7,253,371,041,730,075,000 | 29.715328 | 75 | 0.704373 | false |