repo_name stringlengths 5-92 | path stringlengths 4-232 | copies stringclasses 19 values | size stringlengths 4-7 | content stringlengths 721-1.04M | license stringclasses 15 values | hash int64 -9,223,277,421,539,062,000 to 9,223,102,107B | line_mean float64 6.51-99.9 | line_max int64 15-997 | alpha_frac float64 0.25-0.97 | autogenerated bool 1 class |
---|---|---|---|---|---|---|---|---|---|---|
n3wb13/OpenNfrGui-5.0-1 | lib/python/Plugins/Extensions/NFR4XBoot/ubi_reader/ui/common.py | 5 | 3812 |
import os
from ubi_io import leb_virtual_file
from ubifs import ubifs, walk, output
from ubifs.defines import PRINT_UBIFS_KEY_HASH, PRINT_UBIFS_COMPR
from ubi.defines import PRINT_VOL_TYPE_LIST, UBI_VTBL_AUTORESIZE_FLG
output_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), 'output')
def extract_files(ubifs, out_path, perms = False):
try:
inodes = {}
walk.index(ubifs, ubifs.master_node.root_lnum, ubifs.master_node.root_offs, inodes)
for dent in inodes[1]['dent']:
output.dents(ubifs, inodes, dent, out_path, perms)
except Exception as e:
import traceback
ubifs.log.write('%s' % e)
traceback.print_exc()
def get_ubi_params(ubi):
ubi_flags = {'min_io_size': '-m',
'max_bud_bytes': '-j',
'leb_size': '-e',
'default_compr': '-x',
'sub_page_size': '-s',
'fanout': '-f',
'key_hash': '-k',
'orph_lebs': '-p',
'log_lebs': '-l',
'max_leb_cnt': '-c',
'peb_size': '-p',
'sub_page_size': '-s',
'vid_hdr_offset': '-O',
'version': '-x',
'image_seq': '-Q',
'alignment': '-a',
'vol_id': '-n',
'name': '-N'}
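# The mapping above pairs UBI/UBIFS parameter names with what appear to be
# mkfs.ubifs/ubinize-style command-line switches. Note that Python keeps only
# the last occurrence of a duplicated key (e.g. 'sub_page_size'), and several
# distinct parameters share the same switch ('-p', '-s', '-x').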
ubi_params = {}
ubi_args = {}
ini_params = {}
for image in ubi.images:
img_seq = image.image_seq
ubi_params[img_seq] = {}
ubi_args[img_seq] = {}
ini_params[img_seq] = {}
for volume in image.volumes:
ubi_args[img_seq][volume] = {}
ini_params[img_seq][volume] = {}
ini_params[img_seq][volume]['vol_type'] = PRINT_VOL_TYPE_LIST[image.volumes[volume].vol_rec.vol_type]
if image.volumes[volume].vol_rec.flags == UBI_VTBL_AUTORESIZE_FLG:
ini_params[img_seq][volume]['vol_flags'] = 'autoresize'
else:
ini_params[img_seq][volume]['vol_flags'] = image.volumes[volume].vol_rec.flags
ini_params[img_seq][volume]['vol_id'] = image.volumes[volume].vol_id
ini_params[img_seq][volume]['vol_name'] = image.volumes[volume].name.rstrip('\x00')
ini_params[img_seq][volume]['vol_alignment'] = image.volumes[volume].vol_rec.alignment
ini_params[img_seq][volume]['vol_size'] = image.volumes[volume].vol_rec.reserved_pebs * ubi.leb_size
ufsfile = leb_virtual_file(ubi, image.volumes[volume])
uubifs = ubifs(ufsfile)
for key, value in uubifs.superblock_node:
if key == 'key_hash':
value = PRINT_UBIFS_KEY_HASH[value]
elif key == 'default_compr':
value = PRINT_UBIFS_COMPR[value]
if key in ubi_flags:
ubi_args[img_seq][volume][key] = value
for key, value in image.volumes[volume].vol_rec:
if key == 'name':
value = value.rstrip('\x00')
if key in ubi_flags:
ubi_args[img_seq][volume][key] = value
ubi_args[img_seq][volume]['version'] = image.version
ubi_args[img_seq][volume]['vid_hdr_offset'] = image.vid_hdr_offset
ubi_args[img_seq][volume]['sub_page_size'] = ubi_args[img_seq][volume]['vid_hdr_offset']
ubi_args[img_seq][volume]['image_seq'] = image.image_seq
ubi_args[img_seq][volume]['peb_size'] = ubi.peb_size
ubi_args[img_seq][volume]['vol_id'] = image.volumes[volume].vol_id
ubi_params[img_seq][volume] = {'flags': ubi_flags,
'args': ubi_args[img_seq][volume],
'ini': ini_params[img_seq][volume]}
return ubi_params
| gpl-2.0 | 6,523,534,966,752,915,000 | 41.83908 | 113 | 0.543284 | false |
rosenvladimirov/addons | sale_partial_invoicing/tests/test_sale_partial_invoicing.py | 1 | 14964 |
# -*- coding: utf-8 -*-
#
##############################################################################
#
# Authors: Adrien Peiffer
# Copyright (c) 2015 Acsone SA/NV (http://www.acsone.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tests import common
from openerp import workflow
from openerp.tools.safe_eval import safe_eval
class testSalePartialInvoicing(common.TransactionCase):
def setUp(self):
super(testSalePartialInvoicing, self).setUp()
# tests are called before register_hook
self.env['ir.rule']._register_hook()
self.context = self.env['res.users'].context_get()
self.so_obj = self.env['sale.order']
self.so_line_obj = self.env['sale.order.line']
self.inv_line_obj = self.env['account.invoice.line']
def common_test(self):
self.sale_order = self.env.ref('sale.sale_order_1')
# I change invoice method to 'based on sale order line'
self.sale_order.invoice_method = 'manual'
# I change the quantity on the first line to 10
self.sale_order.order_line[0].product_qty = 10
# I confirm the sale order
workflow.trg_validate(self.uid, 'sale.order',
self.sale_order.id, 'sale_confirm',
self.cr)
# I check if the sale order is confirmed
self.sale_order.invalidate_cache()
self.assertEqual(self.sale_order.state, 'approved',
"Sale order's state isn't correct")
# I get lines to invoiced
sale_lines = self.sale_order.order_line
# I get menu item action
menu = self.env.ref('sale.sale_line_form_action2')
self.domain = safe_eval(menu.domain)
self.domain.extend([('order_id', '=', self.sale_order.id),
('fully_invoiced', '=', False)])
sale_line_domain = self.so_line_obj.search(self.domain)
# I check if all lines is on the view's result
self.assertEqual(sale_line_domain, sale_lines,
"lines aren't on the menu")
def test_invoice_complete_po(self):
self.common_test()
# I create the wizard to invoiced lines
ctx = self.context.copy()
ctx.update({'active_ids': self.sale_order.order_line.ids})
wizard = self.env['sale.order.line_invoice']\
.with_context(ctx).create({})
# I click on make invoice button
wizard.with_context(ctx).makeInvoices()
# I check if all lines are invoiced
invoice_lines = self.inv_line_obj\
.search([('sale_line_id', 'in', self.sale_order.ids)])
self.assertEqual(len(invoice_lines), len(self.sale_order),
"All of sale lines aren't invoiced")
# I check if the lines are no longer on the menu
sale_line_domain = self.so_line_obj.search(self.domain)
self.assertEqual(len(sale_line_domain), 0,
"Lines are still present")
def test_invoice_partial_so(self):
self.common_test()
line_to_invoice = self.sale_order.order_line[0]
ctx = self.context.copy()
ctx.update({'active_ids': line_to_invoice.id})
wizard = self.env['sale.order.line_invoice']\
.with_context(ctx).create({})
# I click on make invoice button
wizard.with_context(ctx).makeInvoices()
# I check if only the line that I chose is invoiced
invoice_lines = self.inv_line_obj\
.search([('sale_line_id', 'in', self.sale_order.ids)])
self.assertEqual(len(invoice_lines), 1,
"Number of invoiced lines isn't correct")
def test_invoice_partial_line(self):
self.common_test()
quantity_to_invoiced = 5
line_to_invoice = self.sale_order.order_line[0]
ctx = self.context.copy()
ctx.update({'active_ids': line_to_invoice.id})
wizard = self.env['sale.order.line_invoice']\
.with_context(ctx).create({})
# I change the quantity on the line that will be invoiced
wizard.line_ids[0].invoiced_qty = quantity_to_invoiced
# I click on make invoice button
wizard.with_context(ctx).makeInvoices()
# I check if only the line that I chose is invoiced
invoice_line = self.inv_line_obj\
.search([('sale_line_id', 'in', self.sale_order.ids)])
self.assertEqual(len(invoice_line), 1,
"Number of invoiced lines isn't correct")
# I check if the quantity on the invoice line is correct
self.assertEqual(invoice_line.quantity, quantity_to_invoiced,
"Quantity on invoice line isn't correct")
# I check invoiced quantity on the sale order line
self.assertEqual(line_to_invoice.invoiced_qty, invoice_line.quantity,
"Invoiced quantity isn't the same as on invoice line")
# I change the quantity on the invoice line
invoice_line.write({'quantity': quantity_to_invoiced - 1})
# I check invoiced quantity on the sale order line
self.assertEqual(line_to_invoice.invoiced_qty, invoice_line.quantity,
"Invoiced quantity isn't the same as on invoice line")
# I remove invoice line
invoice_line.unlink()
# I check invoiced quantity on the sale order line
self.assertEqual(line_to_invoice.invoiced_qty, 0,
"Invoiced quantity isn't the same as on invoice line")
def test_invoiced_status(self):
self.common_test()
# I select a line to do a partial invoicing
line_to_partial = self.sale_order.order_line[0]
# I create a wizard to invoiced all lines
ctx = self.context.copy()
ctx.update({'active_ids': self.sale_order.order_line.ids})
wizard = self.env['sale.order.line_invoice']\
.with_context(ctx).create({})
# I find the prepare line to invoiced
prepare_line = self.env['sale.order.line_invoice.line']\
.search([('so_line_id', '=', line_to_partial.id),
('wizard_id', '=', wizard.id)])
# i check if I found a line
self.assertEqual(len(prepare_line), 1,
"Number of prepare line isn't correct")
# I change the quantity to invoice on this line
prepare_line.invoiced_qty = line_to_partial.product_qty - 1
# I click on make invoice button
wizard.with_context(ctx).makeInvoices()
# I check if only the line that I chose is invoiced
invoice_lines = self.inv_line_obj\
.search([('sale_line_id', 'in', self.sale_order.ids)])
self.assertEqual(len(invoice_lines), 1,
"Number of invoiced lines isn't correct")
# I check if all lines are invoiced
invoice_lines = self.inv_line_obj\
.search([('sale_line_id', 'in', self.sale_order.ids)])
self.assertEqual(len(invoice_lines), len(self.sale_order),
"All of sale lines aren't invoiced")
# I check if the partial line is still present
sale_line_domain = self.so_line_obj.search(self.domain)
self.assertEqual(len(sale_line_domain), 1,
"Lines are still present")
self.assertEqual(sale_line_domain, line_to_partial,
"line on the menu isn't the same than invoiced line")
# I get the created invoice
invoice = invoice_lines[0].invoice_id
# I validate the invoice
workflow.trg_validate(self.uid, 'account.invoice',
invoice.id, 'invoice_open',
self.cr)
# I check invoice's state
self.assertEqual(invoice.state, 'open',
"invoice's state isn't correct")
# I check if the partial line isn't flag as invoiced
self.assertFalse(line_to_partial.invoiced)
# I check if the sale order isn't flag as invoiced
self.assertFalse(self.sale_order.invoiced)
# I invoice the last line
ctx = self.context.copy()
ctx.update({'active_ids': line_to_partial.id})
wizard = self.env['sale.order.line_invoice']\
.with_context(ctx).create({})
# I click on make invoice button
wizard.with_context(ctx).makeInvoices()
# I check if the lines are no longer on the menu
sale_line_domain = self.so_line_obj.search(self.domain)
self.assertEqual(len(sale_line_domain), 0,
"Lines are still present")
# I check if the line is invoiced
invoice_lines = self.inv_line_obj\
.search([('sale_line_id', '=', line_to_partial.id),
('invoice_id.state', '=', 'draft')])
self.assertEqual(len(invoice_lines), 1,
"Number of invoiced lines isn't correct")
# I get the created invoice
invoice = invoice_lines.invoice_id
# I validate the invoice
workflow.trg_validate(self.uid, 'account.invoice',
invoice.id, 'invoice_open',
self.cr)
# I check invoice's state
self.assertEqual(invoice.state, 'open',
"invoice's state isn't correct")
# I check if the partial line is flag as invoiced
self.assertTrue(line_to_partial.invoiced)
# I check if the sale order is flag as invoiced
self.assertTrue(self.sale_order.invoiced)
def test_cancel_quantity(self):
self.common_test()
quantity_to_invoice = 4
line_to_invoice = self.sale_order.order_line[0]
ctx = self.context.copy()
ctx.update({'active_ids': line_to_invoice.id})
wizard = self.env['sale.order.line_invoice']\
.with_context(ctx).create({})
# I change the quantity on the line that will be invoiced
wizard.line_ids[0].invoiced_qty = quantity_to_invoice
# I click on make invoice button
wizard.with_context(ctx).makeInvoices()
# I cancel quantity
quantity_to_cancel = 4
wizard = self.env['sale.order.line_cancel_quantity']\
.with_context(ctx).create({})
# I change the quantity on the line that will be cancelled
wizard.line_ids[0].cancelled_qty = quantity_to_cancel
wizard.cancel_quantity()
# I check the cancelled quantity on sale line
self.assertEqual(line_to_invoice.cancelled_qty, quantity_to_cancel)
# I check the quantity to invoiced
max_quantity = line_to_invoice.product_qty -\
line_to_invoice.invoiced_qty - line_to_invoice.cancelled_qty
wizard = self.env['sale.order.line_invoice']\
.with_context(ctx).create({})
self.assertEqual(wizard.line_ids[0].product_qty, max_quantity)
# I make an invoice for the remaining quantity
wizard.line_ids[0].invoiced_qty = max_quantity
wizard.with_context(ctx).makeInvoices()
# I check if the line is fully invoiced
self.assertTrue(line_to_invoice.fully_invoiced)
def test_cancel_full_line(self):
self.common_test()
line_to_cancel = self.sale_order.order_line[0]
lines_to_invoice = self.sale_order.order_line\
.filtered(lambda r: r.id != line_to_cancel.id)
ctx = self.context.copy()
ctx.update({'active_ids': lines_to_invoice.ids})
wizard = self.env['sale.order.line_invoice']\
.with_context(ctx).create({})
# I click on make invoice button
wizard.with_context(ctx).makeInvoices()
ctx.update({'active_ids': line_to_cancel.id})
# I check if lines are invoiced
invoice_lines = self.inv_line_obj\
.search([('sale_line_id', 'in', lines_to_invoice.ids)])
self.assertEqual(len(invoice_lines), len(lines_to_invoice.ids))
# I get the created invoice
invoice = invoice_lines[0].invoice_id
# I validate the invoice
workflow.trg_validate(self.uid, 'account.invoice',
invoice.id, 'invoice_open',
self.cr)
# I cancel quantity
quantity_to_cancel = line_to_cancel.product_qty
wizard = self.env['sale.order.line_cancel_quantity']\
.with_context(ctx).create({})
# I change the quantity on the line that will be cancelled
wizard.line_ids[0].cancelled_qty = quantity_to_cancel
wizard.cancel_quantity()
# I check the cancelled quantity on sale line
self.assertEqual(line_to_cancel.cancelled_qty, quantity_to_cancel)
# I check if the line is fully invoiced
self.assertTrue(line_to_cancel.fully_invoiced)
# I check if the sale order is invoiced
line_to_cancel.order_id.invalidate_cache()
self.assertTrue(line_to_cancel.order_id.invoiced)
def test_line_zero(self):
self.common_test()
quantity_to_invoiced = 0
line_to_invoice = self.sale_order.order_line[0]
ctx = self.context.copy()
ctx.update({'active_ids': line_to_invoice.id})
wizard = self.env['sale.order.line_invoice']\
.with_context(ctx).create({})
# I change the quantity on the line that will be invoiced
wizard.line_ids[0].invoiced_qty = quantity_to_invoiced
# I click on make invoice button
wizard.with_context(ctx).makeInvoices()
# I check if only the line that I chose is invoiced
invoice_line = self.inv_line_obj\
.search([('sale_line_id', 'in', self.sale_order.ids)])
self.assertEqual(len(invoice_line), 1,
"Number of invoiced lines isn't correct")
# I check if the quantity on the invoice line is correct
self.assertEqual(invoice_line.quantity, quantity_to_invoiced,
"Quantity on invoice line isn't correct")
# I check invoiced quantity on the sale order line
self.assertEqual(line_to_invoice.invoiced_qty, invoice_line.quantity,
"Invoiced quantity isn't the same as on invoice line")
| agpl-3.0 | 4,426,487,082,173,416,000 | 48.062295 | 79 | 0.599105 | false |
DS-CM/live-slides | src/GetImage.py | 1 | 1282 |
import http.client, urllib.request, urllib.parse, urllib.error, base64, json
from pprint import pprint
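# Illustrative usage sketch (the key and keywords are placeholders, not
# values from this project):
#
#   client = GetImage('<bing-image-search-api-key>')
#   url = client.getImage(['mountain', 'sunrise'])
#   if url is not None:
#       print(url)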
class GetImage:
def __init__(self, key):
self.key = key
def getImage(self, keywords):
search_string = ""
for x in keywords:
search_string = search_string + " " + x
headers = {
# Request headers
'Ocp-Apim-Subscription-Key': self.key,
}
params = urllib.parse.urlencode({
# Request parameters
'q': search_string,
'count': '1',
'offset': '0',
'mkt': 'en-us',
'safeSearch': 'Strict',
})
try:
conn = http.client.HTTPSConnection('api.cognitive.microsoft.com')
conn.request("GET", "/bing/v5.0/images/search?%s" % params, "{body}", headers)
response = conn.getresponse()
data = json.loads(response.read().decode('utf-8'))
conn.close()
try:
return data['value'][0]['contentUrl']
except IndexError as e:
print("David wants to output this error: {}".format(e))
return None
except Exception as e:
print("[Errno {0}] {1}".format(e.errno, e.strerror))
| apache-2.0 | 6,372,495,451,247,394,000 | 28.136364 | 90 | 0.513261 | false |
paypal/keystone | keystone/common/sql/migrate_repo/versions/016_normalize_domain_ids.py | 1 | 19579 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Normalize for domain_id, i.e. ensure User and Project entities have the
domain_id as a first class attribute.
Both User and Project (as well as Group) entities are owned by a
domain, which is implemented as each having a domain_id foreign key
in their sql representation that points back to the respective
domain in the domain table. This domain_id attribute should also
be required (i.e. not nullable)
Adding a non_nullable foreign key attribute to a table with existing
data causes a few problems since not all DB engines support the
ability to either control the triggering of integrity constraints
or the ability to modify columns after they are created.
To get round the above inconsistencies, two versions of the
upgrade/downgrade functions are supplied, one for those engines
that support dropping columns, and one for those that don't. For
the latter we are forced to do table copy AND control the triggering
of integrity constraints.
"""
import sqlalchemy as sql
from sqlalchemy.orm import sessionmaker
from keystone import config
CONF = config.CONF
DEFAULT_DOMAIN_ID = CONF.identity.default_domain_id
def _disable_foreign_constraints(session, migrate_engine):
if migrate_engine.name == 'mysql':
session.execute('SET foreign_key_checks = 0;')
def _enable_foreign_constraints(session, migrate_engine):
if migrate_engine.name == 'mysql':
session.execute('SET foreign_key_checks = 1;')
def upgrade_user_table_with_copy(meta, migrate_engine, session):
# We want to add the domain_id attribute to the user table. Since
# it is non nullable and the table may have data, easiest way is
# a table copy. Further, in order to keep foreign key constraints
# pointing at the right table, we need to be able and do a table
# DROP then CREATE, rather than ALTERing the name of the table.
# First make a copy of the user table
temp_user_table = sql.Table(
'temp_user',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('name', sql.String(64), unique=True, nullable=False),
sql.Column('extra', sql.Text()),
sql.Column('password', sql.String(128)),
sql.Column('enabled', sql.Boolean, default=True))
temp_user_table.create(migrate_engine, checkfirst=True)
user_table = sql.Table('user', meta, autoload=True)
for user in session.query(user_table):
session.execute('insert into temp_user (id, name, extra, '
'password, enabled) '
'values ( :id, :name, :extra, '
':password, :enabled);',
{'id': user.id,
'name': user.name,
'extra': user.extra,
'password': user.password,
'enabled': user.enabled})
# Now switch off constraints while we drop and then re-create the
# user table, with the additional domain_id column
_disable_foreign_constraints(session, migrate_engine)
session.execute('drop table user;')
# Need to create a new metadata stream since we are going to load a
# different version of the user table
meta2 = sql.MetaData()
meta2.bind = migrate_engine
domain_table = sql.Table('domain', meta2, autoload=True)
user_table = sql.Table(
'user',
meta2,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('name', sql.String(64), nullable=False),
sql.Column('extra', sql.Text()),
sql.Column("password", sql.String(128)),
sql.Column("enabled", sql.Boolean, default=True),
sql.Column('domain_id', sql.String(64), sql.ForeignKey('domain.id'),
nullable=False),
sql.UniqueConstraint('domain_id', 'name'))
user_table.create(migrate_engine, checkfirst=True)
# Finally copy in the data from our temp table and then clean
# up by deleting our temp table
for user in session.query(temp_user_table):
session.execute('insert into user (id, name, extra, '
'password, enabled, domain_id) '
'values ( :id, :name, :extra, '
':password, :enabled, :domain_id);',
{'id': user.id,
'name': user.name,
'extra': user.extra,
'password': user.password,
'enabled': user.enabled,
'domain_id': DEFAULT_DOMAIN_ID})
_enable_foreign_constraints(session, migrate_engine)
session.execute('drop table temp_user;')
def upgrade_project_table_with_copy(meta, migrate_engine, session):
# We want to add the domain_id attribute to the project table. Since
# it is non nullable and the table may have data, easiest way is
# a table copy. Further, in order to keep foreign key constraints
# pointing at the right table, we need to be able and do a table
# DROP then CREATE, rather than ALTERing the name of the table.
# First make a copy of the project table
temp_project_table = sql.Table(
'temp_project',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('name', sql.String(64), unique=True, nullable=False),
sql.Column('extra', sql.Text()),
sql.Column('description', sql.Text()),
sql.Column('enabled', sql.Boolean, default=True))
temp_project_table.create(migrate_engine, checkfirst=True)
project_table = sql.Table('project', meta, autoload=True)
for project in session.query(project_table):
session.execute('insert into temp_project (id, name, extra, '
'description, enabled) '
'values ( :id, :name, :extra, '
':description, :enabled);',
{'id': project.id,
'name': project.name,
'extra': project.extra,
'description': project.description,
'enabled': project.enabled})
# Now switch off constraints while we drop and then re-create the
# project table, with the additional domain_id column
_disable_foreign_constraints(session, migrate_engine)
session.execute('drop table project;')
# Need to create a new metadata stream since we are going to load a
# different version of the project table
meta2 = sql.MetaData()
meta2.bind = migrate_engine
domain_table = sql.Table('domain', meta2, autoload=True)
project_table = sql.Table(
'project',
meta2,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('name', sql.String(64), nullable=False),
sql.Column('extra', sql.Text()),
sql.Column('description', sql.Text()),
sql.Column('enabled', sql.Boolean, default=True),
sql.Column('domain_id', sql.String(64), sql.ForeignKey('domain.id'),
nullable=False),
sql.UniqueConstraint('domain_id', 'name'))
project_table.create(migrate_engine, checkfirst=True)
# Finally copy in the data from our temp table and then clean
# up by deleting our temp table
for project in session.query(temp_project_table):
session.execute('insert into project (id, name, extra, '
'description, enabled, domain_id) '
'values ( :id, :name, :extra, '
':description, :enabled, :domain_id);',
{'id': project.id,
'name': project.name,
'extra': project.extra,
'description': project.description,
'enabled': project.enabled,
'domain_id': DEFAULT_DOMAIN_ID})
_enable_foreign_constraints(session, migrate_engine)
session.execute('drop table temp_project;')
def downgrade_user_table_with_copy(meta, migrate_engine, session):
# For engines that don't support dropping columns, we need to do this
# as a table copy. Further, in order to keep foreign key constraints
# pointing at the right table, we need to be able and do a table
# DROP then CREATE, rather than ALTERing the name of the table.
# First make a copy of the user table
temp_user_table = sql.Table(
'temp_user',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('name', sql.String(64), unique=True, nullable=False),
sql.Column('password', sql.String(128)),
sql.Column('enabled', sql.Boolean, default=True),
sql.Column('extra', sql.Text()))
temp_user_table.create(migrate_engine, checkfirst=True)
user_table = sql.Table('user', meta, autoload=True)
for user in session.query(user_table):
session.execute('insert into temp_user (id, name, '
'password, enabled, extra) '
'values ( :id, :name, '
':password, :enabled, :extra);',
{'id': user.id,
'name': user.name,
'password': user.password,
'enabled': user.enabled,
'extra': user.extra})
# Now switch off constraints while we drop and then re-create the
# user table, less the columns we wanted to drop
_disable_foreign_constraints(session, migrate_engine)
session.execute('drop table user;')
# Need to create a new metadata stream since we are going to load a
# different version of the user table
meta2 = sql.MetaData()
meta2.bind = migrate_engine
user_table = sql.Table(
'user',
meta2,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('name', sql.String(64), unique=True, nullable=False),
sql.Column('extra', sql.Text()),
sql.Column('password', sql.String(128)),
sql.Column('enabled', sql.Boolean, default=True))
user_table.create(migrate_engine, checkfirst=True)
_enable_foreign_constraints(session, migrate_engine)
# Finally copy in the data from our temp table and then clean
# up by deleting our temp table
for user in session.query(temp_user_table):
session.execute('insert into user (id, name, extra, '
'password, enabled) '
'values ( :id, :name, :extra, '
':password, :enabled);',
{'id': user.id,
'name': user.name,
'extra': user.extra,
'password': user.password,
'enabled': user.enabled})
session.execute('drop table temp_user;')
def downgrade_project_table_with_copy(meta, migrate_engine, session):
# For engines that don't support dropping columns, we need to do this
# as a table copy. Further, in order to keep foreign key constraints
# pointing at the right table, we need to be able and do a table
# DROP then CREATE, rather than ALTERing the name of the table.
# First make a copy of the project table
temp_project_table = sql.Table(
'temp_project',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('name', sql.String(64), unique=True, nullable=False),
sql.Column('description', sql.Text()),
sql.Column('enabled', sql.Boolean, default=True),
sql.Column('extra', sql.Text()))
temp_project_table.create(migrate_engine, checkfirst=True)
project_table = sql.Table('project', meta, autoload=True)
for project in session.query(project_table):
session.execute('insert into temp_project (id, name, '
'description, enabled, extra) '
'values ( :id, :name, '
':description, :enabled, :extra);',
{'id': project.id,
'name': project.name,
'description': project.description,
'enabled': project.enabled,
'extra': project.extra})
# Now switch off constraints while we drop and then re-create the
# project table, less the columns we wanted to drop
_disable_foreign_constraints(session, migrate_engine)
session.execute('drop table project;')
# Need to create a new metadata stream since we are going to load a
# different version of the project table
meta2 = sql.MetaData()
meta2.bind = migrate_engine
project_table = sql.Table(
'project',
meta2,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('name', sql.String(64), unique=True, nullable=False),
sql.Column('extra', sql.Text()),
sql.Column('description', sql.Text()),
sql.Column('enabled', sql.Boolean, default=True))
project_table.create(migrate_engine, checkfirst=True)
_enable_foreign_constraints(session, migrate_engine)
# Finally copy in the data from our temp table and then clean
# up by deleting our temp table
for project in session.query(temp_project_table):
session.execute('insert into project (id, name, extra, '
'description, enabled) '
'values ( :id, :name, :extra, '
':description, :enabled);',
{'id': project.id,
'name': project.name,
'extra': project.extra,
'description': project.description,
'enabled': project.enabled})
session.execute("drop table temp_project;")
def upgrade_user_table_with_col_create(meta, migrate_engine, session):
# Create the domain_id column. We want this to be not nullable
# but also a foreign key. We can't create this right off the
# bat since any existing rows would cause an Integrity Error.
# We therefore create it nullable, fill the column with the
# default data and then set it to non nullable.
domain_table = sql.Table('domain', meta, autoload=True)
user_table = sql.Table('user', meta, autoload=True)
user_table.create_column(
sql.Column('domain_id', sql.String(64),
sql.ForeignKey('domain.id'), nullable=True))
for user in session.query(user_table).all():
values = {'domain_id': DEFAULT_DOMAIN_ID}
update = user_table.update().\
where(user_table.c.id == user.id).\
values(values)
migrate_engine.execute(update)
# Need to commit this or setting nullable to False will fail
session.commit()
user_table.columns.domain_id.alter(nullable=False)
# Finally, change the uniqueness settings for the name attribute
session.execute('ALTER TABLE "user" DROP CONSTRAINT user_name_key;')
session.execute('ALTER TABLE "user" ADD CONSTRAINT user_dom_name_unique '
'UNIQUE (domain_id, name);')
def upgrade_project_table_with_col_create(meta, migrate_engine, session):
# Create the domain_id column. We want this to be not nullable
# but also a foreign key. We can't create this right off the
# bat since any existing rows would cause an Integrity Error.
# We therefore create it nullable, fill the column with the
# default data and then set it to non nullable.
domain_table = sql.Table('domain', meta, autoload=True)
project_table = sql.Table('project', meta, autoload=True)
project_table.create_column(
sql.Column('domain_id', sql.String(64),
sql.ForeignKey('domain.id'), nullable=True))
for project in session.query(project_table).all():
values = {'domain_id': DEFAULT_DOMAIN_ID}
update = project_table.update().\
where(project_table.c.id == project.id).\
values(values)
migrate_engine.execute(update)
# Need to commit this or setting nullable to False will fail
session.commit()
project_table.columns.domain_id.alter(nullable=False)
# Finally, change the uniqueness settings for the name attribute
session.execute('ALTER TABLE project DROP CONSTRAINT tenant_name_key;')
session.execute('ALTER TABLE project ADD CONSTRAINT proj_dom_name_unique '
'UNIQUE (domain_id, name);')
def downgrade_user_table_with_col_drop(meta, migrate_engine, session):
# Revert uniqueness settings for the name attribute
session.execute('ALTER TABLE "user" DROP CONSTRAINT '
'user_dom_name_unique;')
session.execute('ALTER TABLE "user" ADD UNIQUE (name);')
session.commit()
# And now go ahead an drop the domain_id column
domain_table = sql.Table('domain', meta, autoload=True)
user_table = sql.Table('user', meta, autoload=True)
column = sql.Column('domain_id', sql.String(64),
sql.ForeignKey('domain.id'), nullable=False)
column.drop(user_table)
def downgrade_project_table_with_col_drop(meta, migrate_engine, session):
# Revert uniqueness settings for the name attribute
session.execute('ALTER TABLE project DROP CONSTRAINT '
'proj_dom_name_unique;')
session.execute('ALTER TABLE project ADD CONSTRAINT tenant_name_key '
'UNIQUE (name);')
session.commit()
# And now go ahead an drop the domain_id column
domain_table = sql.Table('domain', meta, autoload=True)
project_table = sql.Table('project', meta, autoload=True)
column = sql.Column('domain_id', sql.String(64),
sql.ForeignKey('domain.id'), nullable=False)
column.drop(project_table)
def upgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
session = sessionmaker(bind=migrate_engine)()
if migrate_engine.name in ['sqlite', 'mysql']:
upgrade_user_table_with_copy(meta, migrate_engine, session)
upgrade_project_table_with_copy(meta, migrate_engine, session)
else:
upgrade_user_table_with_col_create(meta, migrate_engine, session)
upgrade_project_table_with_col_create(meta, migrate_engine, session)
session.commit()
session.close()
def downgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
session = sessionmaker(bind=migrate_engine)()
if migrate_engine.name in ['sqlite', 'mysql']:
downgrade_user_table_with_copy(meta, migrate_engine, session)
downgrade_project_table_with_copy(meta, migrate_engine, session)
else:
# MySQL should in theory be able to use this path, but seems to
# have problems dropping columns which are foreign keys
downgrade_user_table_with_col_drop(meta, migrate_engine, session)
downgrade_project_table_with_col_drop(meta, migrate_engine, session)
session.commit()
session.close()
| apache-2.0 | 4,975,266,049,065,287,000 | 44.21709 | 78 | 0.623117 | false |
xfournet/intellij-community | python/helpers/typeshed/tests/pytype_test.py | 1 | 7684 |
#!/usr/bin/env python
r"""Test runner for typeshed.
Depends on mypy and pytype being installed.
If pytype is installed:
1. For every pyi, do nothing if it is in pytype_blacklist.txt.
2. If the blacklist line has a "# parse only" comment run
"pytd <foo.pyi>" in a separate process.
3. If the file is not in the blacklist run
"pytype --typeshed-location=typeshed_location --module-name=foo \
--convert-to-pickle=tmp_file <foo.pyi>.
Option two will parse the file, mostly checking syntactical correctness.
Option three will load the file and all the builtins and typeshed
dependencies, which also discovers incorrect usage of imported modules.
"""
import argparse
import collections
import os
import re
import subprocess
import sys
parser = argparse.ArgumentParser(description='Pytype/typeshed tests.')
parser.add_argument('-n', '--dry-run', action='store_true',
help="Don't actually run tests")
parser.add_argument('--num-parallel', type=int, default=1,
help='Number of test processes to spawn')
# Default to '' so that symlinking typeshed/stdlib in cwd will work.
parser.add_argument('--typeshed-location', type=str, default='',
help='Path to typeshed installation.')
# Default to '' so that finding pytype in path will work.
parser.add_argument('--pytype-bin-dir', type=str, default='',
help='Path to directory with pytype and pytd executables.')
# Set to true to print a stack trace every time an exception is thrown.
parser.add_argument('--print-stderr', type=bool, default=False,
help='Print stderr every time an error is encountered.')
# We need to invoke python3.6. The default here works with our travis tests.
parser.add_argument('--python36-exe', type=str,
default='/opt/python/3.6/bin/python3.6',
help='Path to a python 3.6 interpreter.')
Dirs = collections.namedtuple('Dirs', ['pytype', 'typeshed'])
def main():
args = parser.parse_args()
code, runs = pytype_test(args)
if code:
print('--- exit status %d ---' % code)
sys.exit(code)
if not runs:
print('--- nothing to do; exit 1 ---')
sys.exit(1)
def get_project_dirs(args):
"""Top-level project directories for pytype executables and typeshed."""
typeshed_location = args.typeshed_location or os.getcwd()
return Dirs(args.pytype_bin_dir, typeshed_location)
class PathMatcher(object):
def __init__(self, patterns):
if patterns:
self.matcher = re.compile('(%s)$' % '|'.join(patterns))
else:
self.matcher = None
def search(self, path):
if not self.matcher:
return False
return self.matcher.search(path)
def load_blacklist(dirs):
filename = os.path.join(dirs.typeshed, 'tests', 'pytype_blacklist.txt')
skip_re = re.compile(r'^\s*([^\s#]+)\s*(?:#.*)?$')
parse_only_re = re.compile(r'^\s*([^\s#]+)\s*#\s*parse only\s*')
skip = []
parse_only = []
with open(filename) as f:
for line in f:
parse_only_match = parse_only_re.match(line)
skip_match = skip_re.match(line)
if parse_only_match:
parse_only.append(parse_only_match.group(1))
elif skip_match:
skip.append(skip_match.group(1))
return skip, parse_only
class BinaryRun(object):
def __init__(self, args, dry_run=False, env=None):
self.args = args
self.results = None
if dry_run:
self.results = (0, '', '')
else:
if env is not None:
full_env = os.environ.copy()
full_env.update(env)
else:
full_env = None
self.proc = subprocess.Popen(
self.args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=full_env)
def communicate(self):
if self.results:
return self.results
stdout, stderr = self.proc.communicate()
self.results = self.proc.returncode, stdout, stderr
return self.results
def _get_relative(filename):
top = filename.find('stdlib/')
return filename[top:]
def _get_module_name(filename):
"""Converts a filename stdlib/m.n/module/foo to module.foo."""
return '.'.join(_get_relative(filename).split(os.path.sep)[2:]).replace(
'.pyi', '').replace('.__init__', '')
def can_run(path, exe, *args):
exe = os.path.join(path, exe)
try:
BinaryRun([exe] + list(args)).communicate()
return True
except OSError:
return False
def pytype_test(args):
dirs = get_project_dirs(args)
pytype_exe = os.path.join(dirs.pytype, 'pytype')
stdlib_path = os.path.join(dirs.typeshed, 'stdlib')
if not os.path.isdir(stdlib_path):
print('Cannot find typeshed stdlib at %s '
'(specify parent dir via --typeshed_location)' % stdlib_path)
return 0, 0
if can_run(dirs.pytype, 'pytd', '-h'):
pytd_exe = os.path.join(dirs.pytype, 'pytd')
elif can_run(dirs.pytype, 'pytd_tool', '-h'):
pytd_exe = os.path.join(dirs.pytype, 'pytd_tool')
else:
print('Cannot run pytd. Did you install pytype?')
return 0, 0
wanted = re.compile(r'stdlib/.*\.pyi$')
skip, parse_only = load_blacklist(dirs)
skipped = PathMatcher(skip)
parse_only = PathMatcher(parse_only)
pytype_run = []
pytd_run = []
bad = []
for root, _, filenames in os.walk(stdlib_path):
for f in sorted(filenames):
f = os.path.join(root, f)
rel = _get_relative(f)
if wanted.search(rel):
if parse_only.search(rel):
pytd_run.append(f)
elif not skipped.search(rel):
pytype_run.append(f)
running_tests = collections.deque()
max_code, runs, errors = 0, 0, 0
files = pytype_run + pytd_run
while 1:
while files and len(running_tests) < args.num_parallel:
f = files.pop()
if f in pytype_run:
run_cmd = [
pytype_exe,
'--module-name=%s' % _get_module_name(f),
'--parse-pyi'
]
if 'stdlib/3' in f:
run_cmd += [
'-V 3.6',
'--python_exe=%s' % args.python36_exe
]
test_run = BinaryRun(
run_cmd + [f],
dry_run=args.dry_run,
env={"TYPESHED_HOME": dirs.typeshed})
elif f in pytd_run:
test_run = BinaryRun([pytd_exe, f], dry_run=args.dry_run)
else:
raise ValueError('Unknown action for file: %s' % f)
running_tests.append(test_run)
if not running_tests:
break
test_run = running_tests.popleft()
code, _, stderr = test_run.communicate()
max_code = max(max_code, code)
runs += 1
if code:
if args.print_stderr:
print(stderr)
errors += 1
# We strip off the stack trace and just leave the last line with the
# actual error; to see the stack traces use --print_stderr.
bad.append((_get_relative(test_run.args[-1]),
stderr.rstrip().rsplit('\n', 1)[-1]))
print('Ran pytype with %d pyis, got %d errors.' % (runs, errors))
for f, err in bad:
print('%s: %s' % (f, err))
return max_code, runs
if __name__ == '__main__':
main()
| apache-2.0 | -3,468,179,501,221,475,000 | 32.264069 | 80 | 0.565851 | false |
hpc/hypnotoad | hypnotoad/plugins/datamodels/ldap/ldap_plugin.py | 1 | 3400 |
#
# An ldap data model plugin for hypnotoad.
#
import ldap
import logging
from hypnotoad.core import plugin
LOG = logging.getLogger('root')
class ldap_plugin(plugin.data_model_plugin):
def setup(self, config, model_version):
"""Called before the plugin is asked to do anything."""
if config.getboolean('Data Model Options', 'ldap_plugin_enabled'):
self.plugin_enabled = True
LOG.debug("LDAP plugin enabled")
ldap_url = config.get('Data Model Options', 'ldap_server')
ldap_dc = config.get('Data Model Options', 'ldap_dc')
ldap_ou_group = config.get('Data Model Options', 'ldap_ou_group')
ldap_ou_user = config.get('Data Model Options', 'ldap_ou_user')
ldap_timeout = config.getfloat(
'Data Model Options', 'ldap_timeout')
self.ldap_dn_user = "ou=" + ldap_ou_user + "," + ldap_dc
self.ldap_dn_group = "ou=" + ldap_ou_group + "," + ldap_dc
LOG.debug("URL: " + ldap_url)
LOG.debug("Base DC: " + ldap_dc)
LOG.debug("DN for groups: " + self.ldap_dn_group)
LOG.debug("DN for users: " + self.ldap_dn_user)
self.ldap_ctx = ldap.initialize(ldap_url)
self.ldap_ctx.set_option(ldap.OPT_NETWORK_TIMEOUT, ldap_timeout)
self.config = config
self.model_version = model_version
else:
self.plugin_enabled = False
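# Illustrative configuration for the reads above (section and option names
# come from this method; the values are placeholders):
#
#   [Data Model Options]
#   ldap_plugin_enabled = True
#   ldap_server = ldap://ldap.example.com
#   ldap_dc = dc=example,dc=com
#   ldap_ou_group = Groups
#   ldap_ou_user = Users
#   ldap_timeout = 5.0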
def teardown(self):
"""Called to allow the plugin to free anything."""
if self.plugin_enabled:
LOG.debug("Got to ldap plugin teardown")
self.ldap_ctx.unbind_s()
def get_model(self):
"""Look up information in this data model."""
model = []
if self.plugin_enabled:
LOG.debug("Got to ldap plugin get_model")
model.append(
{'little_lang_entry': {'version': self.model_version}})
def ldap_search(dn, attrs):
return self.ldap_ctx.search_s(dn, ldap.SCOPE_SUBTREE, '(cn=*)', attrs)
users = ldap_search(self.ldap_dn_user, [
'cn', 'gidNumber', 'homeDirectory', 'uid',
'uidNumber', 'gecos', 'hpcDRMadef', 'loginShell'
])
LOG.debug("Found " + str(len(users)) + " users.")
for u in users:
dn, attrs = u
model.append({'user_entry': {
'short_name_string': attrs['uid'][0],
'full_name_string': attrs['cn'][0],
'group_id_integer': attrs['gidNumber'][0],
'user_id_integer': attrs['uidNumber'][0],
'home_directory_string': attrs['homeDirectory'][0],
'login_shell_string': attrs['loginShell'][0],
'priority_fairshare_float': '',
'priority_qos_name_array': ''
}})
groups = ldap_search(
self.ldap_dn_group, ['cn', 'hpcDRMshare', 'memberUid'])
for g in groups:
dn, attrs = g
LOG.debug("Found group with DN: " + dn)
model.append({'group_entry': {
'short_name_string': attrs['cn'][0],
'priority_fairshare_float': attrs['hpcDRMshare'][0],
}})
return model
# EOF
| bsd-3-clause | 450,297,542,380,948,160 | 32.333333 | 86 | 0.519412 | false |
hanelsofterp/green-hanel | purchase_landed_cost_assigning_before_receiving/wizard/wizard_import.py | 1 | 1127 |
__author__ = 'trananhdung'
from openerp import models, fields, api
class extendPickingImportWizard(models.TransientModel):
_inherit = 'picking.import.wizard'
pickings = fields.Many2many(
comodel_name='stock.picking',
relation='distribution_import_picking_rel', column1='wizard_id',
column2='picking_id', string='Incoming shipments',
domain="[('partner_id', 'child_of', supplier),"
"('location_id.usage', '=', 'supplier'),"
"('id', 'not in', prev_pickings[0][2]),"
"('state', 'in', ('landed_cost','done'))]", required=True)
@api.multi
def action_import_picking(self):
self.ensure_one()
# for picking in self.pickings:
# for move in picking.move_lines:
# self.env['purchase.cost.distribution.line'].create(
# self._prepare_distribution_line(move))
self.pickings.write({
'distribution_id': self.env.context.get('active_id', False)
})
return super(extendPickingImportWizard, self).action_import_picking()
| gpl-3.0 | 947,077,968,450,730,200 | 38.25 | 77 | 0.582076 | false |
rambler-digital-solutions/aioriak | aioriak/datatypes/datatype.py | 1 | 4884 |
from . import TYPES
from aioriak.error import ContextRequired
class Datatype:
'''
Base class for all convergent datatype wrappers. You will not use
this class directly, but it does define some methods are common to
all datatype wrappers.
'''
#: The string "name" of this datatype. Each datatype should set this.
type_name = None
def __init__(self, bucket=None, key=None, value=None, context=None):
self.bucket = bucket
self.key = key
self._context = context
if value is not None:
self._set_value(value)
else:
self._set_value(self._default_value())
self._post_init()
@property
def value(self):
'''
The pure, immutable value of this datatype, as a Python value,
which is unique for each datatype.
**NB**: Do not use this property to mutate data, as it will not
have any effect. Use the methods of the individual type to effect
changes. This value is guaranteed to be independent of any internal
data representation.
'''
return self._value
@property
def context(self):
'''
The opaque context for this type, if it was previously fetched.
:rtype: str
'''
if self._context:
return self._context[:]
@property
def modified(self):
'''
Whether this datatype has staged local modifications.
:rtype: bool
'''
raise NotImplementedError
# Lifecycle methods
async def reload(self, **params):
'''
Reloads the datatype from Riak.
.. warning: This clears any local modifications you might have
made.
:rtype: :class:`Datatype`
'''
if not self.bucket:
raise ValueError('bucket property not assigned')
if not self.key:
raise ValueError('key property not assigned')
dtype, value, context = await self.bucket._client._fetch_datatype(
self.bucket, self.key, **params)
if not dtype == self.type_name:
raise TypeError("Expected datatype {} but "
"got datatype {}".format(self.__class__,
TYPES[dtype]))
self.clear()
self._context = context
self._set_value(value)
return self
async def delete(self, **params):
'''
Deletes the datatype from Riak. See :meth:`RiakClient.delete()
<aioriak.client.RiakClient.delete>` for options.
'''
self.clear()
self._context = None
self._set_value(self._default_value())
await self.bucket._client.delete(self, **params)
return self
async def update(self, **params):
'''
Sends locally staged mutations to Riak.
:rtype: a subclass of :class:`~aioriak.datatypes.Datatype`
'''
if not self.modified:
raise ValueError("No operation to perform")
params.setdefault('return_body', True)
await self.bucket._client.update_datatype(self, **params)
self.clear()
return self
store = update
def clear(self):
'''
Removes all locally staged mutations.
'''
self._post_init()
def to_op(self):
'''
Extracts the mutation operation from this datatype, if any.
Each type must implement this method, returning the
appropriate operation, or `None` if there is no queued
mutation.
'''
raise NotImplementedError
# Private stuff
def _set_value(self, value):
self._raise_if_badtype(value)
self._value = self._coerce_value(value)
def _raise_if_badtype(self, new_value):
if not self._check_type(new_value):
raise TypeError(self._type_error_msg)
def _check_type(self, new_value):
'''
Checks that initial values of the type are appropriate. Each
type must implement this method.
:rtype: bool
'''
raise NotImplementedError
def _default_value(self):
'''
Returns what the initial value of an empty datatype should be.
'''
raise NotImplementedError
def _coerce_value(self, new_value):
'''
Coerces the input value into the internal representation for
the type. Datatypes may override this method.
'''
return new_value
def _post_init(self):
'''
Called at the end of :meth:`__init__` so that subclasses can tweak
their own setup without overriding the constructor.
'''
pass
def _require_context(self):
'''
Raises an exception if the context is not present
'''
if not self._context:
raise ContextRequired()
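# Illustrative lifecycle of a concrete subclass, using only the methods
# defined above (the mutation calls themselves are subclass-specific, so
# this is a sketch rather than runnable code):
#
#   await dt.reload()    # fetch the current value and context from Riak
#   ...stage local changes through the subclass's own mutation methods...
#   await dt.update()    # send staged operations; staged changes are cleared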
| mit | -50,095,760,985,934,680 | 27.231214 | 75 | 0.577805 | false |
nschaetti/pyTweetBot | pyTweetBot/tweet/RSSHunter.py | 1 | 2080 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import feedparser
from .Hunter import Hunter
from .Tweet import Tweet
import logging
from textblob import TextBlob
# Find new tweets from RSS streams
class RSSHunter(Hunter):
"""
Find new tweets from RSS streams
"""
# Constructor
def __init__(self, stream):
self._stream = stream
self._stream_url = stream['url']
logging.debug(u"Retreiving RSS stream {}".format(self._stream_url))
self._entries = feedparser.parse(self._stream_url)['entries']
self._hashtags = stream['hashtags'] if 'hashtags' in stream else list()
self._lang = stream['lang']
self._current = 0
# end __init__
# Get stream
def get_stream(self):
"""
Get stream
"""
return self._stream
# end get_stream
# To unicode
def __unicode__(self):
"""
To unicode
:return:
"""
return u"RSSHunter(stream={})".format(self._stream)
# end __unicode__
# Iterator
def __iter__(self):
"""
Iterator
:return:
"""
return self
# end __iter__
# Next
def next(self):
"""
Next
:return:
"""
if self._current >= len(self._entries):
raise StopIteration
# end if
# Found
found = False
while not found and self._current < len(self._entries):
# Get current entry
current_entry = self._entries[self._current]
# Analyze text
tweet_blob = TextBlob(current_entry['title'])
# Right language
if tweet_blob.detect_language() in self._lang:
found = True
# end if
# Next
self._current += 1
# end while
# Tweet generator
if found:
return Tweet(current_entry['title'], current_entry['links'][0]['href'], self._hashtags)
else:
raise StopIteration
# end if
# end next
# end RSSHunter
| gpl-3.0 | -6,919,431,503,497,942,000 | 21.608696 | 99 | 0.522115 | false |
Dymaxion00/KittenGroomer | fs/opt/groomer/functions_pier9.py | 1 | 2525 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from helpers import FileBase, KittenGroomerBase, main
printers = ['.STL', '.obj']
cnc = ['.nc', '.tap', '.gcode', '.dxf', '.stl', '.obj', '.iges', '.igs',
'.vrml', '.vrl', '.thing', '.step', '.stp', '.x3d']
shopbot = ['.ai', '.svg', '.dxf', '.dwg', '.eps']
omax = ['.ai', '.svg', '.dxf', '.dwg', '.eps', '.omx', '.obj']
epilog_laser = ['.ai', '.svg', '.dxf', '.dwg', '.eps']
metabeam = ['.dxf']
up = ['.upp', '.up3', '.stl', '.obj']
class FilePier9(FileBase):
def __init__(self, src_path, dst_path):
''' Init file object, set the extension '''
super(FilePier9, self).__init__(src_path, dst_path)
a, self.extension = os.path.splitext(self.src_path)
class KittenGroomerPier9(KittenGroomerBase):
def __init__(self, root_src=None, root_dst=None):
'''
Initialize the basics of the copy
'''
if root_src is None:
root_src = os.path.join(os.sep, 'media', 'src')
if root_dst is None:
root_dst = os.path.join(os.sep, 'media', 'dst')
super(KittenGroomerPier9, self).__init__(root_src, root_dst)
# The initial version will accept all the file extension for all the machines.
self.authorized_extensions = printers + cnc + shopbot + omax + epilog_laser + metabeam + up
def _print_log(self):
'''
Print the logs related to the current file being processed
'''
tmp_log = self.log_name.fields(**self.cur_file.log_details)
if not self.cur_file.log_details.get('valid'):
tmp_log.warning(self.cur_file.log_string)
else:
tmp_log.debug(self.cur_file.log_string)
def processdir(self):
'''
Main function doing the processing
'''
for srcpath in self._list_all_files(self.src_root_dir):
self.log_name.info('Processing {}', srcpath.replace(self.src_root_dir + '/', ''))
self.cur_file = FilePier9(srcpath, srcpath.replace(self.src_root_dir, self.dst_root_dir))
if self.cur_file.extension in self.authorized_extensions:
self.cur_file.add_log_details('valid', True)
self.cur_file.log_string = 'Expected extension: ' + self.cur_file.extension
self._safe_copy()
else:
self.cur_file.log_string = 'Bad extension: ' + self.cur_file.extension
self._print_log()
if __name__ == '__main__':
main(KittenGroomerPier9)
| bsd-3-clause | 8,387,122,660,357,544,000 | 36.132353 | 101 | 0.565149 | false |
zqfan/leetcode | algorithms/227. Basic Calculator II/solution.py | 1 | 1095 |
import collections
import operator

class Solution(object):
def calculate(self, s):
"""
:type s: str
:rtype: int
"""
queue = collections.deque()
method = {
"+": operator.add,
"-": operator.sub,
"*": operator.mul,
"/": operator.div
}
pri = {
operator.add: 0,
operator.sub: 0,
operator.mul: 1,
operator.div: 1
}
i = 0; n = 0
while i < len(s):
if s[i].isdigit():
n = n * 10 + int(s[i])
elif s[i] in method:
while queue and pri[method[s[i]]] <= pri[queue[-1]]:
op = queue.pop()
n1 = queue.pop()
n = op(n1, n)
queue.append(n)
queue.append(method[s[i]])
n = 0
i += 1
queue.append(n)
while len(queue) >= 3:
n1 = queue.pop()
op = queue.pop()
n2 = queue.pop()
queue.append(op(n2, n1))
return queue.pop()
| gpl-3.0 | 8,992,052,370,713,019,000 | 27.076923 | 68 | 0.368037 | false |
kailIII/emaresa | trunk.cl/client_lock_unlock/__openerp__.py | 1 | 1969 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: OpenDrive Ltda
# Copyright (c) 2013 Opendrive Ltda
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
{
'name' : 'Lock/Unlock Partners',
'version' : '1.2.5_1',
'author' : '[OpenDrive Ltda]',
'category' : 'Others/Partners',
'description' : """
Client blocking module for sales.
Cron jobs, "Fecha Hasta" (valid-until) dates, value ranges for cost centers, overdue cheques and payments not made on time.
Created By David Acevedo Toledo (Fedoro)
""",
'website': '[http://www.opendrive.cl]',
'depends' : ['base', 'warning', 'l10n_cl_fields'],
'data': [
'security/ir.model.access.csv',
'wizard/lock_unlock_process_view.xml',
'cron_data.xml',
'tramos_view.xml',
'res_partner_view.xml'
],
'installable': True,
'auto_install': False,
}
| agpl-3.0 | 6,928,426,690,630,475,000 | 36.150943 | 109 | 0.672423 | false |
RaghavPro/Runescape-Hiscores | hiscores/forms.py | 1 | 2377 |
from django import forms
from django.core.exceptions import FieldError
from .models import Skills
class SearchForm(forms.Form):
search = forms.CharField(
widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Username', 'required': ''}),
max_length=12, label=False)
def clean_search(self):
search = self.cleaned_data['search']
try:
Skills.objects.get(user_name__iexact=search)
except Skills.DoesNotExist:
raise forms.ValidationError("Player does not exist.")
class CompareForm(forms.Form):
player1 = forms.CharField(
widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Username', 'required': ''}),
max_length=12, label=False)
player2 = forms.CharField(
widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Username', 'required': ''}),
max_length=12, label=False)
def clean_player1(self):
player1 = self.cleaned_data['player1']
try:
Skills.objects.get(user_name__iexact=player1)
except Skills.DoesNotExist:
raise forms.ValidationError("Player does not exist.")
return player1
def clean_player2(self):
player2 = self.cleaned_data['player2']
try:
Skills.objects.get(user_name__iexact=player2)
except Skills.DoesNotExist:
raise forms.ValidationError("Player does not exist.")
return player2
class SearchRankForm(forms.Form):
search_rank = forms.CharField(
widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Rank', 'required': ''}),
max_length=30, label=False)
skill_exp = forms.CharField(widget=forms.HiddenInput())
def clean_search_rank(self):
rank = self.cleaned_data['search_rank']
skill_exp = self.data['skill_exp']
try:
rank = max(int(rank), 1) # Take to first rank if negative
user_name = Skills.objects.order_by("-%s" % skill_exp).values("user_name")[rank - 1]['user_name']
except IndexError:
raise forms.ValidationError("That rank does not exist.")
except FieldError:
raise forms.ValidationError("Oops, please try again.")
except ValueError:
raise forms.ValidationError("Enter a valid number.")
return user_name
| gpl-2.0 | 856,744,986,639,875,300 | 36.730159 | 109 | 0.63231 | false |
kevinr/750book-web | 750book-web-env/lib/python2.7/site-packages/kombu/transport/beanstalk.py | 1 | 3295 |
"""
kombu.transport.beanstalk
=========================
Beanstalk transport.
:copyright: (c) 2010 - 2012 by David Ziegler.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import socket
from Queue import Empty
from anyjson import loads, dumps
from beanstalkc import Connection, BeanstalkcException, SocketError
from . import virtual
from ..exceptions import StdChannelError
DEFAULT_PORT = 11300
__author__ = "David Ziegler <[email protected]>"
class Channel(virtual.Channel):
_client = None
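    # Decode a reserved beanstalk job: malformed payloads are buried so they
    # can be inspected later, valid payloads are deleted once decoded, and a
    # missing job means the queue is currently empty.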
def _parse_job(self, job):
item, dest = None, None
if job:
try:
item = loads(job.body)
dest = job.stats()["tube"]
except Exception:
job.bury()
else:
job.delete()
else:
raise Empty()
return item, dest
def _put(self, queue, message, **kwargs):
priority = message["properties"]["delivery_info"]["priority"]
self.client.use(queue)
self.client.put(dumps(message), priority=priority)
def _get(self, queue):
if queue not in self.client.watching():
self.client.watch(queue)
[self.client.ignore(active)
for active in self.client.watching()
if active != queue]
job = self.client.reserve(timeout=1)
item, dest = self._parse_job(job)
return item
def _get_many(self, queues, timeout=1):
# timeout of None will cause beanstalk to timeout waiting
# for a new request
if timeout is None:
timeout = 1
watching = self.client.watching()
[self.client.watch(active)
for active in queues
if active not in watching]
job = self.client.reserve(timeout=timeout)
return self._parse_job(job)
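    # Drain a queue by repeatedly reserving and deleting jobs until nothing is
    # returned; the number of deleted jobs is reported back to the caller.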
def _purge(self, queue):
if queue not in self.client.watching():
self.client.watch(queue)
[self.client.ignore(active)
for active in self.client.watching()
if active != queue]
count = 0
while 1:
job = self.client.reserve(timeout=1)
if job:
job.delete()
count += 1
else:
break
return count
def _size(self, queue):
return 0
def _open(self):
conninfo = self.connection.client
port = conninfo.port or DEFAULT_PORT
conn = Connection(host=conninfo.hostname, port=port)
conn.connect()
return conn
def close(self):
if self._client is not None:
return self._client.close()
super(Channel, self).close()
@property
def client(self):
if self._client is None:
self._client = self._open()
return self._client
class Transport(virtual.Transport):
Channel = Channel
polling_interval = 1
default_port = DEFAULT_PORT
connection_errors = (socket.error,
SocketError,
IOError)
channel_errors = (StdChannelError,
socket.error,
IOError,
SocketError,
BeanstalkcException)
| mit | 6,936,786,577,293,115,000 | 24.944882 | 69 | 0.553869 | false |
atlefren/beerdatabase | breweryname_compare.py | 1 | 1755 | # -*- coding: utf-8 -*-
import json
from beertools import BreweryNameMatcher
def read_json(filename):
with open(filename, 'r') as infile:
return json.loads(infile.read())
def get_breweries_polet():
with open('data/polet.json', 'r') as infile:
data = json.loads(infile.read())
breweries = list(set([product['Produsent'] for product in data]))
return sorted(breweries), data
def get_breweries(beer_list, property_name):
return sorted(list(set([beer[property_name] for beer in beer_list])))
def get_breweries_ratebeer():
with open('data/ratebeer.json', 'r') as infile:
data = json.loads(infile.read())
breweries = list(set([product['brewery'] for product in data]))
return sorted(breweries)
def wrap_breweries(breweries):
return [{'id': index, 'name': brewery}
for index, brewery in enumerate(breweries)]
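# Match Polet brewery names against the RateBeer brewery list and write the
# results to data/match.txt and data/nomatch.txt for manual review.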
def compare_breweries(pol_data, breweries_rb):
breweries_pol = get_breweries(pol_data, 'Produsent')
# breweries_rb = wrap_breweries(get_breweries(rb_data, 'brewery'))
matcher = BreweryNameMatcher(breweries_rb)
with open('data/nomatch.txt', 'w') as nomatch:
with open('data/match.txt', 'w') as match_file:
for brewery in breweries_pol:
match = matcher.match_name(brewery)
if match is None:
nomatch.write(brewery.encode('utf8') + '\n')
else:
string = '%s: %s' % (brewery, match['name'])
match_file.write(string.encode('utf8') + '\n')
if __name__ == '__main__':
pol_data = read_json('data/polet.json')
rb_breweries = read_json('data/rb_breweries.json')
compare_breweries(pol_data, rb_breweries)
| mit | 8,518,859,701,575,844,000 | 30.909091 | 73 | 0.614815 | false |
noironetworks/heat | heat/common/config.py | 1 | 25507 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Routines for configuring Heat."""
import os
from eventlet.green import socket
from oslo_config import cfg
from oslo_log import log as logging
from oslo_middleware import cors
from osprofiler import opts as profiler
from heat.common import exception
from heat.common.i18n import _
from heat.common import wsgi
LOG = logging.getLogger(__name__)
paste_deploy_group = cfg.OptGroup('paste_deploy')
paste_deploy_opts = [
cfg.StrOpt('flavor',
help=_("The flavor to use.")),
cfg.StrOpt('api_paste_config', default="api-paste.ini",
help=_("The API paste config file to use."))]
service_opts = [
cfg.IntOpt('periodic_interval',
default=60,
help=_('Seconds between running periodic tasks.')),
cfg.StrOpt('heat_metadata_server_url',
help=_('URL of the Heat metadata server. '
'NOTE: Setting this is only needed if you require '
'instances to use a different endpoint than in the '
'keystone catalog')),
cfg.StrOpt('heat_waitcondition_server_url',
help=_('URL of the Heat waitcondition server.')),
cfg.StrOpt('heat_watch_server_url',
default="",
deprecated_for_removal=True,
deprecated_reason='Heat CloudWatch Service has been removed.',
deprecated_since='10.0.0',
help=_('URL of the Heat CloudWatch server.')),
cfg.StrOpt('instance_connection_is_secure',
default="0",
help=_('Instance connection to CFN/CW API via https.')),
cfg.StrOpt('instance_connection_https_validate_certificates',
default="1",
help=_('Instance connection to CFN/CW API validate certs if '
'SSL is used.')),
cfg.StrOpt('region_name_for_services',
help=_('Default region name used to get services endpoints.')),
cfg.StrOpt('heat_stack_user_role',
default="heat_stack_user",
help=_('Keystone role for heat template-defined users.')),
cfg.StrOpt('stack_user_domain_id',
deprecated_opts=[cfg.DeprecatedOpt('stack_user_domain',
group=None)],
help=_('Keystone domain ID which contains heat '
'template-defined users. If this option is set, '
'stack_user_domain_name option will be ignored.')),
cfg.StrOpt('stack_user_domain_name',
help=_('Keystone domain name which contains heat '
'template-defined users. If `stack_user_domain_id` '
'option is set, this option is ignored.')),
cfg.StrOpt('stack_domain_admin',
help=_('Keystone username, a user with roles sufficient to '
'manage users and projects in the stack_user_domain.')),
cfg.StrOpt('stack_domain_admin_password',
secret=True,
help=_('Keystone password for stack_domain_admin user.')),
cfg.IntOpt('max_template_size',
default=524288,
help=_('Maximum raw byte size of any template.')),
cfg.IntOpt('max_nested_stack_depth',
default=5,
help=_('Maximum depth allowed when using nested stacks.')),
cfg.IntOpt('num_engine_workers',
help=_('Number of heat-engine processes to fork and run. '
'Will default to either to 4 or number of CPUs on '
'the host, whichever is greater.'))]
engine_opts = [
cfg.ListOpt('plugin_dirs',
default=['/usr/lib64/heat', '/usr/lib/heat',
'/usr/local/lib/heat', '/usr/local/lib64/heat'],
help=_('List of directories to search for plug-ins.')),
cfg.StrOpt('environment_dir',
default='/etc/heat/environment.d',
help=_('The directory to search for environment files.')),
cfg.StrOpt('template_dir',
default='/etc/heat/templates',
help=_('The directory to search for template files.')),
cfg.StrOpt('deferred_auth_method',
choices=['password', 'trusts'],
default='trusts',
deprecated_for_removal=True,
deprecated_reason='Stored password based deferred auth is '
'broken when used with keystone v3 and '
'is not supported.',
deprecated_since='9.0.0',
help=_('Select deferred auth method, '
'stored password or trusts.')),
cfg.StrOpt('reauthentication_auth_method',
choices=['', 'trusts'],
default='',
help=_('Allow reauthentication on token expiry, such that'
' long-running tasks may complete. Note this defeats'
' the expiry of any provided user tokens.')),
cfg.ListOpt('trusts_delegated_roles',
default=[],
help=_('Subset of trustor roles to be delegated to heat.'
' If left unset, all roles of a user will be'
' delegated to heat when creating a stack.')),
cfg.IntOpt('max_resources_per_stack',
default=1000,
help=_('Maximum resources allowed per top-level stack. '
'-1 stands for unlimited.')),
cfg.IntOpt('max_stacks_per_tenant',
default=100,
help=_('Maximum number of stacks any one tenant may have'
' active at one time.')),
cfg.IntOpt('action_retry_limit',
default=5,
help=_('Number of times to retry to bring a '
'resource to a non-error state. Set to 0 to disable '
'retries.')),
cfg.IntOpt('client_retry_limit',
default=2,
help=_('Number of times to retry when a client encounters an '
'expected intermittent error. Set to 0 to disable '
'retries.')),
    # Server host name is limited to 53 characters due to the typical default
    # linux HOST_NAME_MAX of 64, minus the .novalocal suffix appended to the name
cfg.IntOpt('max_server_name_length',
default=53,
max=53,
help=_('Maximum length of a server name to be used '
'in nova.')),
cfg.IntOpt('max_interface_check_attempts',
min=1,
default=10,
help=_('Number of times to check whether an interface has '
'been attached or detached.')),
cfg.IntOpt('event_purge_batch_size',
min=1,
default=200,
help=_("Controls how many events will be pruned whenever a "
"stack's events are purged. Set this "
"lower to keep more events at the expense of more "
"frequent purges.")),
cfg.IntOpt('max_events_per_stack',
default=1000,
help=_('Rough number of maximum events that will be available '
'per stack. Actual number of events can be a bit '
'higher since purge checks take place randomly '
'200/event_purge_batch_size percent of the time. '
'Older events are deleted when events are purged. '
'Set to 0 for unlimited events per stack.')),
cfg.IntOpt('stack_action_timeout',
default=3600,
help=_('Timeout in seconds for stack action (ie. create or'
' update).')),
cfg.IntOpt('error_wait_time',
default=240,
help=_('The amount of time in seconds after an error has'
' occurred that tasks may continue to run before'
' being cancelled.')),
cfg.IntOpt('engine_life_check_timeout',
default=2,
help=_('RPC timeout for the engine liveness check that is used'
' for stack locking.')),
cfg.BoolOpt('enable_cloud_watch_lite',
default=False,
deprecated_for_removal=True,
deprecated_reason='Heat CloudWatch Service has been removed.',
deprecated_since='10.0.0',
help=_('Enable the legacy OS::Heat::CWLiteAlarm resource.')),
cfg.BoolOpt('enable_stack_abandon',
default=False,
help=_('Enable the preview Stack Abandon feature.')),
cfg.BoolOpt('enable_stack_adopt',
default=False,
help=_('Enable the preview Stack Adopt feature.')),
cfg.BoolOpt('convergence_engine',
default=True,
help=_('Enables engine with convergence architecture. All '
'stacks with this option will be created using '
'convergence engine.')),
cfg.BoolOpt('observe_on_update',
default=False,
help=_('On update, enables heat to collect existing resource '
'properties from reality and converge to '
'updated template.')),
cfg.StrOpt('default_software_config_transport',
choices=['POLL_SERVER_CFN',
'POLL_SERVER_HEAT',
'POLL_TEMP_URL',
'ZAQAR_MESSAGE'],
default='POLL_SERVER_CFN',
help=_('Template default for how the server should receive the '
'metadata required for software configuration. '
'POLL_SERVER_CFN will allow calls to the cfn API action '
'DescribeStackResource authenticated with the provided '
'keypair (requires enabled heat-api-cfn). '
'POLL_SERVER_HEAT will allow calls to the '
'Heat API resource-show using the provided keystone '
'credentials (requires keystone v3 API, and configured '
'stack_user_* config options). '
'POLL_TEMP_URL will create and populate a '
'Swift TempURL with metadata for polling (requires '
'object-store endpoint which supports TempURL).'
'ZAQAR_MESSAGE will create a dedicated zaqar queue and '
'post the metadata for polling.')),
cfg.StrOpt('default_deployment_signal_transport',
choices=['CFN_SIGNAL',
'TEMP_URL_SIGNAL',
'HEAT_SIGNAL',
'ZAQAR_SIGNAL'],
default='CFN_SIGNAL',
help=_('Template default for how the server should signal to '
'heat with the deployment output values. CFN_SIGNAL '
'will allow an HTTP POST to a CFN keypair signed URL '
'(requires enabled heat-api-cfn). '
'TEMP_URL_SIGNAL will create a Swift TempURL to be '
'signaled via HTTP PUT (requires object-store endpoint '
'which supports TempURL). '
'HEAT_SIGNAL will allow calls to the Heat API '
'resource-signal using the provided keystone '
'credentials. ZAQAR_SIGNAL will create a dedicated '
'zaqar queue to be signaled using the provided keystone '
'credentials.')),
cfg.StrOpt('default_user_data_format',
choices=['HEAT_CFNTOOLS',
'RAW',
'SOFTWARE_CONFIG'],
default='HEAT_CFNTOOLS',
help=_('Template default for how the user_data should be '
'formatted for the server. For HEAT_CFNTOOLS, the '
'user_data is bundled as part of the heat-cfntools '
'cloud-init boot configuration data. For RAW the '
'user_data is passed to Nova unmodified. For '
'SOFTWARE_CONFIG user_data is bundled as part of the '
'software config data, and metadata is derived from any '
'associated SoftwareDeployment resources.')),
cfg.ListOpt('hidden_stack_tags',
default=['data-processing-cluster'],
help=_('Stacks containing these tag names will be hidden. '
'Multiple tags should be given in a comma-delimited '
'list (eg. hidden_stack_tags=hide_me,me_too).')),
cfg.StrOpt('onready',
help=_('Deprecated.')),
cfg.BoolOpt('stack_scheduler_hints',
default=False,
help=_('When this feature is enabled, scheduler hints'
' identifying the heat stack context of a server'
' or volume resource are passed to the configured'
' schedulers in nova and cinder, for creates done'
' using heat resource types OS::Cinder::Volume,'
' OS::Nova::Server, and AWS::EC2::Instance.'
' heat_root_stack_id will be set to the id of the'
' root stack of the resource, heat_stack_id will be'
' set to the id of the resource\'s parent stack,'
' heat_stack_name will be set to the name of the'
' resource\'s parent stack, heat_path_in_stack will'
' be set to a list of comma delimited strings of'
' stackresourcename and stackname with list[0] being'
' \'rootstackname\', heat_resource_name will be set to'
' the resource\'s name, and heat_resource_uuid will be'
' set to the resource\'s orchestration id.')),
cfg.BoolOpt('encrypt_parameters_and_properties',
default=False,
help=_('Encrypt template parameters that were marked as'
' hidden and also all the resource properties before'
' storing them in database.'))]
rpc_opts = [
cfg.StrOpt('host',
default=socket.gethostname(),
sample_default='<Hostname>',
help=_('Name of the engine node. '
'This can be an opaque identifier. '
'It is not necessarily a hostname, FQDN, '
'or IP address.'))]
auth_password_group = cfg.OptGroup('auth_password')
auth_password_opts = [
cfg.BoolOpt('multi_cloud',
default=False,
help=_('Allow orchestration of multiple clouds.')),
cfg.ListOpt('allowed_auth_uris',
default=[],
help=_('Allowed keystone endpoints for auth_uri when '
'multi_cloud is enabled. At least one endpoint needs '
'to be specified.'))]
# these options define baseline defaults that apply to all clients
default_clients_opts = [
cfg.StrOpt('endpoint_type',
default='publicURL',
help=_(
'Type of endpoint in Identity service catalog to use '
'for communication with the OpenStack service.')),
cfg.StrOpt('ca_file',
help=_('Optional CA cert file to use in SSL connections.')),
cfg.StrOpt('cert_file',
help=_('Optional PEM-formatted certificate chain file.')),
cfg.StrOpt('key_file',
help=_('Optional PEM-formatted file that contains the '
'private key.')),
cfg.BoolOpt('insecure',
default=False,
help=_("If set, then the server's certificate will not "
"be verified."))]
# these options can be defined for each client
# they must not specify defaults, since any options not defined in a client
# specific group is looked up on the generic group above
clients_opts = [
cfg.StrOpt('endpoint_type',
help=_(
'Type of endpoint in Identity service catalog to use '
'for communication with the OpenStack service.')),
cfg.StrOpt('ca_file',
help=_('Optional CA cert file to use in SSL connections.')),
cfg.StrOpt('cert_file',
help=_('Optional PEM-formatted certificate chain file.')),
cfg.StrOpt('key_file',
help=_('Optional PEM-formatted file that contains the '
'private key.')),
cfg.BoolOpt('insecure',
help=_("If set, then the server's certificate will not "
"be verified."))]
heat_client_opts = [
cfg.StrOpt('url',
default='',
help=_('Optional heat url in format like'
' http://0.0.0.0:8004/v1/%(tenant_id)s.'))]
keystone_client_opts = [
cfg.StrOpt('auth_uri',
default='',
help=_('Unversioned keystone url in format like'
' http://0.0.0.0:5000.'))]
client_http_log_debug_opts = [
cfg.BoolOpt('http_log_debug',
default=False,
help=_("Allow client's debug log output."))]
revision_group = cfg.OptGroup('revision')
revision_opts = [
cfg.StrOpt('heat_revision',
default='unknown',
help=_('Heat build revision. '
'If you would prefer to manage your build revision '
'separately, you can move this section to a different '
'file and add it as another config option.'))]
volumes_group = cfg.OptGroup('volumes')
volumes_opts = [
cfg.BoolOpt('backups_enabled',
default=True,
help=_("Indicate if cinder-backup service is enabled. "
"This is a temporary workaround until cinder-backup "
"service becomes discoverable, see LP#1334856."))]
noauth_group = cfg.OptGroup('noauth')
noauth_opts = [
cfg.StrOpt('token_response',
default='',
help=_("JSON file containing the content returned by the "
"noauth middleware."))]
def startup_sanity_check():
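    # Startup sanity checks: the stack user domain must be fully configured
    # (otherwise a legacy fallback warning is logged) and auth_encryption_key
    # must be 32 characters long (16/24 characters only trigger a warning).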
if (not cfg.CONF.stack_user_domain_id and
not cfg.CONF.stack_user_domain_name):
# FIXME(shardy): Legacy fallback for folks using old heat.conf
# files which lack domain configuration
LOG.warning('stack_user_domain_id or stack_user_domain_name not '
'set in heat.conf falling back to using default')
else:
domain_admin_user = cfg.CONF.stack_domain_admin
domain_admin_password = cfg.CONF.stack_domain_admin_password
if not (domain_admin_user and domain_admin_password):
raise exception.Error(_('heat.conf misconfigured, cannot '
'specify "stack_user_domain_id" or '
'"stack_user_domain_name" without '
'"stack_domain_admin" and '
'"stack_domain_admin_password"'))
auth_key_len = len(cfg.CONF.auth_encryption_key)
if auth_key_len in (16, 24):
LOG.warning(
'Please update auth_encryption_key to be 32 characters.')
elif auth_key_len != 32:
raise exception.Error(_('heat.conf misconfigured, auth_encryption_key '
'must be 32 characters'))
def list_opts():
yield None, rpc_opts
yield None, engine_opts
yield None, service_opts
yield paste_deploy_group.name, paste_deploy_opts
yield auth_password_group.name, auth_password_opts
yield revision_group.name, revision_opts
yield volumes_group.name, volumes_opts
yield noauth_group.name, noauth_opts
yield profiler.list_opts()[0]
yield 'clients', default_clients_opts
for client in ('aodh', 'barbican', 'cinder', 'designate',
'glance', 'heat', 'keystone', 'magnum', 'manila', 'mistral',
'monasca', 'neutron', 'nova', 'octavia', 'sahara', 'senlin',
'swift', 'trove', 'zaqar'
):
client_specific_group = 'clients_' + client
yield client_specific_group, clients_opts
yield 'clients_heat', heat_client_opts
yield 'clients_keystone', keystone_client_opts
yield 'clients_nova', client_http_log_debug_opts
yield 'clients_cinder', client_http_log_debug_opts
cfg.CONF.register_group(paste_deploy_group)
cfg.CONF.register_group(auth_password_group)
cfg.CONF.register_group(revision_group)
profiler.set_defaults(cfg.CONF)
for group, opts in list_opts():
cfg.CONF.register_opts(opts, group=group)
def _get_deployment_flavor():
"""Retrieves the paste_deploy.flavor config item.
Item formatted appropriately for appending to the application name.
"""
flavor = cfg.CONF.paste_deploy.flavor
return '' if not flavor else ('-' + flavor)
def _get_deployment_config_file():
"""Retrieves the deployment_config_file config item.
Item formatted as an absolute pathname.
"""
config_path = cfg.CONF.find_file(
cfg.CONF.paste_deploy['api_paste_config'])
if config_path is None:
return None
return os.path.abspath(config_path)
def load_paste_app(app_name=None):
"""Builds and returns a WSGI app from a paste config file.
We assume the last config file specified in the supplied ConfigOpts
object is the paste config file.
:param app_name: name of the application to load
:raises RuntimeError: when config file cannot be located or application
cannot be loaded from config file
"""
if app_name is None:
app_name = cfg.CONF.prog
# append the deployment flavor to the application name,
# in order to identify the appropriate paste pipeline
app_name += _get_deployment_flavor()
conf_file = _get_deployment_config_file()
if conf_file is None:
raise RuntimeError(_("Unable to locate config file [%s]") %
cfg.CONF.paste_deploy['api_paste_config'])
try:
app = wsgi.paste_deploy_app(conf_file, app_name, cfg.CONF)
# Log the options used when starting if we're in debug mode...
if cfg.CONF.debug:
cfg.CONF.log_opt_values(logging.getLogger(app_name),
logging.DEBUG)
return app
except (LookupError, ImportError) as e:
raise RuntimeError(_("Unable to load %(app_name)s from "
"configuration file %(conf_file)s."
"\nGot: %(e)r") % {'app_name': app_name,
'conf_file': conf_file,
'e': e})
def get_client_option(client, option):
# look for the option in the [clients_${client}] section
# unknown options raise cfg.NoSuchOptError
try:
group_name = 'clients_' + client
cfg.CONF.import_opt(option, 'heat.common.config',
group=group_name)
v = getattr(getattr(cfg.CONF, group_name), option)
if v is not None:
return v
except cfg.NoSuchGroupError:
pass # do not error if the client is unknown
# look for the option in the generic [clients] section
cfg.CONF.import_opt(option, 'heat.common.config', group='clients')
return getattr(cfg.CONF.clients, option)
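# Build requests-style SSL options from the per-client configuration.
# Illustrative example (values are hypothetical): get_ssl_options('nova') could
# return {'verify': '/etc/ssl/certs/ca.pem', 'cert': ('client.crt', 'client.key')},
# i.e. a dict that can be passed straight to a requests-based client session.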
def get_ssl_options(client):
# Look for the ssl options in the [clients_${client}] section
cacert = get_client_option(client, 'ca_file')
insecure = get_client_option(client, 'insecure')
cert = get_client_option(client, 'cert_file')
key = get_client_option(client, 'key_file')
if insecure:
verify = False
else:
verify = cacert or True
if cert and key:
cert = (cert, key)
return {'verify': verify, 'cert': cert}
def set_config_defaults():
"""This method updates all configuration default values."""
cors.set_defaults(
allow_headers=['X-Auth-Token',
'X-Identity-Status',
'X-Roles',
'X-Service-Catalog',
'X-User-Id',
'X-Tenant-Id',
'X-OpenStack-Request-ID'],
expose_headers=['X-Auth-Token',
'X-Subject-Token',
'X-Service-Token',
'X-OpenStack-Request-ID'],
allow_methods=['GET',
'PUT',
'POST',
'DELETE',
'PATCH']
)
| apache-2.0 | -464,540,316,762,593,900 | 44.548214 | 79 | 0.551966 | false |
ddico/odoo | addons/mrp/models/mrp_workorder.py | 1 | 39609 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from collections import defaultdict
import json
from odoo import api, fields, models, _, SUPERUSER_ID
from odoo.exceptions import UserError
from odoo.tools import float_compare, float_round, format_datetime
class MrpWorkorder(models.Model):
_name = 'mrp.workorder'
_description = 'Work Order'
_inherit = ['mail.thread', 'mail.activity.mixin']
def _read_group_workcenter_id(self, workcenters, domain, order):
workcenter_ids = self.env.context.get('default_workcenter_id')
if not workcenter_ids:
workcenter_ids = workcenters._search([], order=order, access_rights_uid=SUPERUSER_ID)
return workcenters.browse(workcenter_ids)
name = fields.Char(
'Work Order', required=True,
states={'done': [('readonly', True)], 'cancel': [('readonly', True)]})
workcenter_id = fields.Many2one(
'mrp.workcenter', 'Work Center', required=True,
states={'done': [('readonly', True)], 'cancel': [('readonly', True)], 'progress': [('readonly', True)]},
group_expand='_read_group_workcenter_id', check_company=True)
working_state = fields.Selection(
string='Workcenter Status', related='workcenter_id.working_state', readonly=False,
help='Technical: used in views only')
product_id = fields.Many2one(related='production_id.product_id', readonly=True, store=True, check_company=True)
product_tracking = fields.Selection(related="product_id.tracking")
product_uom_id = fields.Many2one('uom.uom', 'Unit of Measure', required=True, readonly=True)
use_create_components_lots = fields.Boolean(related="production_id.picking_type_id.use_create_components_lots")
production_id = fields.Many2one('mrp.production', 'Manufacturing Order', required=True, check_company=True)
production_availability = fields.Selection(
string='Stock Availability', readonly=True,
related='production_id.reservation_state', store=True,
help='Technical: used in views and domains only.')
production_state = fields.Selection(
string='Production State', readonly=True,
related='production_id.state',
help='Technical: used in views only.')
production_bom_id = fields.Many2one('mrp.bom', related='production_id.bom_id')
qty_production = fields.Float('Original Production Quantity', readonly=True, related='production_id.product_qty')
company_id = fields.Many2one(related='production_id.company_id')
qty_producing = fields.Float(
compute='_compute_qty_producing', inverse='_set_qty_producing',
string='Currently Produced Quantity', digits='Product Unit of Measure')
qty_remaining = fields.Float('Quantity To Be Produced', compute='_compute_qty_remaining', digits='Product Unit of Measure')
qty_produced = fields.Float(
'Quantity', default=0.0,
readonly=True,
digits='Product Unit of Measure',
copy=False,
help="The number of products already handled by this work order")
is_produced = fields.Boolean(string="Has Been Produced",
compute='_compute_is_produced')
state = fields.Selection([
('pending', 'Waiting for another WO'),
('ready', 'Ready'),
('progress', 'In Progress'),
('done', 'Finished'),
('cancel', 'Cancelled')], string='Status',
default='pending', copy=False, readonly=True)
leave_id = fields.Many2one(
'resource.calendar.leaves',
help='Slot into workcenter calendar once planned',
check_company=True, copy=False)
date_planned_start = fields.Datetime(
'Scheduled Start Date',
compute='_compute_dates_planned',
inverse='_set_dates_planned',
states={'done': [('readonly', True)], 'cancel': [('readonly', True)]},
store=True, tracking=True, copy=False)
date_planned_finished = fields.Datetime(
'Scheduled End Date',
compute='_compute_dates_planned',
inverse='_set_dates_planned',
states={'done': [('readonly', True)], 'cancel': [('readonly', True)]},
store=True, tracking=True, copy=False)
date_start = fields.Datetime(
'Start Date', copy=False,
states={'done': [('readonly', True)], 'cancel': [('readonly', True)]})
date_finished = fields.Datetime(
'End Date', copy=False,
states={'done': [('readonly', True)], 'cancel': [('readonly', True)]})
duration_expected = fields.Float(
'Expected Duration', digits=(16, 2), default=60.0,
states={'done': [('readonly', True)], 'cancel': [('readonly', True)]},
help="Expected duration (in minutes)")
duration = fields.Float(
'Real Duration', compute='_compute_duration',
readonly=True, store=True)
duration_unit = fields.Float(
'Duration Per Unit', compute='_compute_duration',
readonly=True, store=True)
duration_percent = fields.Integer(
'Duration Deviation (%)', compute='_compute_duration',
group_operator="avg", readonly=True, store=True)
progress = fields.Float('Progress Done (%)', digits=(16, 2), compute='_compute_progress')
operation_id = fields.Many2one(
'mrp.routing.workcenter', 'Operation', check_company=True)
# Should be used differently as BoM can change in the meantime
worksheet = fields.Binary(
'Worksheet', related='operation_id.worksheet', readonly=True)
worksheet_type = fields.Selection(
string='Worksheet Type', related='operation_id.worksheet_type', readonly=True)
worksheet_google_slide = fields.Char(
'Worksheet URL', related='operation_id.worksheet_google_slide', readonly=True)
operation_note = fields.Text("Description", related='operation_id.note', readonly=True)
move_raw_ids = fields.One2many(
'stock.move', 'workorder_id', 'Raw Moves',
domain=[('raw_material_production_id', '!=', False), ('production_id', '=', False)])
move_finished_ids = fields.One2many(
'stock.move', 'workorder_id', 'Finished Moves',
domain=[('raw_material_production_id', '=', False), ('production_id', '!=', False)])
move_line_ids = fields.One2many(
'stock.move.line', 'workorder_id', 'Moves to Track',
help="Inventory moves for which you must scan a lot number at this work order")
finished_lot_id = fields.Many2one(
'stock.production.lot', string='Lot/Serial Number', compute='_compute_finished_lot_id',
inverse='_set_finished_lot_id', domain="[('product_id', '=', product_id), ('company_id', '=', company_id)]",
check_company=True)
time_ids = fields.One2many(
'mrp.workcenter.productivity', 'workorder_id', copy=False)
is_user_working = fields.Boolean(
'Is the Current User Working', compute='_compute_working_users',
help="Technical field indicating whether the current user is working. ")
working_user_ids = fields.One2many('res.users', string='Working user on this work order.', compute='_compute_working_users')
last_working_user_id = fields.One2many('res.users', string='Last user that worked on this work order.', compute='_compute_working_users')
next_work_order_id = fields.Many2one('mrp.workorder', "Next Work Order", check_company=True)
scrap_ids = fields.One2many('stock.scrap', 'workorder_id')
scrap_count = fields.Integer(compute='_compute_scrap_move_count', string='Scrap Move')
production_date = fields.Datetime('Production Date', related='production_id.date_planned_start', store=True, readonly=False)
json_popover = fields.Char('Popover Data JSON', compute='_compute_json_popover')
show_json_popover = fields.Boolean('Show Popover?', compute='_compute_json_popover')
consumption = fields.Selection([
('strict', 'Strict'),
('warning', 'Warning'),
('flexible', 'Flexible')],
required=True,
)
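    # Build the JSON payload consumed by the planning popover widget: collect
    # scheduling issues (waiting on the previous work order, late planning,
    # overlapping work orders on the same workcenter) and pick the icon/color
    # of the most severe one.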
@api.depends('production_state', 'date_planned_start', 'date_planned_finished')
def _compute_json_popover(self):
previous_wo_data = self.env['mrp.workorder'].read_group(
[('next_work_order_id', 'in', self.ids)],
['ids:array_agg(id)', 'date_planned_start:max', 'date_planned_finished:max'],
['next_work_order_id'])
previous_wo_dict = dict([(x['next_work_order_id'][0], {
'id': x['ids'][0],
'date_planned_start': x['date_planned_start'],
'date_planned_finished': x['date_planned_finished']})
for x in previous_wo_data])
if self.ids:
conflicted_dict = self._get_conflicted_workorder_ids()
for wo in self:
infos = []
if not wo.date_planned_start or not wo.date_planned_finished or not wo.ids:
wo.show_json_popover = False
wo.json_popover = False
continue
if wo.state in ['pending', 'ready']:
previous_wo = previous_wo_dict.get(wo.id)
prev_start = previous_wo and previous_wo['date_planned_start'] or False
prev_finished = previous_wo and previous_wo['date_planned_finished'] or False
if wo.state == 'pending' and prev_start and not (prev_start > wo.date_planned_start):
infos.append({
'color': 'text-primary',
                        'msg': _("Waiting for the previous work order, planned from %(start)s to %(end)s",
start=format_datetime(self.env, prev_start, dt_format=False),
end=format_datetime(self.env, prev_finished, dt_format=False))
})
if wo.date_planned_finished < fields.Datetime.now():
infos.append({
'color': 'text-warning',
'msg': _("The work order should have already been processed.")
})
if prev_start and prev_start > wo.date_planned_start:
infos.append({
'color': 'text-danger',
'msg': _("Scheduled before the previous work order, planned from %(start)s to %(end)s",
start=format_datetime(self.env, prev_start, dt_format=False),
end=format_datetime(self.env, prev_finished, dt_format=False))
})
if conflicted_dict.get(wo.id):
infos.append({
'color': 'text-danger',
                        'msg': _("Planned at the same time as other workorder(s) at %s", wo.workcenter_id.display_name)
})
color_icon = infos and infos[-1]['color'] or False
wo.show_json_popover = bool(color_icon)
wo.json_popover = json.dumps({
'infos': infos,
'color': color_icon,
'icon': 'fa-exclamation-triangle' if color_icon in ['text-warning', 'text-danger'] else 'fa-info-circle',
'replan': color_icon not in [False, 'text-primary']
})
@api.depends('production_id.lot_producing_id')
def _compute_finished_lot_id(self):
for workorder in self:
workorder.finished_lot_id = workorder.production_id.lot_producing_id
def _set_finished_lot_id(self):
for workorder in self:
workorder.production_id.lot_producing_id = workorder.finished_lot_id
@api.depends('production_id.qty_producing')
def _compute_qty_producing(self):
for workorder in self:
workorder.qty_producing = workorder.production_id.qty_producing
def _set_qty_producing(self):
for workorder in self:
workorder.production_id.qty_producing = workorder.qty_producing
workorder.production_id._set_qty_producing()
# Both `date_planned_start` and `date_planned_finished` are related fields on `leave_id`. Let's say
# we slide a workorder on a gantt view, a single call to write is made with both
# fields Changes. As the ORM doesn't batch the write on related fields and instead
# makes multiple call, the constraint check_dates() is raised.
# That's why the compute and set methods are needed. to ensure the dates are updated
# in the same time.
@api.depends('leave_id')
def _compute_dates_planned(self):
for workorder in self:
workorder.date_planned_start = workorder.leave_id.date_from
workorder.date_planned_finished = workorder.leave_id.date_to
def _set_dates_planned(self):
date_from = self[0].date_planned_start
date_to = self[0].date_planned_finished
self.mapped('leave_id').write({
'date_from': date_from,
'date_to': date_to,
})
def name_get(self):
res = []
for wo in self:
if len(wo.production_id.workorder_ids) == 1:
res.append((wo.id, "%s - %s - %s" % (wo.production_id.name, wo.product_id.name, wo.name)))
else:
res.append((wo.id, "%s - %s - %s - %s" % (wo.production_id.workorder_ids.ids.index(wo.id) + 1, wo.production_id.name, wo.product_id.name, wo.name)))
return res
def unlink(self):
# Removes references to workorder to avoid Validation Error
(self.mapped('move_raw_ids') | self.mapped('move_finished_ids')).write({'workorder_id': False})
self.mapped('leave_id').unlink()
mo_dirty = self.production_id.filtered(lambda mo: mo.state in ("confirmed", "progress", "to_close"))
res = super().unlink()
# We need to go through `_action_confirm` for all workorders of the current productions to
# make sure the links between them are correct (`next_work_order_id` could be obsolete now).
mo_dirty.workorder_ids._action_confirm()
return res
@api.depends('production_id.product_qty', 'qty_produced', 'production_id.product_uom_id')
def _compute_is_produced(self):
self.is_produced = False
for order in self.filtered(lambda p: p.production_id and p.production_id.product_uom_id):
rounding = order.production_id.product_uom_id.rounding
order.is_produced = float_compare(order.qty_produced, order.production_id.product_qty, precision_rounding=rounding) >= 0
@api.depends('time_ids.duration', 'qty_produced')
def _compute_duration(self):
for order in self:
order.duration = sum(order.time_ids.mapped('duration'))
order.duration_unit = round(order.duration / max(order.qty_produced, 1), 2) # rounding 2 because it is a time
if order.duration_expected:
order.duration_percent = 100 * (order.duration_expected - order.duration) / order.duration_expected
else:
order.duration_percent = 0
@api.depends('duration', 'duration_expected', 'state')
def _compute_progress(self):
for order in self:
if order.state == 'done':
order.progress = 100
elif order.duration_expected:
order.progress = order.duration * 100 / order.duration_expected
else:
order.progress = 0
def _compute_working_users(self):
""" Checks whether the current user is working, all the users currently working and the last user that worked. """
for order in self:
order.working_user_ids = [(4, order.id) for order in order.time_ids.filtered(lambda time: not time.date_end).sorted('date_start').mapped('user_id')]
if order.working_user_ids:
order.last_working_user_id = order.working_user_ids[-1]
elif order.time_ids:
order.last_working_user_id = order.time_ids.sorted('date_end')[-1].user_id
else:
order.last_working_user_id = False
if order.time_ids.filtered(lambda x: (x.user_id.id == self.env.user.id) and (not x.date_end) and (x.loss_type in ('productive', 'performance'))):
order.is_user_working = True
else:
order.is_user_working = False
def _compute_scrap_move_count(self):
data = self.env['stock.scrap'].read_group([('workorder_id', 'in', self.ids)], ['workorder_id'], ['workorder_id'])
count_data = dict((item['workorder_id'][0], item['workorder_id_count']) for item in data)
for workorder in self:
workorder.scrap_count = count_data.get(workorder.id, 0)
@api.onchange('date_planned_finished')
def _onchange_date_planned_finished(self):
if self.date_planned_start and self.date_planned_finished:
diff = self.date_planned_finished - self.date_planned_start
self.duration_expected = diff.total_seconds() / 60
@api.onchange('operation_id')
def _onchange_operation_id(self):
if self.operation_id:
self.name = self.operation_id.name
self.workcenter_id = self.operation_id.workcenter_id.id
@api.onchange('date_planned_start', 'duration_expected')
def _onchange_date_planned_start(self):
if self.date_planned_start and self.duration_expected:
self.date_planned_finished = self.date_planned_start + relativedelta(minutes=self.duration_expected)
@api.onchange('operation_id', 'workcenter_id', 'qty_production')
def _onchange_expected_duration(self):
self.duration_expected = self._get_duration_expected()
def write(self, values):
if 'production_id' in values:
raise UserError(_('You cannot link this work order to another manufacturing order.'))
if 'workcenter_id' in values:
for workorder in self:
if workorder.workcenter_id.id != values['workcenter_id']:
if workorder.state in ('progress', 'done', 'cancel'):
raise UserError(_('You cannot change the workcenter of a work order that is in progress or done.'))
workorder.leave_id.resource_id = self.env['mrp.workcenter'].browse(values['workcenter_id']).resource_id
if any(k not in ['time_ids', 'duration_expected', 'next_work_order_id'] for k in values.keys()) and any(workorder.state == 'done' for workorder in self):
raise UserError(_('You can not change the finished work order.'))
if 'date_planned_start' in values or 'date_planned_finished' in values:
for workorder in self:
start_date = fields.Datetime.to_datetime(values.get('date_planned_start')) or workorder.date_planned_start
end_date = fields.Datetime.to_datetime(values.get('date_planned_finished')) or workorder.date_planned_finished
if start_date and end_date and start_date > end_date:
raise UserError(_('The planned end date of the work order cannot be prior to the planned start date, please correct this to save the work order.'))
# Update MO dates if the start date of the first WO or the
# finished date of the last WO is update.
if workorder == workorder.production_id.workorder_ids[0] and 'date_planned_start' in values:
if values['date_planned_start']:
workorder.production_id.with_context(force_date=True).write({
'date_planned_start': fields.Datetime.to_datetime(values['date_planned_start'])
})
if workorder == workorder.production_id.workorder_ids[-1] and 'date_planned_finished' in values:
if values['date_planned_finished']:
workorder.production_id.with_context(force_date=True).write({
'date_planned_finished': fields.Datetime.to_datetime(values['date_planned_finished'])
})
return super(MrpWorkorder, self).write(values)
@api.model_create_multi
def create(self, values):
res = super().create(values)
# Auto-confirm manually added workorders.
# We need to go through `_action_confirm` for all workorders of the current productions to
# make sure the links between them are correct.
to_confirm = res.filtered(lambda wo: wo.production_id.state in ("confirmed", "progress", "to_close"))
to_confirm = to_confirm.production_id.workorder_ids
to_confirm._action_confirm()
return res
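    # Chain each production's work orders per BoM (setting next_work_order_id),
    # attach raw/finished moves to the work order of the BoM they were exploded
    # from, and flag the first work order of each chain as ready.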
def _action_confirm(self):
workorders_by_production = defaultdict(lambda: self.env['mrp.workorder'])
for workorder in self:
workorders_by_production[workorder.production_id] |= workorder
for production, workorders in workorders_by_production.items():
workorders_by_bom = defaultdict(lambda: self.env['mrp.workorder'])
bom = self.env['mrp.bom']
moves = production.move_raw_ids | production.move_finished_ids
for workorder in self:
if workorder.operation_id.bom_id:
bom = workorder.operation_id.bom_id
if not bom:
bom = workorder.production_id.bom_id
previous_workorder = workorders_by_bom[bom][-1:]
previous_workorder.next_work_order_id = workorder.id
workorders_by_bom[bom] |= workorder
moves.filtered(lambda m: m.operation_id == workorder.operation_id).write({
'workorder_id': workorder.id
})
exploded_boms, dummy = production.bom_id.explode(production.product_id, 1, picking_type=production.bom_id.picking_type_id)
exploded_boms = {b[0]: b[1] for b in exploded_boms}
for move in moves:
if move.workorder_id:
continue
bom = move.bom_line_id.bom_id
while bom and bom not in workorders_by_bom:
bom_data = exploded_boms.get(bom, {})
bom = bom_data.get('parent_line') and bom_data['parent_line'].bom_id or False
if bom in workorders_by_bom:
move.write({
'workorder_id': workorders_by_bom[bom][-1:].id
})
else:
move.write({
'workorder_id': workorders_by_bom[production.bom_id][-1:].id
})
for workorders in workorders_by_bom.values():
if workorders[0].state == 'pending':
workorders[0].state = 'ready'
for workorder in workorders:
workorder._start_nextworkorder()
def _get_byproduct_move_to_update(self):
return self.production_id.move_finished_ids.filtered(lambda x: (x.product_id.id != self.production_id.product_id.id) and (x.state not in ('done', 'cancel')))
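    # A 'pending' next work order becomes 'ready' once this one is done or, for
    # batch operations, as soon as the produced quantity reaches the batch size
    # (the full production quantity for non-batch operations).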
def _start_nextworkorder(self):
rounding = self.product_id.uom_id.rounding
if self.next_work_order_id.state == 'pending' and (
(self.operation_id.batch == 'no' and
float_compare(self.qty_production, self.qty_produced, precision_rounding=rounding) <= 0) or
(self.operation_id.batch == 'yes' and
float_compare(self.operation_id.batch_size, self.qty_produced, precision_rounding=rounding) <= 0)):
self.next_work_order_id.state = 'ready'
if self.state == 'done' and self.next_work_order_id.state == 'pending':
self.next_work_order_id.state = 'ready'
@api.model
def gantt_unavailability(self, start_date, end_date, scale, group_bys=None, rows=None):
"""Get unavailabilities data to display in the Gantt view."""
workcenter_ids = set()
def traverse_inplace(func, row, **kargs):
res = func(row, **kargs)
if res:
kargs.update(res)
for row in row.get('rows'):
traverse_inplace(func, row, **kargs)
def search_workcenter_ids(row):
if row.get('groupedBy') and row.get('groupedBy')[0] == 'workcenter_id' and row.get('resId'):
workcenter_ids.add(row.get('resId'))
for row in rows:
traverse_inplace(search_workcenter_ids, row)
start_datetime = fields.Datetime.to_datetime(start_date)
end_datetime = fields.Datetime.to_datetime(end_date)
workcenters = self.env['mrp.workcenter'].browse(workcenter_ids)
unavailability_mapping = workcenters._get_unavailability_intervals(start_datetime, end_datetime)
        # Only notable intervals (longer than one cell) are sent to the front-end (avoids sending useless information)
cell_dt = (scale in ['day', 'week'] and timedelta(hours=1)) or (scale == 'month' and timedelta(days=1)) or timedelta(days=28)
def add_unavailability(row, workcenter_id=None):
if row.get('groupedBy') and row.get('groupedBy')[0] == 'workcenter_id' and row.get('resId'):
workcenter_id = row.get('resId')
if workcenter_id:
notable_intervals = filter(lambda interval: interval[1] - interval[0] >= cell_dt, unavailability_mapping[workcenter_id])
row['unavailabilities'] = [{'start': interval[0], 'stop': interval[1]} for interval in notable_intervals]
return {'workcenter_id': workcenter_id}
for row in rows:
traverse_inplace(add_unavailability, row)
return rows
def button_start(self):
self.ensure_one()
# As button_start is automatically called in the new view
if self.state in ('done', 'cancel'):
return True
if self.product_tracking == 'serial':
self.qty_producing = 1.0
# Need a loss in case of the real time exceeding the expected
timeline = self.env['mrp.workcenter.productivity']
if not self.duration_expected or self.duration < self.duration_expected:
loss_id = self.env['mrp.workcenter.productivity.loss'].search([('loss_type','=','productive')], limit=1)
if not len(loss_id):
raise UserError(_("You need to define at least one productivity loss in the category 'Productivity'. Create one from the Manufacturing app, menu: Configuration / Productivity Losses."))
else:
loss_id = self.env['mrp.workcenter.productivity.loss'].search([('loss_type','=','performance')], limit=1)
if not len(loss_id):
raise UserError(_("You need to define at least one productivity loss in the category 'Performance'. Create one from the Manufacturing app, menu: Configuration / Productivity Losses."))
if self.production_id.state != 'progress':
self.production_id.write({
'date_start': datetime.now(),
})
timeline.create({
'workorder_id': self.id,
'workcenter_id': self.workcenter_id.id,
'description': _('Time Tracking: ') + self.env.user.name,
'loss_id': loss_id[0].id,
'date_start': datetime.now(),
'user_id': self.env.user.id, # FIXME sle: can be inconsistent with company_id
'company_id': self.company_id.id,
})
if self.state == 'progress':
return True
start_date = datetime.now()
vals = {
'state': 'progress',
'date_start': start_date,
}
if not self.leave_id:
leave = self.env['resource.calendar.leaves'].create({
'name': self.display_name,
'calendar_id': self.workcenter_id.resource_calendar_id.id,
'date_from': start_date,
'date_to': start_date + relativedelta(minutes=self.duration_expected),
'resource_id': self.workcenter_id.resource_id.id,
'time_type': 'other'
})
vals['leave_id'] = leave.id
return self.write(vals)
else:
vals['date_planned_start'] = start_date
if self.date_planned_finished < start_date:
vals['date_planned_finished'] = start_date
return self.write(vals)
def button_finish(self):
end_date = datetime.now()
for workorder in self:
if workorder.state in ('done', 'cancel'):
continue
workorder.end_all()
vals = {
'state': 'done',
'date_finished': end_date,
'date_planned_finished': end_date
}
if not workorder.date_start:
vals['date_start'] = end_date
if not workorder.date_planned_start or end_date < workorder.date_planned_start:
vals['date_planned_start'] = end_date
workorder.write(vals)
workorder._start_nextworkorder()
return True
def end_previous(self, doall=False):
"""
@param: doall: This will close all open time lines on the open work orders when doall = True, otherwise
only the one of the current user
"""
# TDE CLEANME
timeline_obj = self.env['mrp.workcenter.productivity']
domain = [('workorder_id', 'in', self.ids), ('date_end', '=', False)]
if not doall:
domain.append(('user_id', '=', self.env.user.id))
not_productive_timelines = timeline_obj.browse()
for timeline in timeline_obj.search(domain, limit=None if doall else 1):
wo = timeline.workorder_id
if wo.duration_expected <= wo.duration:
if timeline.loss_type == 'productive':
not_productive_timelines += timeline
timeline.write({'date_end': fields.Datetime.now()})
else:
maxdate = fields.Datetime.from_string(timeline.date_start) + relativedelta(minutes=wo.duration_expected - wo.duration)
enddate = datetime.now()
if maxdate > enddate:
timeline.write({'date_end': enddate})
else:
timeline.write({'date_end': maxdate})
not_productive_timelines += timeline.copy({'date_start': maxdate, 'date_end': enddate})
if not_productive_timelines:
loss_id = self.env['mrp.workcenter.productivity.loss'].search([('loss_type', '=', 'performance')], limit=1)
if not len(loss_id):
                raise UserError(_("You need to define at least one inactive productivity loss in the category 'Performance'. Create one from the Manufacturing app, menu: Configuration / Productivity Losses."))
not_productive_timelines.write({'loss_id': loss_id.id})
return True
def end_all(self):
return self.end_previous(doall=True)
def button_pending(self):
self.end_previous()
return True
def button_unblock(self):
for order in self:
order.workcenter_id.unblock()
return True
def action_cancel(self):
self.leave_id.unlink()
return self.write({'state': 'cancel'})
def action_replan(self):
"""Replan a work order.
It actually replans every "ready" or "pending"
work orders of the linked manufacturing orders.
"""
for production in self.production_id:
production._plan_workorders(replan=True)
return True
def button_done(self):
if any([x.state in ('done', 'cancel') for x in self]):
raise UserError(_('A Manufacturing Order is already done or cancelled.'))
self.end_all()
end_date = datetime.now()
return self.write({
'state': 'done',
'date_finished': end_date,
'date_planned_finished': end_date,
})
def button_scrap(self):
self.ensure_one()
return {
'name': _('Scrap'),
'view_mode': 'form',
'res_model': 'stock.scrap',
'view_id': self.env.ref('stock.stock_scrap_form_view2').id,
'type': 'ir.actions.act_window',
'context': {'default_company_id': self.production_id.company_id.id,
'default_workorder_id': self.id,
'default_production_id': self.production_id.id,
'product_ids': (self.production_id.move_raw_ids.filtered(lambda x: x.state not in ('done', 'cancel')) | self.production_id.move_finished_ids.filtered(lambda x: x.state == 'done')).mapped('product_id').ids},
'target': 'new',
}
def action_see_move_scrap(self):
self.ensure_one()
action = self.env.ref('stock.action_stock_scrap').read()[0]
action['domain'] = [('workorder_id', '=', self.id)]
return action
def action_open_wizard(self):
self.ensure_one()
action = self.env.ref('mrp.mrp_workorder_mrp_production_form').read()[0]
action['res_id'] = self.id
return action
@api.depends('qty_production', 'qty_produced')
def _compute_qty_remaining(self):
for wo in self:
wo.qty_remaining = float_round(wo.qty_production - wo.qty_produced, precision_rounding=wo.production_id.product_uom_id.rounding)
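    # Expected duration = workcenter setup + teardown time, plus one cycle time
    # per capacity batch, with the cycle part scaled by the workcenter time
    # efficiency (expressed as a percentage).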
def _get_duration_expected(self, alternative_workcenter=False):
self.ensure_one()
if not self.workcenter_id:
return False
qty_production = self.production_id.product_uom_id._compute_quantity(self.qty_production, self.production_id.product_id.uom_id)
cycle_number = float_round(qty_production / self.workcenter_id.capacity, precision_digits=0, rounding_method='UP')
if alternative_workcenter:
# TODO : find a better alternative : the settings of workcenter can change
duration_expected_working = (self.duration_expected - self.workcenter_id.time_start - self.workcenter_id.time_stop) * self.workcenter_id.time_efficiency / (100.0 * cycle_number)
if duration_expected_working < 0:
duration_expected_working = 0
return alternative_workcenter.time_start + alternative_workcenter.time_stop + cycle_number * duration_expected_working * 100.0 / alternative_workcenter.time_efficiency
time_cycle = self.operation_id and self.operation_id.time_cycle or 60.0
return self.workcenter_id.time_start + self.workcenter_id.time_stop + cycle_number * time_cycle * 100.0 / self.workcenter_id.time_efficiency
def _get_conflicted_workorder_ids(self):
"""Get conlicted workorder(s) with self.
Conflict means having two workorders in the same time in the same workcenter.
:return: defaultdict with key as workorder id of self and value as related conflicted workorder
"""
self.flush(['state', 'date_planned_start', 'date_planned_finished', 'workcenter_id'])
sql = """
SELECT wo1.id, wo2.id
FROM mrp_workorder wo1, mrp_workorder wo2
WHERE
wo1.id IN %s
AND wo1.state IN ('pending','ready')
AND wo2.state IN ('pending','ready')
AND wo1.id != wo2.id
AND wo1.workcenter_id = wo2.workcenter_id
AND (DATE_TRUNC('second', wo2.date_planned_start), DATE_TRUNC('second', wo2.date_planned_finished))
OVERLAPS (DATE_TRUNC('second', wo1.date_planned_start), DATE_TRUNC('second', wo1.date_planned_finished))
"""
self.env.cr.execute(sql, [tuple(self.ids)])
res = defaultdict(list)
for wo1, wo2 in self.env.cr.fetchall():
res[wo1].append(wo2)
return res
@api.model
def _prepare_component_quantity(self, move, qty_producing):
""" helper that computes quantity to consume (or to create in case of byproduct)
depending on the quantity producing and the move's unit factor"""
if move.product_id.tracking == 'serial':
uom = move.product_id.uom_id
else:
uom = move.product_uom
return move.product_uom._compute_quantity(
qty_producing * move.unit_factor,
uom,
round=False
)
def _update_finished_move(self):
""" Update the finished move & move lines in order to set the finished
product lot on it as well as the produced quantity. This method get the
information either from the last workorder or from the Produce wizard."""
production_move = self.production_id.move_finished_ids.filtered(
lambda move: move.product_id == self.product_id and
move.state not in ('done', 'cancel')
)
if production_move and production_move.product_id.tracking != 'none':
if not self.finished_lot_id:
raise UserError(_('You need to provide a lot for the finished product.'))
move_line = production_move.move_line_ids.filtered(
lambda line: line.lot_id.id == self.finished_lot_id.id
)
if move_line:
if self.product_id.tracking == 'serial':
raise UserError(_('You cannot produce the same serial number twice.'))
move_line.product_uom_qty += self.qty_producing
move_line.qty_done += self.qty_producing
else:
location_dest_id = production_move.location_dest_id._get_putaway_strategy(self.product_id).id or production_move.location_dest_id.id
move_line.create({
'move_id': production_move.id,
'product_id': production_move.product_id.id,
'lot_id': self.finished_lot_id.id,
'product_uom_qty': self.qty_producing,
'product_uom_id': self.product_uom_id.id,
'qty_done': self.qty_producing,
'location_id': production_move.location_id.id,
'location_dest_id': location_dest_id,
})
else:
rounding = production_move.product_uom.rounding
production_move._set_quantity_done(
float_round(self.qty_producing, precision_rounding=rounding)
)
def _strict_consumption_check(self):
if self.consumption == 'strict':
for move in self.move_raw_ids:
qty_done = 0.0
for line in move.move_line_ids:
qty_done += line.product_uom_id._compute_quantity(line.qty_done, move.product_uom)
rounding = move.product_uom_id.rounding
if float_compare(qty_done, move.product_uom_qty, precision_rounding=rounding) != 0:
raise UserError(_('You should consume the quantity of %s defined in the BoM. If you want to consume more or less components, change the consumption setting on the BoM.', move.product_id.name))
    def _check_sn_uniqueness(self):
        """ Alert the user if the serial number has already been produced """
if self.product_tracking == 'serial' and self.finished_lot_id:
sml = self.env['stock.move.line'].search_count([
('lot_id', '=', self.finished_lot_id.id),
('location_id.usage', '=', 'production'),
('qty_done', '=', 1),
('state', '=', 'done')
])
if sml:
raise UserError(_('This serial number for product %s has already been produced', self.product_id.name))
| agpl-3.0 | 3,872,464,057,137,297,000 | 50.708877 | 230 | 0.601883 | false |
juliotrigo/juliotrigo | juliotrigo/wsgi.py | 1 | 1431 | """
WSGI config for juliotrigo project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "juliotrigo.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "juliotrigo.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| bsd-3-clause | 1,546,838,388,843,277,300 | 43.71875 | 79 | 0.794549 | false |
blakev/suds | suds/__init__.py | 1 | 4404 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
Suds is a lightweight SOAP python client that provides a
service proxy for Web Services.
"""
import os
import sys
#
# Project properties
#
__version__ = '0.4.unomena.2'
__build__="GA R699-20100913"
#
# Exceptions
#
class MethodNotFound(Exception):
def __init__(self, name):
Exception.__init__(self, "Method not found: '%s'" % name)
class PortNotFound(Exception):
def __init__(self, name):
Exception.__init__(self, "Port not found: '%s'" % name)
class ServiceNotFound(Exception):
def __init__(self, name):
Exception.__init__(self, "Service not found: '%s'" % name)
class TypeNotFound(Exception):
def __init__(self, name):
Exception.__init__(self, "Type not found: '%s'" % tostr(name))
class BuildError(Exception):
msg = \
"""
    An error occurred while building an instance of (%s). As a result
the object you requested could not be constructed. It is recommended
that you construct the type manually using a Suds object.
Please open a ticket with a description of this error.
Reason: %s
"""
def __init__(self, name, exception):
Exception.__init__(self, BuildError.msg % (name, exception))
class SoapHeadersNotPermitted(Exception):
msg = \
"""
Method (%s) was invoked with SOAP headers. The WSDL does not
define SOAP headers for this method. Retry without the soapheaders
keyword argument.
"""
def __init__(self, name):
Exception.__init__(self, self.msg % name)
class WebFault(Exception):
def __init__(self, fault, document):
if hasattr(fault, 'faultstring'):
Exception.__init__(self, "Server raised fault: '%s'" % fault.faultstring)
self.fault = fault
self.document = document
#
# Logging
#
class Repr:
def __init__(self, x):
self.x = x
def __str__(self):
return repr(self.x)
#
# Utility
#
def tostr(object, encoding=None):
""" get a unicode safe string representation of an object """
if isinstance(object, basestring):
if encoding is None:
return object
else:
return object.encode(encoding)
if isinstance(object, tuple):
s = ['(']
for item in object:
if isinstance(item, basestring):
s.append(item)
else:
s.append(tostr(item))
s.append(', ')
s.append(')')
return ''.join(s)
if isinstance(object, list):
s = ['[']
for item in object:
if isinstance(item, basestring):
s.append(item)
else:
s.append(tostr(item))
s.append(', ')
s.append(']')
return ''.join(s)
if isinstance(object, dict):
s = ['{']
for item in object.items():
if isinstance(item[0], basestring):
s.append(item[0])
else:
s.append(tostr(item[0]))
s.append(' = ')
if isinstance(item[1], basestring):
s.append(item[1])
else:
s.append(tostr(item[1]))
s.append(', ')
s.append('}')
return ''.join(s)
try:
return unicode(object)
except:
return str(object)
class null:
"""
The I{null} object.
Used to pass NULL for optional XML nodes.
"""
pass
def objid(obj):
return obj.__class__.__name__\
+':'+hex(id(obj))
import client
| lgpl-3.0 | -8,721,852,589,140,907,000 | 27.597403 | 85 | 0.581971 | false |
GPGPackageHandler/gpgpackagehandler | gpg_package_handler.py | 1 | 37509 |
#Use the included script run.sh to execute gpg_package_handler - run it using ./run.sh
#the options available to run.sh are shown below
# debugFlag - If value is 1 print error/descriptive messages.
# pathToScript - The path leading to the folder containing this script.
# (Used if run.sh is launched outside of folder containing gpg_package_handler.py)
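# Example invocation (the values below are hypothetical):
#   ./run.sh 1 /home/user/gpgpackagehandler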
#Things to do
#TODO ensure data is not cached in insecure locations. done in run.sh by making sure gpg has uid of root so sensitive data is handled in RAM and not page files.
#TODO find better way to detect if error occurs while sending key to keyserver.
#TODO complete documentation
#Testing to do
#TODO confirm consistent strategy for error handling
#TODO confirm consistent use of new line within print statements - after message only.
#TODO test with various file types - tested with .png, .docx and works as expected
#TODO test use with subkeys
#Future / Experimental work
#TODO implement argument passing from command line to allow this utility to be used within scripts
#TODO offline mode - dont query keyserver when decrypting message - allows for key forgery though!
import configparser
import fileinput
import gnupg
import os
import re #regular expression
import subprocess
import sys
import time
from datetime import date
from datetime import datetime
from optparse import OptionParser
#Classes
class KeyArgs:
def __init__(self, name, email, expiry_date, custom_date=False):
self.name = name
self.email = email
self.expiry_date = expiry_date
self.custom_date = custom_date
class KeyInfo:
def __init__(self, fingerprint, key_id=None, uid=None):
self.fingerprint = fingerprint
self.key_id = key_id
self.uid = uid
def print_info(self):
print('User ID: %s' % self.uid)
print('Fingerprint: %s' % self.fingerprint)
print('Key ID: %s' % self.key_id)
print()
class SenderArguments:
# userid (name,email), recipients (recip1, recip2), keytype, keylength, subkeytype, subkeylength, expiry date,
    def __init__(self, user_id=None, recipient_list=None, key_type=None, key_length=None, subkey_type=None, subkey_length=None, expiry_date=None):
self.user_id = user_id
self.recipient_list = recipient_list
self.key_type = key_type
self.key_length = key_length
self.subkey_type = subkey_type
self.subkey_length = subkey_length
self.expiry_date = expiry_date
# Global Variables
sender_path = os.getcwd()+ '/data/sender/'
receiver_path = os.getcwd() + '/data/receiver/'
sender_pubkey_path = receiver_path + 'sender_public_keys/'
encrypt_path = sender_path + 'to_encrypt/'
encrypted_path = receiver_path + 'to_decrypt/'
decrypted_path = receiver_path + 'decrypted/'
default_key_server = 'hkp://pool.sks-keyservers.net'
# Functions
def print_exit(messages=""):
if(messages != ""):
print(messages)
print("- Exiting Program.")
print()
raise SystemExit
def print_keyring_info(secret=False):
KeyInfos = []
i = 0
#If secret is True, only the keypairs that have a private key associated with them are retrieved.
import_result = gpg.list_keys(secret)
print(" -- Available Keys -- ")
for fingerprint in import_result.key_map:
# Do not show expired or revoked keys
key_info = retrieve_valid_key(fingerprint)
if(key_info == None):
continue;
i += 1
if(i%10 == 0):
input('Press enter to show next 10 available keys.\n')
print('%s. %s\n Fingerprint: %s' % (str(i), key_info.uid, key_info.fingerprint))
KeyInfos.append(key_info)
print()
return KeyInfos
def collect_recipient_info():
name = input('What is the name of the recipient? : ')
email = input('What is the email of the recipient? : ')
return format_uid(name,email)
def reassign_default_keypair():
# List all keys without including secret keys and associate a number with each key
# Select which key to assign to default key using input of associated number.
complete_keypairs_only = True
key_info_list = print_keyring_info(complete_keypairs_only)
list_len = len(key_info_list)
if(len(key_info_list) == 0):
print('- Unable to find any keypairs within GPG keyring.')
return 1
#loop controlling valid selection of default key
while(True):
#loop controlling valid entry of a numeric argument
while(True):
reply = None
reply = collect_input(prompt = 'Enter number associated with desired key.\n - ', pattern='[1-9]+')
print()
if(reply != None):
break
if(int(reply) > list_len):
print('- Invalid selection. Key does not exist\n')
continue
break
reply = int(reply) - 1
selected_key = key_info_list[int(reply)]
try:
set_default_keypair(selected_key.fingerprint)
except Exception as e:
print(e)
raise Exception('Unable to reassign default keypair.\n')
print('Successfully set default key to that of %s.\n' % selected_key.uid)
return 0
def format_uid(name, email):
return str(name + ' <' + email + '>')
def collect_input(prompt,pattern):
reply = None
reply = input(prompt)
if(re.fullmatch(pattern,reply,flags=0) == None):
print("Input is incorrect.\n")
return None
return reply
#optional arguments to allow for future use of command line arguments with GPG Package Handler
#def prepare_key_arguments(userId=None, keyType=None, keyLength=None, subkeyType=None, subkeyLength=None, expiryDate=None):
def prepare_key_arguments():
reply = None
#name,email,date,custom_date
key_args = KeyArgs(None,None,None,False)
print('- Creating new keypair.')
print('Enter credentials to be associated with this key.\n')
key_args.name = input('Enter Name: ')
key_args.email = input('Enter Email Address: ')
print('- Creating expiry date for new default key.\n')
print('Please provide your input.')
while(True):
reply = None
reply = collect_input(prompt='1. Custom.\n2. 1 Year.\n3. 1 Week.\n4. 1 Day.\n - ',pattern='[1-4]')
if(reply != None):
break
print()
if(reply != '1'):
key_args.custom_date = False
if(reply == '2'):
key_args.expiry_date = '1y'
if(reply == '3'):
key_args.expiry_date = '1w'
if(reply == '4'):
key_args.expiry_date = '1d'
#if a custom date was provided
if(reply == '1'):
key_args.custom_date = True
print('Please provide a custom date in the shown format yyyy-mm-dd.')
#print('You may also provide a date in a combination of the following formats.')
#print('Days (1d), Weeks (1w) or Years (1y).')
while(True):
#for when the first date is incorrect
key_args.expiry_date = None
key_args.expiry_date = input('Please enter date : ')
print()
try:
key_args.expiry_date = time.strptime(key_args.expiry_date ,'%Y-%m-%d')
except: #Not using exception message as the information provided is adequate
print('Please enter date using the format yyyy-mm-dd. Ex 2016-12-31')
continue
key_args.expiry_date = datetime.fromtimestamp(time.mktime(key_args.expiry_date)).date()
if(key_args.expiry_date <= date.today()):
print('The expiry date must be in the future.')
continue
break
#end while loop
return key_args
def generate_keypair(name, email, date, keyServer):
PRIMARY_UID = 0
print("Generating keypair using GnuPG. Please wait...")
#input_data = gpg.gen_key_input(expire_date=str(date), key_type="DSA", key_length=4096, name_real=name, name_email=email, subkey_type="ELG-E", subkey_length=3072)
input_data = gpg.gen_key_input(expire_date=str(date), key_type="RSA", key_length=4096, name_real=name, name_email=email)
try:
new_keypair = gpg.gen_key(input_data)
#print(new_keypair)
except Exception as e:
print(e)
        raise Exception(' - Error occurred while generating keypair with GnuPG.')
#public key
#try:
#public key converted to ascii for support with web clients
# ascii_armored_public_key = gpg.export_keys(new_keypair.fingerprint, armor=True)
#except Exception as e:
# print(e)
# raise Exception(' - Error creating ASCII armored public key. Key cannot be provided to recipient(s)')
send_result=gpg.send_keys(keyServer,new_keypair.fingerprint)
#ensuring the key was sent to the key server.
errors = str(send_result.__dict__['stderr'])
if(errors.find("keyserver send failed") != -1):
        #If your public key cannot be sent to the keyserver then the sender cannot retrieve your public key to send you messages.
gpg.delete_keys(new_keypair.fingerprint,secret=True) #deleting the private key
gpg.delete_keys(new_keypair.fingerprint) # deleting the public key
        raise Exception('- Error sending key to keyserver.')
#Retrieve key_id and uid from result of list_keys
#Since the keypair has just been generated only a single uid can exist for this keypair.
key_dict = gpg.list_keys(keys=new_keypair.fingerprint)[0]
return KeyInfo(key_id=key_dict['keyid'], fingerprint = new_keypair.fingerprint, uid = key_dict['uids'][PRIMARY_UID])
#optional arguments allowing for future use of command line arguments with GPG Package Handler
#def create_default_keypair(userId=None, keyType=None, keyLength=None, subkeyType=None, subkeyLength=None, expiryDate=None, keyServer=None):
def create_default_keypair(keyServer=None):
key_args = prepare_key_arguments()
if(keyServer == None):
keyServer = default_key_server
if(key_args.custom_date == True):
try:
key_args.expiry_date = key_args.expiry_date.strftime('%Y-%m-%d')
except Exception as e:
err_output='Error while formatting custom date of %s' % key_args.expiry_date
raise Exception(err_output)
try:
key_info = generate_keypair(key_args.name,key_args.email,key_args.expiry_date, keyServer)
except Exception as e:
print(e)
raise Exception('- Error creating keypair.')
try:
set_default_keypair(key_info.fingerprint)
print('- Created keypair successfully set as default key.')
print('- The default key can be reset from the main menu.')
except Exception as e:
gpg.delete_keys(key_info.fingerprint,secret=True) #deleting the private key
gpg.delete_keys(key_info.fingerprint); #deleting the public key
print(e)
raise Exception('Unable to set created key as default key.')
return key_info
def set_default_keypair(fingerprint):
gpg_config = None
gpg_temp_config = None
def_key_set=0
def_key_entry='default-key '+ fingerprint + '\n'
#open gpg.conf at location specified by gpg_path
gpg_conf_path = gpg_path + "/gpg.conf"
gpg_temp_path = gpg_path + "/gpg_temp.conf"
try:
#Verify the authenticity of the fingerprint by exporting it out of the keyring.
#If the export fails, this key is not in the keyring and cannot be set as default key.
if(len(gpg.export_keys(fingerprint)) == 0):
            raise Exception('Key matching provided fingerprint could not be found in keyring.')
#check if gpg configuration file exists
#if not create a copy of this file
#add default key entry to the file
if(os.path.isfile(gpg_conf_path) == False):
gpg_config = open(gpg_conf_path, 'w')
gpg_config.write(def_key_entry)
gpg_config.close()
else:
#modify gpg configuration file by performing these steps
#create temp config file from current config file.
#locate default-key entry in the configuration file
#replace the keyid value with the supplied fingerprint
#copy contents of temp config to active config
gpg_config = open(gpg_conf_path, 'r') #read
gpg_temp_config = open(gpg_temp_path, 'w') #write
#locate default-key entry in the configuration file
for line in gpg_config:
if(line.find('default-key') != -1):
match = re.match('^(default-key|#default-key)\s([A-Z0-9]{40}|[A-Z0-9]{8})', line, flags=0)
if(match != None):
#replace the keyid value with the supplied fingerprint
line=str(def_key_entry)
def_key_set=1
#create temp file from current file, the entire file must be read to allow a full copy to be made
gpg_temp_config.write(line)
if(def_key_set == 0):
gpg_temp_config.write(def_key_entry)
#Close config file to allow it to be reopened in write IO mode.
gpg_config.close()
#close temp file to allow the buffer to flush its contents to file
gpg_temp_config.close()
gpg_config = open(gpg_conf_path,'w')
gpg_temp_config = open(gpg_temp_path, 'r')
#copy contents of temp config to active config
gpg_config.write(gpg_temp_config.read())
gpg_config.close()
gpg_temp_config.close()
os.remove(gpg_temp_config.name)
except Exception as e:
if(gpg_config != None):
gpg_config.close()
if(gpg_temp_config != None):
gpg_temp_config.close()
print(e)
        raise Exception('Error occurred while assigning key to default key.')
def get_default_keypair():
gpg_conf_path = gpg_path + "/gpg.conf"
gpg_config = None
fingerprint = None
def_key = None
#locate default-key entry in the configuration file
try:
gpg_config = open(gpg_conf_path, 'r') #read
for line in gpg_config:
if(line.find('default') != -1):
match = re.match('^default-key\s([A-Z0-9]{40}|[A-Z0-9]{8})', line, flags=0)
if(match != None):
fingerprint = match.group(1)
# retrieving key entered in configuration file from GnuPG's keyring.
def_key = retrieve_valid_key(fingerprint,verbose=True)
if(def_key != None):
gpg_config.close()
return def_key
#end of for loop
gpg_config.close()
except Exception as e:
if(gpg_config != None):
gpg_config.close()
print(e)
print_exit('- Error retrieving default public key')
print("A default keypair has not be assigned.\n")
return None
def retrieve_valid_key(fingerprint="", verbose = False):
KEY_INDEX=0
PRIMARY_UID=0
key_list=None
key=None
#Retreiving keys from GnuPG keyring.
key_list = gpg.list_keys(keys=fingerprint)
#print(key_list)
if(len(key_list) != 0):
key = key_list[KEY_INDEX]
if (key == None):
if(verbose == True):
print('A key with the following fingerprint could not be found within GnuPG\'s keyring:\n%s\n' % fingerprint)
return None
#If key is expired.
if (str(key['trust']) == 'e'):
if(verbose == True):
print('%s\'s key is expired.\nKey Fingerprint: %s\n' % (key['uids'][PRIMARY_UID],fingerprint))
return None
#If key is revoked.
if (str(key['trust']) == 'r'):
if(verbose == True):
print('%s\'s key has been revoked.\nKey Fingerprint: %s\n' % (key['uids'][PRIMARY_UID],fingerprint))
return None
#only the first uid is selected as this is the primary uid of the keypair.
return KeyInfo(fingerprint,key['keyid'],key['uids'][PRIMARY_UID])
def search_for_key(recipients, key_server = None):
key_list = []
recipient_info = []
i = 0
reply = None
if(key_server == None):
key_server = default_key_server
#if only a single recipient is to be searched for
if(type(recipients) is str):
s = recipients
recipients = []
recipients.append(s)
for recipient in recipients:
match = 0
while(True): #start of loop allowing for reattempt of search for specific recipient
try:
print('- Searching for %s within %s.\n' % (recipient,key_server))
key_list = gpg.search_keys(recipient, key_server)
except Exception as e:
print(e)
print('Error while searching for public key of %s.\n' % recipient)
break
if(key_list.__dict__['curkey'] != None):
#ensuring the provided user ID matches the searched user ID
#cannot compare by fingerprint at this point as the fingerprint is unknown before the search by User ID.
for key in key_list:
for uid in key['uids']:
if(uid == recipient):
match = 1
recipient_info.append(KeyInfo(fingerprint = key['keyid'],uid=uid))
break
if(match == 1):
break
#end of loop through key_list
if(match != 1):
print('- Unable to find information on recipient %s from %s.\n' % (recipient, key_server))
prompt = 'Choose retry option below:\n'
options = [ '1. Search using new recipient info.\n','2. Retry search for ' + recipient + '.\n','3. Abandon search for ' + recipient +'.\n', '4. Return to main menu.\n' ]
choice = create_menu(prompt, options)
                print(choice)
if(choice == 1):
recipients[i] = collect_recipient_info()
recipient = recipients[i]
continue
if(choice == 2):
continue
if(choice == 3):
break
if(choice == 4):
                    recipient_info = []
                    break
print('- Recipient %s found at %s.\nFingerprint of public key: %s \n' % (recipient, key_server, recipient_info[i].fingerprint))
i += 1
break; #end of while loop allowing reattempt of search for specific recipient
#end of recipients loop
return recipient_info
def output_to_file(path, public_key):
pubkey_file = None
try:
#Outputting the public key to file
pubkey_file = open(path, 'w')
pubkey_file.write(public_key)
pubkey_file.close()
except Exception as e:
if(pubkey_file != None):
pubkey_file.close()
print(e)
raise Exception("File could not be exported")
def encrypt_file(encrypt_path, encrypted_path, recip_fingerprints, signer_fingerprint):
file = None
print('- Beginning file encryption.\n')
try:
file = open(encrypt_path, 'rb')
encrypted_path = encrypted_path + '.gpg';
result = gpg.encrypt_file(file, recip_fingerprints, sign=signer_fingerprint, output=encrypted_path)
except Exception as e:
#print(result.data)
#print(result.ok)
#print(result.status)
if(file != None):
file.close()
print(e)
raise Exception("Unable to encrypt file.")
file.close()
print('- Successfully encrypted and signed file.\nEncrypted file has been output to %s\n' % encrypted_path)
#def sign_file(file_path, signer_fingerprint, userid = None):
#
# signed_data = None
# file = None
# print()
# print('- Signing file located at:\n' + file_path)
# print('Signing file with private key of ' + userId)
# file = open(file_path, 'rb')
#
# try:
# try clearsign = False
# signed_data=gpg.sign_file(file, keyid=signer_fingerprint, clearsign=True, output=file_path)
# except Exception as e:
# if(file != None):
# file.close()
# reprint(e)
# raise Exception('Error signing file with the private key of ' + userid)
# raise Exception('Error signing file with the private key.')
# file.close()
# return signed_data
#Checking if a keypair's public key has previously been signed by the sender or receiver.
def check_for_key_signature(key_to_sign_fingerprint, signature_to_find_keyid):
cur_key_id = None
RECEIVER_KEY = 0
KEY_ID = 0
key_list = gpg.list_keys(keys=key_to_sign_fingerprint,sigs=True)
if(len(key_list) != 0):
for sig_info in key_list[RECEIVER_KEY]['sigs']:
cur_key_id = str(sig_info[KEY_ID])
if(cur_key_id == signature_to_find_keyid):
return True
return False
#sign_key calls on BASH script sign_key.sh that calls upon gpg's --sign-key command
#key signing is done using the current default key.
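#A minimal sketch of what such a helper script could contain (assumption: the
#actual res/sign_key.sh is not shown in this file and may differ):
#   #!/bin/bash
#   gpg --sign-key "$1"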
def sign_key(fingerprint):
resources_path = os.getcwd() + "/res/sign_key.sh"
reply = None
key_info = retrieve_valid_key(fingerprint,verbose=True)
if(key_info == None):
return False
print( "-- Details of key to be signed --\n")
key_info.print_info()
while(True):
reply = collect_input(prompt="Do you want to sign this key\n1. Yes\n2. No\n - ",pattern="1|2")
print()
if(reply != None):
break
if(reply == '1'):
try:
retcode = subprocess.call([resources_path, fingerprint], shell=False)
except Exception as e:
print(e)
raise Exception("Unable to execute the script performing key signing.\n" + resources_path)
return True
return False
def delete_file(file_path):
print('Would you like to delete the original unencrypted message?\n')
reply = None
reply = input('Type \'y\' to confirm deletion of the message? : ')
print()
if(str(reply) != 'y' and str(reply) != 'Y'):
print('The file located at %s will NOT be deleted.\n' % file_path)
return False
# the utility srm is used to securely remove the unencrypted message.
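    # Hypothetical sketch of what res/srm.sh is assumed to do (the real script is
    # not included in this file and may pass additional options):
    #   #!/bin/bash
    #   srm "$1"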
try:
print('Deleting file located at %s\n' % file_path)
subprocess.check_call([os.getcwd() + "/res/srm.sh", file_path], shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except subprocess.CalledProcessError as e:
print('Unable to delete file using Secure Remove (SRM).')
        raise Exception('Return code: ' + str(e.returncode) + '\nErrors returned:\n' + str(e.stderr) + '\n')
return True
#Functions called exclusively in main
#optional arguments to allow for future use of command line arguments with GPG Package Handler
#def prepare_message(userid=None, keytype=None, keylength=None, subkeytype=None, subkeylength=None, expirydate=None, keyserver=None):
def prepare_message():
RECIPIENT = 0
reply = None
def_keypair = None
file_found = False
key_signed = False
key_server = default_key_server
recipients = []
fingerprints = []
cur_sender_pubkey_path = ""
print('- Preparing message for encryption.\n')
    #Checking to see if a file is present within the to_encrypt folder.
    #This check is performed first to avoid completing unnecessary steps.
for file in os.listdir(encrypt_path):
if(file.startswith('.') == False):
file_found = True
if(file_found == False):
print('- No content to encrypt.\n- Please add content within\n%s\n' % encrypt_path)
return
def_keypair=get_default_keypair()
if(def_keypair == None):
print('Unable to retrieve the sender\'s keypair.')
return
prompt = '- Preparing GPG key used to identify sender.\n'
options = ['1. Use existing default keypair created by ' + def_keypair.uid, '\n2. Reassign default keypair from existing keys.\n', '3. Create new keypair\n']
choice = create_menu(prompt,options)
if(choice == 1):
print('- Existing key used. Messages will be signed with default key.\n')
if(choice == 2):
if(reassign_default_keypair() != 0):
print("Error occured while reassigning default keypair.")
return
def_keypair=get_default_keypair()
if(choice == 3):
try:
def_keypair=create_default_keypair(key_server)
except Exception as e:
print(e)
return
#if(len(gpg.list_keys(secret=True) > 1):
# options = ['1. Use existing default keypair created by ' + def_keypair.uid, '\n2. Reassign default keypair from existing keys.\n', '3. Create new keypair\n']
# choice = create_menu(prompt,options)
# if(choice == 1):
# print('- Existing key used. Messages will be signed with default key.')
# if(choice == 2):
# if(reassign_default_keypair() != 0):
# print("Error occured while reassigning default keypair.")
# return
# def_keypair=get_default_keypair()
# if(choice == 3):
# try:
# def_keypair=create_default_keypair(key_server)
# except Exception as e:
# print(e)
# return
#else:
# options = ['1. Use existing default keypair created by ' + def_keypair.uid, '\n2. Create new keypair\n']
# choice = create_menu(prompt,options)
# if(choice == 1):
# print('- Existing key used. Messages will be signed with default key.')
# if(choice == 2):
# try:
# def_keypair=create_default_keypair(key_server)
# except Exception as e:
# print(e)
# return
print("Info related to sender's key pair.")
def_keypair.print_info()
#Collecting recipient information from the sender.
print('- Determining recipient(s) for message by User ID.\nPlease ensure the receiver has previously created a keypair.\n')
while(True):
recipients.append(collect_recipient_info())
reply = None
reply = input('Type \'y\' to add another recipient : ')
print()
if(str(reply) != 'y' and str(reply) != 'Y'):
break
recipient_info = search_for_key(recipients,key_server)
if(len(recipient_info) == 0):
print('- Unable to retrieve info on recipient(s).')
return
# retrieving recipient(s) public keys from keyserver based on fingerprint of each and import keys into key ring
for key_info in recipient_info:
key_signed = False
try:
print("- Importing info for %s from %s.\n" % (key_info.uid, key_server))
key = gpg.recv_keys(key_server, key_info.fingerprint)
if(key.__dict__['results'][RECIPIENT]['fingerprint'] != key_info.fingerprint):
print("The imported key information does not match the information from the initial search.\n")
#Deleting the undesired public key.
#Deletion will not work if there is a private key existing within the local GPG keyring.
gpg.delete_keys(key.__dict__['results'][RECIPIENT]['fingerprint'])
continue
if(retrieve_valid_key(key_info.fingerprint, verbose=True) == None):
print("Unable to send messages to recipient using this key.\n");
continue
except Exception as e:
print(e)
            print('- Unable to import receiver\'s key into local GPG keyring.')
return
#Checking if the receiver's public key has previously been signed by the sender.
#Will be useful if an offline mode is developed.
#key_signed = check_for_key_signature(key_info.fingerprint,def_keypair.key_id)
#Signing recipient's public key the sender's private key.
#print()
#print('- Signing recipient\'s public key to establish trust for future communication.')
#if(key_signed == False):
# try:
# if(sign_key(key_info.fingerprint) == True):
# print('Trust established as recipient\'s public key has been signed by ' + def_keypair.uid + '.')
# else:
# print('Trust NOT established with recipient\'s public key.')
# except Exception as e:
# print(e)
# print_exit('- Unable to sign recipient\'s public key.')
#else:
# print('Recipient\'s public key has been previously signed by ' + def_keypair.uid + '.')
try:
recip_public_key = gpg.export_keys(key_info.fingerprint, armor=True)
recipient_pubkey_path = sender_path + 'recipient_public_keys/' + key_info.uid.replace(" ","") + '.asc'
output_to_file(recipient_pubkey_path, recip_public_key)
print('- Successfully output public key of %s to file at:\n%s\n' % (key_info.uid,recipient_pubkey_path))
except Exception as e:
print(e)
print('- Unable to output public key of ' + key_info.uid + ' to file.\nCannot sign file to establish trust with recipient.')
return
# collecting the fingerprints of each recipient to use during encryption
# the recipients public key is used to encrypt the document, their private key can then be used to decrypt the document
fingerprints.append(key_info.fingerprint)
#Export senders public key to file.
#Will be useful if an offline mode is developed
#print()
#print('- Exporting sender\'s public key to file.\nRecipients can use your public key to establish trust with you as sender.')
#print('Private Key created, messages can be signed using your private key.')
#try:
# cur_sender_pubkey_path = sender_pubkey_path + '/' + keypair.uids.replace(" ","") +'.asc'
# output_to_file(cur_sender_pubkey_path, keypair.public_key)
# print()
# print('- Sender\'s public key successfully output to\n' + cur_sender_pubkey_path)
#except Exception as e:
# print(e)
# print_exit("- Cannot export public key to file.\n- Unable to provide recipient with copy of your public key")
#
#extract all fingerprints from list of keyInfo objects
if(len(fingerprints) == 0):
print('- Unable to encrypt file. Reciepient info not retrieved.')
return
#read name of files folder of files to encrypt
for file in os.listdir(encrypt_path):
if(file.startswith('.') == False):
try:
encrypt_file(encrypt_path + file, encrypted_path + file, fingerprints, def_keypair.fingerprint)
delete_file(encrypt_path + file)
except Exception as e:
print(e)
return
print('- Encryption process complete.\nSee ' + encrypted_path + ' to find the content to provide to the receiver.\n')
def receive_message():
SENDER = 0
file_found = False
key_signed = False
def_keypair = None
sender_key = None
sender_key_info = None
sender_name = None
sender_email = None
key_server = default_key_server
print('- Preparing message for decryption.\n')
#Checking to see if a file is present within the to_encrpyt folder.
#This check is performed first to avoid completing unnecssary steps.
for file in os.listdir(encrypted_path):
if(file.startswith('.') == False):
file_found = True
if(file_found == False):
print('- No content to decrypt.\n- Please add content within\n%s\n' % encrypted_path)
return
def_keypair=get_default_keypair()
if(def_keypair == None):
print('Unable to retrieve the receiver\'s keypair')
return
prompt = '- Preparing GPG key used to identify receiver.\n'
options = ['1. Use existing default keypair created by ' + def_keypair.uid,'\n2. Reassign default keypair from existing keys.\n']
choice = create_menu(prompt,options)
if(choice == 1):
print('- Existing key used. Messages will be signed with default key.\n')
if(choice == 2):
if(reassign_default_keypair() != 0):
print("Error occured while reassigning default keypair.")
return
def_keypair=get_default_keypair()
print("Info related to receivers's key pair.")
def_keypair.print_info()
print('- Determining sender of the message by User ID.')
sender_name = input('What is the name of the sender? : ')
sender_email = input('What is the email of the sender? : ')
print()
#query's keyserver and return KeyInfo object if uid is found.
sender_key_info = search_for_key(format_uid(sender_name,sender_email), key_server)
if(len(sender_key_info) == 0):
print('- Unable to retrieve sender\'s key from %s.\n' % key_server)
return
sender_key_info = sender_key_info[SENDER]
try:
print("- Importing info for %s from %s.\n" % (sender_key_info.uid, key_server))
sender_key = gpg.recv_keys(key_server, sender_key_info.fingerprint)
if(sender_key.__dict__['results'][SENDER]['fingerprint'] != sender_key_info.fingerprint):
print("The imported key information does not match the information from the initial search.\n")
#Deleting the undesired public key.
#Deletion will not work if there is a private key existing within the local GPG keyring.
gpg.delete_keys(sender_key.__dict__['results'][SENDER]['fingerprint'])
return
if(retrieve_valid_key(sender_key_info.fingerprint, verbose=True) == None):
print("Unable to receive messages from sender using this key.");
return
except Exception as e:
print(e)
print('- Unable to import sender\'s key into local GPG keyring.')
return
try:
print('- Signing sender\'s public key to establish trust.\n')
#Checking if the sender's public key has previously been signed by the receiver.
key_signed = check_for_key_signature(sender_key_info.fingerprint,def_keypair.key_id)
if(key_signed == False):
if(sign_key(sender_key_info.fingerprint) == True):
print('- Trust established!\n%s\'s public key has been signed by %s.\n' % (sender_key_info.uid, def_keypair.uid))
                print('- Sending public key of %s to %s to help establish web of trust.\n' % (sender_key_info.uid, key_server))
#sending signed public key of sender back to keyserver.
send_result=gpg.send_keys(key_server,sender_key_info.fingerprint)
#ensuring the key was sent to the key server.
errors = str(send_result.__dict__['stderr'])
if(errors.find("keyserver send failed") != -1):
                    print('- Error sending public key to keyserver.')
else:
print('- Trust NOT established with sender\'s public key.\nDecryption process will now stop.\n')
#Deleting the undesired public key.
#Deletion will not work if there is a private key existing within the local GPG keyring.
gpg.delete_keys(sender_key_info.fingerprint)
return
else:
print('Sender\'s public key has been previously signed by %s.\n' % def_keypair.uid)
except Exception as e:
print(e)
print('- Unable to sign sender\'s public key.\nDecryption process will now stop.')
return
print('- Outputting the public key of %s to file.\n' % sender_key_info.uid)
try:
sender_public_key = gpg.export_keys(sender_key_info.fingerprint, armor=True)
sender_pubkey_file_path = sender_pubkey_path + sender_key_info.uid.replace(" ","") + '.asc'
output_to_file(sender_pubkey_file_path, sender_public_key)
print('Sender\'s public key saved at %s\n' % sender_pubkey_file_path)
except Exception as e:
print(e)
print('- Unable to output public key of %s to file.\nCannot sign file to establish trust with recipient.' % sender_key_info.uid)
return
try:
print('- Decrypting file(s) received from sender.\n')
for file in os.listdir(encrypted_path):
if(file.startswith('.') == False):
encrypted_file = open(encrypted_path + file, 'rb')
file = file[:-4]
result = gpg.decrypt_file(encrypted_file, output=decrypted_path + file)
#TRUST_LEVELS = {
#"TRUST_UNDEFINED" : TRUST_UNDEFINED,
#"TRUST_NEVER" : TRUST_NEVER,
#"TRUST_MARGINAL" : TRUST_MARGINAL,
#"TRUST_FULLY" : TRUST_FULLY,
#"TRUST_ULTIMATE" : TRUST_ULTIMATE,
#
# See trust levels above.
#}
#Verifying trust in encrypted file
if result.trust_level is not None and result.trust_level >= result.TRUST_FULLY:
                    print('- File successfully decrypted.\n%s saved at:\n%s' % (file,decrypted_path))
print()
else:
print('Decryption of ' + file + ' unsuccessful.')
return
#end of encrypted file loop
except Exception as e:
print(e)
        print('- Unable to decrypt message from sender')
return
#Allows for creation of menus without creating chained if statements.
#prompt - String - The question asked to the user.
#options - List of String - The available options to the user.
def create_menu(prompt, options):
#Building the regular expression used check for valid input within collect_input()
pattern = '[1-' + str(len(options)) + ']'
#Appending all options to a single string that is provided to collect_input()
input_prompt = prompt
for option in options:
input_prompt += option
#Adding the line that the user will enter its choice on.
input_prompt += ' - '
#Presenting the menu until valid input is provided.
while(True):
reply = None
reply = collect_input(prompt=input_prompt,pattern=pattern)
if(reply != None):
            break
print()
return int(reply)
def main_menu():
print("--- MAIN MENU ---\n")
def_keypair=get_default_keypair()
if(def_keypair != None):
print("The key currently in use is:")
def_keypair.print_info()
elif( len(gpg.list_keys(secret=True)) == 0):
print("Please create a keypair.\nA keypair is needed to send or receive packages.\n")
try:
create_default_keypair()
except Exception as e:
print(e)
return
else:
prompt = 'What would you like to do?\n'
options = [ '1. Assign default keypair from existing keys.\n','2. Create and assign new default keypair.\n','3. Exit Program.\n' ]
functionNames = [ reassign_default_keypair, create_default_keypair , print_exit ]
choice = create_menu(prompt, options)
functionToCall = functionNames[choice-1]
try:
functionToCall()
except Exception as e:
print(e)
return
prompt = 'What would you like to do?\n'
options = ['1. Send or recieve a package.\n','2. Manage your default keypair.\n','3. Exit program.\n']
functionNames = [ manage_package_menu, manage_key_menu, print_exit ]
choice = create_menu(prompt,options)
functionToCall = functionNames[choice-1]
functionToCall()
def manage_package_menu():
prompt = 'Are you sending or receiving a package?\n'
options = [ '1. Sending package.\n','2. Receiving package.\n','3. Return to main menu.\n']
functionNames = [ prepare_message, receive_message, main_menu ]
choice = create_menu(prompt, options)
functionToCall = functionNames[choice-1]
functionToCall()
def manage_key_menu():
prompt = 'What would you like to do?\n'
options = [ '1. Create and assign new default keypair.\n','2. Reassign default keypair from existing keys.\n','3. Return to main menu.\n' ]
functionNames = [ create_default_keypair , reassign_default_keypair, main_menu ]
choice = create_menu(prompt, options)
functionToCall = functionNames[choice-1]
try:
functionToCall()
except Exception as e:
print(e)
return
######################################
# MAIN
######################################
sys.path.insert(0, os.getcwd() + '/res')
parser = OptionParser()
reply = 1
#NAME and EMAIL are used when parsing user_id command line argument
#NAME = 0
#EMAIL = 1
#reading argument passed in from command line
#home directory, userid (name,email), keytype, keylength, subkeytype, subkeylength, expiry date, recipients (recip1, recip2)
parser.add_option('-l', dest='home_directory', help='-l Path of the home directory of the current user, leading to where GPG is installed. run.sh provides this argument automatically')
#parser.add_option('-u', dest='user_id', help='-u Name,Email')
#parser.add_option('-e', dest='expiry_date',help='-e Expiry Date. Format: yyyy-mm-dd')
#parser.add_option('-r', dest='recipients',help='r RecipientList Format: recip1, recip2')
#parser.add_option('--kt', dest='key_type', help='--ky Key Type')
#parser.add_option('--kl', dest='key_length', help='--kl Key Length. Max = : :')
#parser.add_option('--skt', dest='subkey_type', help='skt Subkey Type')
#parser.add_option('--skl', dest='subkey_length', help='--skl Subkey Length. Max = : :')
(options, args) = parser.parse_args()
#if(options.user_id != None):
# #Vinay Sajip <[email protected]>'
# id_parts = user_id.split(',')
# user_id = format_uid(id_parts[NAME], id_parts[EMAIL])
#if(options.recipients!= None):
# recipient_list = recipients.split(',')
#should be able to avoid retrieving and passing the home directory from the installation script
#gpg_path = '~/.gnupg'
gpg_path = options.home_directory + '/.gnupg'
#print(gpg_path)
gpg = gnupg.GPG(gnupghome=gpg_path)
gpg.encoding = 'utf-8'
while(True):
main_menu()
| lgpl-3.0 | -1,344,193,104,869,084,000 | 31.730366 | 173 | 0.69298 | false |
meprogrammerguy/pyMadness | scrape_stats.py | 1 | 2098 | #!/usr/bin/env python3
from urllib.request import urlopen
from bs4 import BeautifulSoup
import pandas as pd
import html5lib
import pdb
from collections import OrderedDict
import json
import csv
import contextlib
url = "https://kenpom.com/index.php"
#url = "https://kenpom.com/index.php?y=2017" #past year testing override
print ("Scrape Statistics Tool")
print ("**************************")
print ("data is from {0}".format(url))
print ("**************************")
with contextlib.closing(urlopen(url)) as page:
soup = BeautifulSoup(page, "html5lib")
ratings_table=soup.find('table', id='ratings-table')
IDX=[]
A=[]
B=[]
C=[]
D=[]
E=[]
F=[]
G=[]
H=[]
I=[]
J=[]
K=[]
L=[]
M=[]
index=0
for row in ratings_table.findAll("tr"):
col=row.findAll('td')
if len(col)>0:
index+=1
IDX.append(index)
A.append(col[0].find(text=True))
B.append(col[1].find(text=True))
C.append(col[2].find(text=True))
D.append(col[3].find(text=True))
E.append(col[4].find(text=True))
F.append(col[5].find(text=True))
G.append(col[7].find(text=True))
H.append(col[9].find(text=True))
I.append(col[11].find(text=True))
J.append(col[13].find(text=True))
K.append(col[15].find(text=True))
L.append(col[17].find(text=True))
M.append(col[19].find(text=True))
df=pd.DataFrame(IDX,columns=['Index'])
df['Rank']=A
df['Team']=B
df['Conf']=C
df['W-L']=D
df['AdjEM']=E
df['AdjO']=F
df['AdjD']=G
df['AdjT']=H
df['Luck']=I
df['AdjEMSOS']=J
df['OppOSOS']=K
df['OppDSOS']=L
df['AdjEMNCSOS']=M
with open('stats.json', 'w') as f:
f.write(df.to_json(orient='index'))
with open("stats.json") as stats_json:
dict_stats = json.load(stats_json, object_pairs_hook=OrderedDict)
stats_sheet = open('stats.csv', 'w', newline='')
csvwriter = csv.writer(stats_sheet)
count = 0
for row in dict_stats.values():
#pdb.set_trace()
if (count == 0):
header = row.keys()
csvwriter.writerow(header)
count += 1
csvwriter.writerow(row.values())
stats_sheet.close()
print ("done.")
| mit | -3,005,067,790,104,389,600 | 22.054945 | 72 | 0.609152 | false |
berkeley-stat159/project-alpha | code/utils/scripts/glm_script.py | 1 | 3957 | """ Script for GLM functions.
Run with:
python glm_script.py
"""
# Loading modules.
from __future__ import absolute_import, division, print_function
import numpy as np
import matplotlib.pyplot as plt
import nibabel as nib
import os
import sys
# Relative paths to subject 1 data.
project_path = "../../../"
pathtodata = project_path + "data/ds009/sub001/"
condition_location = pathtodata+"model/model001/onsets/task001_run001/"
location_of_images = project_path+"images/"
location_of_functions = project_path+"code/utils/functions/"
sys.path.append(location_of_functions)
# Load events2neural from the stimuli module.
from stimuli import events2neural
from event_related_fMRI_functions import hrf_single, convolution_specialized
# Load our GLM functions.
from glm import glm, glm_diagnostics, glm_multiple
# Load the image data for subject 1.
img = nib.load(pathtodata+"BOLD/task001_run001/bold.nii.gz")
data = img.get_data()
data = data[...,6:] # Knock off the first 6 observations.
cond1=np.loadtxt(condition_location+"cond001.txt")
cond2=np.loadtxt(condition_location+"cond002.txt")
cond3=np.loadtxt(condition_location+"cond003.txt")
#######################
# a. (my) convolution #
#######################
all_stimuli=np.array(sorted(list(cond2[:,0])+list(cond3[:,0])+list(cond1[:,0]))) # could also just x_s_array
my_hrf = convolution_specialized(all_stimuli,np.ones(len(all_stimuli)),hrf_single,np.linspace(0,239*2-2,239))
##################
# b. np.convolve #
##################
# initial needed values
TR = 2
tr_times = np.arange(0, 30, TR)
hrf_at_trs = np.array([hrf_single(x) for x in tr_times])
n_vols=data.shape[-1]
# creating the .txt file for the events2neural function
cond_all=np.row_stack((cond1,cond2,cond3))
cond_all=sorted(cond_all,key= lambda x:x[0])
np.savetxt(condition_location+"cond_all.txt",cond_all)
neural_prediction = events2neural(condition_location+"cond_all.txt",TR,n_vols)
convolved = np.convolve(neural_prediction, hrf_at_trs) # hrf_at_trs sample data
N = len(neural_prediction) # N == n_vols == 173
M = len(hrf_at_trs) # M == 12
np_hrf=convolved[:N]
#############################
#############################
# Analysis and diagonistics #
#############################
#############################
#######################
# a. (my) convolution #
#######################
# Now get the estimated coefficients and design matrix for doing
# regression on the convolved time course.
B_my, X_my = glm(data, my_hrf)
# Some diagnostics.
MRSS_my, fitted_my, residuals_my = glm_diagnostics(B_my, X_my, data)
# Print out the mean MRSS.
print("MRSS using 'my' convolution function: "+str(np.mean(MRSS_my)))
# Plot the time course for a single voxel with the fitted values.
# Looks pretty bad.
plt.plot(data[41, 47, 2]) #change from cherry-picking
plt.plot(fitted_my[41, 47, 2])
plt.savefig(location_of_images+"glm_plot_my.png")
plt.close()
##################
# b. np.convolve #
##################
# Now get the estimated coefficients and design matrix for doing
# regression on the convolved time course.
B_np, X_np = glm(data, np_hrf)
# Some diagnostics.
MRSS_np, fitted_np, residuals_np = glm_diagnostics(B_np, X_np, data)
# Print out the mean MRSS.
print("MRSS using np convolution function: "+str(np.mean(MRSS_np)))
# Plot the time course for a single voxel with the fitted values.
# Looks pretty bad.
plt.plot(data[41, 47, 2])
plt.plot(fitted_np[41, 47, 2])
plt.savefig(location_of_images+"glm_plot_np.png")
plt.close()
X_my3=np.ones((data.shape[-1],4))
for i in range(3):
X_my3[:,i+1]=my_hrf**(i+1)
B_my3, X_my3 = glm_multiple(data, X_my3)
MRSS_my3, fitted_my3, residuals_my3 = glm_diagnostics(B_my3, X_my3, data)
print("MRSS using 'my' convolution function, 3rd degree polynomial: "+str(np.mean(MRSS_my3))+ ", but the chart looks better")
plt.plot(data[41, 47, 2])
plt.plot(fitted_my3[41, 47, 2])
plt.savefig(location_of_images+"glm_plot_my3.png")
plt.close()
| bsd-3-clause | -381,920,970,145,549,500 | 28.75188 | 125 | 0.664645 | false |
manufactured-solutions/analytical | navier_stokes/NS_Power_law_transient_scalar_sympy/sympy/NS_PowerLaw_scalar_transient_3d_phi_codes.py | 1 | 1413 |
# Writing Q_phi into a C code ------------------------------------------
#Q_phi_time=Q2n
#Q_phi_convection=Q1n
#Q_phi_diffusion=Q3n
#unassigning variables Q_phi_time and Q_phi_convection in order to write a more readable C code
var('Q_phi_time, Q_phi_convection, Q_phi_diffusion')
Q_phi=Q_phi_time+Q_phi_convection+Q_phi_diffusion
from sympy.utilities.codegen import codegen,Routine
codegen((
("Rho", rho_an),
("U", u_an),
("V", v_an),
("W", w_an),
#("P", p_an),
("Phi", phi_an),
("Q_phi_time", Q2n ),
("Q_phi_convection", Q1n ),
("Q_phi_diffusion", Q3n ),
("Q_phi", Q_phi ),
), "C", "../C_codes/NS_PowerLaw_scalar_transient_3d_phi", header=True, to_files=True)
# Writing Q_phi into a latex file ------------------------------------------
latexQ=latex(Q_phi)
latexQ1=latex(Q1n)#(Q_phi_convection)
latexQ2=latex(Q2n)#(Q_phi_time)
latexQ3=latex(Q3n)#(Q_phi_diffusion)
s=str(latexQ)
s1=str(latexQ1)
s2=str(latexQ2)
s3=str(latexQ3)
f=open('../latex/Q_phi.tex','w')
f.write('\n Q_phi: \n')
f.write(s)
f.write('\n\n Q_phi_convection: \n')
f.write(s1)
f.write('\n\n Q_phi_time: \n')
f.write(s2)
f.write('\n\n Q_phi_diffusion: \n')
f.write(s3)
f.close()
## Writing Q_phi into a file in order to count characters ------------------------------------------
#s=str(Q_phi)
#f=open('../C_code_sympy/SourceQrho_after_factorization.dat','w')
#f.write(s)
#f.close()
| lgpl-2.1 | 5,003,624,397,822,584,000 | 19.185714 | 100 | 0.57891 | false |
dbatalov/ri-optimizer | example_main.py | 1 | 9032 | """
This is the main example script to execute, it is meant as an example
of how the riptimize.py module is to be used, and effectivey acts as
the driver of the module with rudimentary console UI + CSV report
generation and S3 upload. It's job is to demonstrate the functionality
of riptimize and it is not meant to execute in production as is.
The step-by-step instructions as to how to execute this script is
embedded in comments below labeled with STEP X OF X.
"""
import riptimize
import datetime
import csv
import boto
def main():
print "Example Riptimize Driver"
print
# 1. setup
# STEP 1 of 7: specify region
region = 'us-east-1'
# STEP 2 of 7: set the RI holding account id and credentials
ri_account_id = 'RIRI-RIRI-RIRI' # replace with actual AWS Account ID
ri_account_credentials = ('<access-key-id-ri>', '<secret_access-key-ri>')
all_accounts = {ri_account_id: ri_account_credentials}
# STEP 3 of 7: add ids and credentials for all other linked accounts, at first just add a couple other accounts
# all_accounts['AAAA-AAAA-AAAA'] = ('<access-key-id-a>', '<secret-access-key-a>')
# all_accounts['BBBB-BBBB-BBBB'] = ('<access-key-id-b>', '<secret-access-key-b>')
# ...
# all_accounts['ZZZZ-ZZZZ-ZZZZ'] = ('<access-key-id-z>', '<secret-access-key-z>')
# STEP 4 of 7: For the first few tests this should be set to False
# once you see that the script is running, change to True to actually execute RI modifications
optimize = False # if False, means a DRY-RUN
# STEP 5 of 7: Leaving as True will publish RI surplus metrics to CloudWatch
publish_metrics = True # custom metrics are created in AWS CloudWatch
# STEP 6 of 7: Leaving as True will upload the CSV report to S3 for safekeeping
upload_report = True # CSV reports will be saved in S3 in s3_report_bucket
s3_report_bucket = "riptimize-reports-%s" % ri_account_id
# 2. do it
# STEP 7 of 7: Ok, you are ready to go, just execute on the command line % python example_main.py
riptimize_result_tuple = riptimize.riptimize(all_accounts, ri_account_credentials, region, optimize, publish_metrics)
# 3. show results
i_inventory, i_inventory_by_account, ri_inventory, supported_ri_zones, processing_modifications, clean_mismatch, recommendations, plan, modification_ids = riptimize_result_tuple
time_now = datetime.datetime.utcnow()
print "Report for region %s as of %s" % (region, time_now)
print
# 3.1 print on-demand instance inventory
print "Instance Inventory by account:"
print i_inventory_by_account
print
print "Aggregate instance inventory:"
print i_inventory
print
# 3.2 print RI inventory
print "RI Inventory:"
print ri_inventory
print
# 3.3 show all supported AZs in the RI holding account
print "Supported RI zones: " + str(supported_ri_zones)
# 3.4 show if previous modifications are still being executed
modifications_inflight = len(processing_modifications) != 0
if modifications_inflight:
print
print "======--- WARNING ---======"
print "Previous modifications are still processing:"
for mod in processing_modifications:
print "modification_id: %s, status: %s" % (mod.modification_id, mod.status)
print "!!! RI optimizations cannot be performed until previous modifications are completed"
print "!!! RI inventory and recommendations will also be potentially incorrect"
print
# 3.5 print detected mismatches between numbers of on-demand running instances and RIs by availability zone and instance type
if len(clean_mismatch) > 0:
print "On-demand/RI inventory mismatches per availability zone:"
print clean_mismatch
else:
print "No On-demand/RI inventory mimatches detected in any availability zones:"
print
# 3.6 print recommendations for migrating running instances into AZs covered by RI holding account, purchasing additional RIs or launching additional instances to get better RI utilization
eliminated_i_inventory, ri_imbalance = recommendations
if len(eliminated_i_inventory) == 0 and len(ri_imbalance) == 0:
print "No recomendations available"
else:
print "Recommendations:"
if len(eliminated_i_inventory) > 0:
print "\tOn-demand instances running in zones not supported by RIs. Migrate them to supported zones:"
print "\t" + str(eliminated_i_inventory)
print
if len(ri_imbalance) > 0:
print "\tOn-demand/RI imbalance detected!"
print "\tNegative numbers indicate additional RIs needed, positive ones indicate that RIs are underutilized and more instances can be launched:"
print "\t" + str(ri_imbalance)
print
# 3.7 print high-level optimization plan if one is possible, showing how many RIs need to be moved to which AZs
if len(plan) == 0:
print "No RI redistribution is possible."
else:
print "RI Optimization possible! Plan: " + str(plan)
if optimize:
if modifications_inflight:
print "Previous optimizations are still processing, new optimizations kicked off in DRY-RUN mode only!"
else:
print "Optimize option selected, optimizations kicked-off..."
else:
print "Optimize flag not set, so optimizations kicked off in DRY-RUN mode only!"
print
# 3.8 finally, if optimizations were actually kicked off, list all modification ids, or fake ones in case of a dry run
print "Initiated optimizations:"
print modification_ids
filename_safe_timestamp = str(time_now).replace(' ','_').replace(':', '-')
report_file_name = "riptimize_report_%s_%s.csv" % (region, filename_safe_timestamp)
csv_report(report_file_name, time_now, region, i_inventory_by_account, ri_inventory, clean_mismatch, plan, modification_ids)
print
print "CSV report written to %s" % report_file_name
if upload_report:
upload_report_to_s3(ri_account_credentials, report_file_name, s3_report_bucket)
print
print "Report uploaded to S3 as %s/%s of RI holding account %s" % (s3_report_bucket, report_file_name, ri_account_id)
print
print "Done"
# example of generating a CSV report
def csv_report(csv_file_name, time_now, region, i_inventory_by_account, ri_inventory, clean_mismatch, plan, modification_ids):
with open(csv_file_name, 'wb') as csv_file:
writer = csv.writer(csv_file)
writer.writerow(["Report for region %s at %s" % (region, str(time_now))])
# write instance inventory report
writer.writerow([])
writer.writerow(['Instance Inventory'])
writer.writerow(['Account ID', 'Instance Type', 'Availability Zone', 'Count'])
for account_id, inventory_for_account in i_inventory_by_account.items():
for (itype, az), count in inventory_for_account.items():
writer.writerow([account_id, itype, az, count])
# write RI inventory report
writer.writerow([])
writer.writerow(['RI Inventory'])
writer.writerow(['Instance Type', 'Availability Zone', 'Count'])
for (itype, az), count in ri_inventory.items():
writer.writerow([itype, az, count])
# write report on On-demand/RI inventory mismatches
writer.writerow([])
        writer.writerow(['On-demand/RI inventory mismatches per availability zone'])
writer.writerow(['Instance Type', 'Availability Zone', 'Diff'])
for (itype, az), count in clean_mismatch.items():
writer.writerow([itype, az, count])
# write optimization plan
writer.writerow([])
writer.writerow(['RI modification plan'])
writer.writerow(['Instance Type', 'Source AZ', 'Destination AZ', 'Count'])
for itype, source_az, dest_az, count in plan:
writer.writerow([itype, source_az, dest_az, count])
# write modification_ids
writer.writerow([])
writer.writerow(['Kicked off RI modifications'])
writer.writerow(['Modification ID'])
for modification_id in modification_ids:
writer.writerow([modification_id])
def upload_report_to_s3(ri_account_credentials, report_file_name, s3_report_bucket):
access_key_id, secret_access_key = ri_account_credentials
s3 = boto.connect_s3(aws_access_key_id=access_key_id, aws_secret_access_key=secret_access_key)
# create bucket if does not exist
bucket = s3.lookup(s3_report_bucket)
if not bucket:
bucket = s3.create_bucket(s3_report_bucket)
# upload the report
key = bucket.new_key(report_file_name)
key.set_contents_from_filename(report_file_name)
s3.close()
if __name__ == '__main__':
main()
| bsd-2-clause | -7,441,872,143,540,658,000 | 46.042553 | 192 | 0.660762 | false |
EKiefer/edge-starter | py34env/Scripts/enhancer.py | 1 | 1558 | #!c:\users\ekiefer\projects\django\my_edge\py34env\scripts\python.exe
#
# The Python Imaging Library
# $Id$
#
# this demo script creates four windows containing an image and a slider.
# drag the slider to modify the image.
#
try:
from tkinter import Tk, Toplevel, Frame, Label, Scale, HORIZONTAL
except ImportError:
from Tkinter import Tk, Toplevel, Frame, Label, Scale, HORIZONTAL
from PIL import Image, ImageTk, ImageEnhance
import sys
#
# enhancer widget
class Enhance(Frame):
def __init__(self, master, image, name, enhancer, lo, hi):
Frame.__init__(self, master)
# set up the image
self.tkim = ImageTk.PhotoImage(image.mode, image.size)
self.enhancer = enhancer(image)
self.update("1.0") # normalize
# image window
Label(self, image=self.tkim).pack()
# scale
s = Scale(self, label=name, orient=HORIZONTAL,
from_=lo, to=hi, resolution=0.01,
command=self.update)
s.set(self.value)
s.pack()
def update(self, value):
self.value = eval(value)
self.tkim.paste(self.enhancer.enhance(self.value))
#
# main
root = Tk()
im = Image.open(sys.argv[1])
im.thumbnail((200, 200))
Enhance(root, im, "Color", ImageEnhance.Color, 0.0, 4.0).pack()
Enhance(Toplevel(), im, "Sharpness", ImageEnhance.Sharpness, -2.0, 2.0).pack()
Enhance(Toplevel(), im, "Brightness", ImageEnhance.Brightness, -1.0, 3.0).pack()
Enhance(Toplevel(), im, "Contrast", ImageEnhance.Contrast, -1.0, 3.0).pack()
root.mainloop()
| mit | 8,260,503,957,191,499,000 | 25.40678 | 80 | 0.646341 | false |
prasannav7/ggrc-core | test/integration/ggrc/models/factories.py | 1 | 4323 | # Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: [email protected]
# Maintained By: [email protected]
"""Factories for models"""
import random
import factory
from ggrc import db
from ggrc import models
def random_string(prefix=''):
return '{prefix}{suffix}'.format(
prefix=prefix,
suffix=random.randint(0, 9999999999),
)
class ModelFactory(factory.Factory):
# pylint: disable=too-few-public-methods,missing-docstring,old-style-class
# pylint: disable=no-init
# modified_by_id = 1
@classmethod
def _create(cls, target_class, *args, **kwargs):
instance = target_class(*args, **kwargs)
db.session.add(instance)
db.session.commit()
return instance
class TitledFactory(factory.Factory):
# pylint: disable=too-few-public-methods,missing-docstring,old-style-class
# pylint: disable=no-init
title = factory.LazyAttribute(lambda m: random_string('title'))
class DirectiveFactory(ModelFactory, TitledFactory):
# pylint: disable=too-few-public-methods,missing-docstring,old-style-class
# pylint: disable=no-init
class Meta:
model = models.Directive
class ControlFactory(ModelFactory, TitledFactory):
# pylint: disable=too-few-public-methods,missing-docstring,old-style-class
# pylint: disable=no-init
class Meta:
model = models.Control
directive = factory.SubFactory(DirectiveFactory)
kind_id = None
version = None
documentation_description = None
verify_frequency_id = None
fraud_related = None
key_control = None
active = None
notes = None
class AssessmentFactory(ModelFactory, TitledFactory):
# pylint: disable=too-few-public-methods,missing-docstring,old-style-class
# pylint: disable=no-init
class Meta:
model = models.Assessment
class ControlCategoryFactory(ModelFactory):
# pylint: disable=too-few-public-methods,missing-docstring,old-style-class
# pylint: disable=no-init
class Meta:
model = models.ControlCategory
name = factory.LazyAttribute(lambda m: random_string('name'))
lft = None
rgt = None
scope_id = None
depth = None
required = None
class CategorizationFactory(ModelFactory):
# pylint: disable=too-few-public-methods,missing-docstring,old-style-class
# pylint: disable=no-init
class Meta:
model = models.Categorization
category = None
categorizable = None
category_id = None
categorizable_id = None
categorizable_type = None
class ProgramFactory(ModelFactory):
# pylint: disable=too-few-public-methods,missing-docstring,old-style-class
# pylint: disable=no-init
class Meta:
model = models.Program
title = factory.LazyAttribute(lambda _: random_string("program_title"))
slug = factory.LazyAttribute(lambda _: random_string(""))
class AuditFactory(ModelFactory):
# pylint: disable=too-few-public-methods,missing-docstring,old-style-class
# pylint: disable=no-init
class Meta:
model = models.Audit
title = factory.LazyAttribute(lambda _: random_string("title"))
slug = factory.LazyAttribute(lambda _: random_string(""))
status = "Planned"
program_id = factory.LazyAttribute(lambda _: ProgramFactory().id)
class ContractFactory(ModelFactory):
# pylint: disable=too-few-public-methods,missing-docstring,old-style-class
# pylint: disable=no-init
class Meta:
model = models.Contract
class EventFactory(ModelFactory):
# pylint: disable=too-few-public-methods,missing-docstring,old-style-class
# pylint: disable=no-init
class Meta:
model = models.Event
revisions = []
class RelationshipFactory(ModelFactory):
# pylint: disable=too-few-public-methods,missing-docstring,old-style-class
# pylint: disable=no-init
class Meta:
model = models.Relationship
source = None
destination = None
class RelationshipAttrFactory(ModelFactory):
# pylint: disable=too-few-public-methods,missing-docstring,old-style-class
# pylint: disable=no-init
class Meta:
model = models.RelationshipAttr
relationship_id = None
attr_name = None
attr_value = None
class PersonFactory(ModelFactory):
# pylint: disable=too-few-public-methods,missing-docstring,old-style-class
# pylint: disable=no-init
class Meta:
model = models.Person
| apache-2.0 | -2,486,109,641,783,697,400 | 23.844828 | 78 | 0.734906 | false |
mira67/DL4RS | MPFprj/prjutil.py | 1 | 1728 | """
Utility helpers: configuration file parser for the project.
Author: Qi Liu, 11/2016
"""
import ConfigParser
def cfgmap(cfg,section):
dict1 = {}
options = cfg.options(section)
for option in options:
try:
dict1[option] = cfg.get(section, option)
if dict1[option] == -1:
DebugPrint("skip: %s" % option)
except:
print("exception on %s!" % option)
dict1[option] = None
return dict1
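# Minimal usage sketch (option values are assumptions about config.ini):
#   cfg = ConfigParser.ConfigParser()
#   cfg.read("config.ini")
#   workspace = cfgmap(cfg, "Workspace")   # e.g. {'train_path': '/data/train', ...}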
def read_config():
config = ConfigParser.ConfigParser()
config.read("config.ini")
#datapath
p = {}
p['train_path'] = cfgmap(config,"Workspace")['train_path']
p['result_path'] = cfgmap(config,"Workspace")['result_path']
p['test_path'] = cfgmap(config,"Workspace")['test_path']
p['model_path'] = cfgmap(config,"Workspace")['model_path']
p['test_result_csv'] = cfgmap(config,"Workspace")['test_result_csv']
#parameters
p['fea_num'] = config.getint('Settings','fea_num')
p['out_num'] = config.getint('Settings','out_num')
p['bsize'] = config.getint('Settings','batch_size')
p['iters'] = config.getint('Settings','nb_epoch')
#graphics
p['plot_on'] = config.getint('Settings','plot_on')
# model
p['model_id'] = config.getint('Model','model_id')
p['model_name'] = cfgmap(config,"Model")['model_name']
p['model_name2'] = cfgmap(config,"Model")['model_name2']
p['verbose_on'] = config.getboolean('Model','verbose_on')
p['kfold'] = config.getint('Model','kfold')
p['n_splits'] = config.getint('Model','n_splits')
# sql
p['train_sql'] = cfgmap(config,"SQL")['train_sql']
p['test_sql'] = cfgmap(config,"SQL")['test_sql']
p['csvtosql'] = cfgmap(config,"SQL")['csvtosql']
return p
| gpl-3.0 | 3,041,653,088,066,856,400 | 32.882353 | 72 | 0.594329 | false |
k0001/meaningtoolws | meaningtoolws/ct.py | 1 | 4944 | # -*- coding: utf-8 -*-
# Copyright (c) 2009, Popego Corporation <contact [at] popego [dot] com>
# All rights reserved.
#
# This file is part of the Meaningtool Web Services Python Client project
#
# See the COPYING file distributed with this project for its licensing terms.
"""
Meaningtool Category Tree REST API v0.1 client
Official documentation for the REST API v0.1 can be found at
http://meaningtool.com/docs/ws/ct/restv0.1
"""
import re
import urllib
import urllib2
try:
import json
except ImportError:
import simplejson as json
MT_BASE_URL = u"http://ws.meaningtool.com/ct/restv0.1"
_re_url = re.compile(ur"^https?://.+$")
class Result(object):
def __init__(self, status_errcode, status_message, data):
super(Result, self).__init__()
        self.status_errcode = status_errcode
self.status_message = status_message
self.data = data
def __repr__(self):
return u"<%s - %s>" % (self.__class__.__name__, self.status_message)
class ResultError(Result, Exception):
def __init__(self, status_errcode, status_message, data):
Result.__init__(self, status_errcode, status_message, data)
Exception.__init__(self, u"%s: %s" % (status_errcode, status_message))
def __repr__(self):
return u"<%s - %s: %s>" % (self.__class__.__name__, self.status_errcode, self.status_message)
class Client(object):
def __init__(self, ct_key):
self.ct_key = ct_key
self._base_url = u"%s/%s" % (MT_BASE_URL, ct_key)
def __repr__(self):
return u"<%s - ct_key: %s>" % (self.__class__.__name__, self.ct_key)
def _req_base(self, method, url, data, headers):
if method == "GET":
req = urllib2.Request(u"%s?%s" % (url, urllib.urlencode(data)))
elif method == "POST":
req = urllib2.Request(url, urllib.urlencode(data))
else:
raise ValueError(u"HTTP Method '%s' not supported" % method)
req.add_header("Content-Type", "application/x-www-form-urlencoded; charset=UTF-8")
req.add_header("Accept-Charset", "UTF-8")
for k,v in headers:
req.add_header(k, v)
try:
resp = urllib2.urlopen(req)
except urllib2.HTTPError, e:
if e.code >= 500:
raise
resp = e
s = resp.read()
return s
def _req_json(self, method, url, data, headers):
url += u'.json'
headers.append(("Accept", "application/json"))
return self._req_base(method, url, data, headers)
def _parse_result_base(self, result_dict):
status = result_dict["status"]
status_errcode = result_dict["errno"]
status_message = result_dict["message"]
data = result_dict["data"]
if status == "ok":
return Result(status_errcode, status_message, data)
else:
raise ResultError(status_errcode, status_message, data)
def _parse_result_json(self, raw):
return self._parse_result_base(json.loads(raw, encoding="utf8"))
# default request/parse methods
_req = _req_json
_parse_result = _parse_result_json
def get_categories(self, source, input, url_hint=None, additionals=None, content_language=None):
url = u"%s/categories" % self._base_url
data = {}
headers = []
data["source"] = source.encode("utf8")
data["input"] = input.encode("utf8")
if url_hint:
if not _re_url.match(url_hint):
raise ValueError(u"url_hint")
data["url_hint"] = url_hint.encode("utf8")
if additionals:
additionals = u",".join(set(additionals))
data["additionals"] = additionals.encode("utf8")
if content_language:
content_language = content_language[:2].lower()
if not len(content_language) == 2:
raise ValueError(u"content_language")
headers.append(("Content-Language", content_language.encode("ascii")))
# Even if POST, it's idempotent as GET.
return self._parse_result(self._req("POST", url, data, headers))
def get_tags(self, source, input, url_hint=None, content_language=None):
url = u"%s/tags" % self._base_url
data = {}
headers = []
data["source"] = source.encode("utf8")
data["input"] = input.encode("utf8")
if url_hint:
if not _re_url.match(url_hint):
raise ValueError(u"url_hint")
data["url_hint"] = url_hint.encode("utf8")
if content_language:
content_language = content_language[:2].lower()
if not len(content_language) == 2:
raise ValueError(u"content_language")
headers.append(("Content-Language", content_language.encode("ascii")))
# Even if POST, it's idempotent as GET.
return self._parse_result(self._req("POST", url, data, headers))
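# Hypothetical usage sketch (the key and input text are assumptions):
#   client = Client('my-category-tree-key')
#   result = client.get_categories('text', u'Some text to classify')
#   print(result.data)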
| bsd-3-clause | 6,838,378,754,736,598,000 | 32.405405 | 101 | 0.587379 | false |
proversity-org/edx-platform | lms/djangoapps/student_account/talenetic.py | 1 | 7807 | from six.moves.urllib_parse import urlencode, unquote
import jwt
import json
from django.conf import settings
from student.models import Registration, UserProfile
from social_core.backends.oauth import BaseOAuth2
from django.contrib.auth.models import User
import uuid
import logging
import social_django
log = logging.getLogger(__name__)
class TaleneticOAuth2(BaseOAuth2):
"""
Talenetic OAuth2 authentication backend
"""
settings_dict = settings.CUSTOM_BACKENDS.get('talenetic')
name = 'talenetic-oauth2'
REDIRECT_STATE = False
ID_KEY = 'emailaddress'
STATE_PARAMETER = False
AUTHORIZATION_URL = settings_dict.get('AUTH_URL')
ACCESS_TOKEN_URL = settings_dict.get('ACCESS_TOKEN_URL')
ACCESS_TOKEN_METHOD = 'GET'
REFRESH_TOKEN_URL = settings_dict.get('REFRESH_TOKEN_URL')
REFRESH_TOKEN_METHOD = 'POST'
RESPONSE_TYPE = 'code jwt_token'
REDIRECT_IS_HTTPS = False
REVOKE_TOKEN_URL = settings_dict.get('LOGOUT_URL')
REVOKE_TOKEN_METHOD = 'POST'
def get_scope_argument(self):
return {}
def auth_complete(self, *args, **kwargs):
"""Completes login process, must return user instance"""
self.process_error(self.data)
state = self.validate_state()
access_url = "{}?uid={}".format(self.access_token_url(), self._get_uid())
response = self.request_access_token(
access_url,
data=self._get_creds(),
headers=self._get_creds(),
auth=self.auth_complete_credentials(),
method=self.ACCESS_TOKEN_METHOD
)
self.process_error(response)
return self.do_auth(response['jwt_token'], response=response,
*args, **kwargs)
def do_auth(self, jwt_token, *args, **kwargs):
data = self.user_data(jwt_token, *args, **kwargs)
response = kwargs.get('response') or {}
response.update(data or {})
if 'access_token' not in response:
response['access_token'] = jwt_token
kwargs.update({'response': response, 'backend': self})
return self.strategy.authenticate(*args, **kwargs)
def _get_uid(self):
if 'uid' in self.data:
return self.data['uid']
else:
return None
def auth_params(self, state=None):
client_id, client_secret = self.get_key_and_secret()
uri = self.get_redirect_uri(state)
if self.REDIRECT_IS_HTTPS:
uri = uri.replace('http://', 'https://')
params = {
'urlredirect': uri,
'clientId': client_id,
'secretkey': client_secret
}
return params
def get_user_id(self, details, response):
return details.get('email')
def user_data(self, access_token, *args, **kwargs):
"""Loads user data from service"""
return self.get_user_details(kwargs.get('response'))
def get_user_details(self, response):
response = self._fill_fields(response)
self._set_uid_to_profile(self._get_uid(), response.get('emailaddress'))
return {'username': response.get('username'),
'email': response.get('emailaddress'),
'fullname': response.get('firstname'),
'first_name': response.get('firstname')}
def _fill_fields(self, data):
# a little util to fill in missing data for later consumption
if data.get('firstname') is None:
data['firstname'] = data.get('emailaddress').split('@')[0]
if data.get('username') is None:
data['username'] = data.get('emailaddress').split('@')[0]
return data
def _get_creds(self):
client_id, client_secret = self.get_key_and_secret()
return {
'secretkey': client_secret,
'clientId': client_id
}
def auth_headers(self):
return {'Accept': 'application/json'}
def pipeline(self, pipeline, pipeline_index=0, *args, **kwargs):
"""
        This is a special override of the pipeline method.
        It grabs the user from the pipeline that actually ran and stores the
        incoming uid in the meta field of the user's profile.
"""
        # Some usernames coming from the SSO contain a full stop, so the user
        # cannot be found by username and the OAuth flow tries to create a new
        # one, which fails because the email already exists. Force-set the user
        # by email here so the rest of the OAuth flow works properly.
if kwargs.get('user') is None:
try:
user = User.objects.get(email=kwargs.get('response').get('emailaddress'))
kwargs['user'] = user
except User.DoesNotExist:
pass
out = self.run_pipeline(pipeline, pipeline_index, *args, **kwargs)
if not isinstance(out, dict):
return out
user = out.get('user')
if user:
user.social_user = out.get('social')
user.is_new = out.get('is_new')
return user
def _set_uid_to_profile(self, uid, emailaddress):
"""
        Look up the existing user by email address; if the user is found, save
        the requested uid on the user profile because it is needed at logout.
"""
try:
user = User.objects.get(email=emailaddress)
user_profile = user.profile
new_meta = {'talenetic-uid': uid}
if len(user_profile.meta) > 0:
previous_meta = json.loads(user_profile.meta)
mixed_dicts =\
(previous_meta.items() + new_meta.items())
new_meta =\
{key: value for (key, value) in mixed_dicts}
user_profile.meta = json.dumps(new_meta)
user_profile.save()
except Exception as e:
log.error("Could not save uid to user profile or something else: {}".format(e.message))
def auth_url(self):
"""Return redirect url"""
params = self.auth_params()
params = urlencode(params)
if not self.REDIRECT_STATE:
# redirect_uri matching is strictly enforced, so match the
# providers value exactly.
params = unquote(params)
return '{0}?{1}'.format(self.authorization_url(), params)
def revoke_token_url(self, token, uid):
social_user = social_django.models.DjangoStorage.user.get_social_auth(provider=self.name, uid=uid)
profile = social_user.user.profile
meta_data = json.loads(profile.meta)
url = "{}?uid={}".format(self.REVOKE_TOKEN_URL, meta_data.get('talenetic-uid'))
return url
def revoke_token_params(self, token, uid):
return {}
def revoke_token_headers(self, token, uid):
return self._get_creds()
def process_revoke_token_response(self, response):
return response.status_code == 200
def revoke_token(self, token, uid):
if self.REVOKE_TOKEN_URL:
url = self.revoke_token_url(token, uid)
params = self.revoke_token_params(token, uid)
headers = self.revoke_token_headers(token, uid)
data = urlencode(params) if self.REVOKE_TOKEN_METHOD != 'GET' \
else None
response = self.request(url, params=params, headers=headers,
data=data, method=self.REVOKE_TOKEN_METHOD)
return self.process_revoke_token_response(response)
| agpl-3.0 | 211,726,550,094,709,500 | 33.166667 | 106 | 0.575893 | false |
edx/cookiecutter-django-ida | {{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/apps/core/tests/test_models.py | 1 | 1676 | """ Tests for core models. """
from django.test import TestCase
from django_dynamic_fixture import G
from social_django.models import UserSocialAuth
from {{cookiecutter.repo_name}}.apps.core.models import User
class UserTests(TestCase):
""" User model tests. """
TEST_CONTEXT = {'foo': 'bar', 'baz': None}
def test_access_token(self):
user = G(User)
self.assertIsNone(user.access_token)
social_auth = G(UserSocialAuth, user=user)
self.assertIsNone(user.access_token)
access_token = 'My voice is my passport. Verify me.'
social_auth.extra_data['access_token'] = access_token
social_auth.save()
self.assertEqual(user.access_token, access_token)
def test_get_full_name(self):
""" Test that the user model concatenates first and last name if the full name is not set. """
full_name = 'George Costanza'
user = G(User, full_name=full_name)
self.assertEqual(user.get_full_name(), full_name)
first_name = 'Jerry'
last_name = 'Seinfeld'
user = G(User, full_name=None, first_name=first_name, last_name=last_name)
expected = '{first_name} {last_name}'.format(first_name=first_name, last_name=last_name)
self.assertEqual(user.get_full_name(), expected)
user = G(User, full_name=full_name, first_name=first_name, last_name=last_name)
self.assertEqual(user.get_full_name(), full_name)
def test_string(self):
"""Verify that the model's string method returns the user's full name."""
full_name = 'Bob'
user = G(User, full_name=full_name)
self.assertEqual(str(user), full_name)
| apache-2.0 | 3,668,149,026,875,080,700 | 36.244444 | 102 | 0.647375 | false |
denis-guillemenot/pmi_collect | simpleHTTPServer.py | 1 | 1587 | # ----------------------------------------------------------------
# name : simpleHTTPServer.py
# object: Simple MultiThreaded Web Server
# usage: python SimpleHTTPServer [port] / default port: 8080
# author: [email protected] / [email protected]
# date : 19/09/2013
# ----------------------------------------------------------------
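# example (assumed): "python simpleHTTPServer.py 9090" serves the current directory
# (including /cgi-bin scripts) on port 9090; with no argument it falls back to 8080.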
import sys
# Use default or provided port
print
if ( len( sys.argv) > 1):
  msg = "provided"
  try:
    cause = "must be an integer"
    port = int( sys.argv[1])
    if ( port < 1024):
      cause = "must be >= 1024"
      raise
  except:
    print "ERROR: %s port:%s %s... exiting" % (msg, sys.argv[1], cause)
    sys.exit( 1)
else:
msg = "default"
port = 8080
print "Using %s port:%d" % ( msg, port)
import SocketServer, BaseHTTPServer, sys, os, CGIHTTPServer, os, os.path
# port = 8080
class ThreadingCGIServer( SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
pass
# set os separator
try:
os_sep = os.path.os.sep
False = 0
True = 1
except:
try:
os_sep = os.path.sep
except:
print("ERROR: can not set os.separator, exiting...")
sys.exit(-1)
# set rootdir
currdir = os.getcwd()
# rootdir = currdir + os_sep + 'data'
# if ( os.path.exists( rootdir)): os.chdir( rootdir)
# start HTTP Server
server = ThreadingCGIServer( ('', port), CGIHTTPServer.CGIHTTPRequestHandler)
print "Server started on port %s." % port
try:
while 1:
sys.stdout.flush()
server.handle_request()
except KeyboardInterrupt:
if ( os.path.exists( currdir)): os.chdir( currdir)
print "Server stopped."
| mit | 3,394,831,740,184,228,400 | 24.190476 | 82 | 0.608066 | false |
theolind/home-assistant | tests/helpers/test_init.py | 1 | 1603 | """
tests.test_helpers
~~~~~~~~~~~~~~~~~~~~
Tests component helpers.
"""
# pylint: disable=protected-access,too-many-public-methods
import unittest
from common import get_test_home_assistant
import homeassistant as ha
import homeassistant.loader as loader
from homeassistant.const import STATE_ON, STATE_OFF, ATTR_ENTITY_ID
from homeassistant.helpers import extract_entity_ids
class TestComponentsCore(unittest.TestCase):
""" Tests homeassistant.components module. """
def setUp(self): # pylint: disable=invalid-name
""" Init needed objects. """
self.hass = get_test_home_assistant()
loader.prepare(self.hass)
self.hass.states.set('light.Bowl', STATE_ON)
self.hass.states.set('light.Ceiling', STATE_OFF)
self.hass.states.set('light.Kitchen', STATE_OFF)
loader.get_component('group').setup_group(
self.hass, 'test', ['light.Ceiling', 'light.Kitchen'])
def tearDown(self): # pylint: disable=invalid-name
""" Stop down stuff we started. """
self.hass.stop()
def test_extract_entity_ids(self):
""" Test extract_entity_ids method. """
call = ha.ServiceCall('light', 'turn_on',
{ATTR_ENTITY_ID: 'light.Bowl'})
self.assertEqual(['light.bowl'],
extract_entity_ids(self.hass, call))
call = ha.ServiceCall('light', 'turn_on',
{ATTR_ENTITY_ID: 'group.test'})
self.assertEqual(['light.ceiling', 'light.kitchen'],
extract_entity_ids(self.hass, call))
| mit | 9,065,493,892,400,765,000 | 31.714286 | 67 | 0.615097 | false |
sylvainnizac/Djangoctopus | blog/admin.py | 1 | 2774 | # -*- coding: utf8 -*-
from django.contrib import admin
from blog.models import Categorie, Article, Comment
class ArticleAdmin(admin.ModelAdmin):
list_display = ('titre', 'auteur', 'date', 'categorie', 'apercu_contenu')
list_filter = ('auteur','categorie',)
date_hierarchy = 'date'
ordering = ('-date', )
search_fields = ('titre', 'contenu')
prepopulated_fields = {"slug": ("titre",)}
    # Edit form configuration
fieldsets = (
        # Fieldset 1: meta info (title, author...)
('Général',
{'fields': ('titre', 'slug', 'auteur', 'categorie')
}),
        # Fieldset 2: article content
('Contenu de l\'article',
{ 'description': 'Le formulaire accepte les balises HTML. Utilisez-les à bon escient !',
'fields': ('contenu', )
}),
)
def apercu_contenu(self, article):
"""
        Return the first 40 characters of the article content. If the content
        is longer than 40 characters, append an ellipsis.
"""
text = article.contenu[0:40]
if len(article.contenu) > 40:
return '%s...' % text
else:
return text
    # Header for the column
apercu_contenu.short_description = 'Aperçu du contenu'
class CommentsAdmin(admin.ModelAdmin):
list_display = ('pseudo', 'email', 'article', 'apercu_description', 'date', 'commentaire_visible')
list_filter = ('pseudo', 'article', 'email', )
date_hierarchy = 'date'
ordering = ('-date', )
search_fields = ('pseudo', 'email', 'article', )
    # Edit form configuration
fieldsets = (
        # Fieldset 1: meta info (pseudo, email...)
('Général',
{'fields': ('pseudo', 'email'), }),
        # Fieldset 2: comment content
('Commentaire',
{ 'description': 'Le formulaire n\'accepte pas les balises HTML.',
'fields': ('description', )}),
        # Fieldset 3: moderation
('Modération',
{ 'fields': ('commentaire_visible', )}),
)
def apercu_description(self, commentaire):
"""
        Return the first 40 characters of the comment content. If the content
        is longer than 40 characters, append an ellipsis.
"""
text = commentaire.description[0:40]
if len(commentaire.description) > 40:
return '%s...' % text
else:
return text
    # Header for the column
apercu_description.short_description = 'Aperçu du commentaire'
# Register your models here.
admin.site.register(Categorie)
admin.site.register(Article, ArticleAdmin)
admin.site.register(Comment, CommentsAdmin)
| gpl-2.0 | -2,033,523,279,121,713,400 | 33.848101 | 104 | 0.587722 | false |
googleapis/googleapis-gen | google/ads/googleads/v6/googleads-py/google/ads/googleads/v6/services/services/media_file_service/transports/grpc.py | 1 | 11285 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v6.resources.types import media_file
from google.ads.googleads.v6.services.types import media_file_service
from .base import MediaFileServiceTransport, DEFAULT_CLIENT_INFO
class MediaFileServiceGrpcTransport(MediaFileServiceTransport):
"""gRPC backend transport for MediaFileService.
Service to manage media files.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
def __init__(self, *,
host: str = 'googleads.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn("api_mtls_endpoint and client_cert_source are deprecated", DeprecationWarning)
host = api_mtls_endpoint if ":" in api_mtls_endpoint else api_mtls_endpoint + ":443"
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id)
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
ssl_credentials=ssl_channel_credentials,
scopes=self.AUTH_SCOPES,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._stubs = {} # type: Dict[str, Callable]
# Run the base constructor.
super().__init__(
host=host,
credentials=credentials,
client_info=client_info,
)
@classmethod
def create_channel(cls,
host: str = 'googleads.googleapis.com',
credentials: ga_credentials.Credentials = None,
scopes: Optional[Sequence[str]] = None,
**kwargs) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
            host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
scopes=scopes or cls.AUTH_SCOPES,
**kwargs
)
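    # Hypothetical usage sketch (the credentials object is an assumption):
    #   channel = MediaFileServiceGrpcTransport.create_channel(
    #       "googleads.googleapis.com", credentials=my_credentials)
    #   transport = MediaFileServiceGrpcTransport(channel=channel)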
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def get_media_file(self) -> Callable[
[media_file_service.GetMediaFileRequest],
media_file.MediaFile]:
r"""Return a callable for the get media file method over gRPC.
Returns the requested media file in full detail.
Returns:
Callable[[~.GetMediaFileRequest],
~.MediaFile]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_media_file' not in self._stubs:
self._stubs['get_media_file'] = self.grpc_channel.unary_unary(
'/google.ads.googleads.v6.services.MediaFileService/GetMediaFile',
request_serializer=media_file_service.GetMediaFileRequest.serialize,
response_deserializer=media_file.MediaFile.deserialize,
)
return self._stubs['get_media_file']
@property
def mutate_media_files(self) -> Callable[
[media_file_service.MutateMediaFilesRequest],
media_file_service.MutateMediaFilesResponse]:
r"""Return a callable for the mutate media files method over gRPC.
Creates media files. Operation statuses are returned.
Returns:
Callable[[~.MutateMediaFilesRequest],
~.MutateMediaFilesResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'mutate_media_files' not in self._stubs:
self._stubs['mutate_media_files'] = self.grpc_channel.unary_unary(
'/google.ads.googleads.v6.services.MediaFileService/MutateMediaFiles',
request_serializer=media_file_service.MutateMediaFilesRequest.serialize,
response_deserializer=media_file_service.MutateMediaFilesResponse.deserialize,
)
return self._stubs['mutate_media_files']
__all__ = (
'MediaFileServiceGrpcTransport',
)
| apache-2.0 | -4,525,491,664,470,927,400 | 43.254902 | 112 | 0.608773 | false |
derv82/wifite2 | wifite/tools/ifconfig.py | 1 | 1784 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
from .dependency import Dependency
class Ifconfig(Dependency):
dependency_required = True
dependency_name = 'ifconfig'
dependency_url = 'apt-get install net-tools'
@classmethod
def up(cls, interface, args=[]):
'''Put interface up'''
from ..util.process import Process
command = ['ifconfig', interface]
if type(args) is list:
command.extend(args)
        elif isinstance(args, str):
command.append(args)
command.append('up')
pid = Process(command)
pid.wait()
if pid.poll() != 0:
raise Exception('Error putting interface %s up:\n%s\n%s' % (interface, pid.stdout(), pid.stderr()))
@classmethod
def down(cls, interface):
'''Put interface down'''
from ..util.process import Process
pid = Process(['ifconfig', interface, 'down'])
pid.wait()
if pid.poll() != 0:
raise Exception('Error putting interface %s down:\n%s\n%s' % (interface, pid.stdout(), pid.stderr()))
@classmethod
def get_mac(cls, interface):
from ..util.process import Process
output = Process(['ifconfig', interface]).stdout()
# Mac address separated by dashes
mac_dash_regex = ('[a-zA-Z0-9]{2}-' * 6)[:-1]
match = re.search(' ({})'.format(mac_dash_regex), output)
if match:
return match.group(1).replace('-', ':')
# Mac address separated by colons
mac_colon_regex = ('[a-zA-Z0-9]{2}:' * 6)[:-1]
match = re.search(' ({})'.format(mac_colon_regex), output)
if match:
return match.group(1)
raise Exception('Could not find the mac address for %s' % interface)
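# Minimal usage sketch (the interface name is an assumption):
#   Ifconfig.up('wlan0', ['mtu', '1500'])
#   mac = Ifconfig.get_mac('wlan0')
#   Ifconfig.down('wlan0')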
| gpl-2.0 | 4,402,937,286,454,165,000 | 28.245902 | 113 | 0.568946 | false |
HalcyonChimera/osf.io | website/project/metadata/schemas.py | 1 | 2265 | import os
import json
LATEST_SCHEMA_VERSION = 2
def _id_to_name(id):
return ' '.join(id.split('_'))
def _name_to_id(name):
return '_'.join(name.split(' '))
def ensure_schema_structure(schema):
schema['pages'] = schema.get('pages', [])
schema['title'] = schema['name']
schema['version'] = schema.get('version', 1)
schema['active'] = schema.get('active', True)
return schema
here = os.path.split(os.path.abspath(__file__))[0]
def from_json(fname):
with open(os.path.join(here, fname)) as f:
return json.load(f)
OSF_META_SCHEMAS = [
ensure_schema_structure(from_json('osf-open-ended-1.json')),
ensure_schema_structure(from_json('osf-open-ended-2.json')),
ensure_schema_structure(from_json('osf-standard-1.json')),
ensure_schema_structure(from_json('osf-standard-2.json')),
ensure_schema_structure(from_json('brandt-prereg-1.json')),
ensure_schema_structure(from_json('brandt-prereg-2.json')),
ensure_schema_structure(from_json('brandt-postcomp-1.json')),
ensure_schema_structure(from_json('brandt-postcomp-2.json')),
ensure_schema_structure(from_json('prereg-prize.json')),
ensure_schema_structure(from_json('erpc-prize.json')),
ensure_schema_structure(from_json('confirmatory-general-2.json')),
ensure_schema_structure(from_json('egap-project-2.json')),
ensure_schema_structure(from_json('veer-1.json')),
ensure_schema_structure(from_json('aspredicted.json')),
ensure_schema_structure(from_json('registered-report.json')),
ensure_schema_structure(from_json('ridie-initiation.json')),
ensure_schema_structure(from_json('ridie-complete.json')),
]
METASCHEMA_ORDERING = (
'Prereg Challenge',
'Open-Ended Registration',
'Preregistration Template from AsPredicted.org',
'Registered Report Protocol Preregistration',
'OSF-Standard Pre-Data Collection Registration',
'Replication Recipe (Brandt et al., 2013): Pre-Registration',
'Replication Recipe (Brandt et al., 2013): Post-Completion',
"Pre-Registration in Social Psychology (van 't Veer & Giner-Sorolla, 2016): Pre-Registration",
'Election Research Preacceptance Competition',
'RIDIE Registration - Study Initiation',
'RIDIE Registration - Study Complete',
)
| apache-2.0 | 8,412,698,295,121,021,000 | 38.736842 | 98 | 0.696247 | false |
ashbc/tgrsite | tgrsite/settings.py | 1 | 6161 | """
Django settings for tgrsite project.
Generated by 'django-admin startproject' using Django 1.10.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
import django.contrib.messages.constants as message_constants
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
from django.urls import reverse_lazy
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ADMINS = [('Webadmin', '[email protected]')]
MANAGERS = [('Webadmin', '[email protected]')]
LOGIN_URL = '/login/'
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = os.environ.get('EMAIL_HOST', 'localhost')
EMAIL_PORT = os.environ.get('EMAIL_PORT', 587)
EMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER', '')
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD', '')
EMAIL_USE_TLS = True
DEFAULT_FROM_EMAIL = os.environ.get('FROM_EMAIL', 'webmaster@localhost')
s = ''
try:
from .keys import secret
s = secret()
except:
# this will throw a KeyError and crash if neither are specified
# which is a decent enough way of enforcing it
s = os.environ['SECRET_KEY']
SECRET_KEY = s
# Defaults off unless explicitly stated in environment variable
try:
if os.environ['DEBUG'].lower() == 'true':
DEBUG = True
else:
DEBUG = False
except KeyError:
DEBUG = False
# needs 127 to work on my machine...
ALLOWED_HOSTS = [os.environ.get('HOST', 'localhost'), '127.0.0.1']
PRIMARY_HOST = '127.0.0.1:8000'
if DEBUG:
from .ipnetworks import IpNetworks
INTERNAL_IPS = IpNetworks(['127.0.0.1', '192.168.0.0/255.255.0.0'])
else:
INTERNAL_IPS = ['127.0.0.1']
INSTALLED_APPS = [
'website_settings',
'navbar',
'assets',
'minutes',
'inventory',
'forum',
'users',
'rpgs',
'exec',
'templatetags',
'timetable',
'messaging',
'gallery',
'pages',
'newsletters',
'notifications',
'crispy_forms',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'redirect'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'tgrsite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'tgrsite/templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'tgrsite.context_processors.latestposts',
'tgrsite.context_processors.mergednavbar'
],
},
},
]
WSGI_APPLICATION = 'tgrsite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
AUTHENTICATION_BACKENDS = [
'users.backends.CaseInsensitiveModelBackend',
# 'django.contrib.auth.backends.ModelBackend',
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-gb'
USE_I18N = True
USE_L10N = True
# Europe/London means GMT+0 with a DST offset of +1:00 i.e. England time
TIME_ZONE = 'Europe/London'
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
# site URL that static files are served from
STATIC_URL = '/static/'
LOGIN_REDIRECT_URL = reverse_lazy("homepage")
# directories to collect static files from
STATICFILES_DIRS = [
# where the static files are stored in the repo and collected from
os.path.join(BASE_DIR, 'static_resources'),
]
# directory the static files are served from
STATIC_ROOT = os.path.join(BASE_DIR, 'STATIC')
# directories for the uploaded files
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'MEDIA')
# Monday
FIRST_DAY_OF_WEEK = 1
# Set up Crispy to render forms in a Bootstrap 4 style
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# as advised by python manage.py check --deploy
# prevent browsers from MIME type sniffing. doesn't play nice
# SECURE_CONTENT_TYPE_NOSNIFF=True
# enable browsers' XSS filters
SECURE_BROWSER_XSS_FILTER = True
# ensure all traffic is SSL (https)
SECURE_SSL_REDIRECT = not DEBUG
# session cookies secure-only
SESSION_COOKIE_SECURE = not DEBUG
# same for CSRF cookie
CSRF_COOKIE_SECURE = not DEBUG
# CSRF_COOKIE_HTTPONLY=True
X_FRAME_OPTIONS = 'DENY'
MESSAGE_TAGS = {
message_constants.DEBUG: 'alert-dark',
message_constants.INFO: 'alert-primary',
message_constants.SUCCESS: 'alert-success',
message_constants.WARNING: 'alert-warning',
message_constants.ERROR: 'alert-danger',
}
# Allow local configuration (change deploy options etc.)
try:
from .local_config import *
except ImportError:
pass
| isc | -7,394,605,813,598,171,000 | 26.382222 | 91 | 0.687226 | false |
Guts/isogeo-api-py-minsdk | isogeo_pysdk/models/metadata.py | 1 | 38320 | # -*- coding: UTF-8 -*-
#! python3
"""
Isogeo API v1 - Model of Metadata (= Resource) entity
See: http://help.isogeo.com/api/complete/index.html#definition-resource
"""
# #############################################################################
# ########## Libraries #############
# ##################################
# standard library
import logging
import pprint
import re
import unicodedata
# package
from isogeo_pysdk.enums import MetadataSubresources, MetadataTypes
# others models
from isogeo_pysdk.models import Workgroup
# #############################################################################
# ########## Globals ###############
# ##################################
logger = logging.getLogger(__name__)
# for slugified title
_regex_slugify_strip = re.compile(r"[^\w\s-]")
_regex_slugify_hyphenate = re.compile(r"[-\s]+")
# #############################################################################
# ########## Classes ###############
# ##################################
class Metadata(object):
"""Metadata are the main entities in Isogeo.
:Example:
.. code-block:: json
{
"_abilities": [
"string"
],
"_created": "string (date-time)",
"_creator": {
"_abilities": [
"string"
],
"_created": "string (date-time)",
"_id": "string (uuid)",
"_modified": "string (date-time)",
"areKeywordsRestricted": "boolean",
"canCreateMetadata": "boolean",
"code": "string",
"contact": {
"_created": "string (date-time)",
"_id": "string (uuid)",
"_modified": "string (date-time)",
"addressLine1": "string",
"addressLine2": "string",
"addressLine3": "string",
"available": "string",
"city": "string",
"count": "integer (int32)",
"countryCode": "string",
"email": "string",
"fax": "string",
"hash": "string",
"name": "string",
"organization": "string",
"phone": "string",
"type": "string",
"zipCode": "string"
},
"keywordsCasing": "string",
"metadataLanguage": "string",
"themeColor": "string"
},
"_id": "string (uuid)",
"_modified": "string (date-time)",
"abstract": "string",
"bbox": [
"number (double)"
],
"collectionContext": "string",
"collectionMethod": "string",
"conditions": [
{
"_id": "string (uuid)",
"description": "string",
"license": {
"_id": "string (uuid)",
"content": "string",
"count": "integer (int32)",
"link": "string",
"name": "string"
}
}
],
"contacts": [
{
"_id": "string (uuid)",
"contact": {
"_created": "string (date-time)",
"_id": "string (uuid)",
"_modified": "string (date-time)",
"addressLine1": "string",
"addressLine2": "string",
"addressLine3": "string",
"available": "string",
"city": "string",
"count": "integer (int32)",
"countryCode": "string",
"email": "string",
"fax": "string",
"hash": "string",
"name": "string",
"organization": "string",
"phone": "string",
"type": "string",
"zipCode": "string"
},
"role": "string"
}
],
"context": "object",
"coordinate-system": "object",
"created": "string (date-time)",
"distance": "number (double)",
"editionProfile": "string",
"encoding": "string",
"envelope": "object",
"features": "integer (int32)",
"format": "string",
"formatVersion": "string",
"geometry": "string",
"height": "integer (int32)",
"keywords": [
{}
]
}
"""
# -- ATTRIBUTES --------------------------------------------------------------------
ATTR_TYPES = {
"_abilities": list,
"_created": str,
"_creator": dict,
"_id": str,
"_modified": str,
"abstract": str,
"collectionContext": str,
"collectionMethod": str,
"conditions": list,
"contacts": list,
"coordinateSystem": dict,
"created": str,
"distance": float,
"editionProfile": str,
"encoding": str,
"envelope": dict,
"events": list,
"featureAttributes": list,
"features": int,
"format": str,
"formatVersion": str,
"geometry": str,
"keywords": list,
"language": str,
"layers": list,
"limitations": list,
"links": list,
"modified": str,
"name": str,
"operations": list,
"path": str,
"precision": str,
"published": str,
"scale": int,
"series": bool,
"serviceLayers": list,
"specifications": list,
"tags": list,
"title": str,
"topologicalConsistency": str,
"type": str,
"updateFrequency": str,
"validFrom": str,
"validTo": str,
"validityComment": str,
}
ATTR_CREA = {
"abstract": str,
"collectionContext": str,
"collectionMethod": str,
"distance": float,
"editionProfile": str,
"encoding": str,
"envelope": dict,
"features": int,
"format": str,
"formatVersion": str,
"geometry": str,
"language": str,
"name": str,
"path": str,
"precision": str,
"scale": int,
"series": bool,
"title": str,
"topologicalConsistency": str,
"type": str,
"updateFrequency": str,
"validFrom": str,
"validTo": str,
"validityComment": str,
}
ATTR_MAP = {
"coordinateSystem": "coordinate-system",
"featureAttributes": "feature-attributes",
}
# -- CLASS METHODS -----------------------------------------------------------------
@classmethod
def clean_attributes(cls, raw_object: dict):
"""Renames attributes which are incompatible with Python (hyphens...).
See related issue: https://github.com/isogeo/isogeo-api-py-minsdk/issues/82
:param dict raw_object: metadata dictionary returned by a request.json()
:returns: the metadata with correct attributes
:rtype: Metadata
"""
for k, v in cls.ATTR_MAP.items():
raw_object[k] = raw_object.pop(v, [])
return cls(**raw_object)
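    # Illustrative sketch (the raw dict values are assumptions, not real API output):
    #   raw = {"_id": "...", "coordinate-system": {"code": 4326}, "feature-attributes": []}
    #   md = Metadata.clean_attributes(raw)
    #   md.coordinateSystem  # -> {"code": 4326}, renamed according to ATTR_MAP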
    # -- CLASS INSTANTIATION ----------------------------------------------------------
def __init__(
self,
_abilities: list = None,
_created: str = None,
_creator: dict = None,
_id: str = None,
_modified: str = None,
abstract: str = None,
collectionContext: str = None,
collectionMethod: str = None,
conditions: list = None,
contacts: list = None,
coordinateSystem: dict = None,
created: str = None,
distance: float = None,
editionProfile: str = None,
encoding: str = None,
envelope: dict = None,
events: list = None,
featureAttributes: list = None,
features: int = None,
format: str = None,
formatVersion: str = None,
geometry: str = None,
keywords: list = None,
language: str = None,
layers: list = None,
limitations: list = None,
links: list = None,
modified: str = None,
name: str = None,
operations: list = None,
path: str = None,
precision: str = None,
published: str = None,
scale: int = None,
series: bool = None,
serviceLayers: list = None,
specifications: list = None,
tags: list = None,
title: str = None,
topologicalConsistency: str = None,
type: str = None,
updateFrequency: str = None,
validFrom: str = None,
validTo: str = None,
validityComment: str = None,
):
"""Metadata model"""
# default values for the object attributes/properties
self.__abilities = None
self.__created = None
self.__creator = None
self.__id = None
self.__modified = None
self._abstract = None
self._collectionContext = None
self._collectionMethod = None
self._conditions = None
self._contacts = None
self._coordinateSystem = None
self._creation = None # = created
self._distance = None
self._editionProfile = None
self._encoding = None
self._envelope = None
self._events = None
self._featureAttributes = None
self._features = None
self._format = None
self._formatVersion = None
self._geometry = None
self._keywords = None
self._language = None
self._layers = None
self._limitations = None
self._links = None
self._modification = None # = modified
self._name = None
self._operations = None
self._path = None
self._precision = None
self._published = None
self._scale = None
self._series = None
self._serviceLayers = None
self._specifications = None
self._tags = None
self._title = None
self._topologicalConsistency = None
self._type = None
self._updateFrequency = None
self._validFrom = None
self._validTo = None
self._validityComment = None
# if values have been passed, so use them as objects attributes.
# attributes are prefixed by an underscore '_'
if _abilities is not None:
self.__abilities = _abilities
if _created is not None:
self.__created = _created
if _creator is not None:
self.__creator = _creator
if _id is not None:
self.__id = _id
if _modified is not None:
self.__modified = _modified
if abstract is not None:
self._abstract = abstract
if collectionContext is not None:
self._collectionContext = collectionContext
if collectionMethod is not None:
self._collectionMethod = collectionMethod
if conditions is not None:
self._conditions = conditions
if contacts is not None:
self._contacts = contacts
if coordinateSystem is not None:
self._coordinateSystem = coordinateSystem
if created is not None:
self._creation = created
if distance is not None:
self._distance = distance
if editionProfile is not None:
self._editionProfile = editionProfile
if encoding is not None:
self._encoding = encoding
if envelope is not None:
self._envelope = envelope
if events is not None:
self._events = events
if featureAttributes is not None:
self._featureAttributes = featureAttributes
if features is not None:
self._features = features
if format is not None:
self._format = format
if formatVersion is not None:
self._formatVersion = formatVersion
if geometry is not None:
self._geometry = geometry
if keywords is not None:
self._keywords = keywords
if language is not None:
self._language = language
if layers is not None:
self._layers = layers
if limitations is not None:
self._limitations = limitations
if links is not None:
self._links = links
if modified is not None:
self._modification = modified
if name is not None:
self._name = name
if operations is not None:
self._operations = operations
if path is not None:
self._path = path
if precision is not None:
self._precision = precision
if published is not None:
self._published = published
        if scale is not None:
            self._scale = scale
        if series is not None:
            self._series = series
if serviceLayers is not None:
self._serviceLayers = serviceLayers
if specifications is not None:
self._specifications = specifications
if tags is not None:
self._tags = tags
if title is not None:
self._title = title
if topologicalConsistency is not None:
self._topologicalConsistency = topologicalConsistency
if type is not None:
self._type = type
if updateFrequency is not None:
self._updateFrequency = updateFrequency
if validFrom is not None:
self._validFrom = validFrom
if validTo is not None:
self._validTo = validTo
if validityComment is not None:
self._validityComment = validityComment
# -- PROPERTIES --------------------------------------------------------------------
# abilities of the user related to the metadata
@property
def _abilities(self) -> list:
"""Gets the abilities of this Metadata.
:return: The abilities of this Metadata.
:rtype: list
"""
return self.__abilities
# _created
@property
def _created(self) -> str:
"""Gets the creation datetime of the Metadata.
Datetime format is: `%Y-%m-%dT%H:%M:%S+00:00`.
:return: The created of this Metadata.
:rtype: str
"""
return self.__created
# _modified
@property
def _modified(self) -> str:
"""Gets the last modification datetime of this Metadata.
Datetime format is: `%Y-%m-%dT%H:%M:%S+00:00`.
:return: The modified of this Metadata.
:rtype: str
"""
return self.__modified
# metadata owner
@property
def _creator(self) -> dict:
"""Gets the creator of this Metadata.
:return: The creator of this Metadata.
:rtype: dict
"""
return self.__creator
# metadata UUID
@property
def _id(self) -> str:
"""Gets the id of this Metadata.
:return: The id of this Metadata.
:rtype: str
"""
return self.__id
@_id.setter
def _id(self, _id: str):
"""Sets the id of this Metadata.
:param str id: The id of this Metadata.
"""
self.__id = _id
# metadata description
@property
def abstract(self) -> str:
"""Gets the abstract.
:return: The abstract of this Metadata.
:rtype: str
"""
return self._abstract
@abstract.setter
def abstract(self, abstract: str):
"""Sets the abstract used into Isogeo filters of this Metadata.
:param str abstract: the abstract of this Metadata.
"""
self._abstract = abstract
# collection context
@property
def collectionContext(self) -> str:
"""Gets the collectionContext of this Metadata.
:return: The collectionContext of this Metadata.
:rtype: str
"""
return self._collectionContext
@collectionContext.setter
def collectionContext(self, collectionContext: str):
"""Sets the collection context of this Metadata.
:param str collectionContext: The collection context of this Metadata.
"""
self._collectionContext = collectionContext
# collection method
@property
def collectionMethod(self) -> str:
"""Gets the collection method of this Metadata.
:return: The collection method of this Metadata.
:rtype: str
"""
return self._collectionMethod
@collectionMethod.setter
def collectionMethod(self, collectionMethod: str):
"""Sets the collection method of this Metadata.
:param str collectionMethod: the collection method to set. Accepts markdown.
"""
self._collectionMethod = collectionMethod
# CGUs
@property
def conditions(self) -> list:
"""Gets the conditions of this Metadata.
:return: The conditions of this Metadata.
:rtype: list
"""
return self._conditions
@conditions.setter
def conditions(self, conditions: list):
"""Sets conditions of this Metadata.
:param list conditions: conditions to be set
"""
self._conditions = conditions
# contacts
@property
def contacts(self) -> list:
"""Gets the contacts of this Metadata.
:return: The contacts of this Metadata.
:rtype: list
"""
return self._contacts
@contacts.setter
def contacts(self, contacts: list):
"""Sets the of this Metadata.
:param list contacts: to be set
"""
self._contacts = contacts
# coordinateSystem
@property
def coordinateSystem(self) -> dict:
"""Gets the coordinateSystem of this Metadata.
:return: The coordinateSystem of this Metadata.
:rtype: dict
"""
return self._coordinateSystem
@coordinateSystem.setter
def coordinateSystem(self, coordinateSystem: dict):
"""Sets the coordinate systems of this Metadata.
:param dict coordinateSystem: to be set
"""
self._coordinateSystem = coordinateSystem
# created
@property
def created(self) -> str:
"""Gets the creation date of the data described by the Metadata.
        It's the equivalent of the `created` original attribute (renamed to avoid conflicts with the `_created` one).
Date format is: `%Y-%m-%dT%H:%M:%S+00:00`.
:return: The creation of this Metadata.
:rtype: str
"""
return self._creation
# distance
@property
def distance(self) -> str:
"""Gets the distance of this Metadata.
:return: The distance of this Metadata.
:rtype: str
"""
return self._distance
@distance.setter
def distance(self, distance: str):
"""Sets the of this Metadata.
:param str distance: to be set
"""
self._distance = distance
# editionProfile
@property
def editionProfile(self) -> str:
"""Gets the editionProfile of this Metadata.
:return: The editionProfile of this Metadata.
:rtype: str
"""
return self._editionProfile
@editionProfile.setter
def editionProfile(self, editionProfile: str):
"""Sets the of this Metadata.
:param str editionProfile: to be set
"""
self._editionProfile = editionProfile
# encoding
@property
def encoding(self) -> str:
"""Gets the encoding of this Metadata.
:return: The encoding of this Metadata.
:rtype: str
"""
return self._encoding
@encoding.setter
def encoding(self, encoding: str):
"""Sets the of this Metadata.
:param str encoding: to be set
"""
self._encoding = encoding
# envelope
@property
def envelope(self) -> str:
"""Gets the envelope of this Metadata.
:return: The envelope of this Metadata.
:rtype: str
"""
return self._envelope
@envelope.setter
def envelope(self, envelope: str):
"""Sets the of this Metadata.
:param str envelope: to be set
"""
self._envelope = envelope
# events
@property
def events(self) -> list:
"""Gets the events of this Metadata.
:return: The events of this Metadata.
:rtype: list
"""
return self._events
@events.setter
def events(self, events: list):
"""Sets the of this Metadata.
:param list events: to be set
"""
self._events = events
# featureAttributes
@property
def featureAttributes(self) -> list:
"""Gets the featureAttributes of this Metadata.
:return: The featureAttributes of this Metadata.
:rtype: list
"""
return self._featureAttributes
@featureAttributes.setter
def featureAttributes(self, featureAttributes: list):
"""Sets the of this Metadata.
:param list featureAttributes: to be set
"""
self._featureAttributes = featureAttributes
# features
@property
def features(self) -> int:
"""Gets the features of this Metadata.
:return: The features of this Metadata.
:rtype: int
"""
return self._features
@features.setter
def features(self, features: int):
"""Sets the of this Metadata.
:param int features: to be set
"""
self._features = features
# format
@property
def format(self) -> str:
"""Gets the format of this Metadata.
:return: The format of this Metadata.
:rtype: str
"""
return self._format
@format.setter
def format(self, format: str):
"""Sets the of this Metadata.
:param str format: to be set
"""
self._format = format
# formatVersion
@property
def formatVersion(self) -> str:
"""Gets the formatVersion of this Metadata.
:return: The formatVersion of this Metadata.
:rtype: str
"""
return self._formatVersion
@formatVersion.setter
def formatVersion(self, formatVersion: str):
"""Sets the of this Metadata.
:param str formatVersion: to be set
"""
self._formatVersion = formatVersion
# geometry
@property
def geometry(self) -> str:
"""Gets the geometry of this Metadata.
:return: The geometry of this Metadata.
:rtype: str
"""
return self._geometry
@geometry.setter
def geometry(self, geometry: str):
"""Sets the of this Metadata.
:param str geometry: to be set
"""
self._geometry = geometry
# keywords
@property
def keywords(self) -> str:
"""Gets the keywords of this Metadata.
:return: The keywords of this Metadata.
:rtype: str
"""
return self._keywords
@keywords.setter
def keywords(self, keywords: str):
"""Sets the of this Metadata.
:param str keywords: to be set
"""
self._keywords = keywords
# language
@property
def language(self) -> str:
"""Gets the language of this Metadata.
:return: The language of this Metadata.
:rtype: str
"""
return self._language
@language.setter
def language(self, language: str):
"""Sets the of this Metadata.
:param str language: to be set
"""
self._language = language
# layers
@property
def layers(self) -> list:
"""Gets the layers of this Metadata.
:return: The layers of this Metadata.
:rtype: list
"""
return self._layers
@layers.setter
def layers(self, layers: list):
"""Sets the of this Metadata.
:param list layers: to be set
"""
self._layers = layers
# limitations
@property
def limitations(self) -> str:
"""Gets the limitations of this Metadata.
:return: The limitations of this Metadata.
:rtype: str
"""
return self._limitations
@limitations.setter
def limitations(self, limitations: str):
"""Sets the of this Metadata.
:param str limitations: to be set
"""
self._limitations = limitations
# links
@property
def links(self) -> str:
"""Gets the links of this Metadata.
:return: The links of this Metadata.
:rtype: str
"""
return self._links
@links.setter
def links(self, links: str):
"""Sets the of this Metadata.
:param str links: to be set
"""
self._links = links
# modification
@property
def modified(self) -> str:
"""Gets the last modification date of the data described by this Metadata.
It's the equivalent of the `modified` original attribute (renamed to avoid conflicts with the `_modified` one).
:return: The modification of this Metadata.
:rtype: str
"""
return self._modification
# name
@property
def name(self) -> str:
"""Gets the name of this Metadata.
:return: The name of this Metadata.
:rtype: str
"""
return self._name
@name.setter
def name(self, name: str):
"""Sets technical name of the Metadata.
:param str name: technical name this Metadata.
"""
self._name = name
# operations
@property
def operations(self) -> list:
"""Gets the operations of this Metadata.
:return: The operations of this Metadata.
:rtype: list
"""
return self._operations
@operations.setter
def operations(self, operations: list):
"""Sets the of this Metadata.
:param list operations: to be set
"""
self._operations = operations
# path
@property
def path(self) -> str:
"""Gets the path of this Metadata.
:return: The path of this Metadata.
:rtype: str
"""
return self._path
@path.setter
def path(self, path: str):
"""Sets the of this Metadata.
:param str path: to be set
"""
self._path = path
# precision
@property
def precision(self) -> str:
"""Gets the precision of this Metadata.
:return: The precision of this Metadata.
:rtype: str
"""
return self._precision
@precision.setter
def precision(self, precision: str):
"""Sets the of this Metadata.
:param str precision: to be set
"""
self._precision = precision
# published
@property
def published(self) -> str:
"""Gets the published of this Metadata.
:return: The published of this Metadata.
:rtype: str
"""
return self._published
@published.setter
def published(self, published: str):
"""Sets the of this Metadata.
:param str published: to be set
"""
self._published = published
# scale
@property
def scale(self) -> str:
"""Gets the scale of this Metadata.
:return: The scale of this Metadata.
:rtype: str
"""
return self._scale
@scale.setter
def scale(self, scale: str):
"""Sets the of this Metadata.
:param str scale: to be set
"""
self._scale = scale
# series
@property
def series(self) -> str:
"""Gets the series of this Metadata.
:return: The series of this Metadata.
:rtype: str
"""
return self._series
@series.setter
def series(self, series: str):
"""Sets the of this Metadata.
:param str series: to be set
"""
self._series = series
# serviceLayers
@property
def serviceLayers(self) -> list:
"""Gets the serviceLayers of this Metadata.
:return: The serviceLayers of this Metadata.
:rtype: list
"""
return self._serviceLayers
@serviceLayers.setter
def serviceLayers(self, serviceLayers: list):
"""Sets the of this Metadata.
:param list serviceLayers: to be set
"""
self._serviceLayers = serviceLayers
# specifications
@property
def specifications(self) -> str:
"""Gets the specifications of this Metadata.
:return: The specifications of this Metadata.
:rtype: str
"""
return self._specifications
@specifications.setter
def specifications(self, specifications: str):
"""Sets the of this Metadata.
:param str specifications: to be set
"""
self._specifications = specifications
# tags
@property
def tags(self) -> str:
"""Gets the tags of this Metadata.
:return: The tags of this Metadata.
:rtype: str
"""
return self._tags
@tags.setter
def tags(self, tags: str):
"""Sets the of this Metadata.
:param str tags: to be set
"""
self._tags = tags
# title
@property
def title(self) -> str:
"""Gets the title of this Metadata.
:return: The title of this Metadata.
:rtype: str
"""
return self._title
@title.setter
def title(self, title: str):
"""Sets the of this Metadata.
:param str title: to be set
"""
self._title = title
# topologicalConsistency
@property
def topologicalConsistency(self) -> str:
"""Gets the topologicalConsistency of this Metadata.
:return: The topologicalConsistency of this Metadata.
:rtype: str
"""
return self._topologicalConsistency
@topologicalConsistency.setter
def topologicalConsistency(self, topologicalConsistency: str):
"""Sets the of this Metadata.
:param str topologicalConsistency: to be set
"""
self._topologicalConsistency = topologicalConsistency
# type
@property
def type(self) -> str:
"""Gets the type of this Metadata.
:return: The type of this Metadata.
:rtype: str
"""
return self._type
@type.setter
def type(self, type: str):
"""Sets the type of this Metadata.
:param str type: The type of this Metadata.
"""
# check type value
if type not in MetadataTypes.__members__:
raise ValueError(
"Metadata type '{}' is not an accepted value. Must be one of: {}.".format(
type, " | ".join([e.name for e in MetadataTypes])
)
)
self._type = type
# updateFrequency
@property
def updateFrequency(self) -> str:
"""Gets the updateFrequency of this Metadata.
:return: The updateFrequency of this Metadata.
:rtype: str
"""
return self._updateFrequency
@updateFrequency.setter
def updateFrequency(self, updateFrequency: str):
"""Sets the of this Metadata.
:param str updateFrequency: to be set
"""
self._updateFrequency = updateFrequency
# validFrom
@property
def validFrom(self) -> str:
"""Gets the validFrom of this Metadata.
:return: The validFrom of this Metadata.
:rtype: str
"""
return self._validFrom
@validFrom.setter
def validFrom(self, validFrom: str):
"""Sets the of this Metadata.
:param str validFrom: to be set
"""
self._validFrom = validFrom
# validTo
@property
def validTo(self) -> str:
"""Gets the validTo of this Metadata.
:return: The validTo of this Metadata.
:rtype: str
"""
return self._validTo
@validTo.setter
def validTo(self, validTo: str):
"""Sets the of this Metadata.
:param str validTo: to be set
"""
self._validTo = validTo
# validityComment
@property
def validityComment(self) -> str:
"""Gets the validityComment of this Metadata.
:return: The validityComment of this Metadata.
:rtype: str
"""
return self._validityComment
@validityComment.setter
def validityComment(self, validityComment: str):
"""Sets the of this Metadata.
:param str validityComment: to be set
"""
self._validityComment = validityComment
# -- SPECIFIC TO IMPLEMENTATION ----------------------------------------------------
@property
def groupName(self) -> str:
"""Shortcut to get the name of the workgroup which owns the Metadata."""
if isinstance(self._creator, dict):
return self._creator.get("contact").get("name")
elif isinstance(self._creator, Workgroup):
return self._creator.contact.get("name")
else:
return None
@property
def groupId(self) -> str:
"""Shortcut to get the UUID of the workgroup which owns the Metadata."""
if isinstance(self._creator, dict):
return self._creator.get("_id")
elif isinstance(self._creator, Workgroup):
return self._creator._id
else:
return None
# -- METHODS -----------------------------------------------------------------------
def admin_url(self, url_base: str = "https://app.isogeo.com") -> str:
"""Returns the administration URL (https://app.isogeo.com) for this metadata.
:param str url_base: base URL of admin site. Defaults to: https://app.isogeo.com
:rtype: str
"""
if self._creator is None:
logger.warning("Creator is required to build admin URL")
return False
creator_id = self._creator.get("_id")
return "{}/groups/{}/resources/{}/".format(url_base, creator_id, self._id)
def title_or_name(self, slugged: bool = False) -> str:
"""Gets the title of this Metadata or the name if there is no title.
It can return a slugified value.
:param bool slugged: slugify title. Defaults to `False`.
:returns: the title or the name of this Metadata.
:rtype: str
"""
if self._title:
title_or_name = self._title
else:
title_or_name = self._name
# slugify
if slugged:
title_or_name = (
unicodedata.normalize("NFKD", title_or_name)
.encode("ascii", "ignore")
.decode("ascii")
)
title_or_name = _regex_slugify_strip.sub("", title_or_name).strip().lower()
title_or_name = _regex_slugify_hyphenate.sub("-", title_or_name)
return title_or_name
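# Example (hypothetical values, assuming the module-level slugify regexes strip
# punctuation and hyphenate spaces): a Metadata titled "Roads of Paris (2024)"
# would return "roads-of-paris-2024" with slugged=True, and would fall back to
# its technical name when no title is set.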
def to_dict(self) -> dict:
"""Returns the model properties as a dict"""
result = {}
for attr, _ in self.ATTR_TYPES.items():
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
)
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict")
else item,
value.items(),
)
)
else:
result[attr] = value
if issubclass(Metadata, dict):
for key, value in self.items():
result[key] = value
return result
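# note: to_dict() walks ATTR_TYPES and recursively serialises nested models
# (contacts, events, ...) that expose their own to_dict(), so the result is made
# of plain dicts and lists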
def to_dict_creation(self) -> dict:
"""Returns the model properties as a dict structured for creation purpose (POST)"""
result = {}
for attr, _ in self.ATTR_CREA.items():
# get attribute value
value = getattr(self, attr)
# switch attribute name for creation purpose
if attr in self.ATTR_MAP:
attr = self.ATTR_MAP.get(attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
)
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict")
else item,
value.items(),
)
)
else:
result[attr] = value
if issubclass(Metadata, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self) -> str:
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self) -> str:
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other) -> bool:
"""Returns true if both objects are equal"""
if not isinstance(other, Metadata):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other) -> bool:
"""Returns true if both objects are not equal"""
return not self == other
# ##############################################################################
# ##### Stand alone program ########
# ##################################
if __name__ == "__main__":
""" standalone execution """
md = Metadata()
print(md)
| gpl-3.0 | 639,171,598,854,724,600 | 26.235252 | 116 | 0.522886 | false |
Yupeek/django-dynamic-logging | dynamic_logging/scheduler.py | 1 | 8574 | # -*- coding: utf-8 -*-
import functools
import logging
import threading
from django.db.utils import ProgrammingError
from django.utils import timezone
from dynamic_logging.models import Trigger
logger = logging.getLogger(__name__)
class Scheduler(object):
"""
a special class that keeps track of the next event to trigger and
will trigger it in time.
"""
def __init__(self):
self.next_timer = None
"""
:type: threading.Timer
"""
self._lock = threading.RLock()
self._enabled = True
self.current_trigger = Trigger.default()
"""
:type: Trigger
"""
self.current_config_hash = None
self.start_thread = True
"""
a bool to prevent the threads from starting, for testing purposes
"""
self.reload_timer = None
"""
the local timer for the deferred reload
"""
self.trigger_applied = threading.Event()
"""
a simple Event used to test each time a trigger is applied
"""
def disable(self):
"""
disable this scheduler. reload has no effect
:return:
"""
if self._enabled:
self._enabled = False
self.reset()
def enable(self):
"""
reenable the scheduler.
set the next timer to the next Trigger
:return:
"""
if not self._enabled:
self._enabled = True
self.reload()
def is_enabled(self):
return self._enabled
@staticmethod
def get_next_wake(current=None, after=None):
"""
function that returns the next trigger to apply and the date at which it should occur
:param Trigger current: the currently active trigger (which won't be re-enabled)
:param datetime after: the date to use instead of the current time
:return: the next trigger to enable, with the date at which it should occur. The date can be None if the trigger
shall be enabled right now
:rtype: (Trigger, datetime.datetime)
"""
after = after or timezone.now()
# next wake is the earliest of :
# - the end of the current one
# - the start of a new one
try:
next_trigger = Trigger.objects.filter(is_active=True, start_date__gt=after).earliest('start_date')
except Trigger.DoesNotExist:
# no next trigger
next_trigger = None # type: Trigger
except ProgrammingError:
next_trigger = None
# the boolean variables are
# w = current trigger is null
# x = current trigger's end date is null => the trigger doesn't end
# y = no next trigger
# z = current trigger ends before the next one starts
# the possible results are
# a = activate the next trigger at its start date
# b = find the best trigger at the current one's end date
# c = no trigger
# boolean simplification leads to:
# c = y and not b
# a = not y and not b
# b = not w and not x and (y or (not y and z))
# start with case b => find the best trigger at the end of the current one
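# worked example (illustrative): if the current trigger ends at 12:00 and the next
# active trigger only starts at 14:00, case b applies first: at 12:00 we enable
# whichever trigger is valid then (or the default one), and a later wake handles
# the 14:00 trigger (case a)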
if (
current is not None # not w
and current.end_date is not None # not x
and (next_trigger is None or current.end_date < next_trigger.start_date)
):
# b =>
try:
last_active = Trigger.objects.filter(is_active=True).valid_at(current.end_date).earliest('start_date')
except Trigger.DoesNotExist:
# no trigger active at the end of the current one, the default will be enabled
last_active = Trigger.default()
return last_active, current.end_date
elif next_trigger is None: # case c = not b and y
return current or Trigger.default(), None
else: # case a (last case)
return next_trigger, next_trigger.start_date
def set_next_wake(self, trigger, at):
logger.debug("next trigger to enable : %s at %s", trigger, at, extra={'next_date': at})
with self._lock:
self.reset_timer()
interval = (at - timezone.now()).total_seconds()
self.next_timer = threading.Timer(interval,
functools.partial(self.wake, trigger=trigger, date=at))
self.next_timer.name = 'ApplyTimer for %s' % trigger.pk
self.next_timer.daemon = True # prevent the program from hanging until the next trigger
self.next_timer.trigger = trigger
self.next_timer.at = at
if self.start_thread:
# in some tests, we skip the overhead of starting a thread for nothing.
self.next_timer.start()
def reset(self):
"""
reset the logging to the default settings and cancel the timer that would change it
:return:
"""
with self._lock:
self.reset_timer()
self.current_trigger = Trigger.default()
def reset_timer(self):
"""
reset the timer
:return:
"""
with self._lock:
if self.next_timer is not None:
self.next_timer.cancel()
self.next_timer = None
if self.reload_timer is not None:
self.reload_timer.cancel()
self.reload_timer = None
def activate_current(self):
"""
activate the current trigger
:return:
"""
try:
t = Trigger.objects.filter(is_active=True).valid_at(timezone.now()).latest('start_date')
except Trigger.DoesNotExist:
self.apply(Trigger.default())
return None
except ProgrammingError:
logger.info('the django-dynamic-logging tables don\'t exist: falling back to normal logging')
self.apply(Trigger.default())
return None
try:
self.apply(t)
return t
except ValueError as e:
logger.exception("error with current logger activation trigger=%s, config=%s => %s",
t.id, t.config_id, str(e))
return None
def reload(self, interval=None):
"""
cancel the timer and the next trigger, and
compute the next one. can be done after an interval to delay the setup for some time.
:return:
"""
if self._enabled:
with self._lock:
if self.reload_timer is not None:
self.reload_timer.cancel()
if interval is not None:
self.reload_timer = t = threading.Timer(interval, self.reload)
t.name = "ReloadTimer"
t.daemon = True
t.start()
self.reload_timer = t
return
self.reset_timer()
current = self.activate_current()
trigger, at = self.get_next_wake(current=current)
if at:
self.set_next_wake(trigger, at)
else:
# no date to wake. we apply now this trigger and so be it
self.apply(trigger)
def wake(self, trigger, date):
"""
function called each time a timer expires
:return:
"""
logger.debug("wake to enable trigger %s at %s", trigger, date, extra={'expected_date': date})
next_trigger, at = self.get_next_wake(current=trigger, after=date)
with self._lock:
self.apply(trigger)
self.current_trigger = trigger
# get the next trigger valid at the current expected date
# we don't use timezone.now() to prevent the case where threading.Timer wakes up
# some ms before the expected date
if at:
self.set_next_wake(next_trigger, at)
def apply(self, trigger):
hash_config = trigger.config.get_hash()
if self.current_config_hash == hash_config:
logger.debug("not applying currently active config %s", trigger,
extra={'trigger': trigger, 'config': trigger.config.config_json})
else:
logger.debug('applying %s', trigger, extra={'trigger': trigger, 'config': trigger.config.config_json})
trigger.apply()
self.current_config_hash = hash_config
self.current_trigger = trigger
self.trigger_applied.set()
main_scheduler = Scheduler()
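# main_scheduler is the module-level singleton. Typical usage (sketch, actual wiring
# not shown here): call main_scheduler.reload() after Trigger or Config changes and
# main_scheduler.disable()/enable() around code that must not be rescheduled.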
| agpl-3.0 | 6,981,103,584,917,624,000 | 34.429752 | 119 | 0.55575 | false |
lochiiconnectivity/exabgp | lib/exabgp/configuration/engine/tokeniser.py | 1 | 3330 | # encoding: utf-8
"""
tokeniser.py
Created by Thomas Mangin on 2014-06-22.
Copyright (c) 2014-2015 Exa Networks. All rights reserved.
"""
from exabgp.util import coroutine
from exabgp.configuration.engine.location import Location
from exabgp.configuration.engine.raised import Raised
# convert special characters
@coroutine.join
def unescape (s):
start = 0
while start < len(s):
pos = s.find('\\', start)
if pos == -1:
yield s[start:]
break
yield s[start:pos]
pos += 1
esc = s[pos]
if esc == 'b':
yield '\b'
elif esc == 'f':
yield '\f'
elif esc == 'n':
yield '\n'
elif esc == 'r':
yield '\r'
elif esc == 't':
yield '\t'
elif esc == 'u':
yield chr(int(s[pos + 1:pos + 5], 16))
pos += 4
else:
yield esc
start = pos + 1
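# example: unescape(r'line1\nline2\u0041') yields 'line1', '\n', 'line2' and 'A',
# which coroutine.join presumably concatenates into 'line1\nline2A'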
# A coroutine which returns the next token, or a quoted string, from the stream
@coroutine.each
def tokens (stream):
spaces = [' ','\t','\r','\n']
strings = ['"', "'"]
syntax = [',','[',']','{','}']
comment = ['#',]
nb_lines = 0
for line in stream:
nb_lines += 1
nb_chars = 0
quoted = ''
word = ''
for char in line:
if char in comment:
if quoted:
word += char
nb_chars += 1
else:
if word:
yield nb_lines,nb_chars,line,char
word = ''
break
elif char in syntax:
if quoted:
word += char
else:
if word:
yield nb_lines,nb_chars-len(word),line,word
word = ''
yield nb_lines,nb_chars,line,char
nb_chars += 1
elif char in spaces:
if quoted:
word += char
elif word:
yield nb_lines,nb_chars-len(word),line,word
word = ''
nb_chars += 1
elif char in strings:
word += char
if quoted == char:
quoted = ''
yield nb_lines,nb_chars-len(word),line,word
word = ''
else:
quoted = char
nb_chars += 1
else:
word += char
nb_chars += 1
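# for example, the configuration line "neighbor 10.0.0.1 {" yields the words
# 'neighbor' and '10.0.0.1' plus the syntax token '{', each with its line number,
# column and source line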
# ==================================================================== Tokeniser
# Return the next token from the configuration
class Tokeniser (Location):
def __init__ (self,name,stream):
super(Tokeniser,self).__init__()
self.name = name # A unique name for this tokeniser, so we can have multiple
self.tokeniser = tokens(stream) # A coroutine giving us the produced tokens
self._rewind = [] # Should we want to rewind, the list of tokens to pop first
def __call__ (self):
if self._rewind:
return self._rewind.pop()
token = self.content(self.tokeniser)
return token
# XXX: FIXME: line and position only work if we only rewind one element
def rewind (self,token):
self._rewind.append(token)
def content (self,producer):
try:
while True:
self.idx_line,self.idx_column,self.line,token = producer()
if token == '[':
returned = []
for token in self.iterate_list(producer):
returned.append((self.idx_line,self.idx_column,self.line,token))
return returned
elif token[0] in ('"',"'"):
return unescape(token[1:-1])
else:
return token
except ValueError:
raise Raised(Location(self.idx_line,self.idx_column,self.line),'Could not parse %s' % str(token))
except StopIteration:
return None
def iterate_list (self,producer):
token = self.content(producer)
while token and token != ']':
yield token
token = self.content(producer)
| bsd-3-clause | -4,720,082,603,888,199,000 | 22.125 | 100 | 0.597297 | false |
tmilicic/networkx | networkx/classes/function.py | 1 | 16409 | """Functional interface to graph methods and assorted utilities.
"""
# Copyright (C) 2004-2015 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
#
import networkx as nx
from networkx.utils import not_implemented_for
import itertools
__author__ = """\n""".join(['Aric Hagberg ([email protected])',
'Pieter Swart ([email protected])',
'Dan Schult([email protected])'])
__all__ = ['nodes', 'edges', 'degree', 'degree_histogram', 'neighbors',
'number_of_nodes', 'number_of_edges', 'density',
'is_directed', 'info', 'freeze', 'is_frozen', 'subgraph',
'create_empty_copy', 'set_node_attributes',
'get_node_attributes', 'set_edge_attributes',
'get_edge_attributes', 'all_neighbors', 'non_neighbors',
'non_edges', 'common_neighbors', 'is_weighted',
'is_negatively_weighted', 'is_empty']
def nodes(G):
"""Return an iterator over the graph nodes."""
return G.nodes()
def edges(G,nbunch=None):
"""Return iterator over edges incident to nodes in nbunch.
Return all edges if nbunch is unspecified or nbunch=None.
For digraphs, edges=out_edges
"""
return G.edges(nbunch)
def degree(G,nbunch=None,weight=None):
"""Return degree of single node or of nbunch of nodes.
If nbunch is omitted, then return degrees of *all* nodes.
"""
return G.degree(nbunch,weight)
def neighbors(G,n):
"""Return a list of nodes connected to node n. """
return G.neighbors(n)
def number_of_nodes(G):
"""Return the number of nodes in the graph."""
return G.number_of_nodes()
def number_of_edges(G):
"""Return the number of edges in the graph. """
return G.number_of_edges()
def density(G):
r"""Return the density of a graph.
The density for undirected graphs is
.. math::
d = \frac{2m}{n(n-1)},
and for directed graphs is
.. math::
d = \frac{m}{n(n-1)},
where `n` is the number of nodes and `m` is the number of edges in `G`.
Notes
-----
The density is 0 for a graph without edges and 1 for a complete graph.
The density of multigraphs can be higher than 1.
Self loops are counted in the total number of edges so graphs with self
loops can have density higher than 1.
"""
n=number_of_nodes(G)
m=number_of_edges(G)
if m==0 or n <= 1:
d=0.0
else:
if G.is_directed():
d=m/float(n*(n-1))
else:
d= m*2.0/float(n*(n-1))
return d
def degree_histogram(G):
"""Return a list of the frequency of each degree value.
Parameters
----------
G : Networkx graph
A graph
Returns
-------
hist : list
A list of frequencies of degrees.
The degree values are the index in the list.
Notes
-----
Note: the bins are width one, hence len(list) can be large
(Order(number_of_edges))
"""
# We need to make degseq list because we call it twice.
degseq = list(d for n, d in G.degree())
dmax = max(degseq) + 1
freq = [ 0 for d in range(dmax) ]
for d in degseq:
freq[d] += 1
return freq
def is_directed(G):
""" Return True if graph is directed."""
return G.is_directed()
def frozen(*args):
"""Dummy method for raising errors when trying to modify frozen graphs"""
raise nx.NetworkXError("Frozen graph can't be modified")
def freeze(G):
"""Modify graph to prevent further change by adding or removing
nodes or edges.
Node and edge data can still be modified.
Parameters
----------
G : graph
A NetworkX graph
Examples
--------
>>> G=nx.Graph()
>>> G.add_path([0,1,2,3])
>>> G=nx.freeze(G)
>>> try:
... G.add_edge(4,5)
... except nx.NetworkXError as e:
... print(str(e))
Frozen graph can't be modified
Notes
-----
To "unfreeze" a graph you must make a copy by creating a new graph object:
>>> graph = nx.path_graph(4)
>>> frozen_graph = nx.freeze(graph)
>>> unfrozen_graph = nx.Graph(frozen_graph)
>>> nx.is_frozen(unfrozen_graph)
False
See Also
--------
is_frozen
"""
G.add_node=frozen
G.add_nodes_from=frozen
G.remove_node=frozen
G.remove_nodes_from=frozen
G.add_edge=frozen
G.add_edges_from=frozen
G.remove_edge=frozen
G.remove_edges_from=frozen
G.clear=frozen
G.frozen=True
return G
def is_frozen(G):
"""Return True if graph is frozen.
Parameters
----------
G : graph
A NetworkX graph
See Also
--------
freeze
"""
try:
return G.frozen
except AttributeError:
return False
def subgraph(G, nbunch):
"""Return the subgraph induced on nodes in nbunch.
Parameters
----------
G : graph
A NetworkX graph
nbunch : list, iterable
A container of nodes that will be iterated through once (thus
it should be an iterator or be iterable). Each element of the
container should be a valid node type: any hashable type except
None. If nbunch is None, the induced subgraph contains all nodes in the graph.
Nodes in nbunch that are not in the graph will be (quietly)
ignored.
Notes
-----
subgraph(G) calls G.subgraph()
"""
return G.subgraph(nbunch)
def create_empty_copy(G,with_nodes=True):
"""Return a copy of the graph G with all of the edges removed.
Parameters
----------
G : graph
A NetworkX graph
with_nodes : bool (default=True)
Include nodes.
Notes
-----
Graph, node, and edge data is not propagated to the new graph.
"""
H=G.__class__()
if with_nodes:
H.add_nodes_from(G)
return H
def info(G, n=None):
"""Print short summary of information for the graph G or the node n.
Parameters
----------
G : Networkx graph
A graph
n : node (any hashable)
A node in the graph G
"""
info='' # append this all to a string
if n is None:
info+="Name: %s\n"%G.name
type_name = [type(G).__name__]
info+="Type: %s\n"%",".join(type_name)
info+="Number of nodes: %d\n"%G.number_of_nodes()
info+="Number of edges: %d\n"%G.number_of_edges()
nnodes=G.number_of_nodes()
if len(G) > 0:
if G.is_directed():
info+="Average in degree: %8.4f\n"%\
(sum(d for n, d in G.in_degree())/float(nnodes))
info+="Average out degree: %8.4f"%\
(sum(d for n, d in G.out_degree())/float(nnodes))
else:
s=sum(dict(G.degree()).values())
info+="Average degree: %8.4f"%\
(float(s)/float(nnodes))
else:
if n not in G:
raise nx.NetworkXError("node %s not in graph"%(n,))
info+="Node % s has the following properties:\n"%n
info+="Degree: %d\n"%G.degree(n)
info+="Neighbors: "
info+=' '.join(str(nbr) for nbr in G.neighbors(n))
return info
def set_node_attributes(G, name, values):
"""Set node attributes from dictionary of nodes and values
Parameters
----------
G : NetworkX Graph
name : string
Attribute name
values: dict
Dictionary of attribute values keyed by node. If `values` is not a
dictionary, then it is treated as a single attribute value that is then
applied to every node in `G`.
Examples
--------
>>> G = nx.path_graph(3)
>>> bb = nx.betweenness_centrality(G)
>>> nx.set_node_attributes(G, 'betweenness', bb)
>>> G.node[1]['betweenness']
1.0
"""
try:
values.items
except AttributeError:
# Treat `value` as the attribute value for each node.
values = dict(zip(G.nodes(), [values] * len(G)))
for node, value in values.items():
G.node[node][name] = value
def get_node_attributes(G, name):
"""Get node attributes from graph
Parameters
----------
G : NetworkX Graph
name : string
Attribute name
Returns
-------
Dictionary of attributes keyed by node.
Examples
--------
>>> G=nx.Graph()
>>> G.add_nodes_from([1,2,3],color='red')
>>> color=nx.get_node_attributes(G,'color')
>>> color[1]
'red'
"""
return dict( (n,d[name]) for n,d in G.node.items() if name in d)
def set_edge_attributes(G, name, values):
"""Set edge attributes from dictionary of edge tuples and values.
Parameters
----------
G : NetworkX Graph
name : string
Attribute name
values : dict
Dictionary of attribute values keyed by edge (tuple). For multigraphs,
the keys tuples must be of the form (u, v, key). For non-multigraphs,
the keys must be tuples of the form (u, v). If `values` is not a
dictionary, then it is treated as a single attribute value that is then
applied to every edge in `G`.
Examples
--------
>>> G = nx.path_graph(3)
>>> bb = nx.edge_betweenness_centrality(G, normalized=False)
>>> nx.set_edge_attributes(G, 'betweenness', bb)
>>> G[1][2]['betweenness']
2.0
"""
try:
values.items
except AttributeError:
# Treat `value` as the attribute value for each edge.
if G.is_multigraph():
edges = list(G.edges(keys=True))
else:
edges = list(G.edges())
values = dict(zip(edges, [values] * len(list(edges))))
if G.is_multigraph():
for (u, v, key), value in values.items():
G[u][v][key][name] = value
else:
for (u, v), value in values.items():
G[u][v][name] = value
def get_edge_attributes(G, name):
"""Get edge attributes from graph
Parameters
----------
G : NetworkX Graph
name : string
Attribute name
Returns
-------
Dictionary of attributes keyed by edge. For (di)graphs, the keys are
2-tuples of the form: (u,v). For multi(di)graphs, the keys are 3-tuples of
the form: (u, v, key).
Examples
--------
>>> G=nx.Graph()
>>> G.add_path([1,2,3],color='red')
>>> color=nx.get_edge_attributes(G,'color')
>>> color[(1,2)]
'red'
"""
if G.is_multigraph():
edges = G.edges(keys=True, data=True)
else:
edges = G.edges(data=True)
return dict( (x[:-1], x[-1][name]) for x in edges if name in x[-1] )
def all_neighbors(graph, node):
""" Returns all of the neighbors of a node in the graph.
If the graph is directed returns predecessors as well as successors.
Parameters
----------
graph : NetworkX graph
Graph to find neighbors.
node : node
The node whose neighbors will be returned.
Returns
-------
neighbors : iterator
Iterator of neighbors
"""
if graph.is_directed():
values = itertools.chain.from_iterable([graph.predecessors(node),
graph.successors(node)])
else:
values = graph.neighbors(node)
return values
def non_neighbors(graph, node):
"""Returns the non-neighbors of the node in the graph.
Parameters
----------
graph : NetworkX graph
Graph to find neighbors.
node : node
The node whose neighbors will be returned.
Returns
-------
non_neighbors : iterator
Iterator of nodes in the graph that are not neighbors of the node.
"""
nbors = set(neighbors(graph, node)) | set([node])
return (nnode for nnode in graph if nnode not in nbors)
def non_edges(graph):
"""Returns the non-existent edges in the graph.
Parameters
----------
graph : NetworkX graph.
Graph to find non-existent edges.
Returns
-------
non_edges : iterator
Iterator of edges that are not in the graph.
"""
if graph.is_directed():
for u in graph.nodes():
for v in non_neighbors(graph, u):
yield (u, v)
else:
nodes = set(graph)
while nodes:
u = nodes.pop()
for v in nodes - set(graph[u]):
yield (u, v)
@not_implemented_for('directed')
def common_neighbors(G, u, v):
"""Return the common neighbors of two nodes in a graph.
Parameters
----------
G : graph
A NetworkX undirected graph.
u, v : nodes
Nodes in the graph.
Returns
-------
cnbors : iterator
Iterator of common neighbors of u and v in the graph.
Raises
------
NetworkXError
If u or v is not a node in the graph.
Examples
--------
>>> G = nx.complete_graph(5)
>>> sorted(nx.common_neighbors(G, 0, 1))
[2, 3, 4]
"""
if u not in G:
raise nx.NetworkXError('u is not in the graph.')
if v not in G:
raise nx.NetworkXError('v is not in the graph.')
# Return a generator explicitly instead of yielding so that the above
# checks are executed eagerly.
return (w for w in G[u] if w in G[v] and w not in (u, v))
def is_weighted(G, edge=None, weight='weight'):
"""Returns ``True`` if ``G`` has weighted edges.
Parameters
----------
G : graph
A NetworkX graph.
edge : tuple, optional
A 2-tuple specifying the only edge in ``G`` that will be tested. If
``None``, then every edge in ``G`` is tested.
weight: string, optional
The attribute name used to query for edge weights.
Returns
-------
bool
A boolean signifying if ``G``, or the specified edge, is weighted.
Raises
------
NetworkXError
If the specified edge does not exist.
Examples
--------
>>> G = nx.path_graph(4)
>>> nx.is_weighted(G)
False
>>> nx.is_weighted(G, (2, 3))
False
>>> G = nx.DiGraph()
>>> G.add_edge(1, 2, weight=1)
>>> nx.is_weighted(G)
True
"""
if edge is not None:
data = G.get_edge_data(*edge)
if data is None:
msg = 'Edge {!r} does not exist.'.format(edge)
raise nx.NetworkXError(msg)
return weight in data
if is_empty(G):
# Special handling required since: all([]) == True
return False
return all(weight in data for u, v, data in G.edges(data=True))
def is_negatively_weighted(G, edge=None, weight='weight'):
"""Returns ``True`` if ``G`` has negatively weighted edges.
Parameters
----------
G : graph
A NetworkX graph.
edge : tuple, optional
A 2-tuple specifying the only edge in ``G`` that will be tested. If
``None``, then every edge in ``G`` is tested.
weight: string, optional
The attribute name used to query for edge weights.
Returns
-------
bool
A boolean signifying if ``G``, or the specified edge, is negatively
weighted.
Raises
------
NetworkXError
If the specified edge does not exist.
Examples
--------
>>> G=nx.Graph()
>>> G.add_edges_from([(1, 3), (2, 4), (2, 6)])
>>> G.add_edge(1, 2, weight=4)
>>> nx.is_negatively_weighted(G, (1, 2))
False
>>> G[2][4]['weight'] = -2
>>> nx.is_negatively_weighted(G)
True
>>> G = nx.DiGraph()
>>> G.add_weighted_edges_from([('0', '3', 3), ('0', '1', -5), ('1', '0', -2)])
>>> nx.is_negatively_weighted(G)
True
"""
if edge is not None:
data = G.get_edge_data(*edge)
if data is None:
msg = 'Edge {!r} does not exist.'.format(edge)
raise nx.NetworkXError(msg)
return weight in data and data[weight] < 0
return any(weight in data and data[weight] < 0
for u, v, data in G.edges(data=True))
def is_empty(G):
"""Returns ``True`` if ``G`` has no edges.
Parameters
----------
G : graph
A NetworkX graph.
Returns
-------
bool
``True`` if ``G`` has no edges, and ``False`` otherwise.
Notes
-----
An empty graph can have nodes but not edges. The empty graph with zero
nodes is known as the null graph. This is an O(n) operation where n is the
number of nodes in the graph.
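Examples
--------
>>> G = nx.empty_graph(3)
>>> nx.is_empty(G)
True
>>> G.add_edge(0, 1)
>>> nx.is_empty(G)
False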
"""
return not any(G.adj.values())
| bsd-3-clause | -4,094,525,715,898,061,000 | 23.899848 | 82 | 0.566945 | false |
clintonblackmore/enchanting2 | factory.py | 1 | 1388 | """factory.py lets you deserialize an unknown type from XML"""
import xml.etree.cElementTree as ElementTree
import data
import script
import media
import actor
def deserialize_value(element, *args):
"""Get an object representing this element,
be it a literal, list or what-not"""
class_map = {
"block-definition": script.BlockDefinition,
"block": script.Block,
"blocks": script.Blocks,
"bool": data.Literal,
"color": data.Color,
"comment": data.Comment,
"costume": media.Costume,
"costumes": media.Costumes,
"custom-block": script.Block,
"l": data.Literal,
"list": data.List,
"project": actor.Project,
"script": script.Script,
}
# if element.tag == "list":
# ElementTree.dump(element)
item = class_map[element.tag](*args)
item.deserialize(element)
return item
def deserialize_xml(xml, *args):
"""Take some XML and return an object for it"""
return deserialize_value(ElementTree.XML(xml), *args)
def deserialize_file(filename, *args):
"""Return the object represented by the data in the file"""
return deserialize_value(ElementTree.parse(filename).getroot(), *args)
def xml_for_object(object, **kwargs):
"""Return the XML that represents this object"""
return ElementTree.tostring(object.serialize(**kwargs))
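# usage sketch (hypothetical file name): round-tripping a project
# project = deserialize_file("project.xml") # an actor.Project, since "project" maps to it
# xml = xml_for_object(project) # back to XML via the object's serialize()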
| agpl-3.0 | 7,742,323,621,183,669,000 | 26.215686 | 74 | 0.650576 | false |
ESS-LLP/erpnext-healthcare | erpnext/hr/doctype/salary_slip/salary_slip.py | 1 | 34819 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, erpnext
import datetime
from frappe.utils import add_days, cint, cstr, flt, getdate, rounded, date_diff, money_in_words
from frappe.model.naming import make_autoname
from frappe import msgprint, _
from erpnext.hr.doctype.payroll_entry.payroll_entry import get_start_end_dates
from erpnext.hr.doctype.employee.employee import get_holiday_list_for_employee
from erpnext.utilities.transaction_base import TransactionBase
from frappe.utils.background_jobs import enqueue
from erpnext.hr.doctype.additional_salary.additional_salary import get_additional_salary_component
from erpnext.hr.utils import get_payroll_period
from erpnext.hr.doctype.employee_benefit_application.employee_benefit_application import get_benefit_component_amount
from erpnext.hr.doctype.employee_benefit_claim.employee_benefit_claim import get_benefit_claim_amount, get_last_payroll_period_benefits
class SalarySlip(TransactionBase):
def __init__(self, *args, **kwargs):
super(SalarySlip, self).__init__(*args, **kwargs)
self.series = 'Sal Slip/{0}/.#####'.format(self.employee)
self.whitelisted_globals = {
"int": int,
"float": float,
"long": int,
"round": round,
"date": datetime.date,
"getdate": getdate
}
def autoname(self):
self.name = make_autoname(self.series)
def validate(self):
self.status = self.get_status()
self.validate_dates()
self.check_existing()
if not self.salary_slip_based_on_timesheet:
self.get_date_details()
if not (len(self.get("earnings")) or len(self.get("deductions"))):
# get details from salary structure
self.get_emp_and_leave_details()
else:
self.get_leave_details(lwp = self.leave_without_pay)
# if self.salary_slip_based_on_timesheet or not self.net_pay:
self.calculate_net_pay()
company_currency = erpnext.get_company_currency(self.company)
self.total_in_words = money_in_words(self.rounded_total, company_currency)
if frappe.db.get_single_value("HR Settings", "max_working_hours_against_timesheet"):
max_working_hours = frappe.db.get_single_value("HR Settings", "max_working_hours_against_timesheet")
if self.salary_slip_based_on_timesheet and (self.total_working_hours > int(max_working_hours)):
frappe.msgprint(_("Total working hours should not be greater than max working hours {0}").
format(max_working_hours), alert=True)
def validate_dates(self):
if date_diff(self.end_date, self.start_date) < 0:
frappe.throw(_("To date cannot be before From date"))
def calculate_component_amounts(self):
if not getattr(self, '_salary_structure_doc', None):
self._salary_structure_doc = frappe.get_doc('Salary Structure', self.salary_structure)
data = self.get_data_for_eval()
for key in ('earnings', 'deductions'):
for struct_row in self._salary_structure_doc.get(key):
amount = self.eval_condition_and_formula(struct_row, data)
if amount and struct_row.statistical_component == 0:
self.update_component_row(struct_row, amount, key)
if key=="earnings" and struct_row.is_flexible_benefit == 1:
self.add_employee_flexi_benefits(struct_row)
additional_components = get_additional_salary_component(self.employee, self.start_date, self.end_date)
if additional_components:
for additional_component in additional_components:
additional_component = frappe._dict(additional_component)
amount = additional_component.amount
overwrite = additional_component.overwrite
key = "earnings"
if additional_component.type == "Deduction":
key = "deductions"
self.update_component_row(frappe._dict(additional_component.struct_row), amount, key, overwrite=overwrite)
self.get_last_payroll_period_benefit()
# Calculate variable_based_on_taxable_salary after all components updated in salary slip
for struct_row in self._salary_structure_doc.get("deductions"):
if struct_row.variable_based_on_taxable_salary == 1 and not struct_row.formula and not struct_row.amount:
tax_detail = self.calculate_variable_based_on_taxable_salary(struct_row.salary_component)
if tax_detail and tax_detail[1]:
self.update_component_row(frappe._dict(tax_detail[0]), tax_detail[1], "deductions", tax_detail[2], tax_detail[3])
def get_last_payroll_period_benefit(self):
payroll_period = get_payroll_period(self.start_date, self.end_date, self.company)
if payroll_period:
# Check for last payroll period
if (getdate(payroll_period.end_date) <= getdate(self.end_date)):
current_flexi_amount = 0
for d in self.get("earnings"):
if d.is_flexible_benefit == 1:
current_flexi_amount += d.amount
last_benefits = get_last_payroll_period_benefits(self.employee, self.start_date, self.end_date,\
current_flexi_amount, payroll_period, self._salary_structure_doc)
if last_benefits:
for last_benefit in last_benefits:
last_benefit = frappe._dict(last_benefit)
amount = last_benefit.amount
self.update_component_row(frappe._dict(last_benefit.struct_row), amount, "earnings")
def add_employee_flexi_benefits(self, struct_row):
if frappe.db.get_value("Salary Component", struct_row.salary_component, "pay_against_benefit_claim") != 1:
benefit_component_amount = get_benefit_component_amount(self.employee, self.start_date, self.end_date, \
struct_row, self._salary_structure_doc, self.total_working_days, self.payroll_frequency)
if benefit_component_amount:
self.update_component_row(struct_row, benefit_component_amount, "earnings")
else:
benefit_claim_amount = get_benefit_claim_amount(self.employee, self.start_date, self.end_date, struct_row.salary_component)
if benefit_claim_amount:
self.update_component_row(struct_row, benefit_claim_amount, "earnings")
def update_component_row(self, struct_row, amount, key, benefit_tax=None, additional_tax=None, overwrite=1):
component_row = None
for d in self.get(key):
if d.salary_component == struct_row.salary_component:
component_row = d
if not component_row:
self.append(key, {
'amount': amount,
'default_amount': amount,
'depends_on_lwp' : struct_row.depends_on_lwp,
'salary_component' : struct_row.salary_component,
'abbr' : struct_row.abbr,
'do_not_include_in_total' : struct_row.do_not_include_in_total,
'is_tax_applicable': struct_row.is_tax_applicable,
'is_flexible_benefit': struct_row.is_flexible_benefit,
'variable_based_on_taxable_salary': struct_row.variable_based_on_taxable_salary,
'is_additional_component': struct_row.is_additional_component,
'tax_on_flexible_benefit': benefit_tax,
'tax_on_additional_salary': additional_tax
})
else:
if overwrite:
component_row.default_amount = amount
component_row.amount = amount
else:
component_row.default_amount += amount
component_row.amount = component_row.default_amount
component_row.tax_on_flexible_benefit = benefit_tax
component_row.tax_on_additional_salary = additional_tax
def eval_condition_and_formula(self, d, data):
try:
condition = d.condition.strip() if d.condition else None
if condition:
if not frappe.safe_eval(condition, self.whitelisted_globals, data):
return None
amount = d.amount
if d.amount_based_on_formula:
formula = d.formula.strip() if d.formula else None
if formula:
amount = frappe.safe_eval(formula, self.whitelisted_globals, data)
if amount:
data[d.abbr] = amount
return amount
except NameError as err:
frappe.throw(_("Name error: {0}".format(err)))
except SyntaxError as err:
frappe.throw(_("Syntax error in formula or condition: {0}".format(err)))
except Exception as e:
frappe.throw(_("Error in formula or condition: {0}".format(e)))
raise
def get_data_for_eval(self):
'''Returns data for evaluating formula'''
data = frappe._dict()
data.update(frappe.get_doc("Salary Structure Assignment",
{"employee": self.employee, "salary_structure": self.salary_structure}).as_dict())
data.update(frappe.get_doc("Employee", self.employee).as_dict())
data.update(self.as_dict())
# set values for components
salary_components = frappe.get_all("Salary Component", fields=["salary_component_abbr"])
for sc in salary_components:
data.setdefault(sc.salary_component_abbr, 0)
for key in ('earnings', 'deductions'):
for d in self.get(key):
data[d.abbr] = d.amount
return data
def get_emp_and_leave_details(self):
'''First time, load all the components from salary structure'''
if self.employee:
self.set("earnings", [])
self.set("deductions", [])
if not self.salary_slip_based_on_timesheet:
self.get_date_details()
self.validate_dates()
joining_date, relieving_date = frappe.db.get_value("Employee", self.employee,
["date_of_joining", "relieving_date"])
self.get_leave_details(joining_date, relieving_date)
struct = self.check_sal_struct(joining_date, relieving_date)
if struct:
self._salary_structure_doc = frappe.get_doc('Salary Structure', struct)
self.salary_slip_based_on_timesheet = self._salary_structure_doc.salary_slip_based_on_timesheet or 0
self.set_time_sheet()
self.pull_sal_struct()
def set_time_sheet(self):
if self.salary_slip_based_on_timesheet:
self.set("timesheets", [])
timesheets = frappe.db.sql(""" select * from `tabTimesheet` where employee = %(employee)s and start_date BETWEEN %(start_date)s AND %(end_date)s and (status = 'Submitted' or
status = 'Billed')""", {'employee': self.employee, 'start_date': self.start_date, 'end_date': self.end_date}, as_dict=1)
for data in timesheets:
self.append('timesheets', {
'time_sheet': data.name,
'working_hours': data.total_hours
})
def get_date_details(self):
if not self.end_date:
date_details = get_start_end_dates(self.payroll_frequency, self.start_date or self.posting_date)
self.start_date = date_details.start_date
self.end_date = date_details.end_date
def check_sal_struct(self, joining_date, relieving_date):
cond = """and sa.employee=%(employee)s and (sa.from_date <= %(start_date)s or
sa.from_date <= %(end_date)s or sa.from_date <= %(joining_date)s)"""
if self.payroll_frequency:
cond += """and ss.payroll_frequency = '%(payroll_frequency)s'""" % {"payroll_frequency": self.payroll_frequency}
st_name = frappe.db.sql("""
select sa.salary_structure
from `tabSalary Structure Assignment` sa join `tabSalary Structure` ss
where sa.salary_structure=ss.name
and sa.docstatus = 1 and ss.docstatus = 1 and ss.is_active ='Yes' %s
order by sa.from_date desc
limit 1
""" %cond, {'employee': self.employee, 'start_date': self.start_date,
'end_date': self.end_date, 'joining_date': joining_date})
if st_name:
self.salary_structure = st_name[0][0]
return self.salary_structure
else:
self.salary_structure = None
frappe.msgprint(_("No active or default Salary Structure found for employee {0} for the given dates")
.format(self.employee), title=_('Salary Structure Missing'))
def pull_sal_struct(self):
from erpnext.hr.doctype.salary_structure.salary_structure import make_salary_slip
if self.salary_slip_based_on_timesheet:
self.salary_structure = self._salary_structure_doc.name
self.hour_rate = self._salary_structure_doc.hour_rate
self.total_working_hours = sum([d.working_hours or 0.0 for d in self.timesheets]) or 0.0
wages_amount = self.hour_rate * self.total_working_hours
self.add_earning_for_hourly_wages(self, self._salary_structure_doc.salary_component, wages_amount)
make_salary_slip(self._salary_structure_doc.name, self)
def process_salary_structure(self):
'''Calculate salary after salary structure details have been updated'''
if not self.salary_slip_based_on_timesheet:
self.get_date_details()
self.pull_emp_details()
self.get_leave_details()
self.calculate_net_pay()
def add_earning_for_hourly_wages(self, doc, salary_component, amount):
row_exists = False
for row in doc.earnings:
if row.salary_component == salary_component:
row.amount = amount
row_exists = True
break
if not row_exists:
wages_row = {
"salary_component": salary_component,
"abbr": frappe.db.get_value("Salary Component", salary_component, "salary_component_abbr"),
"amount": self.hour_rate * self.total_working_hours
}
doc.append('earnings', wages_row)
def pull_emp_details(self):
emp = frappe.db.get_value("Employee", self.employee, ["bank_name", "bank_ac_no"], as_dict=1)
if emp:
self.bank_name = emp.bank_name
self.bank_account_no = emp.bank_ac_no
def get_leave_details(self, joining_date=None, relieving_date=None, lwp=None):
if not joining_date:
joining_date, relieving_date = frappe.db.get_value("Employee", self.employee,
["date_of_joining", "relieving_date"])
holidays = self.get_holidays_for_employee(self.start_date, self.end_date)
working_days = date_diff(self.end_date, self.start_date) + 1
actual_lwp = self.calculate_lwp(holidays, working_days)
if not cint(frappe.db.get_value("HR Settings", None, "include_holidays_in_total_working_days")):
working_days -= len(holidays)
if working_days < 0:
frappe.throw(_("There are more holidays than working days this month."))
if not lwp:
lwp = actual_lwp
elif lwp != actual_lwp:
frappe.msgprint(_("Leave Without Pay does not match with approved Leave Application records"))
self.total_working_days = working_days
self.leave_without_pay = lwp
payment_days = flt(self.get_payment_days(joining_date, relieving_date)) - flt(lwp)
self.payment_days = payment_days > 0 and payment_days or 0
def get_payment_days(self, joining_date, relieving_date):
start_date = getdate(self.start_date)
if joining_date:
if getdate(self.start_date) <= joining_date <= getdate(self.end_date):
start_date = joining_date
elif joining_date > getdate(self.end_date):
return
end_date = getdate(self.end_date)
if relieving_date:
if getdate(self.start_date) <= relieving_date <= getdate(self.end_date):
end_date = relieving_date
elif relieving_date < getdate(self.start_date):
frappe.throw(_("Employee relieved on {0} must be set as 'Left'")
.format(relieving_date))
payment_days = date_diff(end_date, start_date) + 1
if not cint(frappe.db.get_value("HR Settings", None, "include_holidays_in_total_working_days")):
holidays = self.get_holidays_for_employee(start_date, end_date)
payment_days -= len(holidays)
return payment_days
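# illustrative figures: for a slip from 2019-01-01 to 2019-01-31 with 4 holidays and
# "include holidays in total working days" disabled, payment days = 31 - 4 = 27;
# leave without pay is deducted separately in get_leave_details()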
def get_holidays_for_employee(self, start_date, end_date):
holiday_list = get_holiday_list_for_employee(self.employee)
holidays = frappe.db.sql_list('''select holiday_date from `tabHoliday`
where
parent=%(holiday_list)s
and holiday_date >= %(start_date)s
and holiday_date <= %(end_date)s''', {
"holiday_list": holiday_list,
"start_date": start_date,
"end_date": end_date
})
holidays = [cstr(i) for i in holidays]
return holidays
def calculate_lwp(self, holidays, working_days):
lwp = 0
holidays = "','".join(holidays)
for d in range(working_days):
dt = add_days(cstr(getdate(self.start_date)), d)
leave = frappe.db.sql("""
select t1.name, t1.half_day
from `tabLeave Application` t1, `tabLeave Type` t2
where t2.name = t1.leave_type
and t2.is_lwp = 1
and t1.docstatus = 1
and t1.employee = %(employee)s
and CASE WHEN t2.include_holiday != 1 THEN %(dt)s not in ('{0}') and %(dt)s between from_date and to_date and ifnull(t1.salary_slip, '') = ''
WHEN t2.include_holiday THEN %(dt)s between from_date and to_date and ifnull(t1.salary_slip, '') = ''
END
""".format(holidays), {"employee": self.employee, "dt": dt})
if leave:
lwp = cint(leave[0][1]) and (lwp + 0.5) or (lwp + 1)
return lwp
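# e.g. one approved half-day LWP leave in the period adds 0.5 to lwp, a full day adds 1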
def check_existing(self):
if not self.salary_slip_based_on_timesheet:
ret_exist = frappe.db.sql("""select name from `tabSalary Slip`
where start_date = %s and end_date = %s and docstatus != 2
and employee = %s and name != %s""",
(self.start_date, self.end_date, self.employee, self.name))
if ret_exist:
employee = self.employee
self.employee = ''
frappe.throw(_("Salary Slip of employee {0} already created for this period").format(employee))
else:
for data in self.timesheets:
if frappe.db.get_value('Timesheet', data.time_sheet, 'status') == 'Payrolled':
frappe.throw(_("Salary Slip of employee {0} already created for time sheet {1}").format(self.employee, data.time_sheet))
def sum_components(self, component_type, total_field, precision):
joining_date, relieving_date = frappe.db.get_value("Employee", self.employee,
["date_of_joining", "relieving_date"])
if not relieving_date:
relieving_date = getdate(self.end_date)
if not joining_date:
frappe.throw(_("Please set the Date Of Joining for employee {0}").format(frappe.bold(self.employee_name)))
for d in self.get(component_type):
if (self.salary_structure and
cint(d.depends_on_lwp) and
(not
self.salary_slip_based_on_timesheet or
getdate(self.start_date) < joining_date or
getdate(self.end_date) > relieving_date
)):
d.amount = rounded(
(flt(d.default_amount, precision) * flt(self.payment_days)
/ cint(self.total_working_days)), self.precision("amount", component_type)
)
elif not self.payment_days and not self.salary_slip_based_on_timesheet and \
cint(d.depends_on_lwp):
d.amount = 0
elif not d.amount:
d.amount = d.default_amount
if not d.do_not_include_in_total:
self.set(total_field, self.get(total_field) + flt(d.amount, precision))
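# illustrative figures: a component with default_amount 30000 that depends on LWP,
# with 27 payment days out of 30 total working days, is prorated to
# 30000 * 27 / 30 = 27000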
def calculate_net_pay(self):
if self.salary_structure:
self.calculate_component_amounts()
disable_rounded_total = cint(frappe.db.get_value("Global Defaults", None, "disable_rounded_total"))
precision = frappe.defaults.get_global_default("currency_precision")
self.total_deduction = 0
self.gross_pay = 0
self.sum_components('earnings', 'gross_pay', precision)
self.sum_components('deductions', 'total_deduction', precision)
self.set_loan_repayment()
self.net_pay = flt(self.gross_pay) - (flt(self.total_deduction) + flt(self.total_loan_repayment))
self.rounded_total = rounded(self.net_pay,
self.precision("net_pay") if disable_rounded_total else 0)
if self.net_pay < 0:
frappe.throw(_("Net Pay cannnot be negative"))
def set_loan_repayment(self):
self.set('loans', [])
self.total_loan_repayment = 0
self.total_interest_amount = 0
self.total_principal_amount = 0
for loan in self.get_loan_details():
self.append('loans', {
'loan': loan.name,
'total_payment': loan.total_payment,
'interest_amount': loan.interest_amount,
'principal_amount': loan.principal_amount,
'loan_account': loan.loan_account,
'interest_income_account': loan.interest_income_account
})
self.total_loan_repayment += loan.total_payment
self.total_interest_amount += loan.interest_amount
self.total_principal_amount += loan.principal_amount
def get_loan_details(self):
return frappe.db.sql("""select rps.principal_amount, rps.interest_amount, l.name,
rps.total_payment, l.loan_account, l.interest_income_account
from
`tabRepayment Schedule` as rps, `tabLoan` as l
where
l.name = rps.parent and rps.payment_date between %s and %s and
l.repay_from_salary = 1 and l.docstatus = 1 and l.applicant = %s""",
(self.start_date, self.end_date, self.employee), as_dict=True) or []
def on_submit(self):
if self.net_pay < 0:
frappe.throw(_("Net Pay cannot be less than 0"))
else:
self.set_status()
self.update_status(self.name)
self.update_salary_slip_in_additional_salary()
if (frappe.db.get_single_value("HR Settings", "email_salary_slip_to_employee")) and not frappe.flags.via_payroll_entry:
self.email_salary_slip()
def on_cancel(self):
self.set_status()
self.update_status()
self.update_salary_slip_in_additional_salary()
def on_trash(self):
from frappe.model.naming import revert_series_if_last
revert_series_if_last(self.series, self.name)
def update_salary_slip_in_additional_salary(self):
salary_slip = self.name if self.docstatus==1 else None
frappe.db.sql("""
update `tabAdditional Salary` set salary_slip=%s
where employee=%s and payroll_date between %s and %s and docstatus=1
""", (salary_slip, self.employee, self.start_date, self.end_date))
def email_salary_slip(self):
receiver = frappe.db.get_value("Employee", self.employee, "prefered_email")
if receiver:
email_args = {
"recipients": [receiver],
"message": _("Please see attachment"),
"subject": 'Salary Slip - from {0} to {1}'.format(self.start_date, self.end_date),
"attachments": [frappe.attach_print(self.doctype, self.name, file_name=self.name)],
"reference_doctype": self.doctype,
"reference_name": self.name
}
if not frappe.flags.in_test:
enqueue(method=frappe.sendmail, queue='short', timeout=300, is_async=True, **email_args)
else:
frappe.sendmail(**email_args)
else:
msgprint(_("{0}: Employee email not found, hence email not sent").format(self.employee_name))
def update_status(self, salary_slip=None):
for data in self.timesheets:
if data.time_sheet:
timesheet = frappe.get_doc('Timesheet', data.time_sheet)
timesheet.salary_slip = salary_slip
timesheet.flags.ignore_validate_update_after_submit = True
timesheet.set_status()
timesheet.save()
def set_status(self, status=None):
'''Get and update status'''
if not status:
status = self.get_status()
self.db_set("status", status)
def get_status(self):
if self.docstatus == 0:
status = "Draft"
elif self.docstatus == 1:
status = "Submitted"
elif self.docstatus == 2:
status = "Cancelled"
return status
def calculate_variable_based_on_taxable_salary(self, tax_component):
payroll_period = get_payroll_period(self.start_date, self.end_date, self.company)
if not payroll_period:
frappe.msgprint(_("Start and end dates not in a valid Payroll Period, cannot calculate {0}.")
.format(tax_component))
return False
if payroll_period.end_date <= getdate(self.end_date):
if not self.deduct_tax_for_unsubmitted_tax_exemption_proof or not\
self.deduct_tax_for_unclaimed_employee_benefits:
				frappe.throw(_("You have to Deduct Tax for Unsubmitted Tax Exemption Proof and Unclaimed "
					"Employee Benefits in the last Salary Slip of Payroll Period"))
# calc prorata tax to be applied
return self.calculate_variable_tax(tax_component, payroll_period)
def calculate_variable_tax(self, tax_component, payroll_period):
annual_taxable_earning, period_factor = 0, 0
pro_rata_tax_paid, additional_tax_paid, benefit_tax_paid = 0, 0, 0
unclaimed_earning, unclaimed_benefit, additional_income = 0, 0, 0
# get taxable_earning, additional_income in this slip
taxable_earning = self.get_taxable_earnings()
if self.deduct_tax_for_unclaimed_employee_benefits:
# get all untaxed benefits till date, pass amount to be taxed by later methods
unclaimed_benefit = self.calculate_unclaimed_taxable_benefit(payroll_period)
# flexi's excluded from monthly tax, add flexis in this slip to unclaimed_benefit
unclaimed_benefit += self.get_taxable_earnings(only_flexi=True)["taxable_earning"]
if self.deduct_tax_for_unsubmitted_tax_exemption_proof:
# do not consider exemption, calc tax to be paid for the period till date
# considering prorata taxes paid and proofs submitted
unclaimed_earning = self.calculate_unclaimed_taxable_earning(payroll_period, tax_component)
earning_in_period = taxable_earning["taxable_earning"] + unclaimed_earning
period_factor = self.get_period_factor(payroll_period.start_date, payroll_period.end_date,
payroll_period.start_date, self.end_date)
annual_taxable_earning = earning_in_period * period_factor
additional_income += self.get_total_additional_income(payroll_period.start_date)
else:
# consider exemption declaration, find annual_earning by monthly taxable salary
period_factor = self.get_period_factor(payroll_period.start_date, payroll_period.end_date)
annual_earning = taxable_earning["taxable_earning"] * period_factor
exemption_amount = 0
if frappe.db.exists("Employee Tax Exemption Declaration", {"employee": self.employee,
"payroll_period": payroll_period.name, "docstatus": 1}):
exemption_amount = frappe.db.get_value("Employee Tax Exemption Declaration",
{"employee": self.employee, "payroll_period": payroll_period.name, "docstatus": 1},
"total_exemption_amount")
annual_taxable_earning = annual_earning - exemption_amount
if self.deduct_tax_for_unclaimed_employee_benefits or self.deduct_tax_for_unsubmitted_tax_exemption_proof:
tax_detail = self.get_tax_paid_in_period(payroll_period, tax_component)
if tax_detail:
pro_rata_tax_paid = tax_detail["total_tax_paid"] - tax_detail["additional_tax"] - tax_detail["benefit_tax"]
additional_tax_paid = tax_detail["additional_tax"]
benefit_tax_paid = tax_detail["benefit_tax"]
# add any additional income in this slip
additional_income += taxable_earning["additional_income"]
args = {"payroll_period": payroll_period.name, "tax_component": tax_component,
"annual_taxable_earning": annual_taxable_earning, "period_factor": period_factor,
"unclaimed_benefit": unclaimed_benefit, "additional_income": additional_income,
"pro_rata_tax_paid": pro_rata_tax_paid, "benefit_tax_paid": benefit_tax_paid,
"additional_tax_paid": additional_tax_paid}
return self.calculate_tax(args)
def calculate_unclaimed_taxable_benefit(self, payroll_period):
total_benefit, total_benefit_claim = 0, 0
# get total sum of benefits paid
sum_benefit = frappe.db.sql("""select sum(sd.amount) from `tabSalary Detail` sd join
`tabSalary Slip` ss on sd.parent=ss.name where sd.parentfield='earnings'
and sd.is_tax_applicable=1 and is_flexible_benefit=1 and ss.docstatus=1
and ss.employee='{0}' and ss.start_date between '{1}' and '{2}' and
ss.end_date between '{1}' and '{2}'""".format(self.employee,
payroll_period.start_date, self.start_date))
if sum_benefit and sum_benefit[0][0]:
total_benefit = sum_benefit[0][0]
# get total benefits claimed
sum_benefit_claim = frappe.db.sql("""select sum(claimed_amount) from
`tabEmployee Benefit Claim` where docstatus=1 and employee='{0}' and claim_date
between '{1}' and '{2}'""".format(self.employee, payroll_period.start_date, self.end_date))
if sum_benefit_claim and sum_benefit_claim[0][0]:
total_benefit_claim = sum_benefit_claim[0][0]
return total_benefit - total_benefit_claim
def calculate_unclaimed_taxable_earning(self, payroll_period, tax_component):
total_taxable_earning, total_exemption_amount = 0, 0
# calc total taxable amount in period
sum_taxable_earning = frappe.db.sql("""select sum(sd.amount) from `tabSalary Detail` sd join
`tabSalary Slip` ss on sd.parent=ss.name where sd.parentfield='earnings'
and sd.is_tax_applicable=1 and is_additional_component=0 and is_flexible_benefit=0
and ss.docstatus=1 and ss.employee='{0}' and ss.start_date between '{1}' and '{2}'
and ss.end_date between '{1}' and '{2}'""".format(self.employee,
payroll_period.start_date, self.start_date))
if sum_taxable_earning and sum_taxable_earning[0][0]:
total_taxable_earning = sum_taxable_earning[0][0]
# add up total Proof Submission
sum_exemption = frappe.db.sql("""select sum(exemption_amount) from
`tabEmployee Tax Exemption Proof Submission` where docstatus=1 and employee='{0}' and
payroll_period='{1}' and submission_date between '{2}' and '{3}'""".format(self.employee,
payroll_period.name, payroll_period.start_date, self.end_date))
if sum_exemption and sum_exemption[0][0]:
total_exemption_amount = sum_exemption[0][0]
total_taxable_earning -= total_exemption_amount
return total_taxable_earning
def get_total_additional_income(self, from_date):
total_additional_pay = 0
sum_additional_earning = frappe.db.sql("""select sum(sd.amount) from `tabSalary Detail` sd join
`tabSalary Slip` ss on sd.parent=ss.name where sd.parentfield='earnings'
and sd.is_tax_applicable=1 and is_additional_component=1 and is_flexible_benefit=0
and ss.docstatus=1 and ss.employee='{0}' and ss.start_date between '{1}' and '{2}'
and ss.end_date between '{1}' and '{2}'""".format(self.employee,
from_date, self.start_date))
if sum_additional_earning and sum_additional_earning[0][0]:
total_additional_pay = sum_additional_earning[0][0]
return total_additional_pay
def get_tax_paid_in_period(self, payroll_period, tax_component, only_total=False):
# find total_tax_paid, tax paid for benefit, additional_salary
sum_tax_paid = frappe.db.sql("""select sum(sd.amount), sum(tax_on_flexible_benefit),
sum(tax_on_additional_salary) from `tabSalary Detail` sd join `tabSalary Slip`
ss on sd.parent=ss.name where sd.parentfield='deductions' and sd.salary_component='{3}'
and sd.variable_based_on_taxable_salary=1 and ss.docstatus=1 and ss.employee='{0}'
and ss.start_date between '{1}' and '{2}' and ss.end_date between '{1}' and
'{2}'""".format(self.employee, payroll_period.start_date, self.start_date, tax_component))
if sum_tax_paid and sum_tax_paid[0][0]:
return {'total_tax_paid': sum_tax_paid[0][0], 'benefit_tax':sum_tax_paid[0][1], 'additional_tax': sum_tax_paid[0][2]}
def get_taxable_earnings(self, include_flexi=0, only_flexi=0):
taxable_earning = 0
additional_income = 0
for earning in self.earnings:
if earning.is_tax_applicable:
if earning.is_additional_component:
additional_income += earning.amount
continue
if only_flexi:
if earning.is_tax_applicable and earning.is_flexible_benefit:
taxable_earning += earning.amount
continue
if include_flexi:
if earning.is_tax_applicable or (earning.is_tax_applicable and earning.is_flexible_benefit):
taxable_earning += earning.amount
else:
if earning.is_tax_applicable and not earning.is_flexible_benefit:
taxable_earning += earning.amount
return {"taxable_earning": taxable_earning, "additional_income": additional_income}
def calculate_tax(self, args):
tax_amount, benefit_tax, additional_tax = 0, 0, 0
annual_taxable_earning = args.get("annual_taxable_earning")
benefit_to_tax = args.get("unclaimed_benefit")
additional_income = args.get("additional_income")
# Get tax calc by period
annual_tax = self.calculate_tax_by_tax_slab(args.get("payroll_period"), annual_taxable_earning)
# Calc prorata tax
tax_amount = annual_tax / args.get("period_factor")
# Benefit is a part of Salary Structure, add the tax diff, update annual_tax
if benefit_to_tax > 0:
annual_taxable_earning += benefit_to_tax
annual_tax_with_benefit_income = self.calculate_tax_by_tax_slab(
args.get("payroll_period"), annual_taxable_earning)
benefit_tax = annual_tax_with_benefit_income - annual_tax - args.get("benefit_tax_paid")
tax_amount += benefit_tax
annual_tax = annual_tax_with_benefit_income
# find the annual tax diff caused by additional_income, add to tax_amount
if additional_income > 0:
annual_tax_with_additional_income = self.calculate_tax_by_tax_slab(
args.get("payroll_period"), annual_taxable_earning + additional_income)
additional_tax = annual_tax_with_additional_income - annual_tax - args.get("additional_tax_paid")
tax_amount += additional_tax
# less paid taxes
if args.get("pro_rata_tax_paid"):
tax_amount -= args.get("pro_rata_tax_paid")
struct_row = self.get_salary_slip_row(args.get("tax_component"))
return [struct_row, tax_amount, benefit_tax, additional_tax]
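	# Illustrative numbers (made up for clarity): with an annual_taxable_earning of
	# 600000, an annual_tax of 32500 and a period_factor of 12, the prorata tax for
	# this slip is 32500/12, about 2708. An unclaimed benefit of 20000 raises the
	# annual figure to 620000; the resulting extra tax (20000*0.20 = 4000 if it falls
	# in a 20% slab, less any benefit tax already paid) is added to this slip in full
	# rather than being prorated.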
def calculate_tax_by_tax_slab(self, payroll_period, annual_earning):
payroll_period_obj = frappe.get_doc("Payroll Period", payroll_period)
data = self.get_data_for_eval()
taxable_amount = 0
for slab in payroll_period_obj.taxable_salary_slabs:
if slab.condition and not self.eval_tax_slab_condition(slab.condition, data):
continue
if not slab.to_amount and annual_earning > slab.from_amount:
taxable_amount += (annual_earning - slab.from_amount) * slab.percent_deduction *.01
continue
if annual_earning > slab.from_amount and annual_earning < slab.to_amount:
taxable_amount += (annual_earning - slab.from_amount) * slab.percent_deduction *.01
elif annual_earning > slab.from_amount and annual_earning > slab.to_amount:
taxable_amount += (slab.to_amount - slab.from_amount) * slab.percent_deduction * .01
return taxable_amount
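	# Illustrative slab arithmetic (values made up): with slabs 0-250000 at 0%,
	# 250000-500000 at 5% and 500000 upwards at 20% (to_amount = 0), an annual_earning
	# of 600000 yields (500000-250000)*0.05 + (600000-500000)*0.20 = 32500.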
def eval_tax_slab_condition(self, condition, data):
try:
condition = condition.strip()
if condition:
return frappe.safe_eval(condition, self.whitelisted_globals, data)
except NameError as err:
frappe.throw(_("Name error: {0}".format(err)))
except SyntaxError as err:
frappe.throw(_("Syntax error in condition: {0}".format(err)))
except Exception as e:
frappe.throw(_("Error in formula or condition: {0}".format(e)))
raise
def get_period_factor(self, period_start, period_end, start_date=None, end_date=None):
# TODO if both deduct checked update the factor to make tax consistent
payroll_days = date_diff(period_end, period_start) + 1
if start_date and end_date:
salary_days = date_diff(end_date, start_date) + 1
return flt(payroll_days)/flt(salary_days)
# if period configured for a year and monthly frequency return 12 to make tax calc consistent
if 360 <= payroll_days <= 370 and self.payroll_frequency == "Monthly":
return 12
salary_days = date_diff(self.end_date, self.start_date) + 1
return flt(payroll_days)/flt(salary_days)
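	# Worked example (values made up): a 365 day payroll period with monthly slips
	# gives a factor of 12, so a monthly taxable earning of 50000 is annualised to
	# 600000 before the slabs are applied. In the unsubmitted-proof branch, 90 elapsed
	# days give a factor of 365/90, roughly 4.06, applied to the earning accumulated
	# so far in the period.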
def get_salary_slip_row(self, salary_component):
component = frappe.get_doc("Salary Component", salary_component)
# Data for update_component_row
struct_row = {}
struct_row['depends_on_lwp'] = component.depends_on_lwp
struct_row['salary_component'] = component.name
struct_row['abbr'] = component.salary_component_abbr
struct_row['do_not_include_in_total'] = component.do_not_include_in_total
struct_row['is_tax_applicable'] = component.is_tax_applicable
struct_row['is_flexible_benefit'] = component.is_flexible_benefit
struct_row['variable_based_on_taxable_salary'] = component.variable_based_on_taxable_salary
return struct_row
def unlink_ref_doc_from_salary_slip(ref_no):
linked_ss = frappe.db.sql_list("""select name from `tabSalary Slip`
where journal_entry=%s and docstatus < 2""", (ref_no))
if linked_ss:
for ss in linked_ss:
ss_doc = frappe.get_doc("Salary Slip", ss)
frappe.db.set_value("Salary Slip", ss_doc.name, "journal_entry", "")
| gpl-3.0 | 4,243,228,669,414,350,300 | 42.415212 | 176 | 0.711881 | false |
SISC2014/JobAnalysis | MongoRetrieval/src/EfficiencyHistogram.py | 1 | 6076 | '''
Created on Jun 19, 2014
@author: Erik Halperin
List of Keys
_id
JobStartDate
Requirements
TransferInput
TotalSuspensions
LastJobStatus
BufferBlockSize
OrigMaxHosts
RequestMemory
WantRemoteSyscalls
LastHoldReasonCode
ExitStatus
Args
JobFinishedHookDone
JobCurrentStartDate
CompletionDate
JobLeaseDuration
Err
RemoteWallClockTime
JobUniverse
RequestCpus
RemoveReason
StreamErr
Rank
WantRemoteIO
LocalSysCpu
UsedOCWrapper
CumulativeSlotTime
TransferIn
MachineAttrCpus0
CondorPlatform
CurrentTime
ExitReason
StreamOut
WantCheckpoint
GlobalJobId
TransferInputSizeMB
JobStatus
LastPublicClaimId
MemoryUsage
NumSystemHolds
TransferOutput
PeriodicRemove
NumShadowStarts
LastHoldReasonSubCode
LastSuspensionTime
ShouldTransferFiles
QDate
RemoteSysCpu
ImageSize_RAW
LastRemoteHost
CondorVersion
DiskUsage_RAW
PeriodicRelease
NumCkpts_RAW
JobCurrentStartExecutingDate
ProjectName
CoreSize
RemoteUserCpu
BytesSent
Owner
BytesRecvd
ExitCode
NumJobStarts
ExecutableSize_RAW
Notification
ExecutableSize
Environment
StartdPrincipal
RootDir
MinHosts
CumulativeSuspensionTime
JOBGLIDEIN_ResourceName
ProcId
MATCH_EXP_JOBGLIDEIN_ResourceName
OnExitRemove
User
UserLog
CommittedSuspensionTime
NumRestarts
JobCoreDumped
Cmd
NumJobMatches
DiskUsage
LastRemotePool
CommittedSlotTime
ResidentSetSize
WhenToTransferOutput
ExitBySignal
Out
RequestDisk
ImageSize
NumCkpts
LastJobLeaseRenewal
MachineAttrSlotWeight0
ResidentSetSize_RAW
JobPrio
JobRunCount
PeriodicHold
ClusterId
NiceUser
MyType
LocalUserCpu
BufferSize
LastHoldReason
CurrentHosts
LeaveJobInQueue
OnExitHold
EnteredCurrentStatus
MaxHosts
CommittedTime
LastMatchTime
In
JobNotification
'''
import re
import matplotlib.pyplot as plt
from pymongo import MongoClient
#takes a list of dictionaries and returns a list of floats
def parseList(l):
l = map(str, l)
newlist = []
for k in l:
newlist.append(re.sub('[RemoteWallClockTimeUsrpu_id\"\'{}: ]', '', k))
newlist = map(float, newlist)
return list(newlist)
#returns a list of dictionaries
#item is from list of keys, username: "[email protected]", cluster: "123456", site: "phys.ucconn.edu",
#coll: MongoDB collection
#username/cluster/site may be None, in which case they will not be used
#item should be _id
def dbFindItemFromUser(item, username, cluster, site, coll):
mylist = []
rgx = "$regex"
if(username != None):
username = '\"' + username + '\"'
dicU = {'User': username }
else:
dicU = {}
if(cluster != None):
dicC = { 'ClusterId': cluster }
else:
dicC = {}
if(site != None):
dicS = { 'LastRemoteHost': { rgx: site } }
else:
dicS = {}
dicU.update(dicC)
dicU.update(dicS)
pr = { item: 1, '_id': 0 }
for condor_history in coll.find(dicU, pr):
mylist.append(condor_history)
return mylist
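# Example call (illustrative; the user name and collection object are hypothetical):
#   raw = dbFindItemFromUser("RemoteUserCpu", "[email protected]", None, None, coll)
#   cpu_seconds = parseList(raw)  # list of floats ready for plotting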
#returns a list of dictionaries
#username and coll are same as above
def dbFindIdFromUser(username, coll):
mylist = []
username = '\"' + username + '\"'
cr = { 'User': username }
pr = { '_id': 1 }
for condor_history in coll.find(cr, pr):
mylist.append(condor_history)
return mylist
#creates a scatterplot of two items
def plotScatter(item1, item2, username, cluster, coll, xlab, ylab, title):
    lst1 = parseList(dbFindItemFromUser(item1, username, cluster, None, coll))
    lst2 = parseList(dbFindItemFromUser(item2, username, cluster, None, coll))
plt.plot(lst1, lst2, 'bo')
plt.xlabel(xlab)
plt.ylabel(ylab)
plt.title(title)
plt.show()
#creates a histogram of a list
#l: list to plot, bs: number of bins
def plotHist(l, bs, xlab, ylab, title):
plt.hist(l, bins=bs)
plt.title(title)
plt.xlabel(xlab)
plt.ylabel(ylab)
plt.show()
def getEfficiency(username, cluster, site, coll):
ruc = parseList(dbFindItemFromUser("RemoteUserCpu", username, cluster, site, coll))
rwct = parseList(dbFindItemFromUser("RemoteWallClockTime", username, cluster, site, coll))
efflist = []
totcount = 0
goodcount = 0 #certain efficiency values are >1 due to a condor error. these values are discarded
zerocount = 0 #testing possible condor bug where RemoteUserCpu is 0 but RemoteWallClockTime is quite large
for x,y in zip(ruc, rwct):
if(y == 0):
totcount += 1
elif(x/y > 1):
totcount += 1
else:
if(x == 0):
zerocount +=1
efflist.append(x/y)
totcount += 1
goodcount +=1
return [efflist, goodcount, totcount]
#Given at least one input for username/cluster/site, creates a histogram of the RemoteUserCpu/RemoteWallClockTime for the results
def efficiencyHistogram(username, cluster, site, coll, bins, xlab, ylab, title):
retlist = getEfficiency(username, cluster, site, coll) #0: efflist, 1: goodcount, 2: totcount
print("Jobs Plotted:", retlist[1], "/", retlist[2])
plotHist(retlist[0], bins, xlab, ylab, title)
def fourEffHists(lst1, lst2, lst3, lst4, lab1, lab2, lab3, lab4, bs, xlab, ylab, title):
plt.hist(lst1, bins=bs, histtype='stepfilled', label=lab1)
plt.hist(lst2, bins=bs, histtype='stepfilled', label=lab2)
plt.hist(lst3, bins=bs, histtype='stepfilled', label=lab3)
plt.hist(lst4, bins=bs, histtype='stepfilled', label=lab4)
plt.title(title)
plt.xlabel(xlab)
plt.ylabel(ylab)
plt.legend()
plt.show()
def mainEH(host, port):
client = MongoClient(host, port)
db = client.condor_history
coll = db.history_records
#sites: uc.mwt2.org, phys.uconn.edu, hpc.smu.edu, usatlas.bnl.gov
#names (@login01.osgconnect.net): lfzhao, sthapa, echism, wcatino, bamitchell
str_name = "[email protected]"
efficiencyHistogram(str_name, None, None, coll, 75, "UserCPU/WallClockTime", "Frequency", "Efficiencies for " + str_name)
mainEH('mc.mwt2.org', 27017) | mit | 4,049,639,096,715,589,000 | 22.46332 | 129 | 0.697334 | false |
openprocurement/openprocurement.edge | openprocurement/edge/views/auctions.py | 1 | 7066 | # -*- coding: utf-8 -*-
from functools import partial
from openprocurement.edge.utils import (
context_unpack,
decrypt,
encrypt,
APIResource,
json_view
)
from openprocurement.edge.utils import eaopresource
from openprocurement.edge.design import (
by_dateModified_view_ViewDefinition,
real_by_dateModified_view_ViewDefinition,
test_by_dateModified_view_ViewDefinition,
by_local_seq_view_ViewDefinition,
real_by_local_seq_view_ViewDefinition,
test_by_local_seq_view_ViewDefinition,
)
from openprocurement.edge.design import AUCTION_FIELDS as FIELDS
VIEW_MAP = {
u'': real_by_dateModified_view_ViewDefinition('auctions'),
u'test': test_by_dateModified_view_ViewDefinition('auctions'),
u'_all_': by_dateModified_view_ViewDefinition('auctions'),
}
CHANGES_VIEW_MAP = {
u'': real_by_local_seq_view_ViewDefinition('auctions'),
u'test': test_by_local_seq_view_ViewDefinition('auctions'),
u'_all_': by_local_seq_view_ViewDefinition('auctions'),
}
FEED = {
u'dateModified': VIEW_MAP,
u'changes': CHANGES_VIEW_MAP,
}
@eaopresource(name='Auctions',
path='/auctions',
description="Open Contracting compatible data exchange format. See http://ocds.open-contracting.org/standard/r/master/#auction for more info")
class AuctionsResource(APIResource):
def __init__(self, request, context):
super(AuctionsResource, self).__init__(request, context)
self.server = request.registry.couchdb_server
self.update_after = request.registry.update_after
@json_view()
def get(self):
"""Auctions List
Get Auctions List
----------------
Example request to get auctions list:
.. sourcecode:: http
GET /auctions HTTP/1.1
Host: example.com
Accept: application/json
This is what one should expect in response:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"data": [
{
"id": "64e93250be76435397e8c992ed4214d1",
"dateModified": "2014-10-27T08:06:58.158Z"
}
]
}
"""
# http://wiki.apache.org/couchdb/HTTP_view_API#Querying_Options
params = {}
pparams = {}
fields = self.request.params.get('opt_fields', '')
if fields:
params['opt_fields'] = fields
pparams['opt_fields'] = fields
fields = fields.split(',')
view_fields = fields + ['dateModified', 'id']
limit = self.request.params.get('limit', '')
if limit:
params['limit'] = limit
pparams['limit'] = limit
limit = int(limit) if limit.isdigit() and 1000 >= int(limit) > 0 else 100
descending = bool(self.request.params.get('descending'))
offset = self.request.params.get('offset', '')
if descending:
params['descending'] = 1
else:
pparams['descending'] = 1
feed = self.request.params.get('feed', '')
view_map = FEED.get(feed, VIEW_MAP)
changes = view_map is CHANGES_VIEW_MAP
if feed and feed in FEED:
params['feed'] = feed
pparams['feed'] = feed
mode = self.request.params.get('mode', '')
if mode and mode in view_map:
params['mode'] = mode
pparams['mode'] = mode
view_limit = limit + 1 if offset else limit
if changes:
if offset:
view_offset = decrypt(self.server.uuid, self.db.name, offset)
if view_offset and view_offset.isdigit():
view_offset = int(view_offset)
else:
self.request.errors.add('params', 'offset', 'Offset expired/invalid')
self.request.errors.status = 404
return
if not offset:
view_offset = 'now' if descending else 0
else:
if offset:
view_offset = offset
else:
view_offset = '9' if descending else ''
list_view = view_map.get(mode, view_map[u''])
if self.update_after:
view = partial(list_view, self.db, limit=view_limit, startkey=view_offset, descending=descending, stale='update_after')
else:
view = partial(list_view, self.db, limit=view_limit, startkey=view_offset, descending=descending)
if fields:
if not changes and set(fields).issubset(set(FIELDS)):
results = [
(dict([(i, j) for i, j in x.value.items() + [('id', x.id), ('dateModified', x.key)] if i in view_fields]), x.key)
for x in view()
]
elif changes and set(fields).issubset(set(FIELDS)):
results = [
(dict([(i, j) for i, j in x.value.items() + [('id', x.id)] if i in view_fields]), x.key)
for x in view()
]
elif fields:
self.LOGGER.info('Used custom fields for auctions list: {}'.format(','.join(sorted(fields))),
extra=context_unpack(self.request, {'MESSAGE_ID': 'auction_list_custom'}))
results = [
(dict([(k, j) for k, j in i[u'doc'].items() if k in view_fields]), i.key)
for i in view(include_docs=True)
]
else:
results = [
({'id': i.id, 'dateModified': i.value['dateModified']} if changes else {'id': i.id, 'dateModified': i.key}, i.key)
for i in view()
]
if results:
params['offset'], pparams['offset'] = results[-1][1], results[0][1]
if offset and view_offset == results[0][1]:
results = results[1:]
elif offset and view_offset != results[0][1]:
results = results[:limit]
params['offset'], pparams['offset'] = results[-1][1], view_offset
results = [i[0] for i in results]
if changes:
params['offset'] = encrypt(self.server.uuid, self.db.name, params['offset'])
pparams['offset'] = encrypt(self.server.uuid, self.db.name, pparams['offset'])
else:
params['offset'] = offset
pparams['offset'] = offset
data = {
'data': results,
'next_page': {
"offset": params['offset'],
"path": self.request.route_path('Auctions', _query=params),
"uri": self.request.route_url('Auctions', _query=params)
}
}
if descending or offset:
data['prev_page'] = {
"offset": pparams['offset'],
"path": self.request.route_path('Auctions', _query=pparams),
"uri": self.request.route_url('Auctions', _query=pparams)
}
return data
| apache-2.0 | -6,018,885,631,503,351,000 | 37.612022 | 154 | 0.538353 | false |
IhToN/DAW1-PRG | Ejercicios/SeguTrim/Objetos/Punto.py | 1 | 4377 | """
Punto class
coord x
coord y
suma(punto)
resta(punto)
Traza class
Punto instances held in a list
add a point
compare two traces (two traces are equal if their points are equal)
"""
import math
import turtle
class Punto:
def __init__(self, x=0.0, y=0.0):
self.x = float(x)
self.y = float(y)
def __str__(self):
return "Punto({}, {})".format(self.x, self.y)
def __eq__(self, other):
return (self.x, self.y) == (other.x, other.y)
    def suma(self, punto):
        """ Return the vector sum of this point and another
        """
        return Punto(self.x + punto.x, self.y + punto.y)
    def resta(self, punto):
        """ Return the vector difference of this point and another
        """
        # Punto defines no unary negation (__neg__), so compute the difference directly
        return Punto(self.x - punto.x, self.y - punto.y)
    def distancia(self, punto):
        """ Return the distance between this point and another
        """
return math.hypot(self.x - punto.x, self.y - punto.y)
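# Quick usage sketch (illustrative):
#   p = Punto(3, 0); q = Punto(0, 4)
#   p.suma(q)       -> Punto(3.0, 4.0)
#   p.resta(q)      -> Punto(3.0, -4.0)
#   p.distancia(q)  -> 5.0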
class Traza:
def __init__(self, *args):
self.trazado = []
self.i = -1
for arg in args:
if isinstance(arg, Punto):
self.trazado.append(arg)
else:
raise ValueError(arg, "no es un punto.")
def __str__(self):
out = ""
for punto in self.trazado:
out += str(punto) + " "
return out
def __eq__(self, other):
return self.trazado == other.trazado
def __next__(self):
self.i += 1
if self.i < len(self.trazado):
return self.trazado[self.i]
else:
raise StopIteration
    def __iter__(self):
        # Reset the index so the trace can be iterated more than once
        self.i = -1
        return self
    def add_punto(self, punto):
        """ Add a new point to the Traza
        """
if isinstance(punto, Punto):
self.trazado.append(punto)
else:
            raise ValueError("A Traza can only hold Punto instances, not other objects!")
    def longitud_traza(self):
        """ Return the total length of the trace: the sum of the distances between consecutive points
        """
ret = 0
for p in range(len(self.trazado) - 1):
ret += self.trazado[p].distancia(self.trazado[p + 1])
return ret
    def dump_traza(self, fichero='traza.txt'):
        """ Save the trace to a trace file
        """
fichero = open(fichero, 'w', encoding="utf-8")
for punto in self.trazado:
fichero.write("{},{}\n".format(punto.x, punto.y))
fichero.close()
def load_traza(self, fichero):
try:
fichero = open(fichero, encoding="utf-8")
self.trazado = []
for linea in fichero:
if linea != "":
punto = linea.split(",")
self.add_punto(Punto(punto[0].strip(), punto[1].strip()))
except FileNotFoundError:
            print("The file does not exist.")
def dibuja(self):
tortuga = self.turtle
tortuga.down()
for punto in self.trazado:
tortuga.setpos(punto.x, punto.y)
tortuga.up()
    def toggle_capture(self):
        """Toggle capture mode on or off, as appropriate"""
self.capture_mode = not self.capture_mode
if not self.capture_mode:
self.turtle.reset()
self.turtle.up()
self.turtle.setpos(self.trazado[0].x, self.trazado[0].y)
self.dibuja()
            fichero = self.screen.textinput("Save Trace", "Enter the file name:")
self.dump_traza(fichero + ".txt")
print(self)
    def move_turtle(self, x, y):
        """If we are in capture mode, move the turtle and record the points as we go"""
tortuga = self.turtle
if self.capture_mode:
tortuga.setheading(tortuga.towards(x, y))
tortuga.setpos(x, y)
self.add_punto(Punto(x, y))
def test():
p = Punto(3, 0)
k = Punto(0, 4)
tr = Traza(p, k)
print(tr)
tr.dump_traza("traza.txt")
tr.load_traza("traza.txt")
print(tr)
s = turtle.Screen()
t = turtle.Turtle()
tr.turtle = t
tr.screen = s
tr.capture_mode = False
s.onkey(tr.toggle_capture, 'space')
s.onclick(tr.move_turtle)
s.listen()
tr.dibuja()
turtle.done()
tr.dump_traza("traza.txt")
test()
| apache-2.0 | -7,849,022,810,489,935,000 | 25.815951 | 100 | 0.538778 | false |
pmediano/ComputationalNeurodynamics | Fall2016/Exercise_1/Solutions/IzNeuronRK4.py | 1 | 1897 | """
Computational Neurodynamics
Exercise 1
Simulates Izhikevich's neuron model using the Runge-Kutta 4 method.
Parameters for regular spiking, fast spiking and bursting
neurons extracted from:
http://www.izhikevich.org/publications/spikes.htm
(C) Murray Shanahan et al, 2016
"""
import numpy as np
import matplotlib.pyplot as plt
# Create time points
Tmin = 0
Tmax = 200 # Simulation time
dt = 0.01 # Step size
T = np.arange(Tmin, Tmax+dt, dt)
# Base current
I = 10
## Parameters of Izhikevich's model (regular spiking)
a = 0.02
b = 0.2
c = -65
d = 8
## Parameters of Izhikevich's model (fast spiking)
# a = 0.02
# b = 0.25
# c = -65
# d = 2
## Parameters of Izhikevich's model (bursting)
# a = 0.02
# b = 0.2
# c = -50
# d = 2
## Make a state vector that has a (v, u) pair for each timestep
s = np.zeros((len(T), 2))
## Initial values
s[0, 0] = -65
s[0, 1] = -1
# Note that s1[0] is v, s1[1] is u. This is Izhikevich equation in vector form
def s_dt(s1, I):
v_dt = 0.04*(s1[0]**2) + 5*s1[0] + 140 - s1[1] + I
u_dt = a*(b*s1[0] - s1[1])
return np.array([v_dt, u_dt])
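# RK4 refresher (explanatory note): for ds/dt = f(s), one step of size dt is
#   s_{n+1} = s_n + (dt/6)*(k1 + 2*k2 + 2*k3 + k4)
# with k1 = f(s_n), k2 = f(s_n + dt/2*k1), k3 = f(s_n + dt/2*k2), k4 = f(s_n + dt*k3);
# the loop below computes exactly this with f = s_dt.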
## SIMULATE
for t in range(len(T)-1):
# Calculate the four constants of Runge-Kutta method
k_1 = s_dt(s[t], I)
k_2 = s_dt(s[t] + 0.5*dt*k_1, I)
k_3 = s_dt(s[t] + 0.5*dt*k_2, I)
k_4 = s_dt(s[t] + dt*k_3, I)
s[t+1] = s[t] + (1.0/6)*dt*(k_1 + 2*k_2 + 2*k_3 + k_4)
# Reset the neuron if it has spiked
if s[t+1, 0] >= 30:
s[t, 0] = 30 # Add a Dirac pulse for visualisation
s[t+1, 0] = c # Reset to resting potential
s[t+1, 1] += d # Update recovery variable
v = s[:, 0]
u = s[:, 1]
## Plot the membrane potential
plt.subplot(211)
plt.plot(T, v)
plt.xlabel('Time (ms)')
plt.ylabel('Membrane potential v (mV)')
plt.title('Izhikevich Neuron')
# Plot the reset variable
plt.subplot(212)
plt.plot(T, u)
plt.xlabel('Time (ms)')
plt.ylabel('Reset variable u')
plt.show()
| gpl-3.0 | 194,822,784,332,023,800 | 19.397849 | 78 | 0.618345 | false |
jkettleb/iris | lib/iris/tests/test_cube_to_pp.py | 1 | 15153 | # (C) British Crown Copyright 2010 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# import iris tests first so that some things can be initialised before importing anything else
import iris.tests as tests
import os
import tempfile
import mock
import numpy as np
import iris
import iris.coords
import iris.coord_systems
import iris.fileformats.pp as ff_pp
from iris.fileformats.pp import PPField3
import iris.io
import iris.unit
import iris.tests.pp as pp
import iris.util
import iris.tests.stock as stock
def itab_callback(cube, field, filename):
cube.add_aux_coord(iris.coords.AuxCoord([field.lbrel], long_name='MOUMHeaderReleaseNumber', units='no_unit'))
cube.add_aux_coord(iris.coords.AuxCoord([field.lbexp], long_name='ExperimentNumber(ITAB)', units='no_unit'))
@tests.skip_data
class TestPPSave(tests.IrisTest, pp.PPTest):
def test_no_forecast_time(self):
cube = stock.lat_lon_cube()
coord = iris.coords.DimCoord(np.array([24], dtype=np.int64),
standard_name='time',
units='hours since epoch')
cube.add_aux_coord(coord)
self.assertCML(cube, ['cube_to_pp', 'no_forecast_time.cml'])
reference_txt_path = tests.get_result_path(('cube_to_pp', 'no_forecast_time.txt'))
with self.cube_save_test(reference_txt_path, reference_cubes=cube) as temp_pp_path:
iris.save(cube, temp_pp_path)
def test_no_forecast_period(self):
cube = stock.lat_lon_cube()
# Add a bounded scalar time coord and a forecast_reference_time.
time_coord = iris.coords.DimCoord(
10.958333, standard_name='time',
units='days since 2013-05-10 12:00',
bounds=[10.916667, 11.0])
cube.add_aux_coord(time_coord)
forecast_reference_time = iris.coords.DimCoord(
2.0, standard_name='forecast_reference_time',
units='weeks since 2013-05-07')
cube.add_aux_coord(forecast_reference_time)
self.assertCML(cube, ['cube_to_pp', 'no_forecast_period.cml'])
reference_txt_path = tests.get_result_path(('cube_to_pp',
'no_forecast_period.txt'))
with self.cube_save_test(reference_txt_path, reference_cubes=cube) as \
temp_pp_path:
iris.save(cube, temp_pp_path)
def test_pp_save_rules(self):
# Test pp save rules without user rules.
#read
in_filename = tests.get_data_path(('PP', 'simple_pp', 'global.pp'))
cubes = iris.load(in_filename, callback=itab_callback)
reference_txt_path = tests.get_result_path(('cube_to_pp', 'simple.txt'))
with self.cube_save_test(reference_txt_path, reference_cubes=cubes) as temp_pp_path:
iris.save(cubes, temp_pp_path)
def test_user_pp_save_rules(self):
# Test pp save rules with user rules.
#create a user rules file
user_rules_filename = iris.util.create_temp_filename(suffix='.txt')
try:
with open(user_rules_filename, "wt") as user_rules_file:
user_rules_file.write("IF\ncm.standard_name == 'air_temperature'\nTHEN\npp.lbuser[3] = 9222")
iris.fileformats.pp.add_save_rules(user_rules_filename)
try:
#read pp
in_filename = tests.get_data_path(('PP', 'simple_pp', 'global.pp'))
cubes = iris.load(in_filename, callback=itab_callback)
reference_txt_path = tests.get_result_path(('cube_to_pp', 'user_rules.txt'))
with self.cube_save_test(reference_txt_path, reference_cubes=cubes) as temp_pp_path:
iris.save(cubes, temp_pp_path)
finally:
iris.fileformats.pp.reset_save_rules()
finally:
os.remove(user_rules_filename)
def test_pp_append_singles(self):
# Test pp append saving - single cubes.
# load 2 arrays of >2D cubes
cube = stock.simple_pp()
reference_txt_path = tests.get_result_path(('cube_to_pp', 'append_single.txt'))
with self.cube_save_test(reference_txt_path, reference_cubes=[cube, cube]) as temp_pp_path:
iris.save(cube, temp_pp_path) # Create file
iris.save(cube, temp_pp_path, append=True) # Append to file
reference_txt_path = tests.get_result_path(('cube_to_pp', 'replace_single.txt'))
with self.cube_save_test(reference_txt_path, reference_cubes=cube) as temp_pp_path:
iris.save(cube, temp_pp_path) # Create file
iris.save(cube, temp_pp_path) # Replace file
def test_pp_append_lists(self):
# Test PP append saving - lists of cubes.
# For each of the first four time-steps in the 4D cube,
# pull out the bottom two levels.
cube_4d = stock.realistic_4d()
cubes = [cube_4d[i, :2, :, :] for i in range(4)]
reference_txt_path = tests.get_result_path(('cube_to_pp', 'append_multi.txt'))
with self.cube_save_test(reference_txt_path, reference_cubes=cubes) as temp_pp_path:
iris.save(cubes[:2], temp_pp_path)
iris.save(cubes[2:], temp_pp_path, append=True)
reference_txt_path = tests.get_result_path(('cube_to_pp', 'replace_multi.txt'))
with self.cube_save_test(reference_txt_path, reference_cubes=cubes[2:]) as temp_pp_path:
iris.save(cubes[:2], temp_pp_path)
iris.save(cubes[2:], temp_pp_path)
def add_coords_to_cube_and_test(self, coord1, coord2):
# a wrapper for creating arbitrary 2d cross-sections and run pp-saving tests
dataarray = np.arange(16, dtype='>f4').reshape(4, 4)
cm = iris.cube.Cube(data=dataarray)
cm.add_dim_coord(coord1, 0)
cm.add_dim_coord(coord2, 1)
# TODO: This is the desired line of code...
# reference_txt_path = tests.get_result_path(('cube_to_pp', '%s.%s.pp.txt' % (coord1.name(), coord2.name())))
# ...but this is required during the CF change, to maintain the original filename.
coord1_name = coord1.name().replace("air_", "")
coord2_name = coord2.name().replace("air_", "")
reference_txt_path = tests.get_result_path(('cube_to_pp', '%s.%s.pp.txt' % (coord1_name, coord2_name)))
# test with name
with self.cube_save_test(reference_txt_path, reference_cubes=cm,
field_coords=[coord1.name(), coord2.name()]) as temp_pp_path:
iris.save(cm, temp_pp_path, field_coords=[coord1.name(), coord2.name()])
# test with coord
with self.cube_save_test(reference_txt_path, reference_cubes=cm,
field_coords=[coord1, coord2]) as temp_pp_path:
iris.save(cm, temp_pp_path, field_coords=[coord1, coord2])
def test_non_standard_cross_sections(self):
#ticket #1037, the five variants being dealt with are
# 'pressure.latitude',
# 'depth.latitude',
# 'eta.latitude',
# 'pressure.time',
# 'depth.time',
f = FakePPEnvironment()
self.add_coords_to_cube_and_test(
iris.coords.DimCoord(f.z, long_name='air_pressure', units='hPa', bounds=f.z_bounds),
iris.coords.DimCoord(f.y, standard_name='latitude', units='degrees', bounds=f.y_bounds, coord_system=f.geog_cs()))
self.add_coords_to_cube_and_test(
iris.coords.DimCoord(f.z, long_name='depth', units='m', bounds=f.z_bounds),
iris.coords.DimCoord(f.y, standard_name='latitude', units='degrees', bounds=f.y_bounds, coord_system=f.geog_cs()))
self.add_coords_to_cube_and_test(
iris.coords.DimCoord(f.z, long_name='eta', units='1', bounds=f.z_bounds),
iris.coords.DimCoord(f.y, standard_name='latitude', units='degrees', bounds=f.y_bounds, coord_system=f.geog_cs()))
self.add_coords_to_cube_and_test(
iris.coords.DimCoord(f.z, long_name='air_pressure', units='hPa', bounds=f.z_bounds),
iris.coords.DimCoord(f.y, standard_name='time', units=iris.unit.Unit('days since 0000-01-01 00:00:00', calendar=iris.unit.CALENDAR_360_DAY), bounds=f.y_bounds))
self.add_coords_to_cube_and_test(
iris.coords.DimCoord(f.z, standard_name='depth', units='m', bounds=f.z_bounds),
iris.coords.DimCoord(f.y, standard_name='time', units=iris.unit.Unit('days since 0000-01-01 00:00:00', calendar=iris.unit.CALENDAR_360_DAY), bounds=f.y_bounds))
def test_365_calendar_export(self):
# test for 365 day calendar export
cube = stock.simple_pp()
new_unit = iris.unit.Unit('hours since 1970-01-01 00:00:00',
calendar=iris.unit.CALENDAR_365_DAY)
cube.coord('time').units = new_unit
pp_field = mock.MagicMock(spec=PPField3)
iris.fileformats.pp._ensure_save_rules_loaded()
iris.fileformats.pp._save_rules.verify(cube, pp_field)
self.assertEqual(pp_field.lbtim.ic, 4)
class FakePPEnvironment(object):
''' fake a minimal PP environment for use in cross-section coords, as in PP save rules '''
y = [1, 2, 3, 4]
z = [111, 222, 333, 444]
y_bounds = [[0.9, 1.1], [1.9, 2.1], [2.9, 3.1], [3.9, 4.1]]
z_bounds = [[110.9, 111.1], [221.9, 222.1], [332.9, 333.1], [443.9, 444.1]]
def geog_cs(self):
"""Return a GeogCS for this PPField.
Returns:
A GeogCS with the appropriate earth shape, meridian and pole position.
"""
return iris.coord_systems.GeogCS(6371229.0)
class TestPPSaveRules(tests.IrisTest, pp.PPTest):
def test_default_coord_system(self):
GeogCS = iris.coord_systems.GeogCS
cube = iris.tests.stock.lat_lon_cube()
reference_txt_path = tests.get_result_path(('cube_to_pp',
'default_coord_system.txt'))
# Remove all coordinate systems.
for coord in cube.coords():
coord.coord_system = None
# Ensure no coordinate systems available.
self.assertIsNone(cube.coord_system(GeogCS))
self.assertIsNone(cube.coord_system(None))
with self.cube_save_test(reference_txt_path, reference_cubes=cube) as \
temp_pp_path:
# Save cube to PP with no coordinate system.
iris.save(cube, temp_pp_path)
pp_cube = iris.load_cube(temp_pp_path)
# Ensure saved cube has the default coordinate system.
self.assertIsInstance(pp_cube.coord_system(GeogCS),
iris.coord_systems.GeogCS)
self.assertIsNotNone(pp_cube.coord_system(None))
self.assertIsInstance(pp_cube.coord_system(None),
iris.coord_systems.GeogCS)
self.assertIsNotNone(pp_cube.coord_system())
self.assertIsInstance(pp_cube.coord_system(),
iris.coord_systems.GeogCS)
def lbproc_from_pp(self, filename):
# Gets the lbproc field from the ppfile
pp_file = iris.fileformats.pp.load(filename)
field = next(pp_file)
return field.lbproc
def test_pp_save_rules(self):
# Test single process flags
for _, process_desc in iris.fileformats.pp.LBPROC_PAIRS[1:]:
# Get basic cube and set process flag manually
ll_cube = stock.lat_lon_cube()
ll_cube.attributes["ukmo__process_flags"] = (process_desc,)
# Save cube to pp
temp_filename = iris.util.create_temp_filename(".pp")
iris.save(ll_cube, temp_filename)
# Check the lbproc is what we expect
self.assertEqual(self.lbproc_from_pp(temp_filename),
iris.fileformats.pp.lbproc_map[process_desc])
os.remove(temp_filename)
# Test mutiple process flags
multiple_bit_values = ((128, 64), (4096, 1024), (8192, 1024))
# Maps lbproc value to the process flags that should be created
multiple_map = {sum(bits) : [iris.fileformats.pp.lbproc_map[bit] for bit in bits] for bits in multiple_bit_values}
        for lbproc, descriptions in multiple_map.items():
ll_cube = stock.lat_lon_cube()
ll_cube.attributes["ukmo__process_flags"] = descriptions
# Save cube to pp
temp_filename = iris.util.create_temp_filename(".pp")
iris.save(ll_cube, temp_filename)
# Check the lbproc is what we expect
self.assertEqual(self.lbproc_from_pp(temp_filename), lbproc)
os.remove(temp_filename)
def test_lbvc(self):
cube = stock.realistic_4d_no_derived()[0, :4, ...]
v_coord = iris.coords.DimCoord(standard_name='depth',
units='m', points=[-5, -10, -15, -20])
cube.remove_coord('level_height')
cube.remove_coord('sigma')
cube.remove_coord('surface_altitude')
cube.add_aux_coord(v_coord, 0)
expected = ([2, 1, -5.0],
[2, 2, -10.0],
[2, 3, -15.0],
[2, 4, -20.0])
for field, (lbvc, lblev, blev) in zip(fields_from_cube(cube), expected):
self.assertEqual(field.lbvc, lbvc)
self.assertEqual(field.lblev, lblev)
self.assertEqual(field.blev, blev)
def fields_from_cube(cubes):
"""
Return an iterator of PP fields generated from saving the given cube(s)
to a temporary file, and then subsequently loading them again
"""
with tempfile.NamedTemporaryFile('w+b', suffix='.pp') as tmp_file:
fh = tmp_file.file
iris.save(cubes, fh, saver='pp')
# make sure the fh is written to disk, and move it back to the
# start of the file
fh.flush()
os.fsync(fh)
fh.seek(0)
# load in the saved pp fields and check the appropriate metadata
for field in ff_pp.load(tmp_file.name):
yield field
if __name__ == "__main__":
tests.main()
| lgpl-3.0 | -5,607,596,048,526,821,000 | 43.177843 | 172 | 0.598825 | false |
BenBoZ/hexaco | Engine/Components/tst/Test_PositionComponent.py | 1 | 4037 | """
This file is part of HexACO.
HexACO is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
HexACO is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with HexACO. If not, see <http://www.gnu.org/licenses/>.
########################################################################
Position Component test Class
* Run file separate to run unit tests
########################################################################
Description
-----------
Class for a position component """
import unittest
from .. import PositionComponent
class TestPositionComponent(unittest.TestCase):
""" Tests the position component """
######################################################
@classmethod
def setUpClass(cls):
"This method is called once, when starting the tests"
cls.posComp = PositionComponent(None)
@classmethod
def tearDownClass(cls):
"This method is called after finishing all tests"
pass
#######################################################
def setUp(self):
"This method is called before each test case"
self.posComp.pos.x = 0.0
self.posComp.pos.y = 0.0
self.posComp.pos.z = 0.0
def tearDown(self):
"This method is called after each test case"
pass
#######################################################
def test_default_position(self):
""" Test if default position of position component is in at 0,0,0"""
self.assertEqual(self.posComp.pos.x, 0.0)
self.assertEqual(self.posComp.pos.y, 0.0)
self.assertEqual(self.posComp.pos.z, 0.0)
def test_default_orientation(self):
""" Test that default orientation is in 0 direction """
self.assertEqual(self.posComp.orientation, 0)
def test_center_of_tile_valid(self):
""" Test if for valid center of tile position
the value true is returned """
self.assertTrue(self.posComp.center_of_tile())
self.posComp.pos.x = -1.0
self.posComp.pos.z = 1.0
self.assertTrue(self.posComp.center_of_tile())
def test_center_of_tile_invalid(self):
""" Test if for invalid center of tile position
the value false is returned """
self.posComp.pos.x = 0.01
self.posComp.pos.y = -0.01
self.assertFalse(self.posComp.center_of_tile())
def test_center_of_tile_series(self):
""" Test for large series of center of tiles
if the expected amount of trues is returned """
counter = 0
self.posComp.pos.x = -1.0
self.posComp.pos.y = 1.0
for i in range(2555):
if self.posComp.center_of_tile():
counter += 1
self.posComp.pos.x += 0.01
self.posComp.pos.y -= 0.01
self.assertEqual(counter, 26)
def test_set_position_xyz(self):
""" Test if setting the position is propagated to the position """
self.posComp.set_position_xyz((3, -3, 0))
self.assertEqual(self.posComp.pos.xyz, (3, -3, 0))
def test_set_position_xyz(self):
""" Test if setting the position is propagated to the position """
self.posComp.set_position_xyz((-2.0, 0.0, 2.0))
self.assertEqual(self.posComp.pos.xyz, (-2.0, 0.0, 2.0))
def test_xyz(self):
""" Test if getting position through
xyz returns the expected result """
xyz = [1, -2, 1]
self.posComp.set_position_xyz(xyz)
self.assertEqual(xyz, self.posComp.xyz())
if __name__ == '__main__':
unittest.main(verbosity=2)
| gpl-3.0 | -2,831,596,542,244,316,700 | 28.467153 | 76 | 0.58162 | false |
RaminderSinghSahni/micro-ram-bot | tasks.py | 1 | 4411 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from microsoftbotframework import Response
import celery
import os
import sys
import json
# import argparse
from google.cloud import language
import google.auth
# import language
try:
import apiai
except ImportError:
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
import apiai
# CLIENT_ACCESS_TOKEN = 'd18b44be8d0b41269a42704c00d44d77'
CLIENT_ACCESS_TOKEN = '039129d3176644e9ac91464ee9e7b5df'
def respond_to_conversation_update(message):
if message["type"] == "conversationUpdate":
response = Response(message)
message_response = 'Have fun with the Microsoft Bot Framework'
response.reply_to_activity(message_response, recipient={"id": response["conversation"]["id"]})
def echo_response(message):
if message["type"] == "message":
response = Response(message)
message_response = message["text"]
# response_info = response.reply_to_activity("in this")
# response_info = response.reply_to_activity(message_response)
with open('intervention.json') as data_file:
iData = json.load(data_file)
if iData["intervention"] == "0":
ai = apiai.ApiAI(CLIENT_ACCESS_TOKEN)
request1 = ai.text_request()
# request1.lang = 'en' # optional, default value equal 'en'
# request.session_id = "<SESSION ID, UBIQUE FOR EACH USER>"
# print("\n\nYour Input : ",end=" ")
# response.reply_to_activity(ai)
with open('session.json') as data_file:
data = json.load(data_file)
if data["session_id"] != "":
request1.session_id = data["session_id"]
request1.query = message_response
# request1.resetContexts = False
# request1.
# response_info = response.reply_to_activity("hello"+request1)
# print("\n\nBot\'s response :",end=" ")
response1 = request1.getresponse()
responsestr = response1.read().decode('utf-8')
response_obj = json.loads(responsestr)
with open('session.json', 'w') as outfile:
json.dump({"session_id": response_obj["sessionId"]}, outfile)
# print(response_obj["result"]["fulfillment"]["speech"])
response_info = response.reply_to_activity(response_obj["result"]["fulfillment"]["speech"])
else:
with open('message.json') as data_file:
data = json.load(data_file)
if data["message"] != "":
new_response = Response(data["message"])
# language_client = language.Client()
language_client = language.Client.from_service_account_json('Micro-av-bot-1.json')
# language_client = language.client
document = language_client.document_from_text(message_response)
# Detects sentiment in the document.
annotations = document.annotate_text(include_sentiment=True,
include_syntax=False,
include_entities=False)
score = annotations.sentiment.score
magnitude = annotations.sentiment.magnitude
# response_info = new_response.reply_to_activity('Overall Sentiment: score')
response_info = new_response.reply_to_activity('Overall Sentiment: score of {} with magnitude of {}'.format(score, magnitude))
# response_info = response.reply_to_activity("Intervention is turned on")
# from time import sleep
# sleep(2)
# response.delete_activity(activity_id=response_info.json()['id'])
# sleep(2)
# response.create_conversation('lets talk about something really interesting')
# This is a asynchronous task
@celery.task()
def echo_response_async(message):
if message["type"] == "message":
response = Response(message)
message_response = message["text"]
response.send_to_conversation(message_response)
| mit | -5,725,038,473,106,126,000 | 37.692982 | 150 | 0.568579 | false |
malcolmwhite/DungeonsAndDragons | src/test/managers/test_model_conflict_manager.py | 1 | 3021 | from unittest import TestCase
from itertools import permutations
from src.main.managers.players.hard_coded_player_manager import HardCodedPlayerManager
from src.main.managers.conflict.hard_coded_example_conflict_manager import HardCodedExampleConflictManager
from src.main.managers.conflict.automated_conflict_manager import AutomatedConflictManager
from src.main.beans.players.standard_player import StandardPlayer
from src.main.managers.players.base_player_manager import BasePlayerManager
class TestModelConflictManager(TestCase):
def testStandardConflict(self):
player_manager = HardCodedPlayerManager()
conflict_manager = HardCodedExampleConflictManager(player_manager)
winner = conflict_manager.run_conflict()
self._validate_alice(winner)
active_players = player_manager.get_active_players()
inactive_players = player_manager.get_inactive_players()
self.assertEqual(len(active_players), 1)
self.assertEqual(len(inactive_players), 1)
alice = active_players[0]
bob = inactive_players[0]
self._validate_alice(alice)
self._validate_bob(bob)
def test_add_players(self):
player0 = StandardPlayer(speed=100, name="FASTEST")
player1 = StandardPlayer(speed=5, name="Middle1")
player2 = StandardPlayer(speed=5, name="Middle2")
player3 = StandardPlayer(speed=1, name="SLOWEST")
players = [player0, player1, player2, player3]
for players_perm in permutations(players):
players_perm = list(players_perm)
player_manager = BasePlayerManager(players_perm)
conflict_manager = AutomatedConflictManager(player_manager)
sorted_players = conflict_manager._order_players_for_new_round(players_perm)
self.validate_players(sorted_players, player0, player1, player2, player3)
def validate_players(self, players, player0, player1, player2, player3):
self.assertEqual(players[0], player0)
self.assertTrue((players[1] == player1) or (players[1] == player2))
self.assertTrue((players[2] == player1) or (players[2] == player2))
self.assertEqual(players[3], player3)
def _validate_alice(self, alice):
self.assertEqual(alice.NAME, "Alice")
self.assertEqual(alice.HP, 4)
self.assertEqual(len(alice.item_manager._SWORD_BAG), 0)
self.assertEqual(len(alice.item_manager._SHIELD_BAG), 1)
self.assertEqual(len(alice.item_manager._SHOES_BAG), 0)
self.assertEqual(len(alice.item_manager._HAT_BAG), 1)
self.assertFalse(alice.is_spooked())
def _validate_bob(self, bob):
self.assertEqual(bob.NAME, "Bob")
self.assertEqual(bob.HP, 0)
self.assertEqual(len(bob.item_manager._SWORD_BAG), 0)
self.assertEqual(len(bob.item_manager._SHIELD_BAG), 0)
self.assertEqual(len(bob.item_manager._SHOES_BAG), 0)
self.assertEqual(len(bob.item_manager._HAT_BAG), 0)
self.assertFalse(bob.is_spooked()) | mit | 2,843,952,573,210,768,000 | 47.741935 | 106 | 0.69712 | false |
ooici/marine-integrations | mi/dataset/test/test_single_dir_harvester.py | 1 | 22218 | #!/usr/bin/env python
"""
@package mi.dataset.test.test_single_dir_harvester.py
@file mi/dataset/test/test_single_dir_harvester.py
@author Emily Hahn
@brief Test code to exercize the single directory harvester
"""
import os
import glob
import gevent
import time
import shutil
import hashlib
from mi.core.log import get_logger ; log = get_logger()
from nose.plugins.attrib import attr
from mi.core.unit_test import MiUnitTest
from mi.dataset.harvester import SingleDirectoryHarvester
from mi.dataset.dataset_driver import DriverStateKey, DataSetDriverConfigKeys
TESTDIR = '/tmp/dsatest'
STOREDIR = '/tmp/stored_dsatest'
BIG_FILE = 'mi/dataset/driver/moas/gl/engineering/resource/unit_363_2013_245_6_6.mrg'
CONFIG = {
DataSetDriverConfigKeys.DIRECTORY: TESTDIR,
DataSetDriverConfigKeys.STORAGE_DIRECTORY: STOREDIR,
DataSetDriverConfigKeys.PATTERN: '*.txt',
DataSetDriverConfigKeys.FREQUENCY: 5,
DataSetDriverConfigKeys.FILE_MOD_WAIT_TIME: 30
}
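# FREQUENCY is the interval in seconds at which the harvester polls the directory,
# and FILE_MOD_WAIT_TIME is how long a file's modification time must lie in the
# past before the harvester reports it (so partially written files are skipped),
# as exercised by the tests below.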
INDICIES = ['363_2013_0245_6_8', '363_2013_0245_6_9', '363_2013_0245_6_10', '363_2013_0245_6_11',
'363_2013_0245_7_0', '363_2013_0245_7_1', '363_2013_0245_7_10', '363_2013_0246_0_0',
'363_2013_0246_7_0', '363_2013_0246_7_1', '363_2014_0012_0_0', '363_2014_0012_0_1', ]
@attr('INT', group='eoi')
class TestSingleDirHarvester(MiUnitTest):
found_file_count = 0
found_modified_count = 0
def setUp(self):
"""
reset counters and ensure we have test directories in place
"""
log.info('*** Starting test %s ***', self._testMethodName)
self.found_file_count = 0
self.found_modified_count = 0
if(not os.path.exists(TESTDIR)):
os.makedirs(TESTDIR)
self.clean_directory(TESTDIR, CONFIG[DataSetDriverConfigKeys.PATTERN])
if(not os.path.exists(STOREDIR)):
os.makedirs(STOREDIR)
self.clean_directory(STOREDIR, CONFIG[DataSetDriverConfigKeys.PATTERN])
def tearDown(self):
"""
cleanup the files we have created
"""
self.clean_directory(TESTDIR, CONFIG[DataSetDriverConfigKeys.PATTERN])
self.clean_directory(STOREDIR, CONFIG[DataSetDriverConfigKeys.PATTERN])
def test_init(self):
"""
Test initialize
"""
# start the harvester from scratch
memento = None
file_harvester = SingleDirectoryHarvester(CONFIG, memento,
self.new_file_found_callback,
self.modified_files_found_callback,
self.file_exception_callback)
file_harvester.start()
file_harvester.shutdown()
def test_harvester_from_scratch(self):
"""
Test that the harvester can find files as they are added to a directory,
starting with just the base file in the directory
"""
# start the harvester from scratch
memento = None
config = CONFIG.copy()
config[DataSetDriverConfigKeys.FILE_MOD_WAIT_TIME] = 10
        file_harvester = SingleDirectoryHarvester(config, memento,
self.new_file_found_callback,
self.modified_files_found_callback,
self.file_exception_callback)
file_harvester.start()
# start a new event which will increase the file index using INDICIES
self.directory_filler = gevent.spawn(self.fill_directory_with_files,
CONFIG[DataSetDriverConfigKeys.DIRECTORY],
CONFIG[DataSetDriverConfigKeys.PATTERN], 0, 5, 10)
# Wait for new files to be discovered
self.wait_for_file(0, 5)
self.wait_for_file(self.found_file_count, 5)
self.wait_for_file(self.found_file_count, 5)
self.wait_for_file(self.found_file_count, 5)
self.wait_for_file(self.found_file_count, 5)
file_harvester.shutdown()
def test_harvester_without_frequency(self):
"""
Test that we can use a default frequency
"""
config = {DataSetDriverConfigKeys.DIRECTORY: TESTDIR,
DataSetDriverConfigKeys.STORAGE_DIRECTORY: TESTDIR,
DataSetDriverConfigKeys.PATTERN: CONFIG[DataSetDriverConfigKeys.PATTERN],
DataSetDriverConfigKeys.FILE_MOD_WAIT_TIME: 15}
# start the harvester from scratch
memento = None
file_harvester = SingleDirectoryHarvester(config, memento,
self.new_file_found_callback,
self.modified_files_found_callback,
self.file_exception_callback)
file_harvester.start()
# start a new event which will increase the file index using INDICIES
self.directory_filler = gevent.spawn(self.fill_directory_with_files,
CONFIG[DataSetDriverConfigKeys.DIRECTORY],
CONFIG[DataSetDriverConfigKeys.PATTERN], 0, 2)
        # Wait for two sets of new files to be discovered
self.wait_for_file(0, 2)
self.wait_for_file(self.found_file_count, 2)
file_harvester.shutdown()
def test_harvester_without_mod_time(self):
"""
        Test that we can use a default file modification wait time
"""
config = {DataSetDriverConfigKeys.DIRECTORY: TESTDIR,
DataSetDriverConfigKeys.STORAGE_DIRECTORY: TESTDIR,
DataSetDriverConfigKeys.PATTERN: CONFIG[DataSetDriverConfigKeys.PATTERN],
DataSetDriverConfigKeys.FREQUENCY: 5}
# start the harvester from scratch
memento = None
file_harvester = SingleDirectoryHarvester(config, memento,
self.new_file_found_callback,
self.modified_files_found_callback,
self.file_exception_callback)
file_harvester.start()
# start a new event which will increase the file index using INDICIES
self.directory_filler = gevent.spawn(self.fill_directory_with_files,
CONFIG[DataSetDriverConfigKeys.DIRECTORY],
CONFIG[DataSetDriverConfigKeys.PATTERN], 0, 2)
# Wait for two sets of new files to be discovered
self.wait_for_file(0, 2)
self.wait_for_file(self.found_file_count, 2)
file_harvester.shutdown()
def test_harvester_multi_file(self):
"""
Set the timing so the harvester finds multiple new files at once
"""
config = CONFIG.copy()
config[DataSetDriverConfigKeys.FREQUENCY] = 1
config[DataSetDriverConfigKeys.FILE_MOD_WAIT_TIME] = 15
# start the harvester from scratch
memento = None
file_harvester = SingleDirectoryHarvester(config, memento,
self.new_file_found_callback,
self.modified_files_found_callback,
self.file_exception_callback)
file_harvester.start()
        # set the file filler to generate files only .5 secs apart,
        # so 2 files will appear within the 1 second between
        # harvester checks
self.directory_filler = gevent.spawn(self.fill_directory_with_files,
CONFIG[DataSetDriverConfigKeys.DIRECTORY],
CONFIG[DataSetDriverConfigKeys.PATTERN], 0, 12, .5)
# Wait for sets of new files to be discovered
self.wait_for_file(0)
self.wait_for_file(self.found_file_count)
self.wait_for_file(self.found_file_count)
self.wait_for_file(self.found_file_count)
self.wait_for_file(self.found_file_count)
file_harvester.shutdown()
def test_file_mod_wait_time(self):
"""
        Test that the file mod wait time is respected before files are reported
"""
memento = None
file_harvester = SingleDirectoryHarvester(CONFIG, memento,
self.new_file_found_callback,
self.modified_files_found_callback,
self.file_exception_callback)
file_harvester.start()
# put a file in the directory, the mod time will be the create time
self.fill_directory_with_files(CONFIG[DataSetDriverConfigKeys.DIRECTORY],
CONFIG[DataSetDriverConfigKeys.PATTERN], 0, 1, 0)
        # wait until the file is found, keeping track of approximately
        # how long it takes
        file_found_time = 0
while(self.found_file_count == 0):
time.sleep(1)
file_found_time += 1
if file_found_time > 60:
raise Exception("Timeout waiting to find file")
if file_found_time < CONFIG.get(DataSetDriverConfigKeys.FILE_MOD_WAIT_TIME):
# we found the file before the mod time, this is bad!
file_harvester.shutdown()
self.fail('Files found in %s seconds' % file_found_time)
log.debug('File found in %s seconds', file_found_time)
file_harvester.shutdown()
def test_harvester_with_memento(self):
"""
Test that the harvester can find file as they are added to a directory,
using a memento to start partway through the indices
"""
# make sure we have 2 files already in the directory
self.fill_directory_with_files(CONFIG[DataSetDriverConfigKeys.DIRECTORY],
CONFIG[DataSetDriverConfigKeys.PATTERN], 0, 2, 0)
filename_1 = 'unit_' + INDICIES[0] + CONFIG[DataSetDriverConfigKeys.PATTERN].replace('*', '')
filename_2 = 'unit_' + INDICIES[1] + CONFIG[DataSetDriverConfigKeys.PATTERN].replace('*', '')
# get metadata for the files
metadata_1 = self.get_file_metadata(filename_1)
metadata_1[DriverStateKey.INGESTED] = True
metadata_1[DriverStateKey.PARSER_STATE] = None
metadata_2 = self.get_file_metadata(filename_2)
metadata_2[DriverStateKey.INGESTED] = True
metadata_2[DriverStateKey.PARSER_STATE] = None
# generate memento with two files ingested (parser state is not looked at)
memento = {DriverStateKey.VERSION: 0.1,
filename_1: metadata_1,
filename_2: metadata_2
}
log.debug("starting with memento %s", memento)
config = CONFIG.copy()
config[DataSetDriverConfigKeys.FILE_MOD_WAIT_TIME] = 15
        file_harvester = SingleDirectoryHarvester(config, memento,
self.new_file_found_callback,
self.modified_files_found_callback,
self.file_exception_callback)
file_harvester.start()
# start a new event which will increase the file index using INDICIES
# with a delay in between
self.directory_filler = gevent.spawn(self.fill_directory_with_files,
CONFIG[DataSetDriverConfigKeys.DIRECTORY],
CONFIG[DataSetDriverConfigKeys.PATTERN], 2, 9, 5)
        # Wait for six sets of new files to be discovered
self.wait_for_file(0, 2)
self.wait_for_file(self.found_file_count, 2)
self.wait_for_file(self.found_file_count, 2)
self.wait_for_file(self.found_file_count, 2)
self.wait_for_file(self.found_file_count, 2)
self.wait_for_file(self.found_file_count, 2)
file_harvester.shutdown()
def test_harvester_with_memento_not_ingested(self):
"""
        Test that the harvester can find files as they are added to a directory,
        using a memento in which one of the existing files has not yet been ingested
"""
# make sure we have 2 files already in the directory
self.fill_directory_with_files(CONFIG[DataSetDriverConfigKeys.DIRECTORY],
CONFIG[DataSetDriverConfigKeys.PATTERN], 0, 2, 0)
filename_1 = 'unit_' + INDICIES[0] + CONFIG[DataSetDriverConfigKeys.PATTERN].replace('*', '')
filename_2 = 'unit_' + INDICIES[1] + CONFIG[DataSetDriverConfigKeys.PATTERN].replace('*', '')
# get metadata for the files
metadata_1 = self.get_file_metadata(filename_1)
metadata_1[DriverStateKey.INGESTED] = True
metadata_1[DriverStateKey.PARSER_STATE] = None
metadata_2 = self.get_file_metadata(filename_2)
metadata_2[DriverStateKey.INGESTED] = False
metadata_2[DriverStateKey.PARSER_STATE] = None
        # generate memento with one file ingested and one not ingested (parser state is not looked at)
memento = {DriverStateKey.VERSION: 0.1,
filename_1: metadata_1,
filename_2: metadata_2
}
log.debug("starting with memento %s", memento)
config = CONFIG.copy()
config[DataSetDriverConfigKeys.FILE_MOD_WAIT_TIME] = 10
        file_harvester = SingleDirectoryHarvester(config, memento,
self.new_file_found_callback,
self.modified_files_found_callback,
self.file_exception_callback)
file_harvester.start()
# start a new event which will increase the file index using INDICIES
# with a delay in between
self.directory_filler = gevent.spawn(self.fill_directory_with_files,
CONFIG[DataSetDriverConfigKeys.DIRECTORY],
CONFIG[DataSetDriverConfigKeys.PATTERN], 2, 4, 10)
# Should find the 4 new files plus 1 not ingested file from state, so 5 total
self.wait_for_file(0, 5)
self.wait_for_file(self.found_file_count, 5)
self.wait_for_file(self.found_file_count, 5)
self.wait_for_file(self.found_file_count, 5)
self.wait_for_file(self.found_file_count, 5)
file_harvester.shutdown()
def test_harvester_with_modified(self):
"""
        Test that the harvester notices when a file that has already been ingested
        is modified after the harvester starts
"""
# make sure we have 2 files already in the directory
self.fill_directory_with_files(CONFIG[DataSetDriverConfigKeys.DIRECTORY],
CONFIG[DataSetDriverConfigKeys.PATTERN], 0, 2, 0)
filename_1 = 'unit_' + INDICIES[0] + CONFIG[DataSetDriverConfigKeys.PATTERN].replace('*', '')
filename_2 = 'unit_' + INDICIES[1] + CONFIG[DataSetDriverConfigKeys.PATTERN].replace('*', '')
# get metadata for the files
metadata_1 = self.get_file_metadata(filename_1)
metadata_1[DriverStateKey.INGESTED] = True
metadata_1[DriverStateKey.PARSER_STATE] = None
metadata_2 = self.get_file_metadata(filename_2)
metadata_2[DriverStateKey.INGESTED] = True
metadata_2[DriverStateKey.PARSER_STATE] = None
# generate memento with two files ingested (parser state is not looked at)
memento = {DriverStateKey.VERSION: 0.1,
filename_1: metadata_1,
filename_2: metadata_2
}
log.debug("starting with memento %s", memento)
config = CONFIG.copy()
config[DataSetDriverConfigKeys.FILE_MOD_WAIT_TIME] = 15
        file_harvester = SingleDirectoryHarvester(config, memento,
self.new_file_found_callback,
self.modified_files_found_callback,
self.file_exception_callback)
file_harvester.start()
file_path = os.path.join(CONFIG[DataSetDriverConfigKeys.DIRECTORY], filename_1)
with open(file_path, 'a') as filehandle:
filehandle.write('a b c d')
end_time = 0
while(self.found_modified_count == 0):
log.debug("Waiting for modified file...")
time.sleep(2)
end_time += 2
if end_time > 60:
raise Exception("Timeout waiting to find modified files")
file_harvester.shutdown()
def test_harvester_exception(self):
"""
Verify exceptions
"""
config = "blah"
memento = None
        self.assertRaises(TypeError, SingleDirectoryHarvester, config, memento,
                          self.new_file_found_callback,
                          self.modified_files_found_callback,
                          self.file_exception_callback)
config = CONFIG.copy()
config[DataSetDriverConfigKeys.FILE_MOD_WAIT_TIME] = -5
        self.assertRaises(TypeError, SingleDirectoryHarvester, config, memento,
                          self.new_file_found_callback,
                          self.modified_files_found_callback,
                          self.file_exception_callback)
def test_harvester_1000(self):
"""
The harvester is taking a really long time to run, find out how long for 1000 files
"""
self.fill_directory_1000_files(CONFIG[DataSetDriverConfigKeys.PATTERN])
memento = None
config = CONFIG.copy()
config[DataSetDriverConfigKeys.FILE_MOD_WAIT_TIME] = 1
        file_harvester = SingleDirectoryHarvester(config, memento,
self.new_file_found_callback,
self.modified_files_found_callback,
self.file_exception_callback)
start_time = time.time()
file_harvester.start()
while(self.found_file_count < 1000):
self.wait_for_file(self.found_file_count, 5, 60)
end_time = time.time()
log.debug('harvester found all files in %s', (end_time - start_time))
def clean_directory(self, data_directory, pattern = "*"):
"""
Clean out the data directory of all files
"""
dir_files = glob.glob(data_directory + '/' + pattern)
for file_name in dir_files:
#log.debug("Removing file %s", file_name)
os.remove(file_name)
def new_file_found_callback(self, file_name):
"""
        Callback when a new file is found by the harvester. This would normally pass the
        file to the parser, but this test has no parser, so just count the file.
"""
self.found_file_count += 1
def modified_files_found_callback(self):
"""
        Callback when the harvester detects that previously found files have been
        modified; this test has no parser, so just count the notification.
"""
self.found_modified_count += 1
log.info("Found modified file")
def file_exception_callback(self, exception):
"""
Callback if there is an exception in the harvester, just raise an exception
"""
log.error("Error polling for files: %s", exception)
raise Exception("Error polling for files: %s", exception)
def fill_directory_with_files(self, data_directory, pattern, start_idx=0, num_files=1, delay=4):
"""
        Create empty test files in data_directory, named from sequential entries of
        INDICIES starting at start_idx, sleeping `delay` seconds before creating each file.
"""
for i in range(0, num_files):
time.sleep(delay)
next_file = data_directory + '/' + 'unit_' + INDICIES[start_idx + i] + pattern.replace('*', '')
open(next_file, 'a').close()
log.debug("Added file %s to directory, index %d", next_file, start_idx + i)
log.debug("Done with directory filler")
def fill_directory_1000_files(self, pattern):
for i in range(0, 1000):
next_file = TESTDIR + '/' + 'test_' + str(i) + pattern.replace('*', '')
shutil.copy(BIG_FILE, next_file)
log.debug("Done with long directory filler")
def get_file_metadata(self, filename):
"""
Get the file size, modification time and checksum and return it in a dictionary
"""
file_path = os.path.join(CONFIG.get(DataSetDriverConfigKeys.DIRECTORY), filename)
        # the modification time of the file on disk (test files are newly created, so this is their creation time)
mod_time = os.path.getmtime(file_path)
file_size = os.path.getsize(file_path)
with open(file_path) as filehandle:
md5_checksum = hashlib.md5(filehandle.read()).hexdigest()
return {DriverStateKey.FILE_SIZE: file_size,
DriverStateKey.FILE_MOD_DATE: mod_time,
DriverStateKey.FILE_CHECKSUM: md5_checksum}
def wait_for_file(self, starting_count, delay=1, timeout=60):
"""
Wait for a new round of files to be discovered
"""
end_time = 0
while(self.found_file_count == starting_count):
log.debug("Waiting for next set of files...")
time.sleep(delay)
end_time += delay
if end_time > timeout:
raise Exception("Timeout waiting to find files")
| bsd-2-clause | -3,474,479,848,297,348,000 | 44.622177 | 109 | 0.57854 | false |
orlenko/sfpirg | sfpirgapp/migrations/0022_auto__del_field_organization_mailing_address__add_field_organization_m.py | 1 | 25180 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
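# This South schema migration flattens the Organization.mailing_address ForeignKey
# into separate mailing_city/mailing_street/mailing_street2/mailing_postal_code
# columns and makes the contact_* fields nullable.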
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Organization.mailing_address'
db.delete_column(u'sfpirgapp_organization', 'mailing_address_id')
# Adding field 'Organization.mailing_city'
db.add_column(u'sfpirgapp_organization', 'mailing_city',
self.gf('django.db.models.fields.CharField')(default='-', max_length=255),
keep_default=False)
# Adding field 'Organization.mailing_street'
db.add_column(u'sfpirgapp_organization', 'mailing_street',
self.gf('django.db.models.fields.CharField')(default='-', max_length=255),
keep_default=False)
# Adding field 'Organization.mailing_street2'
db.add_column(u'sfpirgapp_organization', 'mailing_street2',
self.gf('django.db.models.fields.CharField')(default='', max_length=255, null=True, blank=True),
keep_default=False)
# Adding field 'Organization.mailing_postal_code'
db.add_column(u'sfpirgapp_organization', 'mailing_postal_code',
self.gf('django.db.models.fields.CharField')(default='-', max_length=255),
keep_default=False)
# Changing field 'Organization.contact_phone'
db.alter_column(u'sfpirgapp_organization', 'contact_phone', self.gf('django.db.models.fields.CharField')(max_length=255, null=True))
# Changing field 'Organization.contact_position'
db.alter_column(u'sfpirgapp_organization', 'contact_position', self.gf('django.db.models.fields.CharField')(max_length=255, null=True))
# Changing field 'Organization.contact_email'
db.alter_column(u'sfpirgapp_organization', 'contact_email', self.gf('django.db.models.fields.EmailField')(max_length=255, null=True))
# Changing field 'Organization.contact_name'
db.alter_column(u'sfpirgapp_organization', 'contact_name', self.gf('django.db.models.fields.CharField')(max_length=255, null=True))
def backwards(self, orm):
# Adding field 'Organization.mailing_address'
db.add_column(u'sfpirgapp_organization', 'mailing_address',
self.gf('django.db.models.fields.related.ForeignKey')(default='-', to=orm['sfpirgapp.Address']),
keep_default=False)
# Deleting field 'Organization.mailing_city'
db.delete_column(u'sfpirgapp_organization', 'mailing_city')
# Deleting field 'Organization.mailing_street'
db.delete_column(u'sfpirgapp_organization', 'mailing_street')
# Deleting field 'Organization.mailing_street2'
db.delete_column(u'sfpirgapp_organization', 'mailing_street2')
# Deleting field 'Organization.mailing_postal_code'
db.delete_column(u'sfpirgapp_organization', 'mailing_postal_code')
# Changing field 'Organization.contact_phone'
db.alter_column(u'sfpirgapp_organization', 'contact_phone', self.gf('django.db.models.fields.CharField')(default='-', max_length=255))
# Changing field 'Organization.contact_position'
db.alter_column(u'sfpirgapp_organization', 'contact_position', self.gf('django.db.models.fields.CharField')(default='-', max_length=255))
# Changing field 'Organization.contact_email'
db.alter_column(u'sfpirgapp_organization', 'contact_email', self.gf('django.db.models.fields.EmailField')(default='-', max_length=255))
# Changing field 'Organization.contact_name'
db.alter_column(u'sfpirgapp_organization', 'contact_name', self.gf('django.db.models.fields.CharField')(default='-', max_length=255))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'generic.assignedkeyword': {
'Meta': {'ordering': "('_order',)", 'object_name': 'AssignedKeyword'},
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keyword': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'assignments'", 'to': u"orm['generic.Keyword']"}),
'object_pk': ('django.db.models.fields.IntegerField', [], {})
},
u'generic.keyword': {
'Meta': {'object_name': 'Keyword'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
u'sfpirgapp.actiongroup': {
'Meta': {'ordering': "('_order',)", 'object_name': 'ActionGroup'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'action_groups'", 'to': u"orm['sfpirgapp.Category']"}),
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'featured_image': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'keywords': ('mezzanine.generic.fields.KeywordsField', [], {'object_id_field': "'object_pk'", 'to': u"orm['generic.AssignedKeyword']", 'frozen_by_south': 'True'}),
'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'theme_color': ('django.db.models.fields.CharField', [], {'default': "'grey'", 'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'titles': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'actiongroups'", 'to': u"orm['auth.User']"})
},
u'sfpirgapp.address': {
'Meta': {'object_name': 'Address'},
'city': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'street2': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'sfpirgapp.application': {
'Meta': {'object_name': 'Application'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sfpirgapp.Project']"}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.utcnow'})
},
u'sfpirgapp.category': {
'Meta': {'ordering': "('_order',)", 'object_name': 'Category'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'featured_image': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'keywords': ('mezzanine.generic.fields.KeywordsField', [], {'object_id_field': "'object_pk'", 'to': u"orm['generic.AssignedKeyword']", 'frozen_by_south': 'True'}),
'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'theme_color': ('django.db.models.fields.CharField', [], {'default': "'grey'", 'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'titles': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'categorys'", 'to': u"orm['auth.User']"})
},
u'sfpirgapp.dummytable': {
'Meta': {'object_name': 'DummyTable'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'sfpirgapp.liaison': {
'Meta': {'object_name': 'Liaison'},
'alt_email': ('django.db.models.fields.EmailField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'liaisons'", 'to': u"orm['sfpirgapp.Organization']"}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'position': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'sfpirgapp.organization': {
'Meta': {'object_name': 'Organization'},
'communities': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'contact_alt_email': ('django.db.models.fields.EmailField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'contact_email': ('django.db.models.fields.EmailField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'contact_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'contact_phone': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'contact_position': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_registered': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'mailing_city': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'mailing_postal_code': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'mailing_street': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'mailing_street2': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'mandate': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'sources_of_funding': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'sfpirgapp.profile': {
'Meta': {'object_name': 'Profile'},
'bio': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'on_mailing_list': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sfpirgapp.Organization']", 'null': 'True', 'blank': 'True'}),
'photo': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
},
u'sfpirgapp.project': {
'Meta': {'object_name': 'Project'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'arx_projects'", 'to': u"orm['sfpirgapp.Category']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_start': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'description_long': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_short': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'issues_addressed': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'larger_goal': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'length': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'liaison': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sfpirgapp.Liaison']", 'null': 'True', 'blank': 'True'}),
'logo': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'project_subject': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['sfpirgapp.ProjectSubject']", 'symmetrical': 'False'}),
'project_subject_other': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'project_type': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['sfpirgapp.ProjectType']", 'symmetrical': 'False'}),
'project_type_other': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'researcher_qualities': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'results_plan': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'size': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'support_method': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'time_per_week': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'sfpirgapp.projectsubject': {
'Meta': {'object_name': 'ProjectSubject'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
u'sfpirgapp.projecttype': {
'Meta': {'object_name': 'ProjectType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
u'sfpirgapp.testimonial': {
'Meta': {'ordering': "('_order',)", 'object_name': 'Testimonial'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'testimonials'", 'to': u"orm['sfpirgapp.Category']"}),
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'featured_image': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'keywords': ('mezzanine.generic.fields.KeywordsField', [], {'object_id_field': "'object_pk'", 'to': u"orm['generic.AssignedKeyword']", 'frozen_by_south': 'True'}),
'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'theme_color': ('django.db.models.fields.CharField', [], {'default': "'grey'", 'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'titles': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'testimonials'", 'to': u"orm['auth.User']"})
},
u'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['sfpirgapp'] | bsd-2-clause | -7,932,003,057,941,393,000 | 78.686709 | 187 | 0.562311 | false |
QudevETH/PycQED_py3 | pycqed/simulations/chevron_sim.py | 1 | 2373 | """
Based on Olli's mathematica notebook used to simulate chevrons
"""
import numpy as np
from scipy.linalg import expm
ham = lambda e, g: np.array([[0.5*e, g], [g, -0.5*e]])
evol = lambda e, g, dt: expm(dt*1j*ham(e, g))
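# ham builds the two-level Hamiltonian H = (e/2)*sigma_z + g*sigma_x;
# evol is the corresponding single-step propagator expm(1j*H*dt).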
def rabisim(efun, g, t, dt):
"""
This function returns the evolution of a system described by the hamiltonian:
    H = (efun/2) sigma_z + g sigma_x
Inputs:
    efun, Function that returns the energy parameter vs time.
g, Coupling parameter
t, Final time of the evolution
dt, Stepsize of the time evolution
Outputs:
f_vec, Evolution for times (1, 1+dt, ..., t)
"""
s0 = np.array([1, 0])
ts = np.arange(1., t+0.5*dt, dt)
f = lambda st, ti: np.dot(evol(efun(ti), g, dt), st)
f_vec = np.zeros((len(ts), 2), dtype=np.complex128)
f_vec[0, :] = s0
for i, t in enumerate(ts[:-1]):
f_vec[i+1, :] = f(f_vec[i], t)
return f_vec
qamp = lambda vec: np.abs(vec[:, 1])**2
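# qamp: excited-state population |c_1|**2 at each time step of a state history
# returned by rabisim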
def chevron(e0, emin, emax, n, g, t, dt, sf):
"""
Inputs:
    e0, sets the energy scale at the center (detuning).
emin, sets min energy to simulate, in e0 units.
emax, sets max energy to simulate, in e0 units.
n, sets number of points in energy array.
g, Coupling parameter.
t, Final time of the evolution.
dt, Stepsize of the time evolution.
sf, Step function of the distortion kernel.
"""
energy_func = lambda energy, t: e0*(1.-(energy*sf(t))**2)
energy_vec = np.arange(1+emin, 1+emax, (emax-emin)/(n-1))
chevron_vec = []
for ee in energy_vec:
chevron_vec.append(
qamp(rabisim(lambda t: energy_func(ee, t), g, t, dt)))
return np.array(chevron_vec)
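# Minimal usage sketch (not from the original notebook): the step function `sf`
# below is a hypothetical identity kernel, i.e. an undistorted flux pulse.
#
#     sf = lambda t: 1.0
#     data = chevron(e0=1.0, emin=-0.05, emax=0.05, n=51, g=0.02, t=100, dt=1, sf=sf)
#     # data[i, j] is the excited-state population at detuning index i and time step j;
#     # plotting it as a 2D image shows the chevron interference pattern.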
def chevron_slice(e0, energy, g, t, dt, sf):
"""
Inputs:
    e0, sets the energy scale at the center (detuning).
energy, energy of the slice to simulate, in e0 units.
g, Coupling parameter.
t, Final time of the evolution.
dt, Stepsize of the time evolution.
sf, Step function of the distortion kernel.
"""
energy_func = lambda energy, t: e0*(1.-(energy*sf(t))**2)
return qamp(rabisim(lambda t: energy_func(energy, t), g, t, dt))
| mit | -6,412,169,818,103,500,000 | 33.897059 | 81 | 0.554994 | false |
theoryno3/gensim | gensim/test/test_models.py | 1 | 26406 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <[email protected]>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking transformation algorithms (the models package).
"""
import logging
import unittest
import os
import os.path
import tempfile
import numpy
import scipy.linalg
from gensim.corpora import mmcorpus, Dictionary
from gensim.models import lsimodel, ldamodel, tfidfmodel, rpmodel, logentropy_model, ldamulticore
from gensim.models.wrappers import ldamallet
from gensim import matutils
module_path = os.path.dirname(__file__) # needed because sample data files are located in the same folder
datapath = lambda fname: os.path.join(module_path, 'test_data', fname)
# set up vars used in testing ("Deerwester" from the web tutorial)
texts = [['human', 'interface', 'computer'],
['survey', 'user', 'computer', 'system', 'response', 'time'],
['eps', 'user', 'interface', 'system'],
['system', 'human', 'system', 'eps'],
['user', 'response', 'time'],
['trees'],
['graph', 'trees'],
['graph', 'minors', 'trees'],
['graph', 'minors', 'survey']]
dictionary = Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
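# each document becomes a sparse bag-of-words vector: a list of
# (token_id, token_count) 2-tuples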
def testfile():
# temporary data will be stored to this file
return os.path.join(tempfile.gettempdir(), 'gensim_models.tst')
class TestLsiModel(unittest.TestCase):
def setUp(self):
self.corpus = mmcorpus.MmCorpus(datapath('testcorpus.mm'))
def testTransform(self):
"""Test lsi[vector] transformation."""
# create the transformation model
model = lsimodel.LsiModel(self.corpus, num_topics=2)
# make sure the decomposition is enough accurate
u, s, vt = scipy.linalg.svd(matutils.corpus2dense(self.corpus, self.corpus.num_terms), full_matrices=False)
self.assertTrue(numpy.allclose(s[:2], model.projection.s)) # singular values must match
# transform one document
doc = list(self.corpus)[0]
transformed = model[doc]
vec = matutils.sparse2full(transformed, 2) # convert to dense vector, for easier equality tests
expected = numpy.array([-0.6594664, 0.142115444]) # scaled LSI version
# expected = numpy.array([-0.1973928, 0.05591352]) # non-scaled LSI version
self.assertTrue(numpy.allclose(abs(vec), abs(expected))) # transformed entries must be equal up to sign
def testCorpusTransform(self):
"""Test lsi[corpus] transformation."""
model = lsimodel.LsiModel(self.corpus, num_topics=2)
got = numpy.vstack(matutils.sparse2full(doc, 2) for doc in model[corpus])
expected = numpy.array([
[ 0.65946639, 0.14211544],
[ 2.02454305, -0.42088759],
[ 1.54655361, 0.32358921],
[ 1.81114125, 0.5890525 ],
[ 0.9336738 , -0.27138939],
[ 0.01274618, -0.49016181],
[ 0.04888203, -1.11294699],
[ 0.08063836, -1.56345594],
[ 0.27381003, -1.34694159]])
self.assertTrue(numpy.allclose(abs(got), abs(expected))) # must equal up to sign
def testOnlineTransform(self):
corpus = list(self.corpus)
doc = corpus[0] # use the corpus' first document for testing
# create the transformation model
model2 = lsimodel.LsiModel(corpus=corpus, num_topics=5) # compute everything at once
model = lsimodel.LsiModel(corpus=None, id2word=model2.id2word, num_topics=5) # start with no documents, we will add them later
# train model on a single document
model.add_documents([corpus[0]])
# transform the testing document with this partial transformation
transformed = model[doc]
vec = matutils.sparse2full(transformed, model.num_topics) # convert to dense vector, for easier equality tests
expected = numpy.array([-1.73205078, 0.0, 0.0, 0.0, 0.0]) # scaled LSI version
self.assertTrue(numpy.allclose(abs(vec), abs(expected), atol=1e-6)) # transformed entries must be equal up to sign
# train on another 4 documents
model.add_documents(corpus[1:5], chunksize=2) # train on 4 extra docs, in chunks of 2 documents, for the lols
# transform a document with this partial transformation
transformed = model[doc]
vec = matutils.sparse2full(transformed, model.num_topics) # convert to dense vector, for easier equality tests
expected = numpy.array([-0.66493785, -0.28314203, -1.56376302, 0.05488682, 0.17123269]) # scaled LSI version
self.assertTrue(numpy.allclose(abs(vec), abs(expected), atol=1e-6)) # transformed entries must be equal up to sign
# train on the rest of documents
model.add_documents(corpus[5:])
# make sure the final transformation is the same as if we had decomposed the whole corpus at once
vec1 = matutils.sparse2full(model[doc], model.num_topics)
vec2 = matutils.sparse2full(model2[doc], model2.num_topics)
self.assertTrue(numpy.allclose(abs(vec1), abs(vec2), atol=1e-5)) # the two LSI representations must equal up to sign
def testPersistence(self):
fname = testfile()
model = lsimodel.LsiModel(self.corpus, num_topics=2)
model.save(fname)
model2 = lsimodel.LsiModel.load(fname)
self.assertEqual(model.num_topics, model2.num_topics)
self.assertTrue(numpy.allclose(model.projection.u, model2.projection.u))
self.assertTrue(numpy.allclose(model.projection.s, model2.projection.s))
tstvec = []
self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
def testPersistenceCompressed(self):
fname = testfile() + '.gz'
model = lsimodel.LsiModel(self.corpus, num_topics=2)
model.save(fname)
model2 = lsimodel.LsiModel.load(fname, mmap=None)
self.assertEqual(model.num_topics, model2.num_topics)
self.assertTrue(numpy.allclose(model.projection.u, model2.projection.u))
self.assertTrue(numpy.allclose(model.projection.s, model2.projection.s))
tstvec = []
self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
def testLargeMmap(self):
fname = testfile()
model = lsimodel.LsiModel(self.corpus, num_topics=2)
# test storing the internal arrays into separate files
model.save(fname, sep_limit=0)
# now load the external arrays via mmap
model2 = lsimodel.LsiModel.load(fname, mmap='r')
self.assertEqual(model.num_topics, model2.num_topics)
self.assertTrue(isinstance(model2.projection.u, numpy.memmap))
self.assertTrue(isinstance(model2.projection.s, numpy.memmap))
self.assertTrue(numpy.allclose(model.projection.u, model2.projection.u))
self.assertTrue(numpy.allclose(model.projection.s, model2.projection.s))
tstvec = []
self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
def testLargeMmapCompressed(self):
fname = testfile() + '.gz'
model = lsimodel.LsiModel(self.corpus, num_topics=2)
# test storing the internal arrays into separate files
model.save(fname, sep_limit=0)
# now load the external arrays via mmap
return
# turns out this test doesn't exercise this because there are no arrays
# to be mmaped!
self.assertRaises(IOError, lsimodel.LsiModel.load, fname, mmap='r')
#endclass TestLsiModel
class TestRpModel(unittest.TestCase):
def setUp(self):
self.corpus = mmcorpus.MmCorpus(datapath('testcorpus.mm'))
def testTransform(self):
# create the transformation model
numpy.random.seed(13) # HACK; set fixed seed so that we always get the same random matrix (and can compare against expected results)
model = rpmodel.RpModel(self.corpus, num_topics=2)
# transform one document
doc = list(self.corpus)[0]
transformed = model[doc]
vec = matutils.sparse2full(transformed, 2) # convert to dense vector, for easier equality tests
expected = numpy.array([-0.70710677, 0.70710677])
self.assertTrue(numpy.allclose(vec, expected)) # transformed entries must be equal up to sign
def testPersistence(self):
fname = testfile()
model = rpmodel.RpModel(self.corpus, num_topics=2)
model.save(fname)
model2 = rpmodel.RpModel.load(fname)
self.assertEqual(model.num_topics, model2.num_topics)
self.assertTrue(numpy.allclose(model.projection, model2.projection))
tstvec = []
self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
def testPersistenceCompressed(self):
fname = testfile() + '.gz'
model = rpmodel.RpModel(self.corpus, num_topics=2)
model.save(fname)
model2 = rpmodel.RpModel.load(fname, mmap=None)
self.assertEqual(model.num_topics, model2.num_topics)
self.assertTrue(numpy.allclose(model.projection, model2.projection))
tstvec = []
self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
#endclass TestRpModel
class TestLdaModel(unittest.TestCase):
def setUp(self):
self.corpus = mmcorpus.MmCorpus(datapath('testcorpus.mm'))
def testTransform(self):
passed = False
# sometimes, LDA training gets stuck at a local minimum
# in that case try re-training the model from scratch, hoping for a
# better random initialization
for i in range(5): # restart at most 5 times
# create the transformation model
model = ldamodel.LdaModel(id2word=dictionary, num_topics=2, passes=100)
model.update(corpus)
# transform one document
doc = list(corpus)[0]
transformed = model[doc]
vec = matutils.sparse2full(transformed, 2) # convert to dense vector, for easier equality tests
expected = [0.13, 0.87]
passed = numpy.allclose(sorted(vec), sorted(expected), atol=1e-2) # must contain the same values, up to re-ordering
if passed:
break
logging.warning("LDA failed to converge on attempt %i (got %s, expected %s)" %
(i, sorted(vec), sorted(expected)))
self.assertTrue(passed)
def testTopicSeeding(self):
passed = False
for topic in range(2):
# try seeding it both ways round, check you get the same
# topics out but with which way round they are depending
# on the way round they're seeded
for i in range(5): # restart at most 5 times
eta = numpy.ones((2, len(dictionary))) * 0.5
system = dictionary.token2id[u'system']
trees = dictionary.token2id[u'trees']
# aggressively seed the word 'system', in one of the
# two topics, 10 times higher than the other words
eta[topic, system] *= 10
model = ldamodel.LdaModel(id2word=dictionary, num_topics=2, passes=200, eta=eta)
model.update(corpus)
topics = [dict((word, p) for p, word in model.show_topic(j)) for j in range(2)]
# check that the word system in the topic we seeded, got a high weight,
# and the word 'trees' (the main word in the other topic) a low weight --
# and vice versa for the other topic (which we didn't seed with 'system')
result = [[topics[topic].get(u'system',0), topics[topic].get(u'trees',0)],
[topics[1-topic].get(u'system',0), topics[1-topic].get(u'trees',0)]]
expected = [[0.385, 0.022],
[0.025, 0.157]]
passed = numpy.allclose(result, expected, atol=1e-2)
if passed:
break
logging.warning("LDA failed to converge on attempt %i (got %s, expected %s)" %
(i, result, expected))
self.assertTrue(passed)
def testPersistence(self):
fname = testfile()
model = ldamodel.LdaModel(self.corpus, num_topics=2)
model.save(fname)
model2 = ldamodel.LdaModel.load(fname)
self.assertEqual(model.num_topics, model2.num_topics)
self.assertTrue(numpy.allclose(model.expElogbeta, model2.expElogbeta))
tstvec = []
self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
def testPersistenceCompressed(self):
fname = testfile() + '.gz'
model = ldamodel.LdaModel(self.corpus, num_topics=2)
model.save(fname)
model2 = ldamodel.LdaModel.load(fname, mmap=None)
self.assertEqual(model.num_topics, model2.num_topics)
self.assertTrue(numpy.allclose(model.expElogbeta, model2.expElogbeta))
tstvec = []
self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
def testLargeMmap(self):
fname = testfile()
model = ldamodel.LdaModel(self.corpus, num_topics=2)
# simulate storing large arrays separately
model.save(testfile(), sep_limit=0)
# test loading the large model arrays with mmap
model2 = ldamodel.LdaModel.load(testfile(), mmap='r')
self.assertEqual(model.num_topics, model2.num_topics)
self.assertTrue(isinstance(model2.expElogbeta, numpy.memmap))
self.assertTrue(numpy.allclose(model.expElogbeta, model2.expElogbeta))
tstvec = []
self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
def testLargeMmapCompressed(self):
fname = testfile() + '.gz'
model = ldamodel.LdaModel(self.corpus, num_topics=2)
# simulate storing large arrays separately
model.save(fname, sep_limit=0)
# test loading the large model arrays with mmap
self.assertRaises(IOError, ldamodel.LdaModel.load, fname, mmap='r')
#endclass TestLdaModel
class TestLdaMulticore(unittest.TestCase):
def setUp(self):
self.corpus = mmcorpus.MmCorpus(datapath('testcorpus.mm'))
def testTransform(self):
passed = False
# sometimes, LDA training gets stuck at a local minimum
# in that case try re-training the model from scratch, hoping for a
# better random initialization
for i in range(5): # restart at most 5 times
# create the transformation model
model = ldamulticore.LdaMulticore(id2word=dictionary, num_topics=2, passes=100)
model.update(corpus)
# transform one document
doc = list(corpus)[0]
transformed = model[doc]
vec = matutils.sparse2full(transformed, 2) # convert to dense vector, for easier equality tests
expected = [0.13, 0.87]
passed = numpy.allclose(sorted(vec), sorted(expected), atol=1e-2) # must contain the same values, up to re-ordering
if passed:
break
logging.warning("LDA failed to converge on attempt %i (got %s, expected %s)" %
(i, sorted(vec), sorted(expected)))
self.assertTrue(passed)
def testTopicSeeding(self):
passed = False
for topic in range(2):
# try seeding it both ways round, check you get the same
# topics out but with which way round they are depending
# on the way round they're seeded
for i in range(5): # restart at most 5 times
eta = numpy.ones((2, len(dictionary))) * 0.5
system = dictionary.token2id[u'system']
# aggressively seed the word 'system', in one of the
# two topics, 10 times higher than the other words
eta[topic, system] *= 10
model = ldamulticore.LdaMulticore(id2word=dictionary, num_topics=2, passes=200, eta=eta)
model.update(corpus)
topics = [dict((word, p) for p, word in model.show_topic(j)) for j in range(2)]
# check that the word system in the topic we seeded, got a high weight,
# and the word 'trees' (the main word in the other topic) a low weight --
# and vice versa for the other topic (which we didn't seed with 'system')
result = [[topics[topic].get(u'system',0), topics[topic].get(u'trees',0)],
[topics[1-topic].get(u'system',0), topics[1-topic].get(u'trees',0)]]
expected = [[0.385, 0.022],
[0.025, 0.157]]
passed = numpy.allclose(result, expected, atol=1e-2)
if passed:
break
logging.warning("LDA failed to converge on attempt %i (got %s, expected %s)" %
(i, result, expected))
self.assertTrue(passed)
def testPersistence(self):
fname = testfile()
model = ldamulticore.LdaMulticore(self.corpus, num_topics=2)
model.save(fname)
model2 = ldamulticore.LdaMulticore.load(fname)
self.assertEqual(model.num_topics, model2.num_topics)
self.assertTrue(numpy.allclose(model.expElogbeta, model2.expElogbeta))
tstvec = []
self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
def testPersistenceCompressed(self):
fname = testfile() + '.gz'
model = ldamulticore.LdaMulticore(self.corpus, num_topics=2)
model.save(fname)
model2 = ldamulticore.LdaMulticore.load(fname, mmap=None)
self.assertEqual(model.num_topics, model2.num_topics)
self.assertTrue(numpy.allclose(model.expElogbeta, model2.expElogbeta))
tstvec = []
self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
def testLargeMmap(self):
fname = testfile()
model = ldamulticore.LdaMulticore(self.corpus, num_topics=2)
# simulate storing large arrays separately
model.save(testfile(), sep_limit=0)
# test loading the large model arrays with mmap
model2 = ldamulticore.LdaModel.load(testfile(), mmap='r')
self.assertEqual(model.num_topics, model2.num_topics)
self.assertTrue(isinstance(model2.expElogbeta, numpy.memmap))
self.assertTrue(numpy.allclose(model.expElogbeta, model2.expElogbeta))
tstvec = []
self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
def testLargeMmapCompressed(self):
fname = testfile() + '.gz'
model = ldamulticore.LdaMulticore(self.corpus, num_topics=2)
# simulate storing large arrays separately
model.save(fname, sep_limit=0)
# test loading the large model arrays with mmap
self.assertRaises(IOError, ldamulticore.LdaModel.load, fname, mmap='r')
#endclass TestLdaMulticore
class TestLdaMallet(unittest.TestCase):
def setUp(self):
self.corpus = mmcorpus.MmCorpus(datapath('testcorpus.mm'))
mallet_home = os.environ.get('MALLET_HOME', None)
self.mallet_path = os.path.join(mallet_home, 'bin', 'mallet') if mallet_home else None
def testTransform(self):
if not self.mallet_path:
return
passed = False
for i in range(5): # restart at most 5 times
# create the transformation model
model = ldamallet.LdaMallet(self.mallet_path, corpus, id2word=dictionary, num_topics=2, iterations=200)
# transform one document
doc = list(corpus)[0]
transformed = model[doc]
vec = matutils.sparse2full(transformed, 2) # convert to dense vector, for easier equality tests
expected = [0.49, 0.51]
passed = numpy.allclose(sorted(vec), sorted(expected), atol=1e-2) # must contain the same values, up to re-ordering
if passed:
break
logging.warning("LDA failed to converge on attempt %i (got %s, expected %s)" %
(i, sorted(vec), sorted(expected)))
self.assertTrue(passed)
def testPersistence(self):
if not self.mallet_path:
return
fname = testfile()
model = ldamallet.LdaMallet(self.mallet_path, self.corpus, num_topics=2, iterations=100)
model.save(fname)
model2 = ldamallet.LdaMallet.load(fname)
self.assertEqual(model.num_topics, model2.num_topics)
self.assertTrue(numpy.allclose(model.wordtopics, model2.wordtopics))
tstvec = []
self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
def testPersistenceCompressed(self):
if not self.mallet_path:
return
fname = testfile() + '.gz'
model = ldamallet.LdaMallet(self.mallet_path, self.corpus, num_topics=2, iterations=100)
model.save(fname)
model2 = ldamallet.LdaMallet.load(fname, mmap=None)
self.assertEqual(model.num_topics, model2.num_topics)
self.assertTrue(numpy.allclose(model.wordtopics, model2.wordtopics))
tstvec = []
self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
def testLargeMmap(self):
if not self.mallet_path:
return
fname = testfile()
model = ldamallet.LdaMallet(self.mallet_path, self.corpus, num_topics=2, iterations=100)
# simulate storing large arrays separately
model.save(testfile(), sep_limit=0)
# test loading the large model arrays with mmap
model2 = ldamodel.LdaModel.load(testfile(), mmap='r')
self.assertEqual(model.num_topics, model2.num_topics)
self.assertTrue(isinstance(model2.wordtopics, numpy.memmap))
self.assertTrue(numpy.allclose(model.wordtopics, model2.wordtopics))
tstvec = []
self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
def testLargeMmapCompressed(self):
if not self.mallet_path:
return
fname = testfile() + '.gz'
model = ldamallet.LdaMallet(self.mallet_path, self.corpus, num_topics=2, iterations=100)
# simulate storing large arrays separately
model.save(fname, sep_limit=0)
# test loading the large model arrays with mmap
self.assertRaises(IOError, ldamodel.LdaModel.load, fname, mmap='r')
#endclass TestLdaMallet
class TestTfidfModel(unittest.TestCase):
def setUp(self):
self.corpus = mmcorpus.MmCorpus(datapath('testcorpus.mm'))
def testTransform(self):
# create the transformation model
model = tfidfmodel.TfidfModel(self.corpus, normalize=True)
# transform one document
doc = list(self.corpus)[0]
transformed = model[doc]
expected = [(0, 0.57735026918962573), (1, 0.57735026918962573), (2, 0.57735026918962573)]
self.assertTrue(numpy.allclose(transformed, expected))
def testInit(self):
# create the transformation model by analyzing a corpus
# uses the global `corpus`!
model1 = tfidfmodel.TfidfModel(corpus)
# make sure the dfs<->idfs transformation works
self.assertEqual(model1.dfs, dictionary.dfs)
self.assertEqual(model1.idfs, tfidfmodel.precompute_idfs(model1.wglobal, dictionary.dfs, len(corpus)))
# create the transformation model by directly supplying a term->docfreq
# mapping from the global var `dictionary`.
model2 = tfidfmodel.TfidfModel(dictionary=dictionary)
self.assertEqual(model1.idfs, model2.idfs)
def testPersistence(self):
fname = testfile()
model = tfidfmodel.TfidfModel(self.corpus, normalize=True)
model.save(fname)
model2 = tfidfmodel.TfidfModel.load(fname)
self.assertTrue(model.idfs == model2.idfs)
tstvec = []
self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
def testPersistenceCompressed(self):
fname = testfile() + '.gz'
model = tfidfmodel.TfidfModel(self.corpus, normalize=True)
model.save(fname)
model2 = tfidfmodel.TfidfModel.load(fname, mmap=None)
self.assertTrue(model.idfs == model2.idfs)
tstvec = []
self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
#endclass TestTfidfModel
class TestLogEntropyModel(unittest.TestCase):
def setUp(self):
self.corpus_small = mmcorpus.MmCorpus(datapath('test_corpus_small.mm'))
self.corpus_ok = mmcorpus.MmCorpus(datapath('test_corpus_ok.mm'))
def testTransform(self):
# create the transformation model
model = logentropy_model.LogEntropyModel(self.corpus_ok, normalize=False)
# transform one document
doc = list(self.corpus_ok)[0]
transformed = model[doc]
expected = [(0, 0.3748900964125389),
(1, 0.30730215324230725),
(3, 1.20941755462856)]
self.assertTrue(numpy.allclose(transformed, expected))
def testPersistence(self):
fname = testfile()
model = logentropy_model.LogEntropyModel(self.corpus_ok, normalize=True)
model.save(fname)
model2 = logentropy_model.LogEntropyModel.load(fname)
self.assertTrue(model.entr == model2.entr)
tstvec = []
self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec]))
def testPersistenceCompressed(self):
fname = testfile() + '.gz'
model = logentropy_model.LogEntropyModel(self.corpus_ok, normalize=True)
model.save(fname)
model2 = logentropy_model.LogEntropyModel.load(fname, mmap=None)
self.assertTrue(model.entr == model2.entr)
tstvec = []
self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec]))
#endclass TestLogEntropyModel
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
unittest.main()
| gpl-3.0 | -7,871,270,214,023,811,000 | 42.006515 | 140 | 0.642998 | false |
sde1000/quicktill | quicktill/tillweb/urls.py | 1 | 4931 | from django.urls import include, path, re_path
from quicktill.tillweb.views import *
from .stocktake import *
tillurls = [
path('', pubroot, name="tillweb-pubroot"),
path('session/', sessionfinder, name="tillweb-sessions"),
path('session/<int:sessionid>/', include([
path('', session, name="tillweb-session"),
path('spreadsheet.ods', session_spreadsheet,
name="tillweb-session-spreadsheet"),
path('takings-by-dept.html', session_takings_by_dept,
name="tillweb-session-takings-by-dept"),
path('takings-by-user.html', session_takings_by_user,
name="tillweb-session-takings-by-user"),
path('discounts.html', session_discounts,
name="tillweb-session-discounts"),
path('stock-sold.html', session_stock_sold,
name="tillweb-session-stock-sold"),
path('transactions.html', session_transactions,
name="tillweb-session-transactions"),
path('sales-pie-chart.svg', session_sales_pie_chart,
name="tillweb-session-sales-pie-chart"),
path('users-pie-chart.svg', session_users_pie_chart,
name="tillweb-session-users-pie-chart"),
path('dept<int:dept>/', sessiondept,
name="tillweb-session-department"),
])),
path('transaction/deferred/', transactions_deferred,
name="tillweb-deferred-transactions"),
path('transaction/<int:transid>/', transaction, name="tillweb-transaction"),
path('transline/<int:translineid>/', transline, name="tillweb-transline"),
path('supplier/', supplierlist, name="tillweb-suppliers"),
path('supplier/<int:supplierid>/', supplier, name="tillweb-supplier"),
path('new/supplier/', create_supplier, name="tillweb-create-supplier"),
path('delivery/', deliverylist, name="tillweb-deliveries"),
path('delivery/<int:deliveryid>/', delivery, name="tillweb-delivery"),
path('new/delivery/', create_delivery, name="tillweb-create-delivery"),
path('stocktake/', stocktakelist, name="tillweb-stocktakes"),
path('stocktake/<int:stocktake_id>/', stocktake, name="tillweb-stocktake"),
path('stocktype/', stocktypesearch, name="tillweb-stocktype-search"),
path('stocktype/<int:stocktype_id>/', stocktype, name="tillweb-stocktype"),
path('new/stocktype/', create_stocktype, name="tillweb-create-stocktype"),
path('stocktype/search.json', stocktype_search_json,
name="tillweb-stocktype-search-json"),
path('stocktype/search-with-stockunits.json', stocktype_search_json,
name="tillweb-stocktype-search-stockunits-json",
kwargs={'include_stockunits': True}),
path('stocktype/info.json', stocktype_info_json,
name="tillweb-stocktype-info-json"),
path('stock/', stocksearch, name="tillweb-stocksearch"),
path('stock/<int:stockid>/', stock, name="tillweb-stock"),
path('unit/', units, name="tillweb-units"),
path('unit/<int:unit_id>/', unit, name="tillweb-unit"),
path('new/unit/', create_unit, name="tillweb-create-unit"),
path('stockunit/', stockunits, name="tillweb-stockunits"),
path('stockunit/<int:stockunit_id>/', stockunit, name="tillweb-stockunit"),
path('new/stockunit/', create_stockunit, name="tillweb-create-stockunit"),
path('stockline/', stocklinelist, name="tillweb-stocklines"),
path('stockline/<int:stocklineid>/', stockline, name="tillweb-stockline"),
path('plu/', plulist, name="tillweb-plus"),
path('plu/<int:pluid>/', plu, name="tillweb-plu"),
path('new/plu/', create_plu, name="tillweb-create-plu"),
path('location/', locationlist, name="tillweb-locations"),
re_path(r'^location/(?P<location>[\w\- ]+)/$', location,
name="tillweb-location"),
path('department/', departmentlist, name="tillweb-departments"),
path('department/<int:departmentid>/', department,
name="tillweb-department"),
path('department/<int:departmentid>/spreadsheet.ods', department,
{'as_spreadsheet': True}, name="tillweb-department-sheet"),
path('stockcheck/', stockcheck, name="tillweb-stockcheck"),
path('user/', userlist, name="tillweb-till-users"),
path('user/<int:userid>/', userdetail, name="tillweb-till-user"),
path('group/', grouplist, name="tillweb-till-groups"),
re_path(r'^group/(?P<groupid>[\w\- ]+)/$', group,
name="tillweb-till-group"),
path('new/group/', create_group, name="tillweb-create-till-group"),
path('logs/', logsindex, name="tillweb-logs"),
path('logs/<int:logid>', logdetail, name="tillweb-logentry"),
path('config/', configindex, name="tillweb-config-index"),
path('config/<key>/', configitem, name="tillweb-config-item"),
path('reports/', reportindex, name="tillweb-reports"),
]
urls = [
# Index page
path('', publist, name="tillweb-publist"),
re_path(r'^(?P<pubname>[\w\-]+)/', include(tillurls)),
]
| gpl-3.0 | -821,744,168,511,656,600 | 43.827273 | 80 | 0.654634 | false |
felipead/breakout | source/breakout/game/GameController.py | 1 | 4147 | from OpenGL.GL import *
from OpenGL.GLU import *
import pygame
from pygame.constants import *
from breakout.game.GameEngine import GameEngine
_FRAMES_PER_SECOND = 60
_MOUSE_VISIBLE = True
_CANVAS_WIDTH = 250
_CANVAS_HEIGHT = 300
_DEFAULT_SCREEN_WIDTH = 500
_DEFAULT_SCREEN_HEIGHT = 600
class GameController(object):
def __init__(self):
self.__engine = GameEngine(_CANVAS_WIDTH, _CANVAS_HEIGHT)
self.__screenWidth = _DEFAULT_SCREEN_WIDTH
self.__screenHeight = _DEFAULT_SCREEN_HEIGHT
def run(self):
self.__initialize()
self.__gameLoop()
def __initialize(self):
pygame.init()
pygame.mouse.set_visible(_MOUSE_VISIBLE)
pygame.display.set_mode((self.__screenWidth, self.__screenHeight), OPENGL | DOUBLEBUF)
glClearColor(0.0, 0.0, 0.0, 1.0)
glShadeModel(GL_FLAT)
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glBlendEquation(GL_FUNC_ADD)
self.__handleScreenResizeEvent(self.__screenWidth, self.__screenHeight)
self.__engine.initialize()
def __gameLoop(self):
clock = pygame.time.Clock()
ticks = 0
while True:
for event in pygame.event.get():
self.__handleInputEvent(event)
milliseconds = clock.tick(_FRAMES_PER_SECOND)
ticks += 1
self.__engine.update(milliseconds, ticks)
self.__engine.display(milliseconds, ticks, self.__screenWidth, self.__screenHeight, clock.get_fps())
pygame.display.flip() # swap buffers
def __handleInputEvent(self, event):
if event.type == QUIT:
exit()
elif event.type == VIDEORESIZE:
self.__handleScreenResizeEvent(event.w, event.h)
elif event.type == MOUSEMOTION:
self.__handleMouseMoveEvent(event.pos, event.rel, event.buttons)
elif event.type == MOUSEBUTTONUP:
self.__handleMouseButtonUpEvent(event.button, event.pos)
elif event.type == MOUSEBUTTONDOWN:
self.__handleMouseButtonDownEvent(event.button, event.pos)
elif event.type == KEYUP:
self.__handleKeyUpEvent(event.key, event.mod)
elif event.type == KEYDOWN:
self.__handleKeyDownEvent(event.key, event.mod, event.unicode)
def __handleScreenResizeEvent(self, width, height):
self.__screenWidth = width
self.__screenHeight = height
glViewport(0, 0, width, height)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluOrtho2D(self.__engine.canvas.left, self.__engine.canvas.right,
self.__engine.canvas.bottom, self.__engine.canvas.top)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
def __handleMouseButtonUpEvent(self, button, coordinates):
mappedCoordinates = self.__mapScreenCoordinatesToCanvas(coordinates)
self.__engine.handleMouseButtonUpEvent(button, mappedCoordinates)
def __handleMouseButtonDownEvent(self, button, coordinates):
mappedCoordinates = self.__mapScreenCoordinatesToCanvas(coordinates)
self.__engine.handleMouseButtonDownEvent(button, mappedCoordinates)
def __handleMouseMoveEvent(self, absolute_coordinates, relative_coordinates, buttons):
mapped_absolute_coordinates = self.__mapScreenCoordinatesToCanvas(absolute_coordinates)
self.__engine.handleMouseMoveEvent(mapped_absolute_coordinates, relative_coordinates, buttons)
def __handleKeyUpEvent(self, key, modifiers):
self.__engine.handleKeyUpEvent(key, modifiers)
def __handleKeyDownEvent(self, key, modifiers, char):
self.__engine.handleKeyDownEvent(key, modifiers, char)
def __mapScreenCoordinatesToCanvas(self, coordinates):
horizontalCanvasToScreenRatio = self.__engine.canvas.width / float(self.__screenWidth)
verticalCanvasToScreenRatio = self.__engine.canvas.height / float(self.__screenHeight)
(x, y) = coordinates
x *= horizontalCanvasToScreenRatio
y *= verticalCanvasToScreenRatio
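        # Pygame reports mouse positions with the origin at the top-left and y
        # growing downwards, while the OpenGL canvas set up in
        # __handleScreenResizeEvent has its origin at the bottom-left, so the
        # vertical coordinate is flipped on the next line.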
y = self.__engine.canvas.top - y
return x, y
| gpl-2.0 | 2,273,906,769,018,133,000 | 35.699115 | 112 | 0.661201 | false |
pampi/pad | backend.py | 1 | 26210 | #Copyright (C) 2014 Adrian "APi" Pielech
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
from PySide.QtNetwork import *
from PySide.QtCore import *
from PySide.QtGui import *
from subprocess import Popen, PIPE
class EPISODE(QListWidgetItem):
def __init__(self, parent=None, title='<Title>', value='<Value>'):
super(EPISODE, self).__init__(parent)
self.title = title
self.value = value
def setText(self, title):
self.title = title
super(EPISODE, self).setText(self.title)
def setValue(self, value):
self.value = value
def getValue(self):
return self.value
class DownloadEpisodeThread(QThread):
saveTo = None
who_am_i = 0
def __init__(self, parent, threadId):
super(DownloadEpisodeThread, self).__init__()
self.parentObject = parent
self.who_am_i = threadId
def run(self):
qNetMgr = QNetworkAccessManager()
downloadLoop = QEventLoop()
loopArg = True
item = None
p = self.parentObject
while(loopArg is True):
p.downloadMutex.tryLock(-1)
if(p.lstwToDownload.count() > 0):
item = p.lstwToDownload.takeItem(0)
p.appendLogs.emit('Zaczynam pobierać: ' + item.text())
else:
loopArg = False
item = None
if p.downloadedEps == p.mustDownloadEps:
p.btnDownload.setEnabled(True)
p.freezeSettings(True)
p.btnDownloadEpisodesList.setEnabled(True)
p.downloadMutex.unlock()
if not(item is None):
qReply = qNetMgr.get(QNetworkRequest(QUrl(item.getValue())))
if item.getValue().count('https://') > 0:
qReply.sslErrors.connect(qReply.ignoreSslErrors)
qReply.metaDataChanged.connect(downloadLoop.quit)
downloadLoop.exec_()
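                # QNetworkAccessManager does not follow HTTP redirects by
                # itself here, so a single 302 response is re-requested
                # manually below.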
if qReply.attribute(QNetworkRequest.HttpStatusCodeAttribute) == 302:
redirURL = qReply.attribute(QNetworkRequest.RedirectionTargetAttribute)
qReply = qNetMgr.get(QNetworkRequest(QUrl(redirURL)))
qReply.metaDataChanged.connect(downloadLoop.quit)
downloadLoop.exec_()
if qReply.attribute(QNetworkRequest.HttpStatusCodeAttribute) == 200:
p.lblThreadArray[self.who_am_i].setText(item.text())
p.pbThreadArray[self.who_am_i].setEnabled(True)
self.saveTo = QFile(item.text())
if not self.saveTo.open(QIODevice.WriteOnly):
print('Nie moge otworzyc panie ;_;')
qReply.downloadProgress.connect(self.saveToFile)
qReply.finished.connect(downloadLoop.quit)
downloadLoop.exec_()
p.pbThreadArray[self.who_am_i].setEnabled(False)
self.saveTo.write(qReply.readAll())
self.saveTo.close()
p.downloadMutex.tryLock(-1)
p.downloadedEps = p.downloadedEps + 1
p.pbDownloaded.setValue(p.downloadedEps)
p.appendLogs.emit(item.text() + ' pobrano!')
if p.chkbConvert.isChecked() is True:
p.lstwToConvert.addItem(item)
p.sigConvert.emit()
p.downloadMutex.unlock()
else:
p.downloadMutex.tryLock(-1)
p.appendLogs.emit('Nie udało się pobrać ' + item.text() + '! Błąd: ' + str(qReply.attribute(QNetworkRequest.HttpStatusCodeAttribute)) + '.')
p.downloadedEps = p.downloadedEps + 1
p.pbDownloaded.setValue(p.downloadedEps)
p.downloadMutex.unlock()
def saveToFile(self, received, total):
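        # Slot connected to QNetworkReply.downloadProgress: grow the progress
        # bar's maximum once the total size is known, then track received bytes.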
if total != self.parentObject.pbThreadArray[self.who_am_i].maximum():
self.parentObject.pbThreadArray[self.who_am_i].setMaximum(total)
self.parentObject.pbThreadArray[self.who_am_i].setValue(received)
class Backend:
def writeLog(self, log_message):
self.logDest.setPlainText(log_message + "\n" + self.logDest.toPlainText())
def convertEpisode(self):
self.convertMutex.tryLock(-1)
self.downloadMutex.tryLock(-1)
workItem = self.lstwToConvert.takeItem(0)
self.downloadMutex.unlock()
output_file = workItem.text()[:len(workItem.text()) - 3] + self.cbOutputFormat.currentText()
file_info = Popen(['ffmpeg', '-i', workItem.text()], stderr=PIPE)
file_info.wait()
file_info = file_info.stderr.read(-1).decode('utf-8')
file_info = file_info[file_info.find('Duration:') + 10:]
file_info = file_info[:file_info.find(',')]
file_time_info = file_info.split(':')
file_time_info = file_time_info + file_time_info[2].split('.')
length = int(file_time_info[0]) * 3600 + int(file_time_info[1]) * 60 + int(file_time_info[3])
self.pbConverted.setMaximum(length)
self.pbConverted.setValue(0)
self.appendLogs.emit('Zaczynam konwertować: ' + workItem.text())
'''TO DO Start converting'''
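        # A minimal sketch of what the conversion step might look like
        # (illustrative only; the actual call was left as a TO DO above):
        #
        #     proc = Popen(['ffmpeg', '-y', '-i', workItem.text(), output_file],
        #                  stderr=PIPE)
        #     for raw in iter(proc.stderr.readline, b''):
        #         line = raw.decode('utf-8', 'ignore')
        #         # ffmpeg reports progress as "time=HH:MM:SS.xx" on stderr;
        #         # parsing it would allow self.pbConverted.setValue() to be
        #         # fed with the number of seconds converted so far.
        #     proc.wait()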
self.convertMutex.unlock()
def getEpisodesListFromWeb(self, linkToSeries, lblSeriesName, lstItems, log):
lstItems.clear()
self.logDest = log
if len(linkToSeries) > 15:
if linkToSeries.find('animeon.pl') >= 0:
lblSeriesName.setText(self.getAnimeOnList(linkToSeries, lstItems))
elif linkToSeries.find('anime-shinden.info') >= 0:
lblSeriesName.setText(self.getAShindenList(linkToSeries, lstItems))
else:
self.writeLog("Podano URL do nieobsługiwanego serwisu!")
else:
self.writeLog("Nieprawidłowy URL do serii!")
def getVideoListFromURL(self, get_from):
ret_val = [None]
basic_filename = get_from.text()
episode_page_url = get_from.getValue()
'''print(episode_page_url)'''
if episode_page_url.find('animeon.pl') > 0:
ret_val = self.extractLinksFromAnimeOn(episode_page_url, basic_filename)
elif (episode_page_url.find('anime-shinden.info') > 0) or (episode_page_url.find('shinden-anime.info') > 0):
episode_page_url = episode_page_url.replace('shinden-anime.info', 'anime-shinden.info')
ret_val = self.extractLinksFromAShinden(episode_page_url, basic_filename)
else:
self.writeLog('Coś poszło nie tak... Nie rozpoznano serwisu anime.\nCzy przypadkiem nie bawisz się w inżynierię odwrotną?')
return ret_val
def extractLinksFromAShinden(self, link, basename):
ret_val = [None]
self.writeLog('Pobieranie i parsowanie strony ' + basename + '...')
qNetMgr = QNetworkAccessManager()
qReply = qNetMgr.get(QNetworkRequest(QUrl(link)))
downloadLoop = QEventLoop()
qReply.finished.connect(downloadLoop.quit)
downloadLoop.exec_()
if qReply.error() == 0:
if qReply.size() < 1024:
self.writeLog('Serwer zwrócił niepoprawną zawartość...')
self.writeLog(str(qReply.readAll().data()))
else:
done = 0
data = str(qReply.readAll().data())
data = data[data.find('video_tabs'):]
data = data[:data.find('<script')]
fb_count = int(data.count('http://anime-shinden.info/player/hd.php') / 2)
sibnet_count = data.count('video.sibnet.ru')
daily_count = data.count('www.dailymotion.com/embed/video')
if daily_count == 0:
daily_count = int(data.count('www.dailymotion.com/swf/video/') / 2)
data_backup = data
'''#jwplayer - fb'''
if fb_count > 0:
done = 1
fb_table = [None]
for i in range(0, fb_count):
data = data[data.find('http://anime-shinden.info/player/hd.php') + 10:]
data = data[data.find('http://anime-shinden.info/player/hd.php'):]
data = data[data.find('link=') + 5:]
vid = data[:data.find('.mp4')]
vid = 'https://www.facebook.com/video/embed?video_id=' + vid
link_to_face = self.getEmbedFacebookVideoLink(vid)
if len(link_to_face) > 0:
ep = EPISODE()
if fb_count == 1:
ep.setText(basename + ".mp4")
ep.setValue(link_to_face)
fb_table.append(ep)
else:
ep.setText(basename + chr(97 + i) + ".mp4")
ep.setValue(link_to_face)
fb_table.append(ep)
else:
self.writeLog('Nie udało się wydobyć linku do fejsa...')
done = 0
if done == 1:
ret_val = fb_table
if (done == 0) and (sibnet_count > 0):
data = data_backup
done = 1
sib_table = [None]
for i in range(0, sibnet_count):
data = data[data.find('http://video.sibnet.ru/'):]
data = data[data.find('=') + 1:]
vid = data[:data.find('''"''')]
link_to_sib = self.getEmbedSibnetRUVideoLink(vid)
if len(link_to_sib) > 0:
ep = EPISODE()
                        if sibnet_count == 1:
ep.setText(basename + ".mp4")
ep.setValue(link_to_sib)
sib_table.append(ep)
else:
ep.setText(basename + chr(97 + i) + ".mp4")
ep.setValue(link_to_sib)
                            sib_table.append(ep)
else:
self.writeLog('Nie udało się wydobyć linku do Sibnetu...')
done = 0
if done == 1:
ret_val = sib_table
print('Sibnet :D')
if (done == 0) and (daily_count > 0):
print('Daily lol')
data = data_backup
data = data.replace('http://www.dailymotion.com/swf/video/', 'http://www.dailymotion.com/embed/video/')
done = 1
daily_table = [None]
for i in range(0, daily_count):
data = data[data.find('http://www.dailymotion.com/embed/video/'):]
daily_temple_link = data[:data.find('''"''')]
data = data[data.find('''"'''):]
link_to_daily = self.getEmbedDailyVideoLink(daily_temple_link)
if len(link_to_daily) > 0:
ep = EPISODE()
if daily_count == 1:
ep.setText(basename + ".mp4")
ep.setValue(link_to_daily)
daily_table.append(ep)
else:
ep.setText(basename + chr(97 + i) + ".mp4")
ep.setValue(link_to_daily)
daily_table.append(ep)
else:
self.writeLog('Nie udało się wydobyć linku do DailyMotion...')
done = 0
if done == 1:
ret_val = daily_table
if done == 0:
self.writeLog('Wybacz, nie udało mi się znaleźć linku do żadnego działającego serwisu :(')
return ret_val
def extractLinksFromAnimeOn(self, link, basename):
ret_val = [None]
self.writeLog('Pobieranie i parsowanie strony ' + basename + '...')
qNetMgr = QNetworkAccessManager()
qReply = qNetMgr.get(QNetworkRequest(QUrl(link)))
downloadLoop = QEventLoop()
qReply.finished.connect(downloadLoop.quit)
downloadLoop.exec_()
if qReply.error() == 0:
if qReply.size() < 1024:
self.writeLog('Serwer zwrócił niepoprawną zawartość...')
else:
data = str(qReply.readAll().data())
data = data[data.find('float-left player-container'):]
data = data[:data.find('float-left episode-nav')]
if data.count('<iframe src=') > 0:
counter = data.count('<iframe src=')
for i in range(0, data.count('<iframe src=')):
data = data[data.find('<iframe src='):]
data = data[data.find("""'""") + 1:]
the_link = data[:data.find("\\")]
data = data[data.find('</iframe>'):]
if the_link.find('facebook.com') > 0:
link_to_face = self.getEmbedFacebookVideoLink(the_link)
if len(link_to_face) > 0:
'''link_to_face = download'''
ep = EPISODE()
if counter == 1:
ep.setText(basename + ".mp4")
ep.setValue(link_to_face)
ret_val.append(ep)
else:
ep.setText(basename + chr(97 + i) + ".mp4")
ep.setValue(link_to_face)
ret_val.append(ep)
else:
self.writeLog('Nie udało się wydobyć linku do fejsa...')
elif the_link.find('vk.com') > 0:
link_to_vk = self.getEmbedVKVideoLink(the_link)
if len(link_to_vk) > 0:
ep = EPISODE()
if counter == 1:
ep.setText(basename + ".mp4")
ep.setValue(link_to_vk)
ret_val.append(ep)
else:
ep.setText(basename + chr(97 + i) + ".mp4")
ep.setValue(link_to_vk)
ret_val.append(ep)
else:
self.writeLog('Nie udało się wydobyć linku do VK...')
else:
                            self.writeLog("I don't know this player...")
elif data.count('<embed src=') > 0:
counter = data.count('<embed src=')
for i in range(0, data.count('<embed src=')):
data = data[data.find('<embed src='):]
data = data[data.find("""'""") + 1:]
the_link = data[:data.find("\\")]
data = data[data.find('</embed>'):]
if the_link.find('video.sibnet.ru') > 0:
the_link = the_link[the_link.find('=') + 1:]
link_to_sibnet = self.getEmbedSibnetRUVideoLink(the_link)
if len(link_to_sibnet) > 0:
ep = EPISODE()
if counter == 1:
ep.setText(basename + ".mp4")
ep.setValue(link_to_sibnet)
ret_val.append(ep)
else:
ep.setText(basename + chr(97 + i) + ".mp4")
ep.setValue(link_to_sibnet)
ret_val.append(ep)
else:
self.writeLog('Nie udało się wydobyć linku do Sibnetu...')
else:
                            self.writeLog("I don't know this player...")
elif data.count('jwplayer(') > 0:
counter = data.count('jwplayer(')
for i in range(0, counter):
data = data[data.find('jwplayer('):]
data = data[data.find('http://'):]
jw_link = data[:data.find("""'""") - 1]
qReply = qNetMgr.get(QNetworkRequest(QUrl(jw_link)))
qReply.metaDataChanged.connect(downloadLoop.quit)
downloadLoop.exec_()
if not ((qReply.attribute(QNetworkRequest.HttpStatusCodeAttribute) == 200) or (qReply.attribute(QNetworkRequest.HttpStatusCodeAttribute) == 302)):
jw_link = ''
if len(jw_link) > 0:
ep = EPISODE()
if counter == 1:
ep.setText(basename + ".mp4")
ep.setValue(jw_link)
ret_val.append(ep)
else:
ep.setText(basename + chr(97 + i) + ".mp4")
ep.setValue(jw_link)
ret_val.append(ep)
else:
self.writeLog('No player found.')
return ret_val
def getEmbedDailyVideoLink(self, url):
ret_val = ''
if url.count('/swf/') > 0:
url = url.replace('/swf/', '/embed/')
qNetMgr = QNetworkAccessManager()
qReply = qNetMgr.get(QNetworkRequest(QUrl(url)))
downloadLoop = QEventLoop()
qReply.finished.connect(downloadLoop.quit)
downloadLoop.exec_()
if qReply.error() == 0:
print((qReply.size()))
data = qReply.readAll().data().decode('UTF-8')
if data.count('''"stream_h264_hd_url"''') > 0:
data = data[data.find('''"stream_h264_hd_url"'''):]
data = data[data.find('http:'):]
data = data[:data.find('''"''')]
data = data.replace("\\", '')
qReply = qNetMgr.get(QNetworkRequest(QUrl(data)))
qReply.metaDataChanged.connect(downloadLoop.quit)
downloadLoop.exec_()
if qReply.attribute(QNetworkRequest.HttpStatusCodeAttribute) == 302:
'''302 Found'''
ret_val = data
return ret_val
def getEmbedSibnetRUVideoLink(self, vid):
ret_val = ''
url = 'http://video.sibnet.ru/shell_config_xml.php?videoid=' + vid + '&type=video.sibnet.ru'
qNetMgr = QNetworkAccessManager()
qReply = qNetMgr.get(QNetworkRequest(QUrl(url)))
downloadLoop = QEventLoop()
qReply.finished.connect(downloadLoop.quit)
downloadLoop.exec_()
if qReply.error() == 0:
data = qReply.readAll().data().decode('UTF-8')
data = data[data.find('<file>') + 6:data.find('</file>')]
qReply = qNetMgr.get(QNetworkRequest(QUrl(data)))
qReply.metaDataChanged.connect(downloadLoop.quit)
downloadLoop.exec_()
if qReply.attribute(QNetworkRequest.HttpStatusCodeAttribute) == 302:
'''302 Found'''
ret_val = data
return ret_val
def getEmbedVKVideoLink(self, url):
ret_val = ''
qNetMgr = QNetworkAccessManager()
qReply = qNetMgr.get(QNetworkRequest(QUrl(url)))
downloadLoop = QEventLoop()
qReply.finished.connect(downloadLoop.quit)
downloadLoop.exec_()
if qReply.error() == 0:
data = qReply.readAll().data().decode('windows-1251')
data = data[data.find('url720=') + 7:]
data = data[:data.find('&')]
qReply = qNetMgr.get(QNetworkRequest(QUrl(data)))
qReply.metaDataChanged.connect(downloadLoop.quit)
downloadLoop.exec_()
if qReply.attribute(QNetworkRequest.HttpStatusCodeAttribute) == 200:
'''200 OK'''
ret_val = data
return ret_val
def getEmbedFacebookVideoLink(self, url):
ret_val = ''
qNetMgr = QNetworkAccessManager()
qReply = qNetMgr.get(QNetworkRequest(QUrl(url)))
qReply.sslErrors.connect(qReply.ignoreSslErrors)
downloadLoop = QEventLoop()
qReply.finished.connect(downloadLoop.quit)
downloadLoop.exec_()
if qReply.error() == 0:
data = qReply.readAll().data().decode('UTF-8')
if data.count('hd_src') > 0:
data = data[data.find('hd_src'):]
data = data[data.find('https'):]
data = data[:data.find('u002522') - 1]
data = data.replace("\\", "/")
data = data.replace("/u00255C", "").replace("/u00252F", "/").replace("/u00253F", "?").replace("/u00253D", "=").replace("/u002526", "&").replace("/u00253A",":")
qReply = qNetMgr.get(QNetworkRequest(QUrl(data)))
qReply.sslErrors.connect(qReply.ignoreSslErrors)
qReply.metaDataChanged.connect(downloadLoop.quit)
downloadLoop.exec_()
if qReply.attribute(QNetworkRequest.HttpStatusCodeAttribute) == 200:
'''200 OK'''
ret_val = data
return ret_val
def getAShindenList(self, url, items):
series_name = "-"
self.writeLog('Trwa pobieranie listy odcinków serii(A-Shinden)...')
qNetMgr = QNetworkAccessManager()
qReply = qNetMgr.get(QNetworkRequest(QUrl(url)))
loop = QEventLoop()
qReply.finished.connect(loop.quit)
loop.exec_()
if qReply.error() == 0:
if qReply.size() < 1024:
self.writeLog('Pobieranie danych o liście odcników nie powiodło się!')
else:
self.writeLog('Pobrano dane. Trwa parsowanie danych...')
data = str(qReply.readAll().data())
series_name = data[data.find('base fullstory'):]
series_name = series_name[:series_name.find('</a>')]
series_name = series_name[series_name.find('>', series_name.find('<a href=') + 7) + 1:]
series_name = series_name[:series_name.find('(') - 1]
self.writeLog('Pobierana seria: ' + series_name)
'''Extract episode list'''
'''Shrink data'''
data = data[data.find('daj online'):]
data = data[:data.find('</table>')]
data = data[data.find('<a href='):]
data = data[:data.find('</td>')]
i = data.find('<a href=')
while i >= 0:
ep = EPISODE()
ep.setValue(data[i + 9:data.find("\"", i + 9)])
data = data[data.find('>') + 1:]
ep.setText(data[:data.find('</a>')])
if data.find('<a href') >= 0:
data = data[data.find('<a href'):]
i = data.find('<a href')
if (ep.text().lower().find('odcinek') >= 0) or (ep.text().lower().find('ova') >= 0) or (ep.text().lower().find('odc') >= 0):
items.addItem(ep)
self.writeLog('Lista odcinków pobrana.')
else:
self.writeLog('Błąd połączenia. Pobieranie danych o liście odcników nie powiodło się!')
return series_name
def getAnimeOnList(self, url, items):
series_name = "-"
self.writeLog('Trwa pobieranie listy odcinków serii(AnimeOn)...')
qNetMgr = QNetworkAccessManager()
qReply = qNetMgr.get(QNetworkRequest(QUrl(url)))
loop = QEventLoop()
qReply.finished.connect(loop.quit)
loop.exec_()
if qReply.error() == 0:
if qReply.size() < 1024:
self.writeLog('Pobieranie danych o liście odcników nie powiodło się!')
else:
self.writeLog('Pobrano dane. Trwa parsowanie danych...')
data = str(qReply.readAll().data())
series_name = data[data.find('<title>') + 7: data.find(' Anime Online PL')]
data = data[data.find('episode-table') + 13:]
data = data[:data.find('</table')]
i = data.find('http://')
while i >= 0:
ep = EPISODE()
data = data[data.find('http://'):]
ep.setValue(data[:data.find('\\')])
ep.setText(data[data.find('odcinek'):data.find('</a>')])
items.addItem(ep)
data = data[data.find('</a>'):]
i = data.find('http://')
else:
self.writeLog('Błąd połączenia. Pobieranie danych o liście odcników nie powiodło się!')
return series_name
| gpl-3.0 | -7,560,106,690,500,875,000 | 44.687063 | 175 | 0.488578 | false |
N-Parsons/exercism-python | exercises/palindrome-products/palindrome_products_test.py | 1 | 3862 | """
Notes regarding the implementation of smallest_palindrome and
largest_palindrome:
Both functions must take two keyword arguments:
max_factor -- int
min_factor -- int, default 0
Their return value must be a tuple (value, factors) where value is the
palindrome itself, and factors is an iterable containing both factors of the
palindrome in arbitrary order.
"""
import unittest
from palindrome_products import smallest_palindrome, largest_palindrome
# Tests adapted from `problem-specifications//canonical-data.json` @ v1.2.0
class PalindromeProductsTest(unittest.TestCase):
def test_smallest_palindrome_from_single_digit_factors(self):
value, factors = smallest_palindrome(min_factor=1, max_factor=9)
self.assertEqual(value, 1)
self.assertFactorsEqual(factors, {(1, 1)})
def test_largest_palindrome_from_single_digit_factors(self):
value, factors = largest_palindrome(min_factor=1, max_factor=9)
self.assertEqual(value, 9)
self.assertFactorsEqual(factors, {(1, 9), (3, 3)})
def test_smallest_palindrome_from_double_digit_factors(self):
value, factors = smallest_palindrome(min_factor=10, max_factor=99)
self.assertEqual(value, 121)
self.assertFactorsEqual(factors, {(11, 11)})
def test_largest_palindrome_from_double_digit_factors(self):
value, factors = largest_palindrome(min_factor=10, max_factor=99)
self.assertEqual(value, 9009)
self.assertFactorsEqual(factors, {(91, 99)})
def test_smallest_palindrome_from_triple_digit_factors(self):
value, factors = smallest_palindrome(min_factor=100, max_factor=999)
self.assertEqual(value, 10201)
self.assertFactorsEqual(factors, {(101, 101)})
def test_largest_palindrome_from_triple_digit_factors(self):
value, factors = largest_palindrome(min_factor=100, max_factor=999)
self.assertEqual(value, 906609)
self.assertFactorsEqual(factors, {(913, 993)})
def test_smallest_palindrome_from_four_digit_factors(self):
value, factors = smallest_palindrome(min_factor=1000, max_factor=9999)
self.assertEqual(value, 1002001)
self.assertFactorsEqual(factors, {(1001, 1001)})
def test_largest_palindrome_from_four_digit_factors(self):
value, factors = largest_palindrome(min_factor=1000, max_factor=9999)
self.assertEqual(value, 99000099)
self.assertFactorsEqual(factors, {(9901, 9999)})
def test_empty_for_smallest_palindrome_if_none_in_range(self):
value, factors = smallest_palindrome(min_factor=1002, max_factor=1003)
self.assertIsNone(value)
self.assertFactorsEqual(factors, [])
def test_empty_for_largest_palindrome_if_none_in_range(self):
value, factors = largest_palindrome(min_factor=15, max_factor=15)
self.assertIsNone(value)
self.assertFactorsEqual(factors, [])
def test_error_for_smallest_if_min_is_more_than_max(self):
with self.assertRaisesWithMessage(ValueError):
value, factors = smallest_palindrome(min_factor=10000,
max_factor=1)
def test_error_for_largest_if_min_is_more_than_max(self):
with self.assertRaisesWithMessage(ValueError):
value, factors = largest_palindrome(min_factor=2, max_factor=1)
# Utility functions
def setUp(self):
try:
self.assertRaisesRegex
except AttributeError:
self.assertRaisesRegex = self.assertRaisesRegexp
def assertRaisesWithMessage(self, exception):
return self.assertRaisesRegex(exception, r".+")
def assertFactorsEqual(self, actual, expected):
self.assertEqual(set(map(frozenset, actual)),
set(map(frozenset, expected)))
if __name__ == '__main__':
unittest.main()
| mit | -3,542,696,955,969,475,600 | 38.814433 | 78 | 0.685137 | false |
endlessm/chromium-browser | third_party/chromite/scripts/test_image.py | 1 | 4062 | # -*- coding: utf-8 -*-
# Copyright 2014 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script to mount a built image and run tests on it."""
from __future__ import print_function
import os
import sys
import unittest
from chromite.lib import constants
from chromite.lib import commandline
from chromite.lib import image_lib
from chromite.lib import image_test_lib
from chromite.lib import osutils
from chromite.lib import path_util
assert sys.version_info >= (3, 6), 'This module requires Python 3.6+'
def ParseArgs(args):
"""Return parsed commandline arguments."""
parser = commandline.ArgumentParser(description=__doc__)
parser.add_argument('--test_results_root', type='path',
help='Directory to store test results')
parser.add_argument('--board', type=str, help='Board (wolf, beaglebone...)')
parser.add_argument('image', type='path',
help='Image directory (or file) with mount_image.sh and '
'umount_image.sh')
parser.add_argument('-l', '--list', default=False, action='store_true',
help='List all the available tests')
parser.add_argument('tests', nargs='*', metavar='test',
help='Specific tests to run (default runs all)')
opts = parser.parse_args(args)
opts.Freeze()
return opts
def FindImage(image_path):
"""Return the path to the image file.
Args:
image_path: A path to the image file, or a directory containing the base
image.
  Returns:
    The absolute path to the image file.
  """
if os.path.isdir(image_path):
# Assume base image.
image_file = os.path.join(image_path, constants.BASE_IMAGE_NAME + '.bin')
if not os.path.exists(image_file):
raise ValueError('Cannot find base image %s' % image_file)
elif os.path.isfile(image_path):
image_file = image_path
else:
raise ValueError('%s is neither a directory nor a file' % image_path)
return image_file
def main(args):
opts = ParseArgs(args)
# Build up test suites.
loader = unittest.TestLoader()
loader.suiteClass = image_test_lib.ImageTestSuite
# We use a different prefix here so that unittest DO NOT pick up the
# image tests automatically because they depend on a proper environment.
loader.testMethodPrefix = 'Test'
tests_namespace = 'chromite.cros.test.image_test'
if opts.tests:
tests = ['%s.%s' % (tests_namespace, x) for x in opts.tests]
else:
tests = (tests_namespace,)
all_tests = loader.loadTestsFromNames(tests)
# If they just want to see the lists of tests, show them now.
if opts.list:
def _WalkSuite(suite):
for test in suite:
if isinstance(test, unittest.BaseTestSuite):
for result in _WalkSuite(test):
yield result
else:
yield (test.id()[len(tests_namespace) + 1:],
test.shortDescription() or '')
test_list = list(_WalkSuite(all_tests))
maxlen = max(len(x[0]) for x in test_list)
for name, desc in test_list:
print('%-*s %s' % (maxlen, name, desc))
return
# Run them in the image directory.
runner = image_test_lib.ImageTestRunner()
runner.SetBoard(opts.board)
runner.SetResultDir(opts.test_results_root)
image_file = FindImage(opts.image)
tmp_in_chroot = path_util.FromChrootPath('/tmp')
with osutils.TempDir(base_dir=tmp_in_chroot) as temp_dir:
with image_lib.LoopbackPartitions(image_file, temp_dir) as image:
# Due to the lack of mount context, we mount the partitions
# but do not reference directly. This will be removed with the
# submission of http://crrev/c/1795578
_ = image.Mount((constants.PART_ROOT_A,))[0]
_ = image.Mount((constants.PART_STATE,))[0]
with osutils.ChdirContext(temp_dir):
result = runner.run(all_tests)
if result and not result.wasSuccessful():
return 1
return 0
| bsd-3-clause | -3,099,706,782,971,267,600 | 32.295082 | 79 | 0.670852 | false |
OCA/contract | contract_variable_quantity/models/contract_line.py | 1 | 2127 | # Copyright 2016 Tecnativa - Pedro M. Baeza
# Copyright 2018 Tecnativa - Carlos Dauden
# Copyright 2018 ACSONE SA/NV
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import api, models
from odoo.tools import float_is_zero
from odoo.tools.safe_eval import safe_eval
class AccountAnalyticInvoiceLine(models.Model):
_inherit = 'contract.line'
@api.multi
def _get_quantity_to_invoice(
self, period_first_date, period_last_date, invoice_date
):
quantity = super(
AccountAnalyticInvoiceLine, self
)._get_quantity_to_invoice(
period_first_date, period_last_date, invoice_date
)
if not period_first_date or not period_last_date or not invoice_date:
return quantity
if self.qty_type == 'variable':
eval_context = {
'env': self.env,
'context': self.env.context,
'user': self.env.user,
'line': self,
'quantity': quantity,
'period_first_date': period_first_date,
'period_last_date': period_last_date,
'invoice_date': invoice_date,
'contract': self.contract_id,
}
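            # The code on the formula record referenced by qty_formula_id is an
            # arbitrary Python snippet evaluated against the context above; it
            # is expected to leave its output in a variable named ``result``,
            # which is read back right after safe_eval below.
            # A hypothetical formula could look like, for example:
            #     result = quantity * env['res.partner'].search_count([])
            # (illustrative only, not a formula shipped with this module).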
safe_eval(
self.qty_formula_id.code.strip(),
eval_context,
mode="exec",
nocopy=True,
) # nocopy for returning result
quantity = eval_context.get('result', 0)
return quantity
@api.multi
def _prepare_invoice_line(self, invoice_id=False, invoice_values=False):
vals = super(AccountAnalyticInvoiceLine, self)._prepare_invoice_line(
invoice_id=invoice_id, invoice_values=invoice_values,
)
if (
'quantity' in vals
and self.contract_id.skip_zero_qty
and float_is_zero(
vals['quantity'],
self.env['decimal.precision'].precision_get(
'Product Unit of Measure'
),
)
):
vals = {}
return vals
| agpl-3.0 | 1,996,008,599,080,476,700 | 33.306452 | 77 | 0.5496 | false |
justanr/flask-plugins | tests/pluginmanager_tests.py | 1 | 4662 |
import unittest
from flask import Flask
from flask.ext.plugins import PluginManager, PluginError, get_plugins_list, \
get_plugin
class InitializationTestCase(unittest.TestCase):
def setUp(self):
self.app = Flask(__name__)
self.app.config['TESTING'] = True
def test_init_app(self):
plugin_manager = PluginManager()
plugin_manager.init_app(self.app)
self.assertIsInstance(plugin_manager, PluginManager)
def test_class_init(self):
plugin_manager = PluginManager(self.app)
self.assertIsInstance(plugin_manager, PluginManager)
class PluginManagerTests(unittest.TestCase):
def setUp(self):
self.app = Flask(__name__)
self.app.config['TESTING'] = True
self.plugin_manager = PluginManager()
self.plugin_manager.init_app(self.app)
def test_find_plugins(self):
found_plugins = self.plugin_manager.find_plugins()
self.assertEqual(len(found_plugins), 3)
expected_plugins = ['TestOnePlugin', 'TestTwoPlugin', 'TestThreePlugin']
self.assertEquals(sorted(found_plugins), sorted(expected_plugins))
def test_load_plugins(self):
self.plugin_manager._plugins = None
self.assertEquals(self.plugin_manager._plugins, None)
self.plugin_manager.load_plugins()
self.assertEquals(
sorted(self.plugin_manager.plugins.keys()), ["test1", "test2"]
)
self.assertEquals(
sorted(self.plugin_manager.all_plugins.keys()),
["test1", "test2", "test3"]
)
class PluginManagerGetPlugins(unittest.TestCase):
def setUp(self):
self.app = Flask(__name__)
self.app.config['TESTING'] = True
self.plugin_manager = PluginManager()
self.plugin_manager.init_app(self.app)
def test_get_plugins_list(self):
with self.app.test_request_context():
plugins = get_plugins_list()
self.assertEquals(
set(plugins),
set(self.plugin_manager.plugins.values())
)
def test_get_plugin(self):
with self.app.test_request_context():
plugin = get_plugin("test1")
self.assertEquals(plugin, self.plugin_manager.plugins["test1"])
class PluginManagerOtherDirectoryTests(unittest.TestCase):
def setUp(self):
self.app = Flask(__name__)
self.app.config['TESTING'] = True
self.plugin_manager = PluginManager()
def test_wrong_plugin(self):
        # Should raise an exception because the plugin in the "plugs" folder
        # has not set the __plugin__ variable correctly.
with self.assertRaises(PluginError):
self.plugin_manager.init_app(self.app, plugin_folder="plugs")
class PluginManagerOnePluginTests(unittest.TestCase):
"""Tests the plugin.setup(), plugin.enabled()... methods"""
def setUp(self):
self.app = Flask(__name__)
self.app.config['TESTING'] = True
self.plugin_manager = PluginManager()
self.plugin_manager.init_app(self.app)
self.plugin_manager.load_plugins()
def test_plugin_setup(self):
plugin = self.plugin_manager.plugins["test1"]
plugin.setup()
self.assertTrue(plugin.setup_called)
def test_plugin_install(self):
plugin = self.plugin_manager.plugins["test1"]
plugin.install()
self.assertTrue(plugin.install_called)
def test_plugin_uninstall(self):
plugin = self.plugin_manager.plugins["test1"]
plugin.uninstall()
self.assertTrue(plugin.uninstall_called)
class PluginManagerManagementMethodsTests(unittest.TestCase):
def setUp(self):
self.app = Flask(__name__)
self.app.config['TESTING'] = True
self.plugin_manager = PluginManager()
self.plugin_manager.init_app(self.app)
# Call load_plugins to reload them without calling setup_plugins()
self.plugin_manager.load_plugins()
def test_setup_plugins(self):
plugin = self.plugin_manager.plugins["test1"]
self.assertFalse(plugin.setup_called)
self.plugin_manager.setup_plugins()
self.assertTrue(plugin.setup_called)
def test_install_plugins(self):
plugin = self.plugin_manager.plugins["test1"]
self.assertFalse(plugin.install_called)
self.plugin_manager.install_plugins()
self.assertTrue(plugin.install_called)
def test_uninstall_plugins(self):
plugin = self.plugin_manager.plugins["test1"]
self.assertFalse(plugin.uninstall_called)
self.plugin_manager.uninstall_plugins()
self.assertTrue(plugin.uninstall_called)
| bsd-3-clause | -858,463,977,783,038,500 | 29.874172 | 80 | 0.649292 | false |
fusionbox/satchless | examples/demo/core/views.py | 1 | 1250 | # -*- coding:utf-8 -*-
from django.contrib import messages
from django.shortcuts import redirect
from django.template.response import TemplateResponse
from django.utils.translation import ugettext_lazy as _
from satchless.order.app import order_app
def home_page(request):
messages.success(request, _(u'<strong>Welcome!</strong> This is a demo shop built on Satchless. Enjoy!'))
return TemplateResponse(request, 'core/home_page.html')
def thank_you_page(request, order_token):
order = order_app.get_order(request, order_token)
    if order.status not in ('payment-failed', 'payment-complete', 'delivery'):
return redirect(order_app.reverse('satchless-order-view',
args=(order.token,)))
if order.status == 'payment-failed':
return redirect('payment-failed', order_token=order.token)
return TemplateResponse(request, 'satchless/checkout/thank_you.html', {
'order': order,
})
def payment_failed(request, order_token):
order = order_app.get_order(request, order_token)
if order.status != 'payment-failed':
return redirect(order)
return TemplateResponse(request, 'satchless/checkout/payment_failed.html', {
'order': order,
})
| bsd-3-clause | -7,441,253,123,787,888,000 | 36.878788 | 109 | 0.6872 | false |
Robbie1977/AlignmentPipe | align/settings.py | 1 | 10620 | import psycopg2, os
# import subprocess
from socket import gethostname
host = gethostname()
con = psycopg2.connect(host='bocian.inf.ed.ac.uk', database='alignment', user='aligner_admin', password='default99')
cur = con.cursor()
cur.execute("SELECT upload_dir FROM system_server WHERE host_name like '" + host + "'")
record = cur.fetchone()
if record == None:
print 'Missing server settings for ' + str(host)
cur.execute("SELECT upload_dir, host_name FROM system_server")
record = cur.fetchone()
print 'Having to use settings for ' + str(record[1])
host = str(record[1])
uploadfolder = str(record[0])
cur.close()
con.close()
del cur, con, record
# Use to reset file permission only if error occurs
# for file in os.listdir(uploadfolder):
# try:
# # os.chmod(uploadfolder + file, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
# subprocess.call(['chmod', '0777', uploadfolder + file])
# print 'OK: ' + file
# except:
# print 'Error: ' + file
#
# Django settings for align project.
# DEBUG = True
# TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Robert Court', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'django_mongodb_engine'.
'NAME': 'alignment', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': 'aligner_admin',
'PASSWORD': 'default99',
'HOST': 'bocian.inf.ed.ac.uk', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['localhost', '127.0.0.1', 'bocian.inf.ed.ac.uk', 'vfbaligner.inf.ed.ac.uk']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/London'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-gb'
# SITE_ID=u'5395bb746c132991c57933f6'
SITE_ID=1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = '/disk/data/VFB/aligner/uploads/'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = '/disk/data/VFB/aligner/static/'
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'dv16bwh3f1x%p9csb3o7l9k#o8d_oqp-)aa=afq%yj+2$s96_('
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
LOGIN_URL = '/'
LOGIN_REDIRECT_URL = '/'
LOGIN_ERROR_URL = '/'
SOCIAL_AUTH_COMPLETE_URL_NAME = 'socialauth_complete'
SOCIAL_AUTH_ASSOCIATE_URL_NAME = 'socialauth_associate_complete'
SOCIAL_AUTH_GITHUB_KEY = 'e8bfae9142f86f36b391'
SOCIAL_AUTH_GITHUB_SECRET = 'b7617cf006cace2e60d90f089816924e0eabbd0f'
SOCIAL_AUTH_GITHUB_SCOPE = ['user:email']
# SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = '884257168498-3gec80pdfpullsaeavbg2nqra3aflru5.apps.googleusercontent.com'
# SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = 'pvsqhFUx1kmBiGlVWERy_Q-b'
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = os.environ.get(
'SOCIAL_AUTH_GOOGLE_OAUTH2_KEY',
'884257168498-3gec80pdfpullsaeavbg2nqra3aflru5.apps.googleusercontent.com'
)
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = os.environ.get(
'SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET',
'pvsqhFUx1kmBiGlVWERy_Q-b'
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'social.apps.django_app.context_processors.backends',
'social.apps.django_app.context_processors.login_redirect',
)
# MIDDLEWARE_CLASSES = (
# 'django.middleware.common.CommonMiddleware',
# 'django.contrib.sessions.middleware.SessionMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
# 'social.apps.django_app.middleware.SocialAuthExceptionMiddleware',
# 'django.contrib.auth.middleware.AuthenticationMiddleware',
# # 'permission_backend_nonrel.backends.NonrelPermissionBackend'
# 'django.contrib.messages.middleware.MessageMiddleware',
# # Uncomment the next line for simple clickjacking protection:
# # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
# )
MIDDLEWARE_CLASSES = (
'django.middleware.gzip.GZipMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'social.apps.django_app.middleware.SocialAuthExceptionMiddleware',
)
ROOT_URLCONF = 'urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'align.wsgi.application'
TEMPLATE_DIRS = (
"/disk/data/VFB/aligner/AlignmentPipe/align/images/templates/admin_copies" ,
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# AUTHENTICATION_BACKENDS = (
# 'permission_backend_nonrel.backends.NonrelPermissionBackend',
# )
# AUTHENTICATION_BACKENDS = (
# 'social.backends.google.GoogleOAuth2',
# 'social.backends.google.GooglePlusAuth',
# 'django.contrib.auth.backends.ModelBackend',
# )
# AUTHENTICATION_BACKENDS = (
# 'social.backends.open_id.OpenIdAuth',
# 'social.backends.google.GoogleOpenId',
# 'social.backends.google.GoogleOAuth2',
# 'social.backends.google.GoogleOAuth',
# 'social.backends.twitter.TwitterOAuth',
# 'social.backends.yahoo.YahooOpenId',
# 'django.contrib.auth.backends.ModelBackend',
# )
AUTHENTICATION_BACKENDS = (
# 'social.backends.facebook.FacebookOAuth2',
'social.backends.google.GoogleOAuth2',
'social.backends.google.GooglePlusAuth',
# 'social.backends.twitter.TwitterOAuth',
'django.contrib.auth.backends.ModelBackend',
'social.backends.github.GithubOAuth2',
# 'social_auth.backends.contrib.linkedin.LinkedinBackend',
)
# SOCIAL_AUTH_USER_MODEL = 'users.User'
SOCIAL_AUTH_ADMIN_USER_SEARCH_FIELDS = ['username', 'first_name', 'email']
LOGIN_REDIRECT_URL = '/'
SOCIAL_AUTH_AUTHENTICATION_BACKENDS = (
'social.backends.open_id.OpenIdAuth',
'social.backends.google.GoogleOpenId',
'social.backends.google.GoogleOAuth2',
'social.backends.google.GoogleOAuth',
'social.backends.google.GooglePlusAuth',
# 'social.backends.twitter.TwitterOAuth',
# 'social.backends.yahoo.YahooOpenId',
'social.backends.github.GithubOAuth2',
# 'social_auth.backends.contrib.linkedin.LinkedinBackend',
)
INSTALLED_APPS = (
'adminactions',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# 'django_mongodb_engine',
'django.db.backends.postgresql_psycopg2',
# 'djangotoolbox',
# 'permission_backend_nonrel',
'system',
'images',
'users',
'bootstrap3',
'images.templatetags.images_extras',
'users.templatetags.backend_utils',
    # The Django admin interface is enabled:
    'django.contrib.admin',
    # Admin documentation (django.contrib.admindocs) is enabled:
    'django.contrib.admindocs',
'social.apps.django_app.default',
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| mit | -6,374,926,531,072,543,000 | 34.165563 | 146 | 0.706874 | false |
apel/rest | api/tests/test_cloud_record_summary_get.py | 1 | 9827 | """This module tests GET requests to the Cloud Sumamry Record endpoint."""
import logging
import MySQLdb
from api.utils.TokenChecker import TokenChecker
from django.core.urlresolvers import reverse
from django.test import Client, TestCase
from mock import patch
QPATH_TEST = '/tmp/django-test/'
class CloudRecordSummaryGetTest(TestCase):
"""Tests GET requests to the Cloud Sumamry Record endpoint."""
def setUp(self):
"""Prevent logging from appearing in test output."""
logging.disable(logging.CRITICAL)
@patch.object(TokenChecker, 'valid_token_to_id')
def test_cloud_record_summary_get_IAM_fail(self, mock_valid_token_to_id):
"""
Test what happens if we fail to contact the IAM.
        i.e., _token_to_id returns None
IAM = Identity and Access Management
"""
# Mock the functionality of the IAM
# Used in the underlying GET method
# Simulates a failure to translate a token to an ID
mock_valid_token_to_id.return_value = None
with self.settings(ALLOWED_FOR_GET='TestService'):
# Make (and check) the GET request
self._check_summary_get(401,
options=("?group=TestGroup"
"&from=20000101&to=20191231"),
authZ_header_cont="Bearer TestToken")
@patch.object(TokenChecker, 'valid_token_to_id')
def test_cloud_record_summary_get_400(self, mock_valid_token_to_id):
"""Test a GET request without the from field."""
# Mock the functionality of the IAM
# Simulates the translation of a token to an ID
# Used in the underlying GET method
mock_valid_token_to_id.return_value = 'TestService'
with self.settings(ALLOWED_FOR_GET='TestService'):
# Make (and check) the GET request
self._check_summary_get(400, options="?group=TestGroup",
authZ_header_cont="Bearer TestToken")
@patch.object(TokenChecker, 'valid_token_to_id')
def test_cloud_record_summary_get_403(self, mock_valid_token_to_id):
"""Test an unauthorized service cannot make a GET request."""
# Mock the functionality of the IAM
# Simulates the translation of a token to an unauthorized ID
# Used in the underlying GET method
mock_valid_token_to_id.return_value = 'FakeService'
with self.settings(ALLOWED_FOR_GET='TestService'):
# Make (and check) the GET request
self._check_summary_get(403,
options=("?group=TestGroup"
"&from=20000101&to=20191231"),
authZ_header_cont="Bearer TestToken")
def test_cloud_record_summary_get_401(self):
"""Test an unauthenticated GET request."""
# Test without the HTTP_AUTHORIZATION header
# Make (and check) the GET request
self._check_summary_get(401,
options=("?group=TestGroup"
"&from=20000101&to=20191231"))
# Test with a malformed HTTP_AUTHORIZATION header
# Make (and check) the GET request
self._check_summary_get(401,
options=("?group=TestGroup"
"&from=20000101&to=20191231"),
authZ_header_cont="TestToken")
@patch.object(TokenChecker, 'valid_token_to_id')
def test_cloud_record_summary_get_200(self, mock_valid_token_to_id):
"""Test a successful GET request."""
# Connect to database
database = self._connect_to_database()
# Clean up any lingering example data.
self._clear_database(database)
# Add example data
self._populate_database(database)
# Mock the functionality of the IAM
mock_valid_token_to_id.return_value = 'TestService'
expected_response = ('{'
'"count":2,'
'"next":null,'
'"previous":null,'
'"results":[{'
'"WallDuration":86399,'
'"Year":2016,'
'"Day":30,'
'"Month":7'
'},{'
'"WallDuration":43200,'
'"Year":2016,'
'"Day":31,'
'"Month":7}]}')
with self.settings(ALLOWED_FOR_GET='TestService',
RETURN_HEADERS=["WallDuration",
"Day",
"Month",
"Year"]):
try:
self._check_summary_get(200,
expected_response=expected_response,
options=("?group=TestGroup"
"&from=20000101&to=20191231"),
authZ_header_cont="Bearer TestToken")
finally:
# Clean up after test.
self._clear_database(database)
database.close()
def tearDown(self):
"""Delete any messages under QPATH and re-enable logging.INFO."""
logging.disable(logging.NOTSET)
def _check_summary_get(self, expected_status, expected_response=None,
options='', authZ_header_cont=None):
"""Helper method to make a GET request."""
test_client = Client()
# Form the URL to make the GET request to
url = ''.join((reverse('CloudRecordSummaryView'), options))
if authZ_header_cont is not None:
# If content for a HTTP_AUTHORIZATION has been provided,
# make the GET request with the appropriate header
response = test_client.get(url,
HTTP_AUTHORIZATION=authZ_header_cont)
else:
            # Otherwise, make a GET request without an HTTP_AUTHORIZATION header
response = test_client.get(url)
# Check the expected response code has been received.
self.assertEqual(response.status_code, expected_status)
if expected_response is not None:
# Check the response received is as expected.
self.assertEqual(response.content, expected_response)
def _populate_database(self, database):
"""Populate the database with example summaries."""
cursor = database.cursor()
# Insert example usage data
cursor.execute('INSERT INTO CloudRecords '
'(VMUUID, SiteID, GlobalUserNameID, VOID, '
'VOGroupID, VORoleID, Status, StartTime, '
'SuspendDuration, WallDuration, PublisherDNID, '
'CloudType, ImageId, '
'CloudComputeServiceID) '
'VALUES '
'("TEST-VM", 1, 1, 1, 1, 1, "Running", '
'"2016-07-30 00:00:00", 0, 86399, 1, "TEST", "1", '
'1);')
# Insert example usage data
cursor.execute('INSERT INTO CloudRecords '
'(VMUUID, SiteID, GlobalUserNameID, VOID, '
'VOGroupID, VORoleID, Status, StartTime, '
'SuspendDuration, WallDuration, PublisherDNID, '
'CloudType, ImageId, '
'CloudComputeServiceID) '
'VALUES '
'("TEST-VM", 1, 1, 1, 1, 1, "Running", '
'"2016-07-30 00:00:00", 0, 129599, 1, "TEST", "1", '
'1);')
# These INSERT statements are needed
# because we query VCloudSummaries
cursor.execute('INSERT INTO Sites VALUES (1, "TestSite");')
cursor.execute('INSERT INTO VOs VALUES (1, "TestVO");')
cursor.execute('INSERT INTO VOGroups VALUES (1, "TestGroup");')
cursor.execute('INSERT INTO VORoles VALUES (1, "TestRole");')
cursor.execute('INSERT INTO DNs VALUES (1, "TestDN");')
cursor.execute('INSERT INTO CloudComputeServices '
'VALUES (1, "TestService");')
# Summarise example usage data
cursor.execute('CALL SummariseVMs();')
database.commit()
def _clear_database(self, database):
"""Clear the database of example data."""
cursor = database.cursor()
cursor.execute('DELETE FROM CloudRecords '
'WHERE VMUUID="TEST-VM";')
cursor.execute('DELETE FROM CloudSummaries '
'WHERE CloudType="TEST";')
cursor.execute('DELETE FROM Sites '
'WHERE id=1;')
cursor.execute('DELETE FROM VOs '
'WHERE id=1;')
cursor.execute('DELETE FROM VOGroups '
'WHERE id=1;')
cursor.execute('DELETE FROM VORoles '
'WHERE id=1;')
cursor.execute('DELETE FROM DNs '
'WHERE id=1;')
cursor.execute('DELETE FROM CloudComputeServices '
'WHERE id=1;')
database.commit()
def _connect_to_database(self,
host='localhost',
user='root',
password='',
name='apel_rest'):
"""Connect to and return a cursor to the given database."""
database = MySQLdb.connect(host, user, password, name)
return database
| apache-2.0 | -8,940,463,494,486,466,000 | 40.817021 | 79 | 0.51908 | false |
leighpauls/k2cro4 | tools/swarm_client/tests/isolate_test.py | 1 | 28440 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import cStringIO
import hashlib
import logging
import os
import sys
import tempfile
import unittest
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, ROOT_DIR)
import isolate
# Create shortcuts.
from isolate import KEY_TOUCHED, KEY_TRACKED, KEY_UNTRACKED
def _size(*args):
return os.stat(os.path.join(ROOT_DIR, *args)).st_size
def _sha1(*args):
with open(os.path.join(ROOT_DIR, *args), 'rb') as f:
return hashlib.sha1(f.read()).hexdigest()
class IsolateTest(unittest.TestCase):
def setUp(self):
super(IsolateTest, self).setUp()
# Everything should work even from another directory.
os.chdir(os.path.dirname(ROOT_DIR))
def test_load_isolate_for_flavor_empty(self):
content = "{}"
command, infiles, touched, read_only = isolate.load_isolate_for_flavor(
content, isolate.get_flavor())
self.assertEqual([], command)
self.assertEqual([], infiles)
self.assertEqual([], touched)
self.assertEqual(None, read_only)
def test_isolated_load_empty(self):
values = {
}
expected = {
'command': [],
'files': {},
'os': isolate.get_flavor(),
}
self.assertEqual(expected, isolate.IsolatedFile.load(values).flatten())
def test_isolated_load(self):
values = {
'command': 'maybe',
'files': {'foo': 42},
'read_only': 2,
}
expected = {
'command': 'maybe',
'files': {'foo': 42},
'os': isolate.get_flavor(),
'read_only': 2,
}
self.assertEqual(expected, isolate.IsolatedFile.load(values).flatten())
def test_isolated_load_unexpected(self):
values = {
'foo': 'bar',
}
expected = (
("Found unexpected entry {'foo': 'bar'} while constructing an "
"object IsolatedFile"),
{'foo': 'bar'},
'IsolatedFile')
try:
isolate.IsolatedFile.load(values)
self.fail()
except ValueError, e:
self.assertEqual(expected, e.args)
def test_savedstate_load_empty(self):
values = {
}
expected = {
'variables': {},
}
self.assertEqual(expected, isolate.SavedState.load(values).flatten())
def test_savedstate_load(self):
values = {
'isolate_file': os.path.join(ROOT_DIR, 'maybe'),
'variables': {'foo': 42},
}
expected = {
'isolate_file': os.path.join(ROOT_DIR, 'maybe'),
'variables': {'foo': 42},
}
self.assertEqual(expected, isolate.SavedState.load(values).flatten())
def test_unknown_key(self):
try:
isolate.verify_variables({'foo': [],})
self.fail()
except AssertionError:
pass
def test_unknown_var(self):
try:
isolate.verify_condition({'variables': {'foo': [],}})
self.fail()
except AssertionError:
pass
def test_union(self):
value1 = {
'a': set(['A']),
'b': ['B', 'C'],
'c': 'C',
}
value2 = {
'a': set(['B', 'C']),
'b': [],
'd': set(),
}
expected = {
'a': set(['A', 'B', 'C']),
'b': ['B', 'C'],
'c': 'C',
'd': set(),
}
self.assertEqual(expected, isolate.union(value1, value2))
def test_eval_content(self):
try:
# Intrinsics are not available.
isolate.eval_content('map(str, [1, 2])')
self.fail()
except NameError:
pass
def test_load_isolate_as_config_empty(self):
self.assertEqual({}, isolate.load_isolate_as_config(
{}, None, []).flatten())
def test_load_isolate_as_config(self):
value = {
'variables': {
KEY_TRACKED: ['a'],
KEY_UNTRACKED: ['b'],
KEY_TOUCHED: ['touched'],
},
'conditions': [
['OS=="atari"', {
'variables': {
KEY_TRACKED: ['c', 'x'],
KEY_UNTRACKED: ['d'],
KEY_TOUCHED: ['touched_a'],
'command': ['echo', 'Hello World'],
'read_only': True,
},
}, { # else
'variables': {
KEY_TRACKED: ['e', 'x'],
KEY_UNTRACKED: ['f'],
KEY_TOUCHED: ['touched_e'],
'command': ['echo', 'You should get an Atari'],
},
}],
['OS=="amiga"', {
'variables': {
KEY_TRACKED: ['g'],
'read_only': False,
},
}],
['OS=="dendy"', {
}],
['OS=="coleco"', {
}, { # else
'variables': {
KEY_UNTRACKED: ['h'],
'read_only': None,
},
}],
],
}
expected = {
'amiga': {
'command': ['echo', 'You should get an Atari'],
KEY_TOUCHED: ['touched', 'touched_e'],
KEY_TRACKED: ['a', 'e', 'g', 'x'],
KEY_UNTRACKED: ['b', 'f', 'h'],
'read_only': False,
},
'atari': {
'command': ['echo', 'Hello World'],
KEY_TOUCHED: ['touched', 'touched_a'],
KEY_TRACKED: ['a', 'c', 'x'],
KEY_UNTRACKED: ['b', 'd', 'h'],
'read_only': True,
},
'coleco': {
'command': ['echo', 'You should get an Atari'],
KEY_TOUCHED: ['touched', 'touched_e'],
KEY_TRACKED: ['a', 'e', 'x'],
KEY_UNTRACKED: ['b', 'f'],
},
'dendy': {
'command': ['echo', 'You should get an Atari'],
KEY_TOUCHED: ['touched', 'touched_e'],
KEY_TRACKED: ['a', 'e', 'x'],
KEY_UNTRACKED: ['b', 'f', 'h'],
},
}
self.assertEqual(
expected, isolate.load_isolate_as_config(value, None, []).flatten())
def test_load_isolate_as_config_duplicate_command(self):
value = {
'variables': {
'command': ['rm', '-rf', '/'],
},
'conditions': [
['OS=="atari"', {
'variables': {
'command': ['echo', 'Hello World'],
},
}],
],
}
try:
isolate.load_isolate_as_config(value, None, [])
self.fail()
except AssertionError:
pass
def test_load_isolate_as_config_no_condition(self):
value = {
'variables': {
KEY_TRACKED: ['a'],
KEY_UNTRACKED: ['b'],
},
}
expected = {
KEY_TRACKED: ['a'],
KEY_UNTRACKED: ['b'],
}
actual = isolate.load_isolate_as_config(value, None, [])
# Flattening the whole config will discard 'None'.
self.assertEqual({}, actual.flatten())
self.assertEqual([None], actual.per_os.keys())
# But the 'None' value is still available as a backup.
self.assertEqual(expected, actual.per_os[None].flatten())
def test_invert_map(self):
value = {
'amiga': {
'command': ['echo', 'You should get an Atari'],
KEY_TOUCHED: ['touched', 'touched_e'],
KEY_TRACKED: ['a', 'e', 'g', 'x'],
KEY_UNTRACKED: ['b', 'f', 'h'],
'read_only': False,
},
'atari': {
'command': ['echo', 'Hello World'],
KEY_TOUCHED: ['touched', 'touched_a'],
KEY_TRACKED: ['a', 'c', 'x'],
KEY_UNTRACKED: ['b', 'd', 'h'],
'read_only': True,
},
'coleco': {
'command': ['echo', 'You should get an Atari'],
KEY_TOUCHED: ['touched', 'touched_e'],
KEY_TRACKED: ['a', 'e', 'x'],
KEY_UNTRACKED: ['b', 'f'],
},
'dendy': {
'command': ['echo', 'You should get an Atari'],
KEY_TOUCHED: ['touched', 'touched_e'],
KEY_TRACKED: ['a', 'e', 'x'],
KEY_UNTRACKED: ['b', 'f', 'h'],
},
}
expected_values = {
'command': {
('echo', 'Hello World'): set(['atari']),
('echo', 'You should get an Atari'): set(['amiga', 'coleco', 'dendy']),
},
KEY_TRACKED: {
'a': set(['amiga', 'atari', 'coleco', 'dendy']),
'c': set(['atari']),
'e': set(['amiga', 'coleco', 'dendy']),
'g': set(['amiga']),
'x': set(['amiga', 'atari', 'coleco', 'dendy']),
},
KEY_UNTRACKED: {
'b': set(['amiga', 'atari', 'coleco', 'dendy']),
'd': set(['atari']),
'f': set(['amiga', 'coleco', 'dendy']),
'h': set(['amiga', 'atari', 'dendy']),
},
KEY_TOUCHED: {
'touched': set(['amiga', 'atari', 'coleco', 'dendy']),
'touched_a': set(['atari']),
'touched_e': set(['amiga', 'coleco', 'dendy']),
},
'read_only': {
None: set(['coleco', 'dendy']),
False: set(['amiga']),
True: set(['atari']),
},
}
expected_oses = set(['amiga', 'atari', 'coleco', 'dendy'])
actual_values, actual_oses = isolate.invert_map(value)
self.assertEqual(expected_values, actual_values)
self.assertEqual(expected_oses, actual_oses)
def test_reduce_inputs(self):
values = {
'command': {
('echo', 'Hello World'): set(['atari']),
('echo', 'You should get an Atari'): set(['amiga', 'coleco', 'dendy']),
},
KEY_TRACKED: {
'a': set(['amiga', 'atari', 'coleco', 'dendy']),
'c': set(['atari']),
'e': set(['amiga', 'coleco', 'dendy']),
'g': set(['amiga']),
'x': set(['amiga', 'atari', 'coleco', 'dendy']),
},
KEY_UNTRACKED: {
'b': set(['amiga', 'atari', 'coleco', 'dendy']),
'd': set(['atari']),
'f': set(['amiga', 'coleco', 'dendy']),
'h': set(['amiga', 'atari', 'dendy']),
},
KEY_TOUCHED: {
'touched': set(['amiga', 'atari', 'coleco', 'dendy']),
'touched_a': set(['atari']),
'touched_e': set(['amiga', 'coleco', 'dendy']),
},
'read_only': {
None: set(['coleco', 'dendy']),
False: set(['amiga']),
True: set(['atari']),
},
}
oses = set(['amiga', 'atari', 'coleco', 'dendy'])
expected_values = {
'command': {
('echo', 'Hello World'): set(['atari']),
('echo', 'You should get an Atari'): set(['!atari']),
},
KEY_TRACKED: {
'a': set([None]),
'c': set(['atari']),
'e': set(['!atari']),
'g': set(['amiga']),
'x': set([None]),
},
KEY_UNTRACKED: {
'b': set([None]),
'd': set(['atari']),
'f': set(['!atari']),
'h': set(['!coleco']),
},
KEY_TOUCHED: {
'touched': set([None]),
'touched_a': set(['atari']),
'touched_e': set(['!atari']),
},
'read_only': {
None: set(['coleco', 'dendy']),
False: set(['amiga']),
True: set(['atari']),
},
}
actual_values, actual_oses = isolate.reduce_inputs(values, oses)
self.assertEqual(expected_values, actual_values)
self.assertEqual(oses, actual_oses)
def test_reduce_inputs_merge_subfolders_and_files(self):
values = {
'command': {},
KEY_TRACKED: {
'folder/tracked_file': set(['win']),
'folder_helper/tracked_file': set(['win']),
},
KEY_UNTRACKED: {
'folder/': set(['linux', 'mac', 'win']),
'folder/subfolder/': set(['win']),
'folder/untracked_file': set(['linux', 'mac', 'win']),
'folder_helper/': set(['linux']),
},
KEY_TOUCHED: {
        'folder/touched_file': set(['win']),
'folder/helper_folder/deep_file': set(['win']),
        'folder_helper/touched_file1': set(['mac', 'win']),
        'folder_helper/touched_file2': set(['linux']),
},
}
oses = set(['linux', 'mac', 'win'])
expected_values = {
'command': {},
KEY_TRACKED: {
'folder_helper/tracked_file': set(['win']),
},
KEY_UNTRACKED: {
'folder/': set([None]),
'folder_helper/': set(['linux']),
},
KEY_TOUCHED: {
        'folder_helper/touched_file1': set(['!linux']),
},
'read_only': {},
}
actual_values, actual_oses = isolate.reduce_inputs(values, oses)
self.assertEqual(expected_values, actual_values)
self.assertEqual(oses, actual_oses)
def test_reduce_inputs_take_strongest_dependency(self):
values = {
'command': {
('echo', 'Hello World'): set(['atari']),
('echo', 'You should get an Atari'): set(['amiga', 'coleco', 'dendy']),
},
KEY_TRACKED: {
'a': set(['amiga', 'atari', 'coleco', 'dendy']),
'b': set(['amiga', 'atari', 'coleco']),
},
KEY_UNTRACKED: {
'c': set(['amiga', 'atari', 'coleco', 'dendy']),
'd': set(['amiga', 'coleco', 'dendy']),
},
KEY_TOUCHED: {
'a': set(['amiga', 'atari', 'coleco', 'dendy']),
'b': set(['atari', 'coleco', 'dendy']),
'c': set(['amiga', 'atari', 'coleco', 'dendy']),
'd': set(['atari', 'coleco', 'dendy']),
},
}
oses = set(['amiga', 'atari', 'coleco', 'dendy'])
expected_values = {
'command': {
('echo', 'Hello World'): set(['atari']),
('echo', 'You should get an Atari'): set(['!atari']),
},
KEY_TRACKED: {
'a': set([None]),
'b': set(['!dendy']),
},
KEY_UNTRACKED: {
'c': set([None]),
'd': set(['!atari']),
},
KEY_TOUCHED: {
'b': set(['dendy']),
'd': set(['atari']),
},
'read_only': {},
}
actual_values, actual_oses = isolate.reduce_inputs(values, oses)
self.assertEqual(expected_values, actual_values)
self.assertEqual(oses, actual_oses)
def test_convert_map_to_isolate_dict(self):
values = {
'command': {
('echo', 'Hello World'): set(['atari']),
('echo', 'You should get an Atari'): set(['!atari']),
},
KEY_TRACKED: {
'a': set([None]),
'c': set(['atari']),
'e': set(['!atari']),
'g': set(['amiga']),
'x': set([None]),
},
KEY_UNTRACKED: {
'b': set([None]),
'd': set(['atari']),
'f': set(['!atari']),
'h': set(['!coleco']),
},
KEY_TOUCHED: {
'touched': set([None]),
'touched_a': set(['atari']),
'touched_e': set(['!atari']),
},
'read_only': {
None: set(['coleco', 'dendy']),
False: set(['amiga']),
True: set(['atari']),
},
}
oses = set(['amiga', 'atari', 'coleco', 'dendy'])
expected = {
'variables': {
KEY_TRACKED: ['a', 'x'],
KEY_UNTRACKED: ['b'],
KEY_TOUCHED: ['touched'],
},
'conditions': [
['OS=="amiga"', {
'variables': {
KEY_TRACKED: ['g'],
'read_only': False,
},
}],
['OS=="atari"', {
'variables': {
'command': ['echo', 'Hello World'],
KEY_TRACKED: ['c'],
KEY_UNTRACKED: ['d'],
KEY_TOUCHED: ['touched_a'],
'read_only': True,
},
}, {
'variables': {
'command': ['echo', 'You should get an Atari'],
KEY_TRACKED: ['e'],
KEY_UNTRACKED: ['f'],
KEY_TOUCHED: ['touched_e'],
},
}],
['OS=="coleco"', {
}, {
'variables': {
KEY_UNTRACKED: ['h'],
},
}],
],
}
self.assertEqual(
expected, isolate.convert_map_to_isolate_dict(values, oses))
def test_merge_two_empty(self):
# Flat stay flat. Pylint is confused about union() return type.
# pylint: disable=E1103
actual = isolate.union(
isolate.union(
isolate.Configs([], None),
isolate.load_isolate_as_config({}, None, [])),
isolate.load_isolate_as_config({}, None, [])).flatten()
self.assertEqual({}, actual)
def test_merge_empty(self):
actual = isolate.convert_map_to_isolate_dict(
*isolate.reduce_inputs(*isolate.invert_map({})))
self.assertEqual({}, actual)
def test_load_two_conditions(self):
linux = {
'conditions': [
['OS=="linux"', {
'variables': {
'isolate_dependency_tracked': [
'file_linux',
'file_common',
],
},
}],
],
}
mac = {
'conditions': [
['OS=="mac"', {
'variables': {
'isolate_dependency_tracked': [
'file_mac',
'file_common',
],
},
}],
],
}
expected = {
'linux': {
'isolate_dependency_tracked': ['file_common', 'file_linux'],
},
'mac': {
'isolate_dependency_tracked': ['file_common', 'file_mac'],
},
}
# Pylint is confused about union() return type.
# pylint: disable=E1103
configs = isolate.union(
isolate.union(
isolate.Configs([], None),
isolate.load_isolate_as_config(linux, None, [])),
isolate.load_isolate_as_config(mac, None, [])).flatten()
self.assertEqual(expected, configs)
def test_load_three_conditions(self):
linux = {
'conditions': [
['OS=="linux"', {
'variables': {
'isolate_dependency_tracked': [
'file_linux',
'file_common',
],
},
}],
],
}
mac = {
'conditions': [
['OS=="mac"', {
'variables': {
'isolate_dependency_tracked': [
'file_mac',
'file_common',
],
},
}],
],
}
win = {
'conditions': [
['OS=="win"', {
'variables': {
'isolate_dependency_tracked': [
'file_win',
'file_common',
],
},
}],
],
}
expected = {
'linux': {
'isolate_dependency_tracked': ['file_common', 'file_linux'],
},
'mac': {
'isolate_dependency_tracked': ['file_common', 'file_mac'],
},
'win': {
'isolate_dependency_tracked': ['file_common', 'file_win'],
},
}
# Pylint is confused about union() return type.
# pylint: disable=E1103
configs = isolate.union(
isolate.union(
isolate.union(
isolate.Configs([], None),
isolate.load_isolate_as_config(linux, None, [])),
isolate.load_isolate_as_config(mac, None, [])),
isolate.load_isolate_as_config(win, None, [])).flatten()
self.assertEqual(expected, configs)
def test_merge_three_conditions(self):
values = {
'linux': {
'isolate_dependency_tracked': ['file_common', 'file_linux'],
},
'mac': {
'isolate_dependency_tracked': ['file_common', 'file_mac'],
},
'win': {
'isolate_dependency_tracked': ['file_common', 'file_win'],
},
}
expected = {
'variables': {
'isolate_dependency_tracked': [
'file_common',
],
},
'conditions': [
['OS=="linux"', {
'variables': {
'isolate_dependency_tracked': [
'file_linux',
],
},
}],
['OS=="mac"', {
'variables': {
'isolate_dependency_tracked': [
'file_mac',
],
},
}],
['OS=="win"', {
'variables': {
'isolate_dependency_tracked': [
'file_win',
],
},
}],
],
}
actual = isolate.convert_map_to_isolate_dict(
*isolate.reduce_inputs(*isolate.invert_map(values)))
self.assertEqual(expected, actual)
def test_configs_comment(self):
# Pylint is confused with isolate.union() return type.
# pylint: disable=E1103
configs = isolate.union(
isolate.load_isolate_as_config({}, '# Yo dawg!\n# Chill out.\n', []),
isolate.load_isolate_as_config({}, None, []))
self.assertEqual('# Yo dawg!\n# Chill out.\n', configs.file_comment)
configs = isolate.union(
isolate.load_isolate_as_config({}, None, []),
isolate.load_isolate_as_config({}, '# Yo dawg!\n# Chill out.\n', []))
self.assertEqual('# Yo dawg!\n# Chill out.\n', configs.file_comment)
# Only keep the first one.
configs = isolate.union(
isolate.load_isolate_as_config({}, '# Yo dawg!\n', []),
isolate.load_isolate_as_config({}, '# Chill out.\n', []))
self.assertEqual('# Yo dawg!\n', configs.file_comment)
def test_extract_comment(self):
self.assertEqual(
'# Foo\n# Bar\n', isolate.extract_comment('# Foo\n# Bar\n{}'))
self.assertEqual('', isolate.extract_comment('{}'))
def _test_pretty_print_impl(self, value, expected):
actual = cStringIO.StringIO()
isolate.pretty_print(value, actual)
self.assertEqual(expected, actual.getvalue())
def test_pretty_print_empty(self):
self._test_pretty_print_impl({}, '{\n}\n')
def test_pretty_print_mid_size(self):
value = {
'variables': {
'bar': [
'file1',
'file2',
],
},
'conditions': [
['OS=\"foo\"', {
'variables': {
isolate.KEY_UNTRACKED: [
'dir1',
'dir2',
],
isolate.KEY_TRACKED: [
'file4',
'file3',
],
'command': ['python', '-c', 'print "H\\i\'"'],
'read_only': True,
'relative_cwd': 'isol\'at\\e',
},
}],
['OS=\"bar\"', {
'variables': {},
}, {
'variables': {},
}],
],
}
expected = (
"{\n"
" 'variables': {\n"
" 'bar': [\n"
" 'file1',\n"
" 'file2',\n"
" ],\n"
" },\n"
" 'conditions': [\n"
" ['OS=\"foo\"', {\n"
" 'variables': {\n"
" 'command': [\n"
" 'python',\n"
" '-c',\n"
" 'print \"H\\i\'\"',\n"
" ],\n"
" 'relative_cwd': 'isol\\'at\\\\e',\n"
" 'read_only': True\n"
" 'isolate_dependency_tracked': [\n"
" 'file4',\n"
" 'file3',\n"
" ],\n"
" 'isolate_dependency_untracked': [\n"
" 'dir1',\n"
" 'dir2',\n"
" ],\n"
" },\n"
" }],\n"
" ['OS=\"bar\"', {\n"
" 'variables': {\n"
" },\n"
" }, {\n"
" 'variables': {\n"
" },\n"
" }],\n"
" ],\n"
"}\n")
self._test_pretty_print_impl(value, expected)
class IsolateLoad(unittest.TestCase):
def setUp(self):
super(IsolateLoad, self).setUp()
self.directory = tempfile.mkdtemp(prefix='isolate_')
def tearDown(self):
isolate.run_isolated.rmtree(self.directory)
super(IsolateLoad, self).tearDown()
def _get_option(self, isolate_file):
class Options(object):
isolated = os.path.join(self.directory, 'isolated')
outdir = os.path.join(self.directory, 'outdir')
isolate = isolate_file
variables = {'foo': 'bar'}
ignore_broken_items = False
return Options()
def _cleanup_isolated(self, expected_isolated, actual_isolated):
"""Modifies isolated to remove the non-deterministic parts."""
if sys.platform == 'win32':
# 'm' are not saved in windows.
for values in expected_isolated['files'].itervalues():
del values['m']
for item in actual_isolated['files'].itervalues():
if 't' in item:
self.assertTrue(item.pop('t'))
def test_load_stale_isolated(self):
isolate_file = os.path.join(
ROOT_DIR, 'tests', 'isolate', 'touch_root.isolate')
# Data to be loaded in the .isolated file. Do not create a .state file.
input_data = {
'command': ['python'],
'files': {
'foo': {
"m": 416,
"h": "invalid",
"s": 538,
"t": 1335146921,
},
os.path.join('tests', 'isolate', 'touch_root.py'): {
"m": 488,
"h": "invalid",
"s": 538,
"t": 1335146921,
},
},
}
options = self._get_option(isolate_file)
isolate.trace_inputs.write_json(options.isolated, input_data, False)
# A CompleteState object contains two parts:
# - Result instance stored in complete_state.isolated, corresponding to the
# .isolated file, is what is read by run_test_from_archive.py.
    #   - SavedState instance stored in complete_state.saved_state,
# corresponding to the .state file, which is simply to aid the developer
# when re-running the same command multiple times and contain
# discardable information.
complete_state = isolate.load_complete_state(options, None)
actual_isolated = complete_state.isolated.flatten()
actual_saved_state = complete_state.saved_state.flatten()
expected_isolated = {
'command': ['python', 'touch_root.py'],
'files': {
os.path.join(u'tests', 'isolate', 'touch_root.py'): {
'm': 488,
'h': _sha1('tests', 'isolate', 'touch_root.py'),
's': _size('tests', 'isolate', 'touch_root.py'),
},
'isolate.py': {
'm': 488,
'h': _sha1('isolate.py'),
's': _size('isolate.py'),
},
},
'os': isolate.get_flavor(),
'relative_cwd': os.path.join('tests', 'isolate'),
}
self._cleanup_isolated(expected_isolated, actual_isolated)
self.assertEqual(expected_isolated, actual_isolated)
expected_saved_state = {
'isolate_file': isolate_file,
'variables': {'foo': 'bar'},
}
self.assertEqual(expected_saved_state, actual_saved_state)
def test_subdir(self):
# The resulting .isolated file will be missing ../../isolate.py. It is
# because this file is outside the --subdir parameter.
isolate_file = os.path.join(
ROOT_DIR, 'tests', 'isolate', 'touch_root.isolate')
options = self._get_option(isolate_file)
complete_state = isolate.load_complete_state(
options, os.path.join('tests', 'isolate'))
actual_isolated = complete_state.isolated.flatten()
actual_saved_state = complete_state.saved_state.flatten()
expected_isolated = {
'command': ['python', 'touch_root.py'],
'files': {
os.path.join('tests', 'isolate', 'touch_root.py'): {
'm': 488,
'h': _sha1('tests', 'isolate', 'touch_root.py'),
's': _size('tests', 'isolate', 'touch_root.py'),
},
},
'os': isolate.get_flavor(),
'relative_cwd': os.path.join('tests', 'isolate'),
}
self._cleanup_isolated(expected_isolated, actual_isolated)
self.assertEqual(expected_isolated, actual_isolated)
expected_saved_state = {
'isolate_file': isolate_file,
'variables': {'foo': 'bar'},
}
self.assertEqual(expected_saved_state, actual_saved_state)
def test_subdir_variable(self):
# The resulting .isolated file will be missing ../../isolate.py. It is
# because this file is outside the --subdir parameter.
isolate_file = os.path.join(
ROOT_DIR, 'tests', 'isolate', 'touch_root.isolate')
options = self._get_option(isolate_file)
options.variables['BAZ'] = os.path.join('tests', 'isolate')
complete_state = isolate.load_complete_state(options, '<(BAZ)')
actual_isolated = complete_state.isolated.flatten()
actual_saved_state = complete_state.saved_state.flatten()
expected_isolated = {
'command': ['python', 'touch_root.py'],
'files': {
os.path.join('tests', 'isolate', 'touch_root.py'): {
'm': 488,
'h': _sha1('tests', 'isolate', 'touch_root.py'),
's': _size('tests', 'isolate', 'touch_root.py'),
},
},
'os': isolate.get_flavor(),
'relative_cwd': os.path.join('tests', 'isolate'),
}
self._cleanup_isolated(expected_isolated, actual_isolated)
self.assertEqual(expected_isolated, actual_isolated)
expected_saved_state = {
'isolate_file': isolate_file,
'variables': {
'foo': 'bar',
'BAZ': os.path.join('tests', 'isolate'),
},
}
self.assertEqual(expected_saved_state, actual_saved_state)
if __name__ == '__main__':
logging.basicConfig(
level=logging.DEBUG if '-v' in sys.argv else logging.ERROR,
format='%(levelname)5s %(filename)15s(%(lineno)3d): %(message)s')
unittest.main()
| bsd-3-clause | 2,551,652,073,914,647,600 | 28.380165 | 79 | 0.493847 | false |
JIC-CSB/jicbioimage | setup.py | 1 | 1144 | from setuptools import setup
readme = open('README.rst').read()
version = "0.15.0"
setup(name='jicbioimage',
packages=['jicbioimage', ],
version=version,
description='Python package designed to make it easy to work with bio images.',
long_description=readme,
author='Tjelvar Olsson',
author_email = '[email protected]',
url = 'https://github.com/JIC-CSB/jicbioimage',
download_url = 'https://github.com/JIC-CSB/jicbioimage/tarball/{}'.format(version),
license='MIT',
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Bio-Informatics",
"Topic :: Scientific/Engineering :: Image Recognition",
],
keywords = ['microscopy', 'image analysis'],
)
| mit | -7,883,911,221,225,092,000 | 35.903226 | 89 | 0.620629 | false |
KWierso/treeherder | tests/push_health/test_compare.py | 1 | 2050 | import datetime
import responses
from treeherder.model.models import Push
from treeherder.push_health.compare import (get_parent,
get_response_object)
def test_get_response_object(test_push, test_repository):
resp = get_response_object('1234', test_push, test_repository)
assert resp['parentSha'] == '1234'
assert resp['id'] == 1
assert resp['exactMatch'] is False
assert resp['revision'] == '4c45a777949168d16c03a4cba167678b7ab65f76'
@responses.activate
def test_get_parent(test_push, test_repository):
test_revision = '4c45a777949168d16c03a4cba167678b7ab65f76'
parent_revision = 'abcdef77949168d16c03a4cba167678b7ab65f76'
Push.objects.create(
revision=parent_revision,
repository=test_repository,
author='[email protected]',
time=datetime.datetime.now()
)
commits_url = '{}/json-pushes?version=2&full=1&changeset={}'.format(
test_repository.url, test_revision)
commits = {'pushes': {1: {'changesets': [{'parents': [parent_revision]}]}}}
responses.add(responses.GET, commits_url, json=commits, content_type='application/json', status=200)
parent = get_parent(test_repository, test_revision, test_push)
assert parent['parentSha'] == parent_revision
assert parent['revision'] == parent_revision
@responses.activate
def test_get_parent_not_found(test_push, test_repository):
test_revision = '4c45a777949168d16c03a4cba167678b7ab65f76'
# Does not exist as a Push in the DB.
parent_revision = 'abcdef77949168d16c03a4cba167678b7ab65f76'
commits_url = '{}/json-pushes?version=2&full=1&changeset={}'.format(
test_repository.url, test_revision)
commits = {'pushes': {1: {'changesets': [{'parents': [parent_revision]}]}}}
responses.add(responses.GET, commits_url, json=commits, content_type='application/json', status=200)
parent = get_parent(test_repository, test_revision, test_push)
assert parent['parentSha'] == parent_revision
assert parent['revision'] is None
| mpl-2.0 | -8,634,045,018,885,420,000 | 38.423077 | 104 | 0.697561 | false |
globocom/tapioca | tests/acceptance/test_validation.py | 1 | 2829 | from json import loads
import tornado.web
from schema import Use
from tornado.testing import AsyncHTTPTestCase
from tests.support import AsyncHTTPClientMixin, assert_response_code
from tapioca import TornadoRESTful, ResourceHandler, validate, optional
class ProjectsResource(ResourceHandler):
@validate(querystring={
'ck': unicode,
        optional('name'): (unicode, 'The name of the project that you want to search for'),
optional('size'): (Use(int), 'The maximum number of projects you want')
})
def get_collection(self, callback):
callback([self.values['querystring']])
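# Note (illustrative request): the schema above validates and coerces the query string before
# get_collection runs, so a request like
#     GET /projects.json?ck=1&name=test&size=10
# is accepted, 'size' is coerced to an int by Use(int), and the validated values are read back
# through self.values['querystring'].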
class UseOfValidationTestCase(AsyncHTTPTestCase, AsyncHTTPClientMixin):
def get_app(self):
api = TornadoRESTful()
api.add_resource('projects', ProjectsResource)
application = tornado.web.Application(api.get_url_mapping())
return application
def test_should_return_when_called_with_the_correct_values(self):
response = self.get('/projects.json?ck=1&name=test&size=10')
assert_response_code(response, 200)
def test_should_return_ok_if_more_params_than_expected(self):
response = self.get('/projects.json?ck=1&name=test&foo=bar&size=234')
assert_response_code(response, 200)
def test_should_return_an_descriptive_error(self):
response = self.get('/projects.json?ck=1&name=test&size=foo')
assert_response_code(response, 400)
assert loads(response.body)['error'] == \
'The "size" parameter value is not valid.'
def test_should_be_able_to_call_without_size(self):
response = self.get('/projects.json?ck=1&name=foobar')
assert_response_code(response, 200)
def test_should_throw_error_when_required_param_is_not_passed(self):
response = self.get('/projects.json')
assert_response_code(response, 400)
assert loads(response.body)['error'] == \
'The "ck" parameter is required.'
class NoAccessValuesInQuerystring(ResourceHandler):
@validate(querystring={
'it_is_int_value': (Use(int), 'The maximum number of projects you want')
})
def get_collection(self, callback):
callback(['its always valid'])
class NoAccessDefinedValuesTestCase(AsyncHTTPTestCase, AsyncHTTPClientMixin):
def get_app(self):
api = TornadoRESTful()
api.add_resource('always_valid', NoAccessValuesInQuerystring)
application = tornado.web.Application(api.get_url_mapping())
return application
def test_should_be_a_valid_using_int(self):
response = self.get('/always_valid.json?size=1')
assert_response_code(response, 200)
def test_should_be_a_valid_request_anyway(self):
response = self.get('/always_valid.json?size=abc')
assert_response_code(response, 200)
| mit | 8,893,176,927,390,336,000 | 34.810127 | 94 | 0.684341 | false |
berkmancenter/mediacloud | apps/webapp-api/src/python/webapp/auth/password.py | 1 | 5455 | import base64
import hashlib
import os
from mediawords.db import DatabaseHandler
from mediawords.util.log import create_logger
from mediawords.util.perl import decode_object_from_bytes_if_needed
__HASH_SALT_PREFIX = "{SSHA256}"
__HASH_LENGTH = 64 # SHA-256 hash length
__SALT_LENGTH = 64
__MIN_PASSWORD_LENGTH = 8
__MAX_PASSWORD_LENGTH = 120
log = create_logger(__name__)
class McAuthPasswordException(Exception):
"""Password-related exceptions."""
pass
def password_hash_is_valid(password_hash: str, password: str) -> bool:
"""Validate a password / password token.
Ported from Crypt::SaltedHash: https://metacpan.org/pod/Crypt::SaltedHash
"""
password_hash = decode_object_from_bytes_if_needed(password_hash)
password = decode_object_from_bytes_if_needed(password)
if not password_hash:
raise McAuthPasswordException("Password hash is None or empty.")
if password is None:
raise McAuthPasswordException("Password is None.")
# Password can be an empty string but that would be weird so we only spit out a warning
if not password:
log.warning("Password is empty.")
if not password_hash.startswith(__HASH_SALT_PREFIX):
raise McAuthPasswordException("Password hash does not start with an expected prefix.")
if len(password_hash) != len(__HASH_SALT_PREFIX) + __HASH_LENGTH + __SALT_LENGTH:
raise McAuthPasswordException("Password hash is of the incorrect length.")
try:
password = password.encode('utf-8', errors='replace') # to concatenate with 'bytes' salt later
password_hash = password_hash[len(__HASH_SALT_PREFIX):]
salted_hash_salt = base64.b64decode(password_hash)
salt = salted_hash_salt[-1 * __SALT_LENGTH:]
expected_salted_hash = salted_hash_salt[:len(salted_hash_salt) - __SALT_LENGTH]
actual_password_salt = password + salt
sha256 = hashlib.sha256()
sha256.update(actual_password_salt)
actual_salted_hash = sha256.digest()
if expected_salted_hash == actual_salted_hash:
return True
else:
return False
except Exception as ex:
log.warning("Failed to validate hash: %s" % str(ex))
return False
def generate_secure_hash(password: str) -> str:
"""Hash a secure hash (password / password reset token) with a random salt.
Ported from Crypt::SaltedHash: https://metacpan.org/pod/Crypt::SaltedHash
"""
password = decode_object_from_bytes_if_needed(password)
if password is None:
raise McAuthPasswordException("Password is None.")
# Password can be an empty string but that would be weird so we only spit out a warning
if not password:
log.warning("Password is empty.")
password = password.encode('utf-8', errors='replace') # to concatenate with 'bytes' salt later
# os.urandom() is supposed to be crypto-secure
salt = os.urandom(__SALT_LENGTH)
password_salt = password + salt
sha256 = hashlib.sha256()
sha256.update(password_salt)
salted_hash = sha256.digest()
salted_hash_salt = salted_hash + salt
base64_salted_hash = base64.b64encode(salted_hash_salt).decode('ascii')
return __HASH_SALT_PREFIX + base64_salted_hash
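# Minimal round-trip sketch for the two helpers above (illustrative values only):
#     hashed = generate_secure_hash('some passphrase')
#     assert hashed.startswith('{SSHA256}')
#     assert password_hash_is_valid(hashed, 'some passphrase')
#     assert not password_hash_is_valid(hashed, 'wrong passphrase')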
def password_reset_token_is_valid(db: DatabaseHandler, email: str, password_reset_token: str) -> bool:
"""Validate password reset token (used for both user activation and password reset)."""
email = decode_object_from_bytes_if_needed(email)
password_reset_token = decode_object_from_bytes_if_needed(password_reset_token)
if not (email and password_reset_token):
log.error("Email and / or password reset token is empty.")
return False
# Fetch readonly information about the user
password_reset_token_hash = db.query("""
SELECT auth_users_id,
email,
password_reset_token_hash
FROM auth_users
WHERE email = %(email)s
LIMIT 1
""", {'email': email}).hash()
if password_reset_token_hash is None or 'auth_users_id' not in password_reset_token_hash:
log.error("Unable to find user %s in the database." % email)
return False
password_reset_token_hash = password_reset_token_hash['password_reset_token_hash']
if password_hash_is_valid(password_hash=password_reset_token_hash, password=password_reset_token):
return True
else:
return False
def validate_new_password(email: str, password: str, password_repeat: str) -> str:
"""Check if password complies with strength the requirements.
Returns empty string on valid password, error message on invalid password."""
email = decode_object_from_bytes_if_needed(email)
password = decode_object_from_bytes_if_needed(password)
password_repeat = decode_object_from_bytes_if_needed(password_repeat)
if not email:
return 'Email address is empty.'
if not (password and password_repeat):
return 'To set the password, please repeat the new password twice.'
if password != password_repeat:
return 'Passwords do not match.'
if len(password) < __MIN_PASSWORD_LENGTH or len(password) > __MAX_PASSWORD_LENGTH:
return 'Password must be between %d and %d characters length.' % (__MIN_PASSWORD_LENGTH, __MAX_PASSWORD_LENGTH,)
if password == email:
return "New password is your email address; don't cheat!"
return ''
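# Illustrative behaviour of validate_new_password (hypothetical inputs; '' means the password
# is acceptable):
#     validate_new_password('[email protected]', 'short', 'short')            -> length error message
#     validate_new_password('[email protected]', 'longpassword1', 'other')    -> 'Passwords do not match.'
#     validate_new_password('[email protected]', 'longpassword1', 'longpassword1') -> ''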
| agpl-3.0 | 8,678,888,388,578,591,000 | 32.67284 | 120 | 0.67846 | false |
jakubtyniecki/pact | sort/hybrid.py | 1 | 2447 | """ hybrid sort module """
from sort.framework import validate
THRESHOLD = 10 # threshold when to fallback to insert sort
@validate
def sort(arr):
""" hybrid sort """
hybridsort(arr, 0, len(arr) - 1)
return arr
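# Minimal usage sketch (assumes a plain list of mutually comparable items):
#     data = [5, 3, 8, 1, 9, 2, 7]
#     sort(data)   # sorts in place and returns the same list: [1, 2, 3, 5, 7, 8, 9]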
def hybridsort(arr, first, last):
""" hybrid sort """
stack = []
stack.append((first, last))
while stack:
pos = stack.pop()
left, right = pos[0], pos[1]
if right - left < THRESHOLD:
""" if array is smaller then given threshold
use insert sort as it'll be more efficient """
insertsort(arr, left, right)
else:
piv = partition(arr, left, right)
if piv - 1 > left:
stack.append((left, piv - 1))
if piv + 1 < right:
stack.append((piv + 1, right))
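# Note (inferred from the loop above): hybridsort keeps an explicit stack of (left, right)
# ranges instead of recursing, so large inputs cannot hit Python's recursion limit.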
def insertsort(arr, first, last):
""" insert sort """
assert first <= len(arr) and last < len(arr), \
"first: {}, last: {}".format(first, last)
for i in range(first, last + 1):
position, currentvalue = i, arr[i]
while position > 0 and arr[position - 1] > currentvalue:
arr[position] = arr[position - 1]
position -= 1
arr[position] = currentvalue
def partition(arr, first, last):
""" partition """
assert first < len(arr) and last < len(arr) and first < last, \
"first: {}, last: {}".format(first, last)
pivotindex = pivotpoint(arr, first, last)
if pivotindex > first:
arr[first], arr[pivotindex] = arr[pivotindex], arr[first]
pivotvalue = arr[first]
left, right = first + 1, last
while right >= left:
while left <= right and arr[left] <= pivotvalue:
left += 1
while arr[right] >= pivotvalue and right >= left:
right -= 1
assert right >= 0 and left <= len(arr)
if right > left:
arr[left], arr[right] = arr[right], arr[left]
if right > first:
arr[first], arr[right] = arr[right], arr[first]
return right
def pivotpoint(arr, first, last):
""" pivot point strategy
using median of first, mid and last elements
to prevent worst case scenario """
    mid = first + ((last - first) >> 1)  # parentheses matter: '+' binds tighter than '>>'
if (arr[first] - arr[mid]) * (arr[last] - arr[first]) >= 0:
return first
elif (arr[mid] - arr[first]) * (arr[last] - arr[mid]) >= 0:
return mid
else:
return last
| mit | 7,002,549,922,402,661,000 | 23.969388 | 67 | 0.548018 | false |
xuludev/CVLH_tutorial | netease_spider.py | 1 | 3334 | import json
import time
import os
import re
import requests
from bs4 import BeautifulSoup
import chardet
"""
url_list = [
'http://tech.163.com/special/00097UHL/tech_datalist_02.js?callback=data_callback',
'http://ent.163.com/special/000380VU/newsdata_index_02.js?callback=data_callback',
'http://sports.163.com/special/000587PR/newsdata_n_index_02.js?callback=data_callback',
'http://money.163.com/special/002557S5/newsdata_idx_index.js?callback=data_callback',
'http://edu.163.com/special/002987KB/newsdata_edu_hot_02.js?callback=data_callback'
]
"""
def crawl(pn):
headers = {
'Accept': '*/*',
'Connection': 'keep-alive',
'Host': 'ent.163.com',
'Referer': 'http://ent.163.com/',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'
}
request_url = 'http://ent.163.com/special/000380VU/newsdata_index_0' + str(pn) + '.js?callback=data_callback'
# print('is crawling page ' + request_url + '......')
response = requests.get(request_url, headers=headers)
if response.status_code == 200:
page_encoding = chardet.detect(response.content)['encoding']
temp_str = response.text.replace('data_callback(', '')
# temp_str = response.content.decode(page_encoding).replace('data_callback(', '')
temp_str = temp_str.replace(temp_str[-1], '')
for each_news in json.loads(temp_str):
print(each_news['docurl'])
download_news_content(each_news['title'], each_news['docurl'])
elif response.status_code == 404:
raise Exception('No Page Found! ' + request_url)
else:
print('ERROR! ' + str(response.status_code))
def download_news_content(title, news_url):
if news_url.startswith('http://v'):
print('This page contains video ...')
else:
# r_content = re.compile('<img \w')
r_title = re.compile('[\?\"\?\:\s\/\·]')
file_dir = 'd:/网易新闻/娱乐'
if not os.path.exists(file_dir) or not os.path.isdir(file_dir):
os.makedirs(file_dir)
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'
}
response = requests.get(news_url, headers=headers)
if response.status_code == 200:
soup = BeautifulSoup(response.text, 'html5lib')
if response.url.startswith('http://digi.163.com') or response.url.startswith('http://tech.163.com') or response.url.startswith('http://ent.163.com'):
text_soup = soup.select('#endText')[0]
content_text = text_soup.get_text()
elif response.url.startswith('http://dy.163.com'):
text_soup = soup.select('#content')[0]
content_text = text_soup.get_text()
			elif response.url.startswith('http://mobile.163.com'):
				text_soup = soup.select('#epContentLeft')[0]
				content_text = text_soup.get_text()
			else:
				# Unrecognised article layout: skip early so content_text is never used unassigned
				print('Unsupported article layout, skipping: ' + response.url)
				return
with open(file_dir + os.path.sep + re.sub(r_title, '', title, count=0) + '.txt', mode='wt', encoding='utf-8') as f:
f.write(content_text)
f.flush()
f.close()
print(title + '.txt has been written done!')
if __name__ == '__main__':
for i in range(2, 10, 1):
crawl(i)
time.sleep(5) | apache-2.0 | -1,926,016,843,630,203,600 | 34.855556 | 152 | 0.653092 | false |
mdavidsaver/spicetools | spicetools/view/mainwin_ui.py | 1 | 8609 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'spicetools/view/mainwin.ui'
#
# Created: Sun Apr 27 13:13:01 2014
# by: PyQt4 UI code generator 4.9.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(746, 516)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/icon.svg")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
MainWindow.setWindowIcon(icon)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.horizontalLayout_3 = QtGui.QHBoxLayout(self.centralwidget)
self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
self.verticalLayout = QtGui.QVBoxLayout()
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.label_3 = QtGui.QLabel(self.centralwidget)
self.label_3.setAlignment(QtCore.Qt.AlignCenter)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.verticalLayout.addWidget(self.label_3)
self.sets = QtGui.QComboBox(self.centralwidget)
self.sets.setObjectName(_fromUtf8("sets"))
self.verticalLayout.addWidget(self.sets)
self.label_2 = QtGui.QLabel(self.centralwidget)
self.label_2.setAlignment(QtCore.Qt.AlignCenter)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.verticalLayout.addWidget(self.label_2)
self.xaxis = QtGui.QComboBox(self.centralwidget)
self.xaxis.setObjectName(_fromUtf8("xaxis"))
self.verticalLayout.addWidget(self.xaxis)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.label_4 = QtGui.QLabel(self.centralwidget)
self.label_4.setAlignment(QtCore.Qt.AlignCenter)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.horizontalLayout.addWidget(self.label_4)
self.ops = QtGui.QComboBox(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(1)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.ops.sizePolicy().hasHeightForWidth())
self.ops.setSizePolicy(sizePolicy)
self.ops.setObjectName(_fromUtf8("ops"))
self.horizontalLayout.addWidget(self.ops)
self.verticalLayout.addLayout(self.horizontalLayout)
self.label = QtGui.QLabel(self.centralwidget)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setObjectName(_fromUtf8("label"))
self.verticalLayout.addWidget(self.label)
self.signals = QtGui.QListWidget(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.signals.sizePolicy().hasHeightForWidth())
self.signals.setSizePolicy(sizePolicy)
self.signals.setSelectionMode(QtGui.QAbstractItemView.MultiSelection)
self.signals.setObjectName(_fromUtf8("signals"))
self.verticalLayout.addWidget(self.signals)
self.horizontalLayout_3.addLayout(self.verticalLayout)
self.canvas = PlotArea(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.canvas.sizePolicy().hasHeightForWidth())
self.canvas.setSizePolicy(sizePolicy)
self.canvas.setObjectName(_fromUtf8("canvas"))
self.horizontalLayout_3.addWidget(self.canvas)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 746, 20))
self.menubar.setObjectName(_fromUtf8("menubar"))
self.menu_File = QtGui.QMenu(self.menubar)
self.menu_File.setObjectName(_fromUtf8("menu_File"))
self.menuAbout = QtGui.QMenu(self.menubar)
self.menuAbout.setObjectName(_fromUtf8("menuAbout"))
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.actionOpen = QtGui.QAction(MainWindow)
self.actionOpen.setObjectName(_fromUtf8("actionOpen"))
self.actionE_xit = QtGui.QAction(MainWindow)
self.actionE_xit.setMenuRole(QtGui.QAction.QuitRole)
self.actionE_xit.setObjectName(_fromUtf8("actionE_xit"))
self.actionAbout = QtGui.QAction(MainWindow)
self.actionAbout.setObjectName(_fromUtf8("actionAbout"))
self.actionAboutQt = QtGui.QAction(MainWindow)
self.actionAboutQt.setObjectName(_fromUtf8("actionAboutQt"))
self.actionClose = QtGui.QAction(MainWindow)
self.actionClose.setObjectName(_fromUtf8("actionClose"))
self.actionReload = QtGui.QAction(MainWindow)
self.actionReload.setObjectName(_fromUtf8("actionReload"))
self.actionCloneWindow = QtGui.QAction(MainWindow)
self.actionCloneWindow.setObjectName(_fromUtf8("actionCloneWindow"))
self.menu_File.addAction(self.actionCloneWindow)
self.menu_File.addAction(self.actionOpen)
self.menu_File.addAction(self.actionReload)
self.menu_File.addAction(self.actionClose)
self.menu_File.addSeparator()
self.menu_File.addAction(self.actionE_xit)
self.menuAbout.addAction(self.actionAbout)
self.menuAbout.addAction(self.actionAboutQt)
self.menubar.addAction(self.menu_File.menuAction())
self.menubar.addAction(self.menuAbout.menuAction())
self.retranslateUi(MainWindow)
QtCore.QObject.connect(self.actionE_xit, QtCore.SIGNAL(_fromUtf8("triggered()")), MainWindow.close)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", "SpiceView", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setText(QtGui.QApplication.translate("MainWindow", "Vector Set", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("MainWindow", "X Vector", None, QtGui.QApplication.UnicodeUTF8))
self.label_4.setText(QtGui.QApplication.translate("MainWindow", "Op:", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("MainWindow", "Y Vector(s)", None, QtGui.QApplication.UnicodeUTF8))
self.signals.setSortingEnabled(True)
self.menu_File.setTitle(QtGui.QApplication.translate("MainWindow", "&File", None, QtGui.QApplication.UnicodeUTF8))
self.menuAbout.setTitle(QtGui.QApplication.translate("MainWindow", "&Help", None, QtGui.QApplication.UnicodeUTF8))
self.actionOpen.setText(QtGui.QApplication.translate("MainWindow", "&Open", None, QtGui.QApplication.UnicodeUTF8))
self.actionOpen.setShortcut(QtGui.QApplication.translate("MainWindow", "Ctrl+O", None, QtGui.QApplication.UnicodeUTF8))
self.actionE_xit.setText(QtGui.QApplication.translate("MainWindow", "E&xit", None, QtGui.QApplication.UnicodeUTF8))
self.actionE_xit.setShortcut(QtGui.QApplication.translate("MainWindow", "Ctrl+Q", None, QtGui.QApplication.UnicodeUTF8))
self.actionAbout.setText(QtGui.QApplication.translate("MainWindow", "About", None, QtGui.QApplication.UnicodeUTF8))
self.actionAboutQt.setText(QtGui.QApplication.translate("MainWindow", "About Qt", None, QtGui.QApplication.UnicodeUTF8))
self.actionClose.setText(QtGui.QApplication.translate("MainWindow", "&Close", None, QtGui.QApplication.UnicodeUTF8))
self.actionReload.setText(QtGui.QApplication.translate("MainWindow", "&Reload", None, QtGui.QApplication.UnicodeUTF8))
self.actionReload.setShortcut(QtGui.QApplication.translate("MainWindow", "Ctrl+R", None, QtGui.QApplication.UnicodeUTF8))
self.actionCloneWindow.setText(QtGui.QApplication.translate("MainWindow", "Clone Wi&ndow", None, QtGui.QApplication.UnicodeUTF8))
from .plotarea import PlotArea
from . import viewer_rc
| gpl-3.0 | 8,151,757,473,375,844,000 | 58.784722 | 137 | 0.724707 | false |
Jumpscale/jumpscale_portal8 | apps/portalbase/macros/page/action/3_action.py | 1 | 1083 | def main(j, args, params, tags, tasklet):
page = args.page
data = {'action': args.getTag('id'),
'class': args.getTag('class') or '',
'deleterow': args.getTag('deleterow') or 'false',
'label': args.getTag('label') or '',
}
extradata = {}
tags = j.data.tags.getObject(args.cmdstr, None)
for tagname, tagvalue in tags.getDict().items():
if tagname.startswith('data-'):
extradata[tagname[5:]] = tagvalue
data['data'] = j.data.serializer.json.dumps(extradata)
if data['class']:
data['label'] = "<span class='%(class)s'></span> %(label)s" % data
element = "<a class='js_action'" \
" data-action='%(action)s'" \
" data-extradata='%(data)s'" \
" data-deleterow='%(deleterow)s'" \
"href='javascript:void(0);'>%(label)s</a>" % data
page.addMessage(element)
page.addJS('/system/.files/js/action.js', header=False)
params.result = page
return params
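# Rough shape of the emitted element for hypothetical tag values (id='remove',
# label='Delete row', deleterow='true', no extra data-* tags):
#     <a class='js_action' data-action='remove' data-extradata='{}'
#        data-deleterow='true' href='javascript:void(0);'>Delete row</a>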
def match(j, args, params, tags, tasklet):
return True
| apache-2.0 | 6,079,685,514,749,661,000 | 32.84375 | 74 | 0.554017 | false |
volnrok/sortable-challenge | main.py | 1 | 3683 | import json
import random
import re
from check import check_match
from encoder import Encoder
from listing import Listing
from product import Product
# We'll sort products by manufacturer first
man_lookup = {}
# List of common manufacturer aliases
aliases = {
'agfaphoto': 'agfa',
'fuji': 'fujifilm',
'hewlett': 'hp',
'konica': 'konica minolta',
'sigmatek': 'sigma'
}
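# Note on the alias map above (inferred from its use below): keys are lowercased manufacturer
# strings as they appear in listings, values are the canonical keys used in man_lookup, so a
# listing whose manufacturer field is "Fuji" is matched against "fujifilm" products.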
with open('products.txt', encoding='utf-8') as file:
for j in file:
product = Product(j)
man = product.manufacturer.lower()
# New manufacturer
if man not in man_lookup:
man_lookup[man] = []
# Enter product into data store
man_lookup[man].append(product)
with open('listings.txt', encoding='utf-8') as file:
mcount = 0
lcount = 0
man_cutoff = 3 # Only check the first few words for manufacturer matches
word_pattern = re.compile('\w+')
for j in file:
listing = Listing(j)
man = listing.manufacturer.lower()
if man not in man_lookup:
if man in aliases:
# First look for manufacturer aliases match
man = aliases[man]
else:
i = 0
# Try to find a manufacturer match, look for words in the listing title
for match in word_pattern.finditer(listing.title):
match_str = match.group(0).lower()
if match_str in aliases:
man = aliases[match_str]
break
if match_str in man_lookup:
man = match_str
break
i += 1
# Actual product matches (vs accessories) will have a manufacturer match in the first few words
if i >= man_cutoff:
break
if man in man_lookup:
model_matches = []
family_matches = []
for product in man_lookup[man]:
match = check_match(product, listing)
# Don't count model matches with single-character models
if match['m_match'] and len(product.model) > 1:
model_matches.append((product, match['m_index']))
if match['f_match'] >= 2:
family_matches.append((product, match['m_index']))
matched = False
if len(model_matches) == 1:
matched = model_matches[0]
elif len(family_matches) == 1:
matched = family_matches[0]
if matched:
# If the manufacturer is present in the title multiple times, check that the product model happens before the second
i = 0
second_index = 0
for man_match in re.finditer(man, listing.title, re.IGNORECASE):
i += 1
if i >= 2:
second_index = man_match.start(0)
break
if i >= 2 and second_index < matched[1]:
pass
else:
mcount += 1
matched[0].matches.append(listing)
lcount += 1
if lcount % 1000 == 0:
print('.', end='')
print()
print(lcount, 'listings read,', mcount, 'matches found')
with open('matches.txt', mode='w', encoding='utf-8') as file:
for man in man_lookup:
for product in man_lookup[man]:
if len(product.matches):
file.write(json.dumps(product, cls=Encoder, ensure_ascii=False))
file.write('\n')
print('Results saved to matches.txt')
| mit | -3,413,363,037,030,869,000 | 31.307018 | 132 | 0.519685 | false |
ExCiteS/geokey-wegovnow | geokey_wegovnow/tests/test_commands.py | 1 | 7233 | """Test all commands."""
import sys
from StringIO import StringIO
from django.conf import settings
from django.test import TestCase
from django.core import management
from django.contrib.sites.shortcuts import get_current_site
from allauth.socialaccount import providers
from allauth.socialaccount.models import SocialApp, SocialAccount
from geokey.users.models import User
from geokey.users.tests.model_factories import UserFactory
class AddUWUMAppCommandTest(TestCase):
"""Tests for command `add_uwum_app`."""
def setUp(self):
"""Set up tests."""
self.out = StringIO()
self.err = StringIO()
        sys.stdout = self.out
        sys.stderr = self.err
def test_when_client_id_is_not_provided(self):
"""Test command when client ID is not provided."""
options = {}
management.call_command(
'add_uwum_app',
stdout=self.out,
stderr=self.err,
**options)
self.assertEquals(self.out.getvalue(), '')
self.assertTrue('Client ID not provided.' in self.err.getvalue())
def test_when_social_app_was_not_added_yet(self):
"""Test command when social app was not added yet."""
options = {'id': 'test-uwum-app'}
management.call_command(
'add_uwum_app',
stdout=self.out,
stderr=self.err,
**options)
self.assertTrue(
'UWUM app `test-uwum-app` was added.' in self.out.getvalue())
self.assertTrue(
'Please configure UWUM in the settings.' in self.out.getvalue())
self.assertEquals(self.err.getvalue(), '')
self.assertEquals(SocialApp.objects.count(), 1)
social_app = SocialApp.objects.latest('pk')
self.assertEquals(social_app.provider, 'uwum')
self.assertEquals(social_app.name, 'UWUM')
self.assertEquals(social_app.client_id, 'test-uwum-app')
self.assertEquals(social_app.secret, '')
self.assertEquals(social_app.key, '')
def test_when_social_app_was_already_added(self):
"""Test command when social app was already added."""
provider = providers.registry.by_id('uwum')
social_app = SocialApp.objects.create(
provider=provider.id,
name=provider.name,
client_id='uwum-app-1',
secret='',
key='')
social_app.sites.add(get_current_site(settings.SITE_ID))
options = {'id': 'uwum-app-2'}
management.call_command(
'add_uwum_app',
stdout=self.out,
stderr=self.err,
**options)
self.assertTrue(
'UWUM app was updated to `uwum-app-2`.' in self.out.getvalue())
self.assertEquals(self.err.getvalue(), '')
self.assertEquals(SocialApp.objects.count(), 1)
social_app = SocialApp.objects.get(pk=social_app.id)
self.assertEquals(social_app.provider, 'uwum')
self.assertEquals(social_app.name, 'UWUM')
self.assertEquals(social_app.client_id, 'uwum-app-2')
self.assertEquals(social_app.secret, '')
self.assertEquals(social_app.key, '')
class SetSuperuserCommandTest(TestCase):
"""Tests for command `set_superuser`."""
def setUp(self):
"""Set up tests."""
self.out = StringIO()
self.err = StringIO()
        sys.stdout = self.out
        sys.stderr = self.err
self.user_1 = UserFactory.create(
display_name='Test User 1',
email='[email protected]',
is_superuser=False)
self.user_2 = UserFactory.create(
display_name='Test User 2',
email='[email protected]',
is_superuser=False)
self.socialaccount_1 = SocialAccount.objects.create(
user=self.user_1,
provider='twitter',
uid='5478',
extra_data={})
self.socialaccount_2 = SocialAccount.objects.create(
user=self.user_2,
provider='uwum',
uid='437',
extra_data={
'member': {'name': 'Another User', 'email': '[email protected]'}
})
self.socialaccount_3 = SocialAccount.objects.create(
user=self.user_1,
provider='uwum',
uid='1547',
extra_data={
'member': {'name': 'Test User', 'email': '[email protected]'}
})
def test_when_username_is_not_provided(self):
"""Test command when username is not provided."""
options = {'email': '[email protected]'}
management.call_command(
'set_superuser',
stdout=self.out,
stderr=self.err,
**options)
self.assertEquals(self.out.getvalue(), '')
self.assertTrue('Username not provided.' in self.err.getvalue())
def test_when_email_is_not_provided(self):
"""Test command when email is not provided."""
options = {'username': 'Test User'}
management.call_command(
'set_superuser',
stdout=self.out,
stderr=self.err,
**options)
self.assertEquals(self.out.getvalue(), '')
self.assertTrue('Email address not provided.' in self.err.getvalue())
def test_when_user_is_not_found(self):
"""Test command when user is not found."""
options = {
'username': 'Test User',
'email': '[email protected]'}
management.call_command(
'set_superuser',
stdout=self.out,
stderr=self.err,
**options)
self.assertEquals(self.out.getvalue(), '')
self.assertTrue('User was not found.' in self.err.getvalue())
options = {
'username': 'Non-existing User',
'email': '[email protected]'}
management.call_command(
'set_superuser',
stdout=self.out,
stderr=self.err,
**options)
self.assertEquals(self.out.getvalue(), '')
self.assertTrue('User was not found.' in self.err.getvalue())
def test_when_user_is_already_superuser(self):
"""Test command when user is already a superuser."""
self.user_1.is_superuser = True
self.user_1.save()
options = {
'username': 'Test User',
'email': '[email protected]'}
management.call_command(
'set_superuser',
stdout=self.out,
stderr=self.err,
**options)
self.assertEquals(self.out.getvalue(), '')
self.assertTrue('User is already a superuser.' in self.err.getvalue())
def test_when_user_is_set_as_the_superuser(self):
"""Test command when user is set as a superuser."""
options = {
'username': 'Test User',
'email': '[email protected]'}
management.call_command(
'set_superuser',
stdout=self.out,
stderr=self.err,
**options)
self.assertTrue('User was set as a superuser.' in self.out.getvalue())
self.assertEquals(self.err.getvalue(), '')
user = User.objects.get(pk=self.user_1.id)
self.assertEquals(user.is_superuser, True)
| mit | 8,339,166,114,679,516,000 | 34.985075 | 78 | 0.574036 | false |
Jorgesolis1989/SIVORE | corporaciones/views.py | 1 | 11935 | from django.shortcuts import render_to_response
from django.shortcuts import render, redirect
from django.template.context import RequestContext
from corporaciones.models import Corporacion, Sede
from django.contrib.auth.decorators import permission_required
from corporaciones.forms import FormularioRegistroCorporacion, FormularioEditarCorporacion, FormularioCargar
from votantes.models import Votante
from planchas.models import Plancha
from candidatos.models import Candidato
from usuarios.models import Usuario
from django.db.models import Q
import csv
# -*- coding: utf-8 -*-
from io import StringIO
@permission_required("usuarios.Administrador" , login_url="/")
def registro_corporacion(request):
mensaje = ""
if request.method == 'POST' and "btncreate" in request.POST:
form = FormularioRegistroCorporacion(request.POST)
form2 = FormularioCargar(request.POST , request.FILES)
        # If the form is valid and contains data
if form.is_valid():
            # Capture the corporation id
id_corporation = form.cleaned_data["id_corporation"]
sede = form.cleaned_data["sede"]
            # Look up the corporation in the database.
try:
corporacion = Corporacion.objects.get(id_corporation=id_corporation , sede=sede)
except Corporacion.DoesNotExist:
corporacion = Corporacion()
corporacion_create(corporacion, form)
llamarMensaje = "exito_corporacion"
if form.cleaned_data["facultad"] is None or form.cleaned_data["sede"] is None:
mensaje = "La corporacion "+ str(id_corporation) +" se guardo correctamente"
else:
mensaje = "La corporacion "+ str(id_corporation) +" sede "+str(sede.nombre_sede)+" se guardo correctamente"
else:
if not corporacion.is_active:
corporacion_create(corporacion, form)
llamarMensaje = "exito_corporacion"
mensaje = "La corporación "+ str(id_corporation) +" se guardo correctamente"
else:
llamarMensaje = "fracaso_corporacion"
mensaje = "La corporacion "+ str(corporacion) + " ya esta registrada"
request.session['llamarMensaje'] = llamarMensaje
request.session['mensaje'] = mensaje
return redirect("listar_corporacion")
else:
form = FormularioRegistroCorporacion()
data = {
'form': form,
}
return render_to_response('registro_corporacion.html', data, context_instance=RequestContext(request))
elif request.method == 'POST' and "btnload" in request.POST:
form = FormularioRegistroCorporacion()
form2 = FormularioCargar(request.POST , request.FILES)
        # If the form is valid and contains data
if form2.is_valid():
try:
csvf = StringIO(request.FILES['file'].read().decode('ISO-8859-3'))
except Exception as e:
llamarMensaje = "fracaso_corporacion"
mensaje = "Error en el formato del archivo de entrada"
request.session['llamarMensaje'] = llamarMensaje
request.session['mensaje'] = mensaje
return redirect("listar_corporacion")
reader = csv.reader(csvf, delimiter=';')
line=0
counter= 0
            # Check whether the campuses already exist and create them if not.
diccionarioSedes= {'CALI':'0', 'BUGA':'1', 'CAICEDONIA':'2', 'CARTAGO':'3', 'PACIFICO':'4',
'PALMIRA':'5','TULUA':'6', 'ZARZAL':'7', 'YUMBO':'14',
'SANTANDER DE QUILICHAO':'21'}
#COD. PROGRAMA;PROGRAMA ACADÉMICO;JORNADA;FACULTAD;SEDE
for nombre,codigo in diccionarioSedes.items():
try:
Sede.objects.get(codigo=codigo, is_active=True)
except Sede.DoesNotExist:
sede = Sede()
sede.codigo = codigo
sede.nombre_sede = nombre
try:
sede.save()
except Exception as e:
print("No se pudo guardar la sede" + sede.nombre_sede)
            # Check whether the faculties already exist and create them if not
diccionarioFacultad= { 'CONSEJO SUPERIOR':'1', 'CONSEJO ACADÉMICO':'2',
'INGENIERÍA':'3', 'CIENCIAS DE LA ADMINISTRACIÓN':'4',
'CIENCIAS NATURALES Y EXACTAS':'5', 'HUMANIDADES':'6',
'CIENCIAS SOCIALES Y ECONÓMICAS':'7', 'ARTES INTEGRADAS':'8',
'SALUD':'9', 'INSTITUTO DE EDUCACIÓN Y PEDAGOGÍA':'10' ,
'INSTITUTO DE PSICOLOGÍA': '11'}
            # Create the faculties
for nombre,codigo in diccionarioFacultad.items():
try:
Corporacion.objects.get(id_corporation=codigo, is_active=True)
except Corporacion.DoesNotExist:
corporacion = Corporacion()
corporacion.id_corporation = codigo
                    # Codes of the corporations that do not belong to a faculty
if codigo not in {'1' , '2' , '10', '11'}:
corporacion.name_corporation = "FACULTAD DE " + nombre
else:
corporacion.name_corporation = nombre
try:
corporacion.save()
except Exception as e:
print("No se pudo guardar la corporacion" + corporacion.name_corporation)
#COD. PROGRAMA;PROGRAMA ACADÉMICO;JORNADA;FACULTAD;SEDE
            # Now create the corporations
for row in reader:
if line > 0:
try:
Corporacion.objects.get(id_corporation=row[0] , sede__codigo=diccionarioSedes.get(row[4]) , is_active=True)
except Corporacion.DoesNotExist:
corporacion = Corporacion()
try:
corporacion.id_corporation = row[0]
corporacion.name_corporation = row[1]
corporacion.facultad = Corporacion.objects.get(id_corporation=diccionarioFacultad.get(row[3]))
sede = diccionarioSedes.get(row[4])
corporacion.sede = Sede.objects.get(codigo=sede)
corporacion.save()
counter+= 1
except Exception as e:
print(e)
except Exception as e:
llamarMensaje = "fracaso_corporacion"
mensaje = "Hubo un problema con el archivo de entrada, no coinciden los datos de entrada con la especificaciones dadaas"
request.session['llamarMensaje'] = llamarMensaje
request.session['mensaje'] = mensaje
return redirect("listar_corporacion")
else:
                    line += 1
            # Look up the corporation in the database.
llamarMensaje = "exito_corporacion"
mensaje = "Se crearon " + str(counter)+" corporacion(es) sactisfactoriamente"
request.session['llamarMensaje'] = llamarMensaje
request.session['mensaje'] = mensaje
return redirect("listar_corporacion")
else:
form = FormularioRegistroCorporacion()
form2 = FormularioCargar()
return render(request, 'registro_corporacion.html',{'mensaje': mensaje, 'form': form , 'form2': form2})
# View to list corporations
@permission_required("usuarios.Administrador", login_url="/")
def listar_corporacion(request):
llamarMensaje = request.session.pop('llamarMensaje', None)
mensaje = request.session.pop('mensaje', None)
corporaciones = Corporacion.objects.filter(is_active=True)
return render(request, 'listar_corporaciones.html', {'corporaciones': corporaciones , 'llamarMensaje': llamarMensaje,'mensaje':mensaje })
# Corporation edit view
@permission_required("usuarios.Administrador" , login_url="/")
def editar_corporacion(request, id_corporation=None):
corporacion = Corporacion.objects.get(id=id_corporation)
if request.method == 'POST':
form = FormularioEditarCorporacion(request.POST)
        # If the form is valid and contains data
if form.is_valid():
            # Capture the corporation id
corporacion.name_corporation = form.cleaned_data["name_corporation"]
corporacion.facultad = form.cleaned_data["facultad"]
corporacion.sede = form.cleaned_data["sede"]
            # Update the corporation in the DB, printing any exception
try:
corporacion.save()
except Exception as e:
print(e)
            # Look up the corporation in the database.
llamarMensaje = "edito_corporacion"
mensaje = "Se editó la corporacion " +str(corporacion) +" sactisfactoriamente"
request.session['llamarMensaje'] = llamarMensaje
request.session['mensaje'] = mensaje
return redirect("listar_corporacion")
else:
print("por aca")
else:
if id_corporation is None:
return render(request, 'administrador.html')
else:
form = FormularioEditarCorporacion()
form.initial = {'id_corporation': corporacion.id_corporation, 'name_corporation': corporacion.name_corporation, 'facultad': corporacion.facultad,
'sede': corporacion.sede}
if corporacion.facultad is not None:
form.fields["facultad"].empty_label = None
form.fields["sede"].empty_label = None
else:
form.fields['facultad'].widget.attrs['disabled'] = True
form.fields['sede'].widget.attrs['disabled'] = True
return render(request, 'editar_corporacion.html', {'form': form})
# This method does not delete from the database; it deactivates the corporation
@permission_required("usuarios.Administrador", login_url="/")
def eliminar_corporacion(request, id_corporation=None):
if request.method == 'POST':
corporacion=Corporacion.objects.get(id=id_corporation)
        # fetch the voters of the corporation
votantes_corporacion = Votante.objects.filter((Q(plan__facultad__id=corporacion.id) | Q(plan__id=corporacion.id)) & Q(is_active=True))
        # If the corporation has voters
if votantes_corporacion:
llamarMensaje = "fracaso_usuario"
mensaje = "No se eliminó la corporacion " + str(id_corporation) +" porque tiene votantes asociados"
else:
corporacion.is_active = False
llamarMensaje = "exito_usuario"
mensaje = "Se eliminó la corporacion " + str(id_corporation) +" sactisfactoriamente"
try:
corporacion.save()
except Exception as e:
print(e)
request.session['llamarMensaje'] = llamarMensaje
request.session['mensaje'] = mensaje
return redirect("listar_corporacion")
def corporacion_create(corporacion, form):
corporacion.id_corporation= form.cleaned_data["id_corporation"]
corporacion.name_corporation= form.cleaned_data["name_corporation"]
corporacion.facultad= form.cleaned_data["facultad"]
corporacion.sede= form.cleaned_data["sede"]
corporacion.is_active = True
try:
corporacion.save()
except Exception as e:
print(e)
| apache-2.0 | 9,110,471,766,066,271,000 | 43.155556 | 157 | 0.585472 | false |
teamCarel/EyeTracker | src/shared_modules/calibration_routines/finish_calibration.py | 1 | 17482 | '''
(*)~---------------------------------------------------------------------------
Pupil - eye tracking platform
Copyright (C) 2012-2017 Pupil Labs
Distributed under the terms of the GNU
Lesser General Public License (LGPL v3.0).
See COPYING and COPYING.LESSER for license details.
---------------------------------------------------------------------------~(*)
'''
import os
import numpy as np
from . import calibrate
from math_helper import *
from file_methods import load_object,save_object
from . camera_intrinsics_estimation import load_camera_calibration
from . optimization_calibration import bundle_adjust_calibration
from . calibrate import find_rigid_transform
#logging
import logging
logger = logging.getLogger(__name__)
from . gaze_mappers import *
not_enough_data_error_msg = 'Did not collect enough data during calibration.'
solver_failed_to_converge_error_msg = 'Parameters could not be estimated from data.'
def finish_calibration(g_pool,pupil_list,ref_list):
if pupil_list and ref_list:
pass
else:
logger.error(not_enough_data_error_msg)
g_pool.active_calibration_plugin.notify_all({'subject':'calibration.failed','reason':not_enough_data_error_msg,'timestamp':g_pool.get_timestamp(),'record':True})
return
camera_intrinsics = load_camera_calibration(g_pool)
# match eye data and check if biocular and or monocular
pupil0 = [p for p in pupil_list if p['id']==0]
pupil1 = [p for p in pupil_list if p['id']==1]
#TODO unify this and don't do both
matched_binocular_data = calibrate.closest_matches_binocular(ref_list,pupil_list)
matched_pupil0_data = calibrate.closest_matches_monocular(ref_list,pupil0)
matched_pupil1_data = calibrate.closest_matches_monocular(ref_list,pupil1)
if len(matched_pupil0_data)>len(matched_pupil1_data):
matched_monocular_data = matched_pupil0_data
else:
matched_monocular_data = matched_pupil1_data
logger.info('Collected {} monocular calibration data.'.format(len(matched_monocular_data)))
logger.info('Collected {} binocular calibration data.'.format(len(matched_binocular_data)))
mode = g_pool.detection_mapping_mode
if mode == '3d' and not camera_intrinsics:
mode = '2d'
logger.warning("Please calibrate your world camera using 'camera intrinsics estimation' for 3d gaze mapping.")
if mode == '3d':
hardcoded_translation0 = np.array([20,15,-20])
hardcoded_translation1 = np.array([-40,15,-20])
if matched_binocular_data:
method = 'binocular 3d model'
            # TODO model the world as a cv2 pinhole camera with distortion and focal length in Ceres.
# right now we solve using a few permutations of K
smallest_residual = 1000
scales = list(np.linspace(0.7,1.4,20))
K = camera_intrinsics["camera_matrix"]
for s in scales:
scale = np.ones(K.shape)
scale[0,0] *= s
scale[1,1] *= s
camera_intrinsics["camera_matrix"] = K*scale
ref_dir, gaze0_dir, gaze1_dir = calibrate.preprocess_3d_data(matched_binocular_data,
camera_intrinsics = camera_intrinsics )
if len(ref_dir) < 1 or len(gaze0_dir) < 1 or len(gaze1_dir) < 1:
logger.error(not_enough_data_error_msg)
g_pool.active_calibration_plugin.notify_all({'subject':'calibration.failed','reason':not_enough_data_error_msg,'timestamp':g_pool.get_timestamp(),'record':True})
return
sphere_pos0 = pupil0[-1]['sphere']['center']
sphere_pos1 = pupil1[-1]['sphere']['center']
initial_R0,initial_t0 = find_rigid_transform(np.array(gaze0_dir)*500,np.array(ref_dir)*500)
initial_rotation0 = math_helper.quaternion_from_rotation_matrix(initial_R0)
initial_translation0 = np.array(initial_t0).reshape(3)
initial_R1,initial_t1 = find_rigid_transform(np.array(gaze1_dir)*500,np.array(ref_dir)*500)
initial_rotation1 = math_helper.quaternion_from_rotation_matrix(initial_R1)
initial_translation1 = np.array(initial_t1).reshape(3)
eye0 = { "observations" : gaze0_dir , "translation" : hardcoded_translation0 , "rotation" : initial_rotation0,'fix':['translation'] }
eye1 = { "observations" : gaze1_dir , "translation" : hardcoded_translation1 , "rotation" : initial_rotation1,'fix':['translation'] }
world = { "observations" : ref_dir , "translation" : (0,0,0) , "rotation" : (1,0,0,0),'fix':['translation','rotation'],'fix':['translation','rotation'] }
initial_observers = [eye0,eye1,world]
initial_points = np.array(ref_dir)*500
success,residual, observers, points = bundle_adjust_calibration(initial_observers , initial_points, fix_points=False )
if residual <= smallest_residual:
smallest_residual = residual
scales[-1] = s
if not success:
g_pool.active_calibration_plugin.notify_all({'subject':'calibration.failed','reason':solver_failed_to_converge_error_msg,'timestamp':g_pool.get_timestamp(),'record':True})
logger.error("Calibration solver faild to converge.")
return
eye0,eye1,world = observers
t_world0 = np.array(eye0['translation'])
R_world0 = math_helper.quaternion_rotation_matrix(np.array(eye0['rotation']))
t_world1 = np.array(eye1['translation'])
R_world1 = math_helper.quaternion_rotation_matrix(np.array(eye1['rotation']))
def toWorld0(p):
return np.dot(R_world0, p)+t_world0
def toWorld1(p):
return np.dot(R_world1, p)+t_world1
points_a = [] #world coords
points_b = [] #eye0 coords
points_c = [] #eye1 coords
for a,b,c,point in zip(world['observations'] , eye0['observations'],eye1['observations'],points):
line_a = np.array([0,0,0]) , np.array(a) #observation as line
line_b = toWorld0(np.array([0,0,0])) , toWorld0(b) #eye0 observation line in world coords
line_c = toWorld1(np.array([0,0,0])) , toWorld1(c) #eye1 observation line in world coords
close_point_a,_ = math_helper.nearest_linepoint_to_point( point , line_a )
close_point_b,_ = math_helper.nearest_linepoint_to_point( point , line_b )
close_point_c,_ = math_helper.nearest_linepoint_to_point( point , line_c )
points_a.append(close_point_a)
points_b.append(close_point_b)
points_c.append(close_point_c)
# we need to take the sphere position into account
# orientation and translation are referring to the sphere center.
# but we want to have it referring to the camera center
# since the actual translation is in world coordinates, the sphere translation needs to be calculated in world coordinates
sphere_translation = np.array( sphere_pos0 )
sphere_translation_world = np.dot( R_world0 , sphere_translation)
camera_translation = t_world0 - sphere_translation_world
eye_camera_to_world_matrix0 = np.eye(4)
eye_camera_to_world_matrix0[:3,:3] = R_world0
eye_camera_to_world_matrix0[:3,3:4] = np.reshape(camera_translation, (3,1) )
sphere_translation = np.array( sphere_pos1 )
sphere_translation_world = np.dot( R_world1 , sphere_translation)
camera_translation = t_world1 - sphere_translation_world
eye_camera_to_world_matrix1 = np.eye(4)
eye_camera_to_world_matrix1[:3,:3] = R_world1
eye_camera_to_world_matrix1[:3,3:4] = np.reshape(camera_translation, (3,1) )
g_pool.plugins.add(Binocular_Vector_Gaze_Mapper,args={
'eye_camera_to_world_matrix0':eye_camera_to_world_matrix0,
'eye_camera_to_world_matrix1':eye_camera_to_world_matrix1 ,
'camera_intrinsics': camera_intrinsics ,
'cal_points_3d': points,
'cal_ref_points_3d': points_a,
'cal_gaze_points0_3d': points_b,
'cal_gaze_points1_3d': points_c})
elif matched_monocular_data:
method = 'monocular 3d model'
            # TODO model the world as a cv2 pinhole camera with distortion and focal length in Ceres.
# right now we solve using a few permutations of K
smallest_residual = 1000
scales = list(np.linspace(0.7,1.4,20))
K = camera_intrinsics["camera_matrix"]
for s in scales:
scale = np.ones(K.shape)
scale[0,0] *= s
scale[1,1] *= s
camera_intrinsics["camera_matrix"] = K*scale
ref_dir , gaze_dir, _ = calibrate.preprocess_3d_data(matched_monocular_data,
camera_intrinsics = camera_intrinsics )
# save_object((ref_dir,gaze_dir),os.path.join(g_pool.user_dir, "testdata"))
if len(ref_dir) < 1 or len(gaze_dir) < 1:
g_pool.active_calibration_plugin.notify_all({'subject':'calibration.failed','reason':not_enough_data_error_msg,'timestamp':g_pool.get_timestamp(),'record':True})
logger.error(not_enough_data_error_msg + " Using:" + method)
return
                ### monocular calibration strategy: minimize the reprojection error by moving the world camera.
# we fix the eye points and work in the eye coord system.
initial_R,initial_t = find_rigid_transform(np.array(ref_dir)*500,np.array(gaze_dir)*500)
initial_rotation = math_helper.quaternion_from_rotation_matrix(initial_R)
initial_translation = np.array(initial_t).reshape(3)
# this problem is scale invariant so we scale to some sensical value.
if matched_monocular_data[0]['pupil']['id'] == 0:
hardcoded_translation = hardcoded_translation0
else:
hardcoded_translation = hardcoded_translation1
eye = { "observations" : gaze_dir , "translation" : (0,0,0) , "rotation" : (1,0,0,0),'fix':['translation','rotation'] }
world = { "observations" : ref_dir , "translation" : np.dot(initial_R,-hardcoded_translation) , "rotation" : initial_rotation,'fix':['translation'] }
initial_observers = [eye,world]
initial_points = np.array(gaze_dir)*500
success,residual, observers, points_in_eye = bundle_adjust_calibration(initial_observers , initial_points, fix_points=True )
if residual <= smallest_residual:
smallest_residual = residual
scales[-1] = s
eye, world = observers
if not success:
logger.error("Calibration solver faild to converge.")
g_pool.active_calibration_plugin.notify_all({'subject':'calibration.failed','reason':solver_failed_to_converge_error_msg,'timestamp':g_pool.get_timestamp(),'record':True})
return
#pose of the world in eye coords.
rotation = np.array(world['rotation'])
t_world = np.array(world['translation'])
R_world = math_helper.quaternion_rotation_matrix(rotation)
# inverse is pose of eye in world coords
R_eye = R_world.T
t_eye = np.dot(R_eye,-t_world)
def toWorld(p):
return np.dot(R_eye, p)+np.array(t_eye)
points_in_world = [toWorld(p) for p in points_in_eye]
points_a = [] #world coords
points_b = [] #cam2 coords
for a,b,point in zip(world['observations'] , eye['observations'],points_in_world):
line_a = np.array([0,0,0]) , np.array(a) #observation as line
line_b = toWorld(np.array([0,0,0])) , toWorld(b) #cam2 observation line in cam1 coords
close_point_a,_ = math_helper.nearest_linepoint_to_point( point , line_a )
close_point_b,_ = math_helper.nearest_linepoint_to_point( point , line_b )
# print np.linalg.norm(point-close_point_a),np.linalg.norm(point-close_point_b)
points_a.append(close_point_a)
points_b.append(close_point_b)
# we need to take the sphere position into account
# orientation and translation are referring to the sphere center.
# but we want to have it referring to the camera center
# since the actual translation is in world coordinates, the sphere translation needs to be calculated in world coordinates
sphere_translation = np.array( matched_monocular_data[-1]['pupil']['sphere']['center'] )
sphere_translation_world = np.dot( R_eye , sphere_translation)
camera_translation = t_eye - sphere_translation_world
eye_camera_to_world_matrix = np.eye(4)
eye_camera_to_world_matrix[:3,:3] = R_eye
eye_camera_to_world_matrix[:3,3:4] = np.reshape(camera_translation, (3,1) )
g_pool.plugins.add(Vector_Gaze_Mapper,args=
{'eye_camera_to_world_matrix':eye_camera_to_world_matrix ,
'camera_intrinsics': camera_intrinsics ,
'cal_points_3d': points_in_world,
'cal_ref_points_3d': points_a,
'cal_gaze_points_3d': points_b,
'gaze_distance':500})
else:
logger.error(not_enough_data_error_msg)
g_pool.active_calibration_plugin.notify_all({'subject':'calibration.failed','reason':not_enough_data_error_msg,'timestamp':g_pool.get_timestamp(),'record':True})
return
elif mode == '2d':
if matched_binocular_data:
method = 'binocular polynomial regression'
cal_pt_cloud_binocular = calibrate.preprocess_2d_data_binocular(matched_binocular_data)
cal_pt_cloud0 = calibrate.preprocess_2d_data_monocular(matched_pupil0_data)
cal_pt_cloud1 = calibrate.preprocess_2d_data_monocular(matched_pupil1_data)
map_fn,inliers,params = calibrate.calibrate_2d_polynomial(cal_pt_cloud_binocular,g_pool.capture.frame_size,binocular=True)
if not inliers.any():
g_pool.active_calibration_plugin.notify_all({'subject':'calibration.failed','reason':solver_failed_to_converge_error_msg,'timestamp':g_pool.get_timestamp(),'record':True})
return
map_fn,inliers,params_eye0 = calibrate.calibrate_2d_polynomial(cal_pt_cloud0,g_pool.capture.frame_size,binocular=False)
if not inliers.any():
g_pool.active_calibration_plugin.notify_all({'subject':'calibration.failed','reason':solver_failed_to_converge_error_msg,'timestamp':g_pool.get_timestamp(),'record':True})
return
map_fn,inliers,params_eye1 = calibrate.calibrate_2d_polynomial(cal_pt_cloud1,g_pool.capture.frame_size,binocular=False)
if not inliers.any():
g_pool.active_calibration_plugin.notify_all({'subject':'calibration.failed','reason':solver_failed_to_converge_error_msg,'timestamp':g_pool.get_timestamp(),'record':True})
return
g_pool.plugins.add(Binocular_Gaze_Mapper,args={'params':params, 'params_eye0':params_eye0, 'params_eye1':params_eye1})
elif matched_monocular_data:
method = 'monocular polynomial regression'
cal_pt_cloud = calibrate.preprocess_2d_data_monocular(matched_monocular_data)
map_fn,inliers,params = calibrate.calibrate_2d_polynomial(cal_pt_cloud,g_pool.capture.frame_size,binocular=False)
if not inliers.any():
g_pool.active_calibration_plugin.notify_all({'subject':'calibration.failed','reason':solver_failed_to_converge_error_msg,'timestamp':g_pool.get_timestamp(),'record':True})
return
g_pool.plugins.add(Monocular_Gaze_Mapper,args={'params':params})
else:
logger.error(not_enough_data_error_msg)
g_pool.active_calibration_plugin.notify_all({'subject':'calibration.failed','reason':not_enough_data_error_msg,'timestamp':g_pool.get_timestamp(),'record':True})
return
ts = g_pool.get_timestamp()
g_pool.active_calibration_plugin.notify_all({'subject':'calibration.successful','method':method,'timestamp': ts, 'record':True})
g_pool.active_calibration_plugin.notify_all({'subject':'calibration.calibration_data','timestamp': ts, 'pupil_list':pupil_list,'ref_list':ref_list,'calibration_method':method,'record':True})
    # this is only used by show calibration. TODO: rewrite show calibration.
user_calibration_data = {'timestamp': ts,'pupil_list':pupil_list,'ref_list':ref_list,'calibration_method':method}
save_object(user_calibration_data,os.path.join(g_pool.user_dir, "user_calibration_data"))
| lgpl-3.0 | 1,249,891,660,788,555,000 | 50.875371 | 194 | 0.605709 | false |
brachyprint/brachyprint | src/mesh/__init__.py | 1 | 1496 | # Brachyprint -- 3D printing brachytherapy moulds
# Copyright (C) 2013-14 James Cranch, Martin Green and Oliver Madge
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Mesh
====
Provides
1. A 3D Vector<> class
2. A mesh data structure for efficient mesh operations
3. Functions to create 3D primitives (e.g. cylinders, spheres, cubes)
4. Mesh manipulations, e.g. clipping, inside/outside, intersection
Available subpackages
---------------------
primitives
primitive generators
manipulate
mesh manipulations
Utilities
---------
tests
Run mesh unittests
"""
from core import Mesh, Vector, nullVector, Vertex, Face, Edge
from core import Polygon, Vector2d, nullVector2d, Vertex2d, Line
from plane import *
# subpackages
import primitives
import manipulate
import fileio
| gpl-2.0 | -4,344,528,957,305,115,600 | 28.92 | 76 | 0.72861 | false |
nfqsolutions/pylm | tests/test_services/test_subscribed_client.py | 1 | 2736 | import concurrent.futures
import time
from concurrent.futures import ThreadPoolExecutor
import zmq
from pylm.clients import Client
from pylm.parts.core import zmq_context
from pylm.parts.messages_pb2 import PalmMessage
def fake_server(messages=1):
db_socket = zmq_context.socket(zmq.REP)
db_socket.bind('inproc://db')
pull_socket = zmq_context.socket(zmq.PULL)
pull_socket.bind('inproc://pull')
pub_socket = zmq_context.socket(zmq.PUB)
pub_socket.bind('inproc://pub')
# PUB-SUB takes a while
time.sleep(1.0)
for i in range(messages):
message_data = pull_socket.recv()
print(i)
message = PalmMessage()
message.ParseFromString(message_data)
topic = message.client
pub_socket.send_multipart([topic.encode('utf-8'), message_data])
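# Illustrative note (not part of the original tests): fake_server republishes each
# pulled PalmMessage on the PUB socket using the message's client name as the topic,
# which is why each Client in the tests below only receives its own replies.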
def test_subscribed_client_single():
got = []
client = Client(
server_name='someserver',
db_address='inproc://db',
push_address='inproc://pull',
sub_address='inproc://pub',
this_config=True)
with ThreadPoolExecutor(max_workers=2) as executor:
results = [
executor.submit(fake_server, messages=2),
executor.submit(client.job, 'f', [b'1', b'2'], messages=2)
]
for future in concurrent.futures.as_completed(results):
try:
result = future.result()
if result:
for r in result:
got.append(r)
except Exception as exc:
print(exc)
assert len(got) == 2
def test_subscribed_client_multiple():
got = []
client = Client(
server_name='someserver',
db_address='inproc://db',
sub_address='inproc://pub',
push_address='inproc://pull',
this_config=True)
client1 = Client(
server_name='someserver',
db_address='inproc://db',
sub_address='inproc://pub',
push_address='inproc://pull',
this_config=True)
with ThreadPoolExecutor(max_workers=2) as executor:
results = [
executor.submit(fake_server, messages=4),
executor.submit(client.job, 'f', [b'1', b'2'], messages=2),
executor.submit(client1.job, 'f', [b'a', b'b'], messages=2)
]
for future in concurrent.futures.as_completed(results):
try:
result = future.result()
if result:
for r in result:
got.append(r)
except Exception as exc:
print(exc)
assert len(got) == 4
if __name__ == '__main__':
test_subscribed_client_single()
test_subscribed_client_multiple()
| agpl-3.0 | -7,718,448,006,989,908,000 | 25.823529 | 72 | 0.56981 | false |
pwnbus/scoring_engine | scoring_engine/config_loader.py | 1 | 2625 | import configparser
import os
class ConfigLoader(object):
def __init__(self, location="../engine.conf"):
config_location = os.path.join(os.path.dirname(os.path.abspath(__file__)), location)
self.parser = configparser.ConfigParser()
self.parser.read(config_location)
self.debug = self.parse_sources(
'debug',
self.parser['OPTIONS']['debug'].lower() == 'true',
'bool'
)
self.checks_location = self.parse_sources(
'checks_location',
self.parser['OPTIONS']['checks_location'],
)
self.round_time_sleep = self.parse_sources(
'round_time_sleep',
int(self.parser['OPTIONS']['round_time_sleep']),
'int'
)
self.worker_refresh_time = self.parse_sources(
'worker_refresh_time',
int(self.parser['OPTIONS']['worker_refresh_time']),
'int'
)
self.worker_num_concurrent_tasks = self.parse_sources(
'worker_num_concurrent_tasks',
int(self.parser['OPTIONS']['worker_num_concurrent_tasks']),
'int'
)
self.worker_queue = self.parse_sources(
'worker_queue',
self.parser['OPTIONS']['worker_queue'],
)
self.timezone = self.parse_sources(
'timezone',
self.parser['OPTIONS']['timezone']
)
self.db_uri = self.parse_sources(
'db_uri',
self.parser['OPTIONS']['db_uri']
)
self.cache_type = self.parse_sources(
'cache_type',
self.parser['OPTIONS']['cache_type']
)
self.redis_host = self.parse_sources(
'redis_host',
self.parser['OPTIONS']['redis_host']
)
self.redis_port = self.parse_sources(
'redis_port',
int(self.parser['OPTIONS']['redis_port']),
'int'
)
self.redis_password = self.parse_sources(
'redis_password',
self.parser['OPTIONS']['redis_password']
)
def parse_sources(self, key_name, default_value, obj_type='str'):
environment_key = "SCORINGENGINE_{}".format(key_name.upper())
if environment_key in os.environ:
if obj_type.lower() == 'int':
return int(os.environ[environment_key])
elif obj_type.lower() == 'bool':
return os.environ[environment_key].lower() == 'true'
else:
return os.environ[environment_key]
else:
return default_value
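# Illustrative usage sketch (not part of the original module), assuming an
# engine.conf with an [OPTIONS] section exists at the default location above.
# It shows how SCORINGENGINE_* environment variables override file values and
# how obj_type drives the conversion; the value 120 is an arbitrary example.
if __name__ == '__main__':
    os.environ['SCORINGENGINE_ROUND_TIME_SLEEP'] = '120'
    config = ConfigLoader()
    print(config.round_time_sleep)  # -> 120 (converted to int by parse_sources)
    print(config.worker_queue)      # falls back to the value from engine.conf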
| mit | 7,857,065,895,646,105,000 | 28.829545 | 92 | 0.525333 | false |
Stargrazer82301/CAAPR | CAAPR/CAAPR_AstroMagic/PTS/pts/magic/tools/sesame.py | 1 | 3792 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.magic.tools.sesame Contains the Sesame class.
# -----------------------------------------------------------------
"""
Created on Mar 13, 2011
Sesame class to access Sesame name resolver service
Based on 2005-06-11 by Shui Hung Kwok
See http://cdsweb.u-strasbg.fr/doc/sesame.htx for description of Sesame
@author: shkwok
"""
from urllib2 import urlopen
#from xparser.XParser import XParser
#from .. import XParser
# -----------------------------------------------------------------
class Sesame (object):
"""
This class ...
"""
CatalogOpt = "SNV" # S simbad, N ned, V vizier, A All
OutputOpt = "oxp" # xp for xml as text/plain rather then text/xml (-ox)
SesameURL = "http://cdsweb.u-strasbg.fr/cgi-bin/nph-sesame"
def __init__(self, urn=SesameURL, opt=CatalogOpt, opt1=OutputOpt):
"""
Initializes Sesame URL and options
Default options are SNV for CatalogOpt
and -oxp for OutputOpt.
SNV = Simbad + NED + Vizier and A for All
The order indicates the order to search.
"All" means search all services, otherwise stops when
first entry found.
Output options start with -o
followed by
x : xml output
p : plain text
I : include all identifiers
"""
self.catOpt = opt
self.outOpt = opt1
self.urn = urn
# Sesame
def getCoord(self, node):
"""
Helper method to extract ra and dec from node
"""
res = node.getResource("/Sesame/Target");
resolvers = res.getChildren ("Resolver")
for r in resolvers:
try:
ra = float (r.getResourceContent("/Resolver/jradeg").strip())
dec = float (r.getResourceContent("/Resolver/jdedeg").strip())
return ra, dec
except Exception:
raise Exception, "invalid coordinates"
else:
raise Exception, "no ra/dec values found"
# getCoord
def getAliases(self):
"""
Extracts aliases for the given target.
Returns a list of names.
"""
res = []
for resolver in self.xml.root.Sesame.Resolver:
try:
for a in resolver.alias:
res.append (a.content)
except:
pass
return res
def buildQuery(self, name, all=True):
"""
Builds query URL for use with HTTP GET
If all is true, then all known identifiers shall be returned.
"""
opt = self.catOpt
opt1 = '-' + self.outOpt
if all:
opt += 'A'
opt1 += 'I' # all identifiers
queryURL = "%s/%s/%s?%s" % (self.urn, opt1, opt, name)
return queryURL
def resolveRaw(self, name, all=True):
"""
Performs a raw query.
Returns what the server returns.
"""
query = self.buildQuery (name, all)
print "query=", query
hcon = urlopen (query)
res = hcon.read ()
hcon.close ()
return res
def resolve(self, name, all=True):
"""
Performs a query.
Returns ra and dec
"""
query = self.buildQuery(name, all)
xp = XParser()
xn = xp.parseFromFile(query)
return self.getCoord(xn) | mit | -434,090,002,494,834,500 | 27.727273 | 78 | 0.498813 | false |
containers-ftw/cftw | cftw/utils.py | 1 | 8502 | '''
utils.py: part of cftw package
Copyright (c) 2017 Vanessa Sochat
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import errno
import fnmatch
import os
import json
import re
import shutil
import simplejson
from cftw.logger import bot
import sys
import subprocess
import tempfile
import tarfile
import zipfile
######################################################################################
# Local commands and requests
######################################################################################
def get_installdir():
'''get_installdir returns the installation directory of the application
'''
return os.path.abspath(os.path.dirname(__file__))
def run_command(cmd,error_message=None,sudopw=None,suppress=False):
'''run_command uses subprocess to send a command to the terminal.
:param cmd: the command to send, should be a list for subprocess
:param error_message: the error message to give to user if fails,
if none specified, will alert that command failed.
:param execute: if True, will add `` around command (default is False)
:param sudopw: if specified (not None) command will be run asking for sudo
'''
if sudopw == None:
sudopw = os.environ.get('pancakes',None)
if sudopw != None:
cmd = ' '.join(["echo", sudopw,"|","sudo","-S"] + cmd)
if suppress == False:
output = os.popen(cmd).read().strip('\n')
else:
output = cmd
os.system(cmd)
else:
try:
process = subprocess.Popen(cmd,stdout=subprocess.PIPE)
output, err = process.communicate()
except OSError as error:
bot.error(err)
return None
return output
############################################################################
## FILE OPERATIONS #########################################################
############################################################################
def write_file(filename,content,mode="w"):
'''write_file will open a file, "filename" and write content, "content"
and properly close the file
'''
with open(filename,mode) as filey:
filey.writelines(content)
return filename
def write_json(json_obj,filename,mode="w",print_pretty=True):
'''write_json will (optionally,pretty print) a json object to file
:param json_obj: the dict to print to json
:param filename: the output file to write to
:param pretty_print: if True, will use nicer formatting
'''
with open(filename,mode) as filey:
if print_pretty == True:
filey.writelines(simplejson.dumps(json_obj, indent=4, separators=(',', ': ')))
else:
filey.writelines(simplejson.dumps(json_obj))
return filename
def read_file(filename,mode="r"):
    '''read_file will open a file, "filename", read its content,
    and properly close the file
'''
with open(filename,mode) as filey:
content = filey.readlines()
return content
def read_json(filename,mode="r"):
'''read_json will open a file, "filename" and read the string as json
'''
with open(filename,mode) as filey:
content = json.loads(filey.read())
return content
def recursive_find(base,pattern=None):
'''recursive find dicoms will search for dicom files in all directory levels
below a base. It uses get_dcm_files to find the files in the bases.
'''
if pattern is None:
pattern = "*"
files = []
for root, dirnames, filenames in os.walk(base):
for filename in fnmatch.filter(filenames, pattern):
files.append(os.path.join(root, filename))
return files
############################################################################
## FOLDER OPERATIONS #########################################################
############################################################################
def mkdir_p(path):
'''mkdir_p attempts to get the same functionality as mkdir -p
:param path: the path to create.
'''
try:
os.makedirs(path)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
bot.error("Error creating path %s, exiting." %path)
sys.exit(1)
def tree(base):
'''print a simple directory tree, primarily for showing
content of templates'''
for root, dirs, files in os.walk(base):
level = root.replace(base, '').count(os.sep)
indent = ' ' * 4 * (level)
print('{}{}/'.format(indent, os.path.basename(root)))
subindent = ' ' * 4 * (level + 1)
for f in files:
print('{}{}'.format(subindent, f))
############################################################################
## COMPRESSED FILES ########################################################
############################################################################
def detect_compressed(folder,compressed_types=None):
'''detect compressed will return a list of files in
some folder that are compressed, by default this means
.zip or .tar.gz, but the called can specify a custom list
:param folder: the folder base to use.
:param compressed_types: a list of types to include, should
be extensions in format like *.tar.gz, *.zip, etc.
'''
compressed = []
if compressed_types == None:
compressed_types = ["*.tar.gz",'*zip']
bot.debug("Searching for %s" %", ".join(compressed_types))
for filey in os.listdir(folder):
for compressed_type in compressed_types:
if fnmatch.fnmatch(filey, compressed_type):
compressed.append("%s/%s" %(folder,filey))
bot.debug("Found %s compressed files in %s" %len(compressed),folder)
return compressed
def unzip_dir(zip_file,dest_dir=None):
'''unzip_dir will extract a zipfile to a directory. If
an extraction destination is not defined, a temporary
directory will be created and used.
:param zip_file: the .zip file to unzip
:param dest_dir: the destination directory
'''
if dest_dir == None:
dest_dir = tempfile.mkdtemp()
with zipfile.ZipFile(zip_file,"r") as zf:
zf.extractall(dest_dir)
return dest_dir
def zip_dir(zip_dir, zip_name, output_folder=None):
'''zip_dir will zip up and entire zip directory
    :param zip_dir: the folder to zip up
    :param zip_name: the name of the zip to return
    :param output_folder: optional folder to copy the resulting zip into
'''
tmpdir = tempfile.mkdtemp()
output_zip = "%s/%s" %(tmpdir,zip_name)
zf = zipfile.ZipFile(output_zip, "w", zipfile.ZIP_DEFLATED, allowZip64=True)
for root, dirs, files in os.walk(zip_dir):
for file in files:
zf.write(os.path.join(root, file))
zf.close()
if output_folder != None:
shutil.copyfile(output_zip,"%s/%s"%(output_folder,zip_name))
shutil.rmtree(tmpdir)
output_zip = "%s/%s"%(output_folder,zip_name)
return output_zip
def untar_dir(tar_file,dest_dir=None):
'''untar_dir will extract a tarfile to a directory. If
an extraction destination is not defined, a temporary
directory will be created and used.
:param tar_file: the .tar.gz file to untar/decompress
:param dest_dir: the destination directory
'''
if dest_dir == None:
dest_dir = tempfile.mkdtemp()
contents = []
if tarfile.is_tarfile(tar_file):
with tarfile.open(tar_file) as tf:
tf.extractall(dest_dir)
return dest_dir
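# Illustrative usage sketch (not part of the original module): exercises a few of
# the helpers above on a throwaway temporary directory. All file names below are
# examples only.
if __name__ == "__main__":
    workdir = tempfile.mkdtemp()
    sample = write_json({"hello": "world"}, os.path.join(workdir, "sample.json"))
    print(read_json(sample))   # -> {'hello': 'world'}
    archive = zip_dir(workdir, "sample.zip", output_folder=workdir)
    print(unzip_dir(archive))  # path of a fresh directory holding the extracted files
    shutil.rmtree(workdir)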
| mit | 8,000,063,582,081,847,000 | 32.472441 | 90 | 0.597389 | false |
pepincho/playground | python/Learn-Python-The-Hard-Way/exercises11to20.py | 1 | 2190 | # exercise 11
name = input("What's your name? ") # take the name from the keyboard
print ("Your name is {}".format(name))
# exercise 15
file_again = input("Type the filename again: > ") # read the file's name from the keyboard
txt_again = open(file_again) # open the file
print (txt_again.read()) # print the file's content
# exercise 16
print ("Opening the file...")
target_name = input("Type the filename: > ")
target_txt = open(target_name, "r+")
print ("Now I'm going to ask you for three lines.")
line1 = input("line 1: ")
line2 = input("line 2: ")
line3 = input("line 3: ")
print ("I'm going to write these to the file.")
target_txt.write(line1)
target_txt.write("\n")
target_txt.write(line2)
target_txt.write("\n")
target_txt.write(line3)
target_txt.write("\n")
print ("And finally, we close it.")
target_txt.close()
# exercise 17, read from a file and write in another file
from os.path import exists
from_file = input("From file: > ")
to_file = input("To file: > ")
in_file = open(from_file)
indata = in_file.read()
print ("The input file is {} bytes long.".format(len(indata)))
print ("Does the output file exist? {}".format(exists(to_file)))
out_file = open(to_file, 'w')
out_file.write(indata)
print ("Alright, all done.")
out_file.close()
in_file.close()
# exercise 18
def print_two(*args):
arg1, arg2 = args
print ("arg1: %r, arg2: %r" % (arg1, arg2))
def print_two_again(arg1, arg2):
print ("arg1: %r, arg2: %r" % (arg1, arg2))
def print_none():
print ("I got nothin'.")
print_two("Zed","Shaw")
print_two_again("Zed","Shaw")
print_none()
# exercise 20
def print_all(f):
print (f.read())
def rewind(f):
f.seek(0)
def print_a_line(line_count, f):
print (line_count, f.readline())
file_name = input("File name: > ")
current_file = open(file_name)
print ("First let's print the whole file:\n")
print_all(current_file)
print ("Now let's rewind, kind of like a tape.")
print (rewind(current_file))
print ("Let's print three lines:")
current_line = 1
print_a_line(current_line, current_file)
current_line = current_line + 1
print_a_line(current_line, current_file)
current_line = current_line + 1
print_a_line(current_line, current_file)
| mit | -1,002,428,991,431,571,700 | 21.121212 | 90 | 0.67032 | false |
bfontaine/Teebr | teebr/features.py | 1 | 6724 | # -*- coding: UTF-8 -*-
from __future__ import absolute_import, unicode_literals
import re
from json import dumps
from collections import defaultdict
from .log import mkLogger
from .text.utils import contains_emoji, extract_named_entities
from .text.utils import most_common_words
from .text.spam import is_spam
logger = mkLogger("features")
LANGUAGES = ('en',) # 'fr')
SOURCE_TYPES = {
"source_mobile": [
"Echofon",
"Mobile Web (M2)",
"Mobile Web (M5)",
"Mobile Web",
"Samsung Mobile",
"Twitter for Android",
"Twitter for BlackBerry®",
"Twitter for Windows Phone",
"Twitter for iPhone",
"Twitterrific",
"iOS",
"uberSocial for Android",
],
"source_tablet": [
"Twitter for Android Tablets",
"Twitter for iPad",
],
"source_desktop": [
"TweetDeck",
"Twitter Web Client",
"Twitter for Mac",
"OS X",
],
# automated publication tools + bot-like tweets
"source_autopub": [
"Buffer",
"Hootsuite",
"IFTTT",
"JustUnfollow",
"RoundTeam",
"TweetAdder v4",
"fllwrs",
"twittbot.net",
],
"source_social": [
"Ask.fm",
"Facebook",
"Foursquare",
"Instagram",
"LinkedIn",
"Path",
"Pinterest",
"Reddit RSS",
"Vine - Make a Scene",
"Vine for Android",
],
"source_news": [
"Nachrichten News",
],
"source_other": [],
}
URL_TYPES = {
"url_social": [
"fb.me",
"path.com",
],
"url_social_media": [
"vine.co",
"instagram.com",
],
"url_product": [
"amzn.to",
],
"url_video": [
"youtu.be",
],
}
# TODO we might be able to remove this now that we have a spam filter
APPS_BLACKLIST = set([
# followers spam
u"Unfollowers",
u"JustUnfollow",
u"fllwrs",
u"..ignite.v.1.",
u"Adi Sumardiyasa",
u"Who Unfollowed Me",
# tweets ranking
u"001FM Top40 Tweets",
# Games
u"1Dreamboy 2 Game",
u"1Dreamboy Version 2 Game",
u"Airport City Mobile",
u"The Tribez HD on iOS",
# General news
u"233Live Tweets",
u"247newz",
# Misc news
u"ADVFN News Alert",
u"APD Traffic Alerts",
# Buzzfeed-like
u"75325love",
u"AlltheGoss",
u"AllHealthSecrets.com",
u"Amusing information",
u"volkanc",
u"awe.sm",
# nsfw
u"definebabecom",
u"Cumagination Gay",
u"Cumagination Lesbian",
u"EscortGuidexXx",
u"TweetAdder v",
# Misc Spam
u";sdklafjas",
u"Acne-Treatments-and-Tips.com",
u"AmazonRecommend",
# Others
u"Adcourier",
])
# some apps add numbers at the end, e.g. MySpam, MySpam1, MySpam2, etc
END_DIGITS = re.compile(r"\s*\d+$")
entity_keys = ("urls", "hashtags", "user_mentions", "trends", "symbols", "media")
def filter_status(st):
"""
Check if we should include a status as returned by the Streaming API in our
DB. It'll return ``False`` if it should be rejected.
"""
# keep only some languages
if st.lang not in LANGUAGES:
return False
# remove replies
if st.in_reply_to_screen_name:
return False
# remove RTs
if getattr(st, 'retweeted_status', False):
return False
# remove suspicious apps
if not st.source or not st.source_url:
return False
# remove spam apps
if END_DIGITS.sub("", st.source) in APPS_BLACKLIST:
return False
# remove manual RTs
if st.text.startswith("RT @") or st.text.startswith("MT @"):
return False
# remove manual responses
if st.text.startswith(".@"):
return False
# remove other spam tweets
if is_spam(st.text):
return False
# ok
return True
class FeaturesDict(defaultdict):
def __init__(self, st):
super(FeaturesDict, self).__init__(float)
self._st = st
def compute_features(self):
"""
Compute all features for this tweet
"""
self._set_source_type()
self._set_extra_entities()
st = self._st
self["sg_geolocalized"] = float(st.geo is not None)
self["sg_lang_%s" % st.lang] = 1.0
self["sg_contributors"] = float(st.contributors is not None)
self["sg_emojis"] = contains_emoji(st.text)
# some statuses don't have this attribute
self["sg_nsfw"] = getattr(st, "possibly_sensitive", 0.0)
entities = extract_named_entities(st.text)
self["names"] = ",".join(entities)
for entity in entities:
entity = entity.lower()
if entity in most_common_words:
self["sg_mc_word_%s" % entity] = 1.0
self["retweet_count"] = getattr(st, "retweet_count", 0.0)
self["favorite_count"] = getattr(st, "favorite_count", 0.0)
for key in entity_keys:
self["sg_%s" % key] = int(bool(self._st.entities["urls"]))
def _set_source_type(self):
"""
Feature: source type
Keys: source_mobile, source_desktop, source_autopub, source_social,
source_tablet, source_other, ... (see SOURCE_TYPES)
Values: [0, 1]
"""
text = self._st.source.strip()
for s,vs in SOURCE_TYPES.items():
if text in vs:
self["sg_%s" % s] = 1.0
return
ltext = text.lower()
for brand in ("android", "iphone", "blackberry", "windows phone"):
if ltext.endswith(" for %s" % brand):
self["sg_source_mobile"] = 1.0
return
self["sg_source_others"] = 1.0
def _set_extra_entities(self):
extra = {}
media = getattr(self._st, "entities", {}).get("media", [])
if media:
photos = []
for m in media:
# TODO check the format for videos
if m.get("type") != "photo":
continue
photos.append({
# The image URL
"media_url": m["media_url_https"],
# The URL included in the status (expanded by us)
"url": m["expanded_url"],
})
extra["photos"] = photos
self["extra_entities"] = dumps(extra)
def compute_features(status):
expand_urls(status)
f = FeaturesDict(status)
f.compute_features()
return f
def expand_urls(st):
entities = getattr(st, "entities", {})
for link in entities.get("urls", []) + entities.get("media", []):
st.text = st.text.replace(link["url"], link["expanded_url"])
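# Illustrative sketch (not part of the original module): expand_urls rewrites t.co
# links in the status text using the entity metadata. FakeStatus below is a minimal
# stand-in that only mimics the attributes the function touches; it is an assumption
# for demonstration, not one of the Twitter API objects used above.
if __name__ == "__main__":
    class FakeStatus(object):
        text = "read this https://t.co/abc"
        entities = {"urls": [{"url": "https://t.co/abc",
                              "expanded_url": "http://example.com/post"}]}

    status = FakeStatus()
    expand_urls(status)
    print(status.text)  # -> "read this http://example.com/post"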
| mit | -323,460,344,605,223,000 | 22.840426 | 81 | 0.542466 | false |
jabez007/Training_Helpyr | Setup/__init__.py | 1 | 7646 | import re
import os
APP_PATH = os.path.join(*os.path.split(os.path.dirname(os.path.realpath(__file__)))[:-1])
import sys
if APP_PATH not in sys.path:
sys.path.append(APP_PATH)
import MyTrack
import PowerShell
import Phonebook
import Overlord
import Log
LOGGER = Log.MyLog(name=__name__)
# # # #
"""
Special setup for Care Everywhere 101 (fka CE-500)
"""
def ce500(instructor, trainees, code="CSCce500setup"):
"""
entry point for setting up CE 101 (FKA CE500)
:param instructor: <string> the cache environment for the Instructor
:param trainees: <string> the cache environments for the trainees
    :param code: <string> the Overlord tag that needs to be run in each environment to complete setup
:return: <bool> True if everything was successful
"""
gwn = None
instr = "".join([c for c in instructor if c.isdigit()])
trns = clean_caches(trainees)
if instr:
'''
if this is a fresh class setup, as in we are not just adding trainee environments to an existing class
'''
# pull out the last trainee environment and make it GWN
gwn = trns[-1:]
if gwen(gwn):
# then take that environment out of the list we'll set up later
trns = trns[:-1]
LOGGER.info("epic-trn%s set up as GWN environment" % gwn[0])
else:
# otherwise, skip the GWN setup and make this a normal environment
gwn = None
LOGGER.error("Galaxy Wide Network not set up")
setup_instructor(instr)
# Connect Interconnects to trainee environments
if not assign_interconnects("CE500", trns):
return False
# Update Training Phone Book with new environment assignments
if not update_phonebook(trns):
return False
# Restart the Training Phone Book so our changes take affect
if not PowerShell.restart_phonebook():
LOGGER.error("Error in restarting Training Phonebook Interconnect")
return False
# Run Cache setup script
if not setup_cache([instr]+trns, code):
return False
if gwn is not None:
setup_cache(gwn, code, "GWeN")
return True
def setup_instructor(instructor):
"""
runs the setup particular to the instructor environment
:param instructor: <string> the cache environment for the class instructor
:return: <bool> True is everything was successful
"""
# Connect Interconnect to instructor environment
if not PowerShell.setup('01', instructor):
LOGGER.error("Failed to connect epic-trn%s to CE500 instructor Interconnect. See powershell.err" % instructor)
return False
# Save to tracking database
if not MyTrack.assign("Instructors", "train01", "epic-trn"+instructor):
LOGGER.error("Setup between CE500 instructor Interconnect and epic-trn%s not saved to database. See my_track.err"
% instructor)
# Reset TRN Phonebook and register Instructor environment
if not Phonebook.TrnPhonebook().instructor(instructor):
LOGGER.error("Error in registering epic-trn%s as the Instructor environment in the Training Phonebook. See TRNphonebook.err"
% instructor)
return False
LOGGER.info("epic-trn%s set up as instructor environment" % instructor)
return True
def update_phonebook(trainees):
"""
updates the training Phonebook with trainee environments for this class
:param trainees: <list(string)> the cache environments for the trainees
:return: <bool> True if everything was successful
"""
for cache in trainees:
if not Phonebook.TrnPhonebook().register(cache):
LOGGER.error("Error in registering epic-trn%s with Training Phonebook. See TRNphonebook.err" % cache)
return False
LOGGER.info("Trainee environments registered in phonebook")
return True
def gwen(trainee):
"""
runs the setup particular to the Galaxy Wide Network environment
:param trainee: <string> the cache environment for GWN
:return: <bool> True if everything was successful
"""
# assign interconnect - this should be the same as the other trainee environments
assign_interconnects("CE500", trainee)
# update Phonebook
if not Phonebook.TrnPhonebook().register_gwn(trainee[0]):
return False
# setup cache for GWN with the other environments
return True
# # # #
"""
Generic Care Everywhere setup for IP and AMB Funds classes
"""
def funds(caches, code="CSCInpFunds"):
"""
:param caches: <string>
:param code: <string>
:return: <bool>
"""
trns = clean_caches(caches)
if not assign_interconnects("AMB_IP", trns):
return False
if code:
if not setup_cache(trns, code):
return False
return True
# # # #
"""
used by both Care Everywhere 101 and IP/AMB Funds
"""
def clean_caches(caches):
"""
uses regex to parse out our cache environments passed in
:param caches: <string>
:return: <list(string)>
"""
return_caches = list()
data = re.finditer("([a-zA-Z0-9\-]+)", caches)
for d in data:
cache = "".join([s for s in d.group(1) if s.isdigit()])
# make sure we have an environment and that it's not already assigned
if cache and not MyTrack.check_assigned(cache):
return_caches.append(cache)
return return_caches
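# Illustrative example (not part of the original module): clean_caches("epic-trn101, epic-trn102")
# would return ["101", "102"], assuming neither environment is already assigned in MyTrack.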
def assign_interconnects(_class, trns):
assigned_interconnects = 1 # CE500 instructor always gets Interconnect 1
clss = _class
for cache in trns:
# #
if ("CE500" in _class) and (assigned_interconnects >= 40): # if training overbooks us, steal from FUNDs
clss = "AMB_IP"
interconnect = "".join([s for s in MyTrack.get("unassigned", "AMB_IP") if s.isdigit()])
else:
interconnect = "".join([s for s in MyTrack.get("unassigned", _class) if s.isdigit()])
# #
if interconnect:
if not PowerShell.setup(interconnect, cache):
LOGGER.error("Powershell failed to connect epic-trn%s to train%s" % (cache, interconnect))
return False
assigned_interconnects += 1
if not MyTrack.assign(clss, "train"+interconnect, "epic-trn"+cache):
LOGGER.error("Setup between epic-trn%s and train%s not saved to MyTrack" % (cache, interconnect))
return False
else:
LOGGER.error("No Interconnect returned from MyTrack for epic-trn%s" % cache)
return False
LOGGER.info("epic-trn%s connected to Interconnect-train%s" % (cache, interconnect))
return True
def setup_cache(trns, code, flag=""):
success = True
for trn in trns:
if not Overlord.overlord(trn, code, flag):
LOGGER.error("Error running %s. See Overlord logs" % code)
success = False
# LOGGER.info("%s successfully ran in %s" % (code, ", ".join(trns)))
return success
# # # #
if __name__ == "__main__":
import datetime
import Outlook
for days in range(2): # setup today's and tomorrow's classes
tomorrow = (datetime.datetime.now() + datetime.timedelta(days=days)).strftime("%m/%d/%Y") # MM/DD/YYYY
print("Setting up classes for %s:" % tomorrow)
classes = MyTrack.setup_schedule(tomorrow)
for new_class in classes:
if funds(new_class[0]):
print("\t%s - email to %s" % (new_class[0], new_class[1]))
Outlook.send_email(e_address=new_class[1], env=new_class[0])
else:
print("\t%s failed" % new_class[0])
| mit | -4,601,870,185,056,420,400 | 31.675214 | 132 | 0.639681 | false |
aricaldeira/PySPED | pysped/cte/webservices_flags.py | 1 | 2161 | # -*- coding: utf-8 -*-
#
# PySPED - Python libraries to deal with Brazil's SPED Project
#
# Copyright (C) 2010-2012
# Copyright (C) Aristides Caldeira <aristides.caldeira at tauga.com.br>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Library General Public License as
# published by the Free Software Foundation, either version 2.1 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# PySPED - Bibliotecas Python para o
# SPED - Sistema Público de Escrituração Digital
#
# Copyright (C) 2010-2012
# Copyright (C) Aristides Caldeira <aristides.caldeira arroba tauga.com.br>
#
# Este programa é um software livre: você pode redistribuir e/ou modificar
# este programa sob os termos da licença GNU Library General Public License,
# publicada pela Free Software Foundation, em sua versão 2.1 ou, de acordo
# com sua opção, qualquer versão posterior.
#
# Este programa é distribuido na esperança de que venha a ser útil,
# porém SEM QUAISQUER GARANTIAS, nem mesmo a garantia implícita de
# COMERCIABILIDADE ou ADEQUAÇÃO A UMA FINALIDADE ESPECÍFICA. Veja a
# GNU Library General Public License para mais detalhes.
#
# Você deve ter recebido uma cópia da GNU Library General Public License
# juntamente com este programa. Caso esse não seja o caso, acesse:
# <http://www.gnu.org/licenses/>
#
from __future__ import division, print_function, unicode_literals
from ..nfe.webservices_flags import UF_CODIGO, CODIGO_UF
WS_CTE_AUTORIZACAO = 0
WS_CTE_CONSULTA_AUTORIZACAO = 1
WS_CTE_INUTILIZACAO = 1
WS_CTE_CONSULTA = 3
WS_CTE_SITUACAO = 4
WS_CTE_RECEPCAO_EVENTO = 2
WS_CTE_RECEPCAO_OS = 5
WS_CTE_DISTRIBUICAO = 6
CTE_AMBIENTE_PRODUCAO = 1
CTE_AMBIENTE_HOMOLOGACAO = 2
| lgpl-2.1 | -2,877,343,729,118,594,600 | 36.54386 | 76 | 0.757944 | false |
sunoru/pokemon_only | stall/migrations/0001_initial.py | 1 | 6359 | # Generated by Django 2.2.2 on 2019-06-04 21:16
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pmo', models.CharField(choices=[('unknown', 'unknown'), ('pmo2015', 'pmo2015'), ('pmo2016', 'pmo2016'), ('pmo2017', 'pmo2017'), ('pmo2018', 'pmo2018'), ('pmo2019', 'pmo2019')], default='unknown', help_text='漫展', max_length=10)),
('validated', models.BooleanField(default=False)),
('name', models.CharField(default='未命名', max_length=50)),
('item_type', models.CharField(default='', help_text='种类', max_length=20)),
('content', models.CharField(default='', help_text='内容', max_length=100)),
('price', models.FloatField(default=0, help_text='价格')),
('url', models.URLField(default='', help_text='链接')),
('authors', models.TextField(default='', help_text='作者名单')),
('introduction', models.TextField(default='', help_text='简介')),
('cover_image', models.ImageField(help_text='封面图片', max_length=1024, null=True, upload_to='items/%Y/%m/%d')),
('forto', models.CharField(default='', help_text='面向人群', max_length=20)),
('is_restricted', models.CharField(default='', help_text='限制级是否', max_length=20)),
('circle', models.CharField(default='', help_text='出品社团', max_length=40)),
('is_started_with', models.BooleanField(default=False, help_text='是否首发')),
('item_order', models.IntegerField(default=0, help_text='商品排序')),
],
options={
'ordering': ['seller'],
},
),
migrations.CreateModel(
name='Option',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('key', models.CharField(max_length=255, unique=True)),
('value', models.TextField(default='')),
],
),
migrations.CreateModel(
name='Seller',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pmo', models.CharField(choices=[('unknown', 'unknown'), ('pmo2015', 'pmo2015'), ('pmo2016', 'pmo2016'), ('pmo2017', 'pmo2017'), ('pmo2018', 'pmo2018'), ('pmo2019', 'pmo2019')], default='unknown', help_text='漫展', max_length=10)),
('email', models.EmailField(max_length=30, verbose_name='email address')),
('is_active', models.BooleanField(default=False, help_text='是否激活')),
('signup_datetime', models.DateTimeField(auto_now=True)),
('signup_address', models.GenericIPAddressField()),
('is_stall', models.BooleanField(help_text='是否摊位')),
('circle_name', models.CharField(help_text='社团名', max_length=40)),
('circle_description', models.TextField(help_text='社团介绍')),
('circle_image', models.ImageField(help_text='社团图标', upload_to='circle/%Y/%m/%d')),
('seller_id', models.CharField(default='', help_text='摊位号', max_length=10)),
('proposer_name', models.CharField(help_text='申请人姓名', max_length=20)),
('proposer_sex', models.CharField(help_text='性别', max_length=20)),
('proposer_qq', models.CharField(help_text='QQ', max_length=11)),
('proposer_phone', models.CharField(help_text='电话', max_length=20)),
('proposer_id', models.CharField(help_text='身份证号', max_length=18)),
('booth', models.FloatField(default=1, help_text='申请摊位数')),
('number_of_people', models.SmallIntegerField(default=1, help_text='申请人数')),
('remarks', models.TextField(default='', help_text='备注')),
('status', models.IntegerField(help_text='状态')),
('notice', models.TextField(default='', help_text='通知')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='ValidateCode',
fields=[
('pmo', models.CharField(choices=[('unknown', 'unknown'), ('pmo2015', 'pmo2015'), ('pmo2016', 'pmo2016'), ('pmo2017', 'pmo2017'), ('pmo2018', 'pmo2018'), ('pmo2019', 'pmo2019')], default='unknown', help_text='漫展', max_length=10)),
('code', models.CharField(max_length=20, primary_key=True, serialize=False)),
('validated', models.BooleanField(default=False)),
('seller', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='stall.Seller')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ItemPicture',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pmo', models.CharField(choices=[('unknown', 'unknown'), ('pmo2015', 'pmo2015'), ('pmo2016', 'pmo2016'), ('pmo2017', 'pmo2017'), ('pmo2018', 'pmo2018'), ('pmo2019', 'pmo2019')], default='unknown', help_text='漫展', max_length=10)),
('picture', models.ImageField(help_text='图片', max_length=1024, upload_to='items/%Y/%m/%d')),
('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='stall.Item')),
],
),
migrations.AddField(
model_name='item',
name='seller',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='stall.Seller'),
),
]
| gpl-2.0 | -6,446,297,086,749,532,000 | 59.343137 | 246 | 0.560845 | false |
meraki-analytics/cassiopeia | cassiopeia/cassiopeia.py | 1 | 6069 | from typing import List, Set, Dict, Union, TextIO
import arrow
import datetime
from .data import Region, Queue, Season, Tier, Division, Position
from .core import Champion, Summoner, ChampionMastery, Rune, Item, Match, Map, SummonerSpell, Realms, ProfileIcon, LanguageStrings, CurrentMatch, ShardStatus, Versions, MatchHistory, Champions, ChampionMasteries, Runes, Items, SummonerSpells, Maps, FeaturedMatches, Locales, ProfileIcons, ChallengerLeague, GrandmasterLeague, MasterLeague, League, LeagueSummonerEntries, LeagueEntries, Patch, VerificationString, ChampionRotation
from .datastores import common as _common_datastore
from ._configuration import Settings, load_config, get_default_config
from . import configuration
# Settings endpoints
def apply_settings(config: Union[str, TextIO, Dict, Settings]):
if not isinstance(config, (Dict, Settings)):
config = load_config(config)
if not isinstance(config, Settings):
settings = Settings(config)
else:
settings = config
# Load any plugins after everything else has finished importing
import importlib
for plugin in settings.plugins:
imported_plugin = importlib.import_module("cassiopeia.plugins.{plugin}.monkeypatch".format(plugin=plugin))
print_calls(settings._Settings__default_print_calls, settings._Settings__default_print_riot_api_key)
# Overwrite the old settings
configuration._settings = settings
# Initialize the pipeline immediately
_ = configuration.settings.pipeline
def set_riot_api_key(key: str):
configuration.settings.set_riot_api_key(key)
def set_default_region(region: Union[Region, str]):
configuration.settings.set_region(region)
def print_calls(calls: bool, api_key: bool = False):
_common_datastore._print_calls = calls
_common_datastore._print_api_key = api_key
# Data endpoints
def get_league_entries(summoner: Summoner) -> LeagueEntries:
return summoner.league_entries
def get_paginated_league_entries(queue: Queue, tier: Tier, division: Division, region: Union[Region, str] = None) -> LeagueEntries:
return LeagueEntries(region=region, queue=queue, tier=tier, division=division)
def get_master_league(queue: Union[Queue, int, str], region: Union[Region, str] = None) -> MasterLeague:
return MasterLeague(queue=queue, region=region)
def get_grandmaster_league(queue: Union[Queue, int, str], region: Union[Region, str] = None) -> GrandmasterLeague:
return GrandmasterLeague(queue=queue, region=region)
def get_challenger_league(queue: Union[Queue, int, str], region: Union[Region, str] = None) -> ChallengerLeague:
return ChallengerLeague(queue=queue, region=region)
def get_match_history(summoner: Summoner, begin_index: int = None, end_index: int = None, begin_time: arrow.Arrow = None, end_time: arrow.Arrow = None, queues: Set[Queue] = None, seasons: Set[Season] = None, champions: Set[Champion] = None):
return MatchHistory(summoner=summoner, begin_index=begin_index, end_index=end_index, begin_time=begin_time, end_time=end_time, queues=queues, seasons=seasons, champions=champions)
def get_match(id: int, region: Union[Region, str] = None) -> Match:
return Match(id=id, region=region)
def get_featured_matches(region: Union[Region, str] = None) -> FeaturedMatches:
return FeaturedMatches(region=region)
def get_current_match(summoner: Summoner, region: Union[Region, str] = None) -> CurrentMatch:
return CurrentMatch(summoner=summoner, region=region)
def get_champion_masteries(summoner: Summoner, region: Union[Region, str] = None) -> ChampionMasteries:
return ChampionMasteries(summoner=summoner, region=region)
def get_champion_mastery(summoner: Summoner, champion: Union[Champion, int, str], region: Union[Region, str] = None) -> ChampionMastery:
return ChampionMastery(champion=champion, summoner=summoner, region=region)
def get_summoner(*, id: str = None, account_id: str = None, name: str = None, region: Union[Region, str] = None) -> Summoner:
return Summoner(id=id, account_id=account_id, name=name, region=region)
def get_champion(key: Union[str, int], region: Union[Region, str] = None) -> Champion:
return get_champions(region=region)[key]
def get_champions(region: Union[Region, str] = None) -> Champions:
return Champions(region=region)
def get_runes(region: Union[Region, str] = None) -> Runes:
return Runes(region=region)
def get_summoner_spells(region: Union[Region, str] = None) -> SummonerSpells:
return SummonerSpells(region=region)
def get_items(region: Union[Region, str] = None) -> Items:
return Items(region=region)
def get_maps(region: Union[Region, str] = None) -> Maps:
return Maps(region=region)
def get_profile_icons(region: Union[Region, str] = None) -> ProfileIcons:
return ProfileIcons(region=region)
def get_realms(region: Union[Region, str] = None) -> Realms:
return Realms(region=region)
def get_status(region: Union[Region, str] = None) -> ShardStatus:
return ShardStatus(region=region)
def get_language_strings(region: Union[Region, str] = None) -> LanguageStrings:
return LanguageStrings(region=region)
def get_locales(region: Union[Region, str] = None) -> List[str]:
return Locales(region=region)
def get_versions(region: Union[Region, str] = None) -> List[str]:
return Versions(region=region)
def get_version(date: datetime.date = None, region: Union[Region, str] = None) -> Union[None, str]:
versions = get_versions(region)
if date is None:
return versions[0]
else:
patch = Patch.from_date(date, region=region)
for version in versions:
if patch.majorminor in version:
return version
return None
def get_verification_string(summoner: Summoner) -> VerificationString:
return VerificationString(summoner=summoner)
def get_champion_rotations(region: Union[Region, str] = None) -> ChampionRotation:
return ChampionRotation(region=region)
# Pipeline
def _get_pipeline():
return configuration.settings.pipeline
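
# Illustrative usage sketch (commented out; the API key, region and summoner
# name below are placeholders, and the import assumes these helpers are
# re-exported at the package level as `cassiopeia`):
#
#   import cassiopeia as cass
#   cass.set_riot_api_key("RGAPI-xxxxxxxx")
#   cass.set_default_region("NA")
#   summoner = cass.get_summoner(name="ExampleName", region="NA")
#   history = cass.get_match_history(summoner)
#   masteries = cass.get_champion_masteries(summoner, region="NA")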
| mit | 2,522,235,741,749,909,500 | 35.341317 | 429 | 0.731257 | false |
sk413025/tilitools | latentsvdd.py | 1 | 3222 | from cvxopt import matrix,spmatrix,sparse,uniform,normal,setseed
from cvxopt.blas import dot,dotu
from cvxopt.solvers import qp
from cvxopt.lapack import syev
import numpy as np
import math as math
from kernel import Kernel
from svdd import SVDD
from ocsvm import OCSVM
import pylab as pl
import matplotlib.pyplot as plt
class LatentSVDD:
""" Latent variable support vector data description.
Written by Nico Goernitz, TU Berlin, 2014
For more information see:
'Learning and Evaluation with non-i.i.d Label Noise'
Goernitz et al., AISTATS & JMLR W&CP, 2014
"""
	PRECISION = 10**-3 # important: affects the threshold, support vectors and speed!
C = 1.0 # (scalar) the regularization constant > 0
sobj = [] # structured object contains various functions
# i.e. get_num_dims(), get_num_samples(), get_sample(i), argmin(sol,i)
sol = [] # (vector) solution vector (after training, of course)
def __init__(self, sobj, C=1.0):
self.C = C
self.sobj = sobj
def train_dc(self, max_iter=50):
""" Solve the LatentSVDD optimization problem with a
sequential convex programming/DC-programming
approach:
Iteratively, find the most likely configuration of
the latent variables and then, optimize for the
model parameter using fixed latent states.
"""
N = self.sobj.get_num_samples()
DIMS = self.sobj.get_num_dims()
# intermediate solutions
# latent variables
latent = [0]*N
sol = 10.0*normal(DIMS,1)
psi = matrix(0.0, (DIMS,N)) # (dim x exm)
old_psi = matrix(0.0, (DIMS,N)) # (dim x exm)
threshold = 0
obj = -1
iter = 0
# terminate if objective function value doesn't change much
while iter<max_iter and (iter<2 or sum(sum(abs(np.array(psi-old_psi))))>=0.001):
print('Starting iteration {0}.'.format(iter))
print(sum(sum(abs(np.array(psi-old_psi)))))
iter += 1
old_psi = matrix(psi)
# 1. linearize
# for the current solution compute the
# most likely latent variable configuration
for i in range(N):
# min_z ||sol - Psi(x,z)||^2 = ||sol||^2 + min_z -2<sol,Psi(x,z)> + ||Psi(x,z)||^2
# Hence => ||sol||^2 - max_z 2<sol,Psi(x,z)> - ||Psi(x,z)||^2
(foo, latent[i], psi[:,i]) = self.sobj.argmax(sol, i, opt_type='quadratic')
# 2. solve the intermediate convex optimization problem
kernel = Kernel.get_kernel(psi,psi)
svdd = SVDD(kernel, self.C)
svdd.train_dual()
threshold = svdd.get_threshold()
inds = svdd.get_support_dual()
alphas = svdd.get_support_dual_values()
sol = psi[:,inds]*alphas
self.sol = sol
self.latent = latent
return (sol, latent, threshold)
def apply(self, pred_sobj):
""" Application of the LatentSVDD:
anomaly_score = min_z ||c*-\Psi(x,z)||^2
latent_state = argmin_z ||c*-\Psi(x,z)||^2
"""
N = pred_sobj.get_num_samples()
norm2 = self.sol.trans()*self.sol
vals = matrix(0.0, (1,N))
lats = matrix(0.0, (1,N))
for i in range(N):
# min_z ||sol - Psi(x,z)||^2 = ||sol||^2 + min_z -2<sol,Psi(x,z)> + ||Psi(x,z)||^2
# Hence => ||sol||^2 - max_z 2<sol,Psi(x,z)> - ||Psi(x,z)||^2
(max_obj, lats[i], foo) = pred_sobj.argmax(self.sol, i, opt_type='quadratic')
vals[i] = norm2 - max_obj
return (vals, lats)
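
# Illustrative usage sketch (commented out; `my_train_sobj` / `my_test_sobj`
# are hypothetical structured objects exposing get_num_samples(),
# get_num_dims() and argmax(sol, i, opt_type=...) -> (objective, latent, psi),
# which is the only interface LatentSVDD relies on above):
#
#   lsvdd = LatentSVDD(my_train_sobj, C=1.0)
#   sol, latent, threshold = lsvdd.train_dc(max_iter=50)
#   scores, states = lsvdd.apply(my_test_sobj)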
| mit | -4,128,180,928,146,153,500 | 29.396226 | 86 | 0.646182 | false |
sniemi/SamPy | sandbox/src1/pviewer/pviewer.py | 1 | 31336 | #!/usr/bin/env python
from tkFileDialog import *
from Tkinter import *
from tkSimpleDialog import Dialog
import tkMessageBox
from plotAscii import *
from imageUtil import *
from view2d import *
from mdaAscii import *
import Pmw
import os, string
import AppShell
global Scan
global SH # SHARED
class setupPrinter(Dialog):
"Dialog for setting up printer "
def body(self,master):
self.title("Set Printer Dialog")
Label(master, text='Enter Printer Name:').grid(row=1, sticky=W)
self.label = StringVar()
self.label = Entry(master, width = 26 )
self.label.grid(row=1,column=1)
self.label.insert(0,SH['printer'])
return self.label
def apply(self):
SH['printer'] = self.label.get()
writeSH(SH)
class commandSyntax(Dialog):
"Dialog for sending a system command or any executable client"
def body(self,master):
self.title("Command Dialog")
self.commandsyntax = Pmw.EntryField(master, labelpos='w',
label_text='Enter Command:', value='',
command=self.valuechanged)
self.commandsyntax.pack(fill='x')
self.commandsyntax.component('entry').focus_set()
def valuechanged(self):
os.system(self.commandsyntax.get()+ ' &')
def apply(self):
self.destroy()
class pickDIdialog(Dialog):
"Dialog for selecting a text line which contains DI names to be used in multiline plot. If blank comment line picked, sequence number is used."
def body(self,master):
file = Scan['txtfile']
data = readArray(file)
nc = len(data[0])
self.nc = nc
fo = open(file,'r')
lines = fo.read()
fo.close()
lines = string.split(lines,'\n')
self.title("Pick Line where DI Names Resides")
box = Pmw.ScrolledListBox(master,
items=(lines),
labelpos=NW,label_font=SH['font'],
label_text='Extract column legends from the text window\nSelect the text line which contains\nlegends to be extracted for multi-line plot',
selectioncommand=self.selectionCommand,
dblclickcommand=self.selectionCommand,
usehullsize=1,hull_width=700,hull_height=400)
box.pack()
self.box = box
def selectionCommand(self):
box = self.box
sels = box.getcurselection()
sels = string.split(sels[0])
no = len(sels)
dc = no - self.nc
if dc >= 0:
sels = sels[dc:no]
ix = SH['ix']
sel = sels[ix+1:no]
else:
sel = range(self.nc)
V = []
for i in range(85):
V.append('')
for i in range(len(sel)):
V[i] = sel[i]
fo = open('pvs','w')
fo.write(str(V))
fo.close()
Scan['nc'] = len(V)
namedialog = GetLegends(self)
def apply(self):
self.destroy()
class GetXYVdialog(Dialog):
    "Dialog to set column or line # of X, Y, DATA array located in the opened ascii 2D image file (generated by scanSee/catcher/yviewer)"
def body(self,master):
try:
font=SH['font'] #'Verdana 10 bold'
self.title("Extract X,Y,DATA array from scanSee ASCII file")
self.ix = [IntVar(),IntVar(),IntVar(),IntVar()]
Label(master,text='X and Data column #:',font=font).grid(row=0,column=0,sticky=W)
Label(master,text='X Vector Column #').grid(row=1,column=1,sticky=W)
Label(master,text='Data Start Column #').grid(row=2,column=1,sticky=W)
Label(master,text='Y Vector Defined in:',font=font).grid(row=3,column=0,sticky=W)
Label(master,text='Y Vector Line #').grid(row=4,column=1,sticky=W)
Label(master,text='Y Start Column #').grid(row=5,column=1,sticky=W)
Entry(master,width=4,textvariable=self.ix[0]).grid(row=1,column=2,sticky=W)
Entry(master,width=4,textvariable=self.ix[1]).grid(row=2,column=2,sticky=W)
Entry(master,width=4,textvariable=self.ix[2]).grid(row=4,column=2,sticky=W)
Entry(master,width=4,textvariable=self.ix[3]).grid(row=5,column=2,sticky=W)
self.ix[0].set(0)
self.ix[1].set(2)
self.ix[2].set(3)
self.ix[3].set(2)
except AttributeError:
return self.ix[0]
def get(self):
return [self.ix[0].get(),self.ix[1].get(),self.ix[2].get(),self.ix[3].get()]
def apply(self):
ix = self.get()
Scan['rowcol'] = ix
file = Scan['txtfile']
if file != '':
data = readArray(file)
nc = len(data)
nr = len(data[0])
data = rowreverse(data)
x = data[ix[0]]
data = data[ix[1]:nr]
data = array(data)
fo = open(file,'r')
lines = fo.read()
            fo.close()
lines = string.split(lines,'\n')
if ix[2] >= 0:
py = lines[ix[2]]
py = string.split(py)
y = py[ix[3]:len(py)]
for i in range(len(y)):
y[i] = string.atof(y[i])
else:
y = range(len(data))
Scan['X'] = x
Scan['Y'] = y
file = Scan['txtfile']
if Scan['updown']:
plot2dUpdown(data,x,y,title=file)
else:
plot2d(data,x,y,title=file)
class defineXYdialog(Dialog):
"Dialog for entering Xmin,Xmax,Ymin,Ymax ranges"
def body(self,master):
try:
file = Scan['txtfile']
data = readArray(file)
data = rowreverse(data)
data = array(data)
nc = data.shape[1]
nr = data.shape[0]
Scan['im'] = data
font=SH['font'] #'Verdana 10 bold'
self.title("Set X, Y Ranges for Image Plot")
self.ix = [StringVar(),StringVar(),StringVar(),StringVar()]
Label(master,text='Enter X Plot Range',font=font).grid(row=0,column=0,sticky=W)
Label(master,text='Xmin').grid(row=1,column=1,sticky=W)
Label(master,text='Xmax').grid(row=2,column=1,sticky=W)
Label(master,text='Enter Y Plot Range',font=font).grid(row=3,column=0,sticky=W)
Label(master,text='Ymin').grid(row=4,column=1,sticky=W)
Label(master,text='Ymax').grid(row=5,column=1,sticky=W)
Entry(master,width=14,textvariable=self.ix[0]).grid(row=1,column=2,sticky=W)
Entry(master,width=14,textvariable=self.ix[1]).grid(row=2,column=2,sticky=W)
Entry(master,width=14,textvariable=self.ix[2]).grid(row=4,column=2,sticky=W)
Entry(master,width=14,textvariable=self.ix[3]).grid(row=5,column=2,sticky=W)
self.ix[0].set(1.)
self.ix[1].set(float(nc))
self.ix[2].set(1.)
self.ix[3].set(float(nr))
except AttributeError:
return self.ix[0]
def get(self):
return [self.ix[0].get(),self.ix[1].get(),self.ix[2].get(),self.ix[3].get()]
def apply(self):
ix = self.get()
ix = [string.atof(ix[0]),string.atof(ix[1]),string.atof(ix[2]),
string.atof(ix[3])]
data = Scan['im']
nr = data.shape[0]
nc = data.shape[1]
x = []
dx = (ix[1]-ix[0])/(nc-1)
for i in range(nc):
x.append(ix[0]+dx*i)
y = []
dy = (ix[3]-ix[2])/(nr-1)
for i in range(nr):
y.append(ix[2]+dy*i)
if Scan['updown']:
plot2dUpdown(data,x,y,title=Scan['txtfile'])
else:
plot2d(data,x,y,title=Scan['txtfile'])
class GetXYdialog(Dialog):
    "Dialog for defining X,Y vector line and column #"
def body(self,master):
try:
font=SH['font'] #'Verdana 10 bold'
self.title("Get X, Y Vectors from ASCII file")
self.ix = [IntVar(),IntVar(),IntVar(),IntVar()]
Label(master,text='X Vector Defined in:',font=font).grid(row=0,column=0,sticky=W)
Label(master,text='Line #').grid(row=1,column=1,sticky=W)
Label(master,text='Start Column #').grid(row=2,column=1,sticky=W)
Label(master,text='Y Vector Defined in:',font=font).grid(row=3,column=0,sticky=W)
Label(master,text='Line #').grid(row=4,column=1,sticky=W)
Label(master,text='Start Column #').grid(row=5,column=1,sticky=W)
Entry(master,width=4,textvariable=self.ix[0]).grid(row=1,column=2,sticky=W)
Entry(master,width=4,textvariable=self.ix[1]).grid(row=2,column=2,sticky=W)
Entry(master,width=4,textvariable=self.ix[2]).grid(row=4,column=2,sticky=W)
Entry(master,width=4,textvariable=self.ix[3]).grid(row=5,column=2,sticky=W)
# cl = Scan['rowcol']
cl = [3,2,4,2]
self.ix[0].set(cl[0])
self.ix[1].set(cl[1])
self.ix[2].set(cl[2])
self.ix[3].set(cl[3])
except AttributeError:
return self.ix[0]
def get(self):
return [self.ix[0].get(),self.ix[1].get(),self.ix[2].get(),self.ix[3].get()]
def apply(self):
ix = self.get()
Scan['rowcol'] = ix
file = Scan['txtfile']
if file != '':
fo = open(file,'r')
lines = fo.read()
            fo.close()
lines = string.split(lines,'\n')
px = lines[ix[0]]
px = string.split(px)
x = px[ix[1]:len(px)]
for i in range(len(x)):
x[i] = string.atof(x[i])
py = lines[ix[2]]
py = string.split(py)
y = py[ix[3]:len(py)]
for i in range(len(y)):
y[i] = string.atof(y[i])
Scan['X'] = x
Scan['Y'] = y
file = Scan['txtfile']
data = readArray(file)
data = rowreverse(data)
data = array(data)
if Scan['updown']:
plot2dUpdown(data,x,y,title=file)
else:
plot2d(data,x,y,title=file)
class GetXdialog(Dialog):
"Dialog for defining X column # in text file"
def body(self,master):
font=SH['font'] #'Verdana 10 bold'
self.title("1D Multi-Line Plot")
self.ix = IntVar()
Label(master,text='Defined valid X column # from text file:',font=font).pack(anchor=NW)
Label(master,text=Scan['txtfile'],font=font).pack(anchor=NW)
Label(master,text='-1 - No X column defined ').pack(anchor=NW)
Label(master,text=' 0 - X defined at First column').pack(anchor=NW)
Label(master,text=' 1 - X defined at Second column').pack(anchor=NW)
Label(master,text='Enter X Column Index #:',font=font).pack(side=LEFT)
self.ix = Entry(master, width = 4)
self.ix.pack(side=LEFT)
v = self.get()
self.ix.insert(0,v)
return self.ix
def get(self):
# fo.close()
SH = readSH()
ix = SH['ix']
return ix
def apply(self):
ix = self.ix.get()
SH['ix'] = string.atoi(ix)
writeSH(SH)
os.system('plotAscii.py '+Scan['txtfile']+' '+str(ix) +' &')
class pick2Ddetector(Dialog):
"Dialog to pick any detector from the MDA 2D detector list and plot the selected 2D detector image"
def body(self,master):
self.title("Select 2D Detector")
box = Pmw.ScrolledListBox(master,
items=('1','2','3','4'),
labelpos=NW,label_text='Pick Detector',
selectioncommand=self.selectionCommand,
dblclickcommand=self.selectionCommand,
usehullsize=1,hull_width=200,hull_height=200)
box.pack()
self.box = box
def selectionCommand(self):
box = self.box
sels = box.getcurselection()
sels = string.split(sels[0])
sel = string.atoi(sels[0])
Scan['2d'] = sel
d = Scan['data']
pick2d(d,sel,updown=Scan['updown'])
def apply(self):
self.destroy()
class pviewer(AppShell.AppShell):
usecommandarea=1
balloonhelp=1
appversion = '1.0'
appname = 'pviewer'
copyright = 'Copyright ANL-APS-AOD-BCDA. All Rights Reserved'
contactname = 'Ben-chin K Cha'
contactphone = '(630) 252-8653'
contactemail = '[email protected]'
frameWidth = 800
frameHeight = 500
def unimplemented(self):
pass
def messageMDA(self):
box = Pmw.Dialog(self.interior(),
defaultbutton='OK',title='Info')
w = Label(box.interior(),
text='You need to use File->Open MDA...\n to load in an MDA file first',
padx=10,pady=10).pack()
box.activate()
def messageAscii(self):
box = Pmw.Dialog(self.interior(),
defaultbutton='OK',title='Info')
w = Label(box.interior(),
text='You need to use File->Open Ascii...\n to load in an ASCII file first',
padx=10,pady=10).pack()
box.activate()
def savepvs(self):
file = 'pvs'
V = self.apply()
fd = open(file,'w')
fd.write(str(V))
fd.close()
def createButtons(self):
self.buttonAdd('Exit',
helpMessage='Exit pviewer',
statusMessage='Exit pviewer',
command=self.closeup)
def startup(self):
if os.path.isfile('pviewer.config'):
lines = readST('pviewer.config')
self.mdapath = lines[0]
self.txtpath = lines[1]
print 'self.mdapath=', self.mdapath
print 'self.txtpath=', self.txtpath
else:
self.mdapath = os.curdir
self.txtpath = os.curdir
def closeup(self):
fo = open('pviewer.config','w')
st = [ self.mdapath,self.txtpath]
# print str(st)
fo.write(str(st))
fo.close()
self.quit()
# def addmenuBar(self):
# self.menuBar.addmenu('Setup','Fields for plot legend')
def addMoremenuBar(self):
self.menuBar.addmenuitem('File', 'command', 'Quit this application',
label='Quit',
command=self.closeup)
self.menuBar.addmenuitem('File', 'command', '', label='--------------')
self.menuBar.addmenuitem('File', 'command',
'Setup Printer ...',
label='Printer...',
command=self.printerDialog)
self.menuBar.addmenuitem('File', 'command', '', label='--------------')
self.menuBar.addmenuitem('File', 'command',
'File Selection dialog for Ascii File ...',
label='Open Ascii ...',
command=self.openAscii)
self.menuBar.addmenuitem('File', 'command', '', label='--------------')
self.menuBar.addmenuitem('File', 'command',
'File Selection dialog for MDA File ...',
label='Open MDA ...',
command=self.openMDA)
self.menuBar.addmenuitem('Help', 'command',
'Online help about this application ...',
label='pviewer_help.txt ...',
command=self.openHelpText)
self.menuBar.addmenuitem('Setup','command',
'Pick and load Color Table for 2D image plot ',
label='Color Table...',
command=self.setCTdialog)
self.menuBar.addmenuitem('Setup','command',
'Modify legend field names used in multiline plot',
label='Name Legends...',
command=self.legenddialog)
self.toggleUpdownVar=IntVar()
self.toggleUpdownVar.set(1)
self.menuBar.addmenuitem('Setup','checkbutton',
'Toggle plot2d updown mode',
label='Image Upside Down',
variable=self.toggleUpdownVar,
command=self.updownImage)
self.menuBar.addmenu('MDAView','Various MDAView features')
self.menuBar.addmenuitem('MDAView','command',
'Access 1D Array and pass to multiline plotter...',
label='Multi-line 1D Plot...',
command=self.mda1DRptPlot)
self.menuBar.addmenuitem('MDAView', 'command', '',
label='--------------')
self.menuBar.addmenuitem('MDAView','command',
'Access panimage window',
label='PanImages...',
command=self.getpanimage)
self.menuBar.addmenuitem('MDAView','command',
'Display 2D image for the select detector',
label='Pick Di Image...',
command=self.get2Ddetector)
self.menuBar.addmenu('MDAReports','Various Report features')
self.menuBar.addmenuitem('MDAReports','command',
'Generate MDA 1D/2D reports',
label='MDA 1D/2D Reports...',
command=self.mdaReport)
self.menuBar.addmenuitem('MDAReports','command',
'Generate sequential MDA 1D report from 2D array',
label='MDA 2D->1D Report...',
command=self.mda2D1DRpt)
self.menuBar.addmenuitem('MDAReports', 'command', '',
label='--------------')
self.menuBar.addmenuitem('MDAReports','command',
'Generate MDA report for current MDA directory',
label='Generate All MDA Report...',
command=self.mdaAllRpt)
self.menuBar.addmenuitem('MDAReports', 'command', '',
label='--------------')
self.menuBar.addmenuitem('MDAReports','command',
'Generate MDA 2D report in IGOR format',
label='MDA to IGOR Report...',
command=self.mdaIGORRpt)
self.menuBar.addmenuitem('MDAReports', 'command', '',
label='--------------')
self.menuBar.addmenuitem('MDAReports','command',
'Show ASCII Report Files',
label='View ASCII Report...',
command=self.showAscii)
self.menuBar.addmenuitem('MDAReports', 'command', '',
label='--------------')
self.menuBar.addmenuitem('MDAReports','command',
'Clear All Files in ASCII directory',
label='Remove All Reports...',
command=self.removeAscii)
self.menuBar.addmenu('AsciiView','Various AsciiView features')
self.menuBar.addmenuitem('AsciiView', 'command', '',
label='--------------')
self.menuBar.addmenuitem('AsciiView','command',
'Enter the zero based X column # in ASCII file',
label='Multi-line Plotter...',
command=self.XcolDialog)
self.menuBar.addmenuitem('AsciiView','command',
'Pick line of DI legend name from the ascii file',
label='Extract & Modify Legend...',
command=self.DIlinedialog)
self.menuBar.addmenuitem('AsciiView', 'command', '',
label='--------------')
self.menuBar.addmenuitem('AsciiView', 'command',
'Pass ascii text data to image plot ...',
label='TV Image ...',
command=self.imageAscii)
self.menuBar.addmenu('Ascii2Image','Plot2D Ascii Image features')
self.menuBar.addmenuitem('Ascii2Image', 'command',
'No X,Y vector defined in ascii file',
label='Plot2d...',
command=self.plot2ddialog)
self.menuBar.addmenuitem('Ascii2Image', 'command',
'User set X,Y ranges dialog',
label='X,Y Range for image...',
command=self.XYrangeDialog)
self.menuBar.addmenuitem('Ascii2Image', 'command', '',
label='--------------')
self.menuBar.addmenuitem('Ascii2Image', 'command',
'Extract the X,Y line vectors from mdaAscii generated file',
label='X,Y Line vector from mdaAscii file...',
command=self.XYrowcolDialog)
self.menuBar.addmenuitem('Ascii2Image', 'command', '',
label='--------------')
self.menuBar.addmenuitem('Ascii2Image', 'command',
'Extract X,Y,Data from scanSee/catcher/yviewer generated file',
label='X column, Y line, DATA column from ascii file...',
command=self.XYVDialog)
self.menuBar.addmenu('ScanTools','Various scan programs')
self.menuBar.addmenuitem('ScanTools','command',
'Run plot.py python program',
label='Python plot.py ...',
command=self.runPlot)
self.menuBar.addmenuitem('ScanTools', 'command', '',
label='--------------')
self.menuBar.addmenuitem('ScanTools','command',
'Run idlvm sscan (scanSee) program',
label='idlvm sscan ...',
command=self.runSscan)
self.menuBar.addmenuitem('ScanTools','command',
'Run idlvm catcher (catcher) program',
label='idlvm catcher ...',
command=self.runCatcher)
self.menuBar.addmenuitem('ScanTools','command',
'Run idlvm mca (MCA) program',
label='idlvm mca ...',
command=self.runMCA)
self.menuBar.addmenu('Tools','Various system tools')
self.menuBar.addmenuitem('Tools','command',
'Run start_epics program',
label='start_epics ...',
command=self.runMedm)
self.menuBar.addmenuitem('Tools', 'command', '',
label='--------------')
self.menuBar.addmenuitem('Tools', 'command',
'Enter any valid command syntax ...',
label='Command Dialog...',
command=self.commandDialog)
def runPlot(self):
os.system('plot.py & ')
def runSscan(self):
os.system('idlvm sscan & ')
def runCatcher(self):
os.system('idlvm catcher & ')
def runMCA(self):
os.system('idlvm mca & ')
def runMedm(self):
h = os.getenv('HOME')
os.system(h +'/start_epics & ')
def commandDialog(self):
cmd = commandSyntax(self.interior())
def printerDialog(self):
setupPrinter(self.interior())
def removeAscii(self):
from Dialog import *
# dir = os.getcwd() +os.sep+'ASCII'+os.sep+'*.txt'
dir = self.txtpath+os.sep+'*.txt'
dir = 'rm -fr '+dir
pa = {'title': 'Remove ASCII files',
'text': dir + '\n\n'
'All ascii text files will be removed\n'
'from the sub-directory ASCII.\n'
'Is it OK to remove all files ?\n ',
'bitmap': DIALOG_ICON,
'default': 1,
'strings': ('OK','Cancel')}
dialog = Dialog(self.interior(),pa)
ans = dialog.num
if ans == 0:
print dir
os.system(dir)
def showAscii(self):
fname = tkFileDialog.askopenfilename(initialdir=self.txtpath,initialfile="*txt*")
if fname == (): return
xdisplayfile(fname)
def mdaIGORRpt(self):
if Scan['open']:
d = self.MDA
if d[0]['rank'] < 2:
return
fname = self.mdafile
ofname = mdaAscii_IGOR(d)
if self.textWid != None:
self.textWid.destroy()
self.textWid = None
st = Pmw.ScrolledText(self.interior(),borderframe=1,labelpos=N,
label_text=ofname,usehullsize=1,
hull_width=800,hull_height=400,
text_padx=10,text_pady=10,
text_wrap='none')
fo = open(ofname,'r')
st_text = fo.read()
fo.close()
st.settext(st_text)
st.pack(fill=BOTH, expand=1, padx=5, pady=5)
self.textWid = st
self.textfile = ofname
SH['ix'] = -1
writeSH(SH)
(self.txtpath,fn) = os.path.split(ofname)
else:
self.messageMDA()
def mdaAllRpt(self):
if self.textWid != None:
self.textWid.destroy()
self.textWid = None
st = Pmw.ScrolledText(self.interior(),borderframe=1,labelpos=N,
label_text='MDA file from: '+self.mdapath,usehullsize=1,
hull_width=800,hull_height=400,
text_padx=10,text_pady=10,
text_wrap='none')
st.pack()
st.settext('Reports saved in: '+os.getcwd()+os.sep+'ASCII')
self.textWid=st
mdaAscii_all(self.mdapath)
def mda2D1DRpt(self):
# d = readMDA.readMDA(fname, 1, 0, 0)
if Scan['open']:
d = self.MDA
if d[0]['rank'] < 2: return
if d[2].nd == 0: return
fname = self.mdafile
ofname = mdaAscii_2D1D(d)
if self.textWid != None:
self.textWid.destroy()
self.textWid = None
st = Pmw.ScrolledText(self.interior(),borderframe=1,labelpos=N,
label_text=ofname,usehullsize=1,
hull_width=800,hull_height=400,
text_padx=10,text_pady=10,
text_wrap='none')
fo = open(ofname,'r')
st_text = fo.read()
fo.close()
st.settext(st_text)
st.pack(fill=BOTH, expand=1, padx=5, pady=5)
self.textWid = st
self.textfile = ofname
Scan['txtfile'] = ofname
SH['ix'] = 0
(self.txtpath,fn) = os.path.split(ofname)
def mda2DRpt(self):
# d = readMDA.readMDA(fname, 1, 0, 0)
if Scan['open']:
d = self.MDA
fname = self.mdafile
if d[1].nd > 0 :
ofname = mdaAscii_1D(d)
if d[0]['rank'] < 2: return
if d[2].nd == 0 : return
ofname = mdaAscii_2D(d)
py = d[1].p[0].data
px = d[2].p[0].data
px = px[0]
Scan['X'] = px
Scan['Y'] = py
Scan['txtfile'] = ofname
if self.textWid != None:
self.textWid.destroy()
self.textWid = None
st = Pmw.ScrolledText(self.interior(),borderframe=1,labelpos=N,
label_text=ofname,usehullsize=1,
hull_width=800,hull_height=400,
text_padx=10,text_pady=10,
text_wrap='none')
fo = open(ofname,'r')
st_text = fo.read()
fo.close()
st.settext(st_text)
st.pack(fill=BOTH, expand=1, padx=5, pady=5)
self.textWid = st
self.textfile = ofname
SH['ix'] = -1
writeSH(SH)
(self.txtpath,fn) = os.path.split(ofname)
else:
self.messageMDA()
def mda1DRptPlot(self):
self.mda1DRpt()
self.plotAscii()
def mdaReport(self):
d = self.MDA
if d[0]['rank'] == 1:
self.mda1DRpt()
if d[0]['rank'] >= 2:
self.mda2DRpt()
def mda1DRpt(self):
# d = readMDA.readMDA(fname, 1, 0, 0)
if Scan['open']:
d = self.MDA
fname = self.mdafile
ofname = mdaAscii_1D(d)
if self.textWid != None:
self.textWid.destroy()
self.textWid = None
st = Pmw.ScrolledText(self.interior(),borderframe=1,labelpos=N,
label_text=ofname,usehullsize=1,
hull_width=800,hull_height=400,
text_padx=10,text_pady=10,
text_wrap='none')
fo = open(ofname,'r')
st_text = fo.read()
fo.close()
st.settext(st_text)
st.pack(fill=BOTH, expand=1, padx=5, pady=5)
self.textWid = st
self.textfile = ofname
Scan['txtfile'] = ofname
SH['ix'] = 0
(self.txtpath,fn) = os.path.split(ofname)
else:
self.messageMDA()
def colorbar(self):
W = 256
clrbar =[]
for j in range(10):
clrbar.append(range(W))
clrbar = array(clrbar)
imagebar = PNGImage(self.canvas,clrbar,(2,2))
imagebar.pack(side='top')
self.imagebar = imagebar
def executeCT(self):
sels = self.textWid.getcurselection()
sels = string.split(sels[0])
CT_id = string.atoi(sels[0])
ps = str(CT[CT_id])
fo = open('pal.dat','wb')
fo.write(ps)
fo.close()
self.imagebar.destroy()
self.colorbar()
def setCTdialog(self):
if self.textWid != None:
self.textWid.destroy()
self.textWid = None
CT = readCT()
CT_id=39
frame = self.interior()
self.canvas = Canvas(frame,width=300,height=50)
self.canvas.pack()
self.colorbar()
dname=('0 B-W LINEAR','1 BLUE/WHITE','2 GRN-RED-BLU-WHT',
'3 RED TEMPERATURE','4 BLUE/GREEN/RED/YELLOW','5 STD GAMMA-II',
'6 PRISM','7 RED-PURPLE','8 GREEN/WHITE LINEAR',
'9 GRN/WHT EXPONENTIAL','10 GREEN-PINK','11 BLUE-RED',
'12 16-LEVEL','13 RAINBOW','14 STEPS',
'15 STERN SPECIAL','16 Haze','17 Blue-Pastel-Red',
'18 Pastels','19 Hue Sat Lightness1','20 Hue Sat Lightness2',
'21 Hue Sat Value 1','22 Hue Sat Value 2','23 Purple-Red + Stripes',
'24 Beach','25 Mac Style','26 Eos A',
'27 Eos B','28 Hardcandy','29 Nature',
'30 Ocean','31 Peppermint','32 Plasma',
'33 Blue-Red','34 Rainbow',
'35 Blue Waves','36 Volcano','37 Waves',
'38 Rainbow18','39 Rainbow + white','40 Rainbow + black')
box = Pmw.ScrolledListBox(frame,
labelpos=N,label_text='Color Table #',
items=dname,
listbox_height=5,vscrollmode='static',
selectioncommand= self.executeCT,
dblclickcommand= self.executeCT,
usehullsize=1, hull_width=400, hull_height=200)
# box.pack(fill=BOTH,expand=1,padx=10,pady=10)
box.pack()
self.textWid = box
def selectionCommand(self):
box = self.textWid
sels = box.getcurselection()
sels = string.split(sels[0])
sel = string.atoi(sels[0])
Scan['2d'] = sel
d = self.MDA
pick2d(d,sel,updown=Scan['updown'])
def get2Ddetector(self):
if self.mdafile == '':
self.messageMDA()
return
if self.textWid != None:
self.textWid.destroy()
self.textWid = None
root = self.interior()
d = self.MDA
nd = d[2].nd
dname =[]
for i in range(nd):
lst = str(i) + ' '+d[2].d[i].fieldName +' ' + d[2].d[i].name +' '+ d[2].d[i].desc +' '+d[2].d[i].unit
dname.append(lst)
box = Pmw.ScrolledListBox(root,
labelpos=N,label_text='2D Image Seq #',
items=(dname[0:nd]),
listbox_height=5,vscrollmode='static',
selectioncommand= self.selectionCommand,
dblclickcommand= self.selectionCommand,
usehullsize=1, hull_width=500, hull_height=200)
# box.pack(fill=BOTH,expand=1,padx=10,pady=10)
box.pack()
self.textWid = box
def getpanimage(self):
file = self.mdafile
if file != '':
d = self.MDA
pal = readPalette()
if d[0]['rank'] > 1:
det2D(d[2].d[0:d[2].nd],scale=(1,1),columns=5,file=file,pal=pal)
else:
self.messageMDA()
def headerMDA(self,d,J,st_text):
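        "Append the dimension-J scan header and detector descriptions of MDA data d to st_text and return it"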
try:
if d[J].nd > 0:
st_text = st_text+d[J].scan_name+'\n'
                st_text = st_text+'NPTS: '+str(d[J].npts)+'\n'
                st_text = st_text+'CURR_PT: '+str(d[J].curr_pt)+'\n'
st_text = st_text + '**'+str(J)+'D detectors**\n'
for i in range(d[J].nd):
st_text=st_text+d[J].d[i].fieldName+' : '+d[J].d[i].name+', '+d[J].d[i].desc+', '+d[J].d[i].unit+'\n'
except IndexError:
pass
return st_text
def openMDA(self):
fname = askopenfilename( initialdir=self.mdapath,
filetypes=[("MDA File", '.mda'),
("All Files","*")])
if fname =='':
return
self.mdafile = fname
(self.mdapath, fn) = os.path.split(fname)
d = readMDA(fname)
self.MDA = d
Scan['data'] = d
Scan['open'] = 1
st_text = 'Please use ViewMDA menu to access MDA 1D/2D data array\n\n'
try:
if d[1].nd > 0:
st_text = self.headerMDA(d,1,st_text)
if d[1].nd > 0:
V=[]
for i in range(85):
V.append('')
for i in range(d[1].nd):
V[i] = d[1].d[i].fieldName
file='pvs'
fd = open(file,'w')
fd.write(str(V))
fd.close()
except IndexError:
pass
try:
if d[2].nd > 0:
st_text = self.headerMDA(d,2,st_text)
except IndexError:
pass
try:
if d[3].nd > 0:
st_text = self.headerMDA(d,3,st_text)
except IndexError:
pass
if self.textWid != None:
self.textWid.destroy()
self.textWid = None
st = Pmw.ScrolledText(self.interior(),borderframe=1,labelpos=N,
label_text=fname,usehullsize=1,
hull_width=800,hull_height=400,
text_padx=10,text_pady=10,
text_wrap='none')
st.settext(st_text)
st.pack(fill=BOTH, expand=1, padx=1, pady=1)
self.textWid = st
def openHelpText(self):
if self.textWid != None:
self.textWid.destroy()
self.textWid = None
fname = os.environ['PYTHONSTARTUP']+os.sep+'pviewer_help.txt'
st = Pmw.ScrolledText(self.interior(),borderframe=1,labelpos=N,
label_text=fname,usehullsize=1,
hull_width=800,hull_height=400,
text_padx=10,text_pady=10,
text_wrap='none')
st.importfile(fname)
st.pack(fill=BOTH, expand=1, padx=1, pady=1)
self.textWid = st
def openAscii(self):
fname = askopenfilename(initialdir=self.txtpath,
filetypes=[("ASCII Data", '.txt'),
("Image Files","*im*"),
("Data Files",".dat"),
("All Files","*")])
if fname == '':
return
(self.txtpath,fn) = os.path.split(fname)
Scan['txtfile'] = fname
self.textfile = fname
if self.textWid != None:
self.textWid.destroy()
self.textWid = None
st = Pmw.ScrolledText(self.interior(),borderframe=1,labelpos=N,
label_text=fname,usehullsize=1,
hull_width=800,hull_height=400,
text_padx=10,text_pady=10,
text_wrap='none')
st.importfile(fname)
st.pack(fill=BOTH, expand=1, padx=1, pady=1)
self.textWid = st
def imageAscii(self):
if self.textfile != '':
file = self.textfile
data = readArray(file)
data = rowreverse(data)
TV(data)
else:
self.messageAscii()
def plot2ddialog(self):
if self.textfile != '':
file = self.textfile
data = readArray(file)
data = rowreverse(data)
nr = len(data)
nc = len(data[0])
x = range(nc)
y = range(nr)
data = array(data)
if Scan['updown']:
plot2dUpdown(data,x,y,title=file)
else:
plot2d(data,x,y,title=file)
else:
self.messageAscii()
def plotAscii(self):
if self.textfile == '':
self.messageAscii()
return
try:
os.system('plotAscii.py '+self.textfile+' &')
except AttributeError:
pass
def XYrowcolDialog(self):
file = Scan['txtfile']
if file == '':
self.messageAscii()
return
ix = GetXYdialog(self.interior())
def XYVDialog(self):
file = Scan['txtfile']
if file == '':
self.messageAscii()
return
ix = GetXYVdialog(self.interior())
def XYrangeDialog(self):
file = Scan['txtfile']
if file == '':
self.messageAscii()
return
ix = defineXYdialog(self.interior())
def XcolDialog(self):
if self.textfile == '':
self.messageAscii()
else:
Scan['txtfile'] = self.textfile
ix=GetXdialog(self.interior())
def legenddialog(self):
# dialog=GetLegends(self.interior())
GetLegends(self.interior())
def DIlinedialog(self):
file = Scan['txtfile']
if file == '': return
dialog=pickDIdialog(self.interior())
def updownImage(self):
Scan['updown'] = self.toggleUpdownVar.get()
def pick2Ddialog(self):
if Scan['open']:
dialog=pick2Ddetector(self.interior())
def createInterface(self):
AppShell.AppShell.createInterface(self)
self.addMoremenuBar()
# self.createButtons()
self.textWid = None
self.mdafile = ''
self.textfile = ''
self.startup()
if __name__ == '__main__':
SH = {'ix': 0, 'printer': '', 'font': 'Verdana 10 bold', }
if os.path.isfile('SH'):
SH = readSH()
else:
writeSH(SH)
Scan = { 'open': 0,
'2d': 0,
'updown': 1,
'1d': 0,
'nc': 0,
'CT': 39,
'rowcol': [3,2,4,2],
'txtfile': '',
'pvs1': None,
'pvs2': None,
'pvs3': None,
'X': None,
'Y': None,
'im': None,
'data': None }
CT = readCT()
pt = pviewer()
pt.run()
| bsd-2-clause | -3,291,895,447,585,921,000 | 27.987974 | 146 | 0.633361 | false |
c0cky/mediathread | mediathread/projects/admin.py | 1 | 1386 | from django.contrib import admin
from django.contrib.auth.models import User
from mediathread.projects.models import Project
class ProjectAdmin(admin.ModelAdmin):
search_fields = ("title",
"participants__last_name", "author__username",
"participants__last_name")
list_display = ("title", "course", "author", "modified",
"date_submitted", "id", "project_type",
"response_view_policy")
filter_horizontal = ('participants',)
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "author":
kwargs["queryset"] = User.objects.all().order_by('username')
return super(ProjectAdmin, self).formfield_for_foreignkey(db_field,
request,
**kwargs)
def formfield_for_manytomany(self, db_field, request, **kwargs):
if db_field.name == "participants":
kwargs["queryset"] = User.objects.all().order_by('username')
return super(ProjectAdmin, self).formfield_for_manytomany(db_field,
request,
**kwargs)
admin.site.register(Project, ProjectAdmin)
| gpl-2.0 | 7,909,254,326,937,188,000 | 43.709677 | 75 | 0.519481 | false |
lorensen/VTKExamples | src/Python/Deprecated/GeometricObjects/ParametricObjectsDemo.py | 1 | 5485 | # !/usr/bin/env python
# -*- coding: utf-8 -*-
import vtk
def main():
colors = vtk.vtkNamedColors()
colors.SetColor("BkgColor", [26, 51, 102, 255])
parametricObjects = list()
parametricObjects.append(vtk.vtkParametricBoy())
parametricObjects.append(vtk.vtkParametricConicSpiral())
parametricObjects.append(vtk.vtkParametricCrossCap())
parametricObjects.append(vtk.vtkParametricDini())
parametricObjects.append(vtk.vtkParametricEllipsoid())
parametricObjects[-1].SetXRadius(0.5)
parametricObjects[-1].SetYRadius(2.0)
parametricObjects.append(vtk.vtkParametricEnneper())
parametricObjects.append(vtk.vtkParametricFigure8Klein())
parametricObjects.append(vtk.vtkParametricKlein())
parametricObjects.append(vtk.vtkParametricMobius())
parametricObjects[-1].SetRadius(2)
parametricObjects[-1].SetMinimumV(-0.5)
parametricObjects[-1].SetMaximumV(0.5)
parametricObjects.append(vtk.vtkParametricRandomHills())
parametricObjects[-1].AllowRandomGenerationOff()
parametricObjects.append(vtk.vtkParametricRoman())
parametricObjects.append(vtk.vtkParametricSuperEllipsoid())
parametricObjects[-1].SetN1(0.5)
parametricObjects[-1].SetN2(0.1)
parametricObjects.append(vtk.vtkParametricSuperToroid())
parametricObjects[-1].SetN1(0.2)
parametricObjects[-1].SetN2(3.0)
parametricObjects.append(vtk.vtkParametricTorus())
parametricObjects.append(vtk.vtkParametricSpline())
# Add some points to the parametric spline.
inputPoints = vtk.vtkPoints()
rng = vtk.vtkMinimalStandardRandomSequence()
rng.SetSeed(8775070)
for i in range(0, 10):
rng.Next()
x = rng.GetRangeValue(0.0, 1.0)
rng.Next()
y = rng.GetRangeValue(0.0, 1.0)
rng.Next()
z = rng.GetRangeValue(0.0, 1.0)
inputPoints.InsertNextPoint(x, y, z)
parametricObjects[-1].SetPoints(inputPoints)
parametricFunctionSources = list()
renderers = list()
mappers = list()
actors = list()
textmappers = list()
textactors = list()
# Create one text property for all
textProperty = vtk.vtkTextProperty()
textProperty.SetFontSize(12)
textProperty.SetJustificationToCentered()
backProperty = vtk.vtkProperty()
backProperty.SetColor(colors.GetColor3d("Tomato"))
# Create a parametric function source, renderer, mapper, and actor
# for each object
for i in range(0, len(parametricObjects)):
parametricFunctionSources.append(vtk.vtkParametricFunctionSource())
parametricFunctionSources[i].SetParametricFunction(parametricObjects[i])
parametricFunctionSources[i].SetUResolution(51)
parametricFunctionSources[i].SetVResolution(51)
parametricFunctionSources[i].SetWResolution(51)
parametricFunctionSources[i].Update()
mappers.append(vtk.vtkPolyDataMapper())
mappers[i].SetInputConnection(parametricFunctionSources[i].GetOutputPort())
actors.append(vtk.vtkActor())
actors[i].SetMapper(mappers[i])
actors[i].GetProperty().SetColor(colors.GetColor3d("Banana"))
actors[i].GetProperty().SetSpecular(.5)
actors[i].GetProperty().SetSpecularPower(20)
actors[i].SetBackfaceProperty(backProperty)
textmappers.append(vtk.vtkTextMapper())
textmappers[i].SetInput(parametricObjects[i].GetClassName())
textmappers[i].SetTextProperty(textProperty)
textactors.append(vtk.vtkActor2D())
textactors[i].SetMapper(textmappers[i])
textactors[i].SetPosition(100, 16)
renderers.append(vtk.vtkRenderer())
renderers[i].AddActor(actors[i])
renderers[i].AddActor(textactors[i])
renderers[i].SetBackground(colors.GetColor3d("BkgColor"))
# Setup the viewports
xGridDimensions = 4
yGridDimensions = 4
rendererSize = 200
renderWindow = vtk.vtkRenderWindow()
renderWindow.SetWindowName("Parametric Objects Demonstration")
renderWindow.SetSize(rendererSize * xGridDimensions, rendererSize * yGridDimensions)
for row in range(0, yGridDimensions):
for col in range(0, xGridDimensions):
index = row * xGridDimensions + col
# (xmin, ymin, xmax, ymax)
viewport = [float(col) / xGridDimensions,
float(yGridDimensions - (row + 1)) / yGridDimensions,
float(col + 1) / xGridDimensions,
float(yGridDimensions - row) / yGridDimensions]
if index > (len(actors) - 1):
# Add a renderer even if there is no actor.
# This makes the render window background all the same color.
ren = vtk.vtkRenderer()
ren.SetBackground(colors.GetColor3d("BkgColor"))
ren.SetViewport(viewport)
renderWindow.AddRenderer(ren)
continue
renderers[index].SetViewport(viewport)
renderers[index].ResetCamera()
renderers[index].GetActiveCamera().Azimuth(30)
renderers[index].GetActiveCamera().Elevation(-30)
renderers[index].GetActiveCamera().Zoom(0.9)
renderers[index].ResetCameraClippingRange()
renderWindow.AddRenderer(renderers[index])
interactor = vtk.vtkRenderWindowInteractor()
interactor.SetRenderWindow(renderWindow)
renderWindow.Render()
interactor.Start()
if __name__ == '__main__':
main()
| apache-2.0 | 3,249,549,297,955,955,000 | 36.827586 | 88 | 0.678213 | false |
jamielennox/python-keystoneclient | keystoneclient/tests/test_cms.py | 1 | 6122 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import os
import subprocess
import mock
import testresources
from testtools import matchers
from keystoneclient.common import cms
from keystoneclient import exceptions
from keystoneclient.tests import client_fixtures
from keystoneclient.tests import utils
class CMSTest(utils.TestCase, testresources.ResourcedTestCase):
"""Unit tests for the keystoneclient.common.cms module."""
resources = [('examples', client_fixtures.EXAMPLES_RESOURCE)]
def test_cms_verify(self):
self.assertRaises(exceptions.CertificateConfigError,
cms.cms_verify,
'data',
'no_exist_cert_file',
'no_exist_ca_file')
def test_token_tocms_to_token(self):
with open(os.path.join(client_fixtures.CMSDIR,
'auth_token_scoped.pem')) as f:
AUTH_TOKEN_SCOPED_CMS = f.read()
self.assertEqual(cms.token_to_cms(self.examples.SIGNED_TOKEN_SCOPED),
AUTH_TOKEN_SCOPED_CMS)
tok = cms.cms_to_token(cms.token_to_cms(
self.examples.SIGNED_TOKEN_SCOPED))
self.assertEqual(tok, self.examples.SIGNED_TOKEN_SCOPED)
def test_asn1_token(self):
self.assertTrue(cms.is_asn1_token(self.examples.SIGNED_TOKEN_SCOPED))
self.assertFalse(cms.is_asn1_token('FOOBAR'))
def test_cms_sign_token_no_files(self):
self.assertRaises(subprocess.CalledProcessError,
cms.cms_sign_token,
self.examples.TOKEN_SCOPED_DATA,
'/no/such/file', '/no/such/key')
def test_cms_sign_token_no_files_pkiz(self):
self.assertRaises(subprocess.CalledProcessError,
cms.pkiz_sign,
self.examples.TOKEN_SCOPED_DATA,
'/no/such/file', '/no/such/key')
def test_cms_sign_token_success(self):
self.assertTrue(
cms.pkiz_sign(self.examples.TOKEN_SCOPED_DATA,
self.examples.SIGNING_CERT_FILE,
self.examples.SIGNING_KEY_FILE))
def test_cms_verify_token_no_files(self):
self.assertRaises(exceptions.CertificateConfigError,
cms.cms_verify,
self.examples.SIGNED_TOKEN_SCOPED,
'/no/such/file', '/no/such/key')
def test_cms_verify_token_no_oserror(self):
def raise_OSError(*args):
e = OSError()
e.errno = errno.EPIPE
raise e
with mock.patch('subprocess.Popen.communicate', new=raise_OSError):
try:
cms.cms_verify("x", '/no/such/file', '/no/such/key')
except exceptions.CertificateConfigError as e:
self.assertIn('/no/such/file', e.output)
self.assertIn('Hit OSError ', e.output)
else:
self.fail('Expected exceptions.CertificateConfigError')
def test_cms_verify_token_scoped(self):
cms_content = cms.token_to_cms(self.examples.SIGNED_TOKEN_SCOPED)
self.assertTrue(cms.cms_verify(cms_content,
self.examples.SIGNING_CERT_FILE,
self.examples.SIGNING_CA_FILE))
def test_cms_verify_token_scoped_expired(self):
cms_content = cms.token_to_cms(
self.examples.SIGNED_TOKEN_SCOPED_EXPIRED)
self.assertTrue(cms.cms_verify(cms_content,
self.examples.SIGNING_CERT_FILE,
self.examples.SIGNING_CA_FILE))
def test_cms_verify_token_unscoped(self):
cms_content = cms.token_to_cms(self.examples.SIGNED_TOKEN_UNSCOPED)
self.assertTrue(cms.cms_verify(cms_content,
self.examples.SIGNING_CERT_FILE,
self.examples.SIGNING_CA_FILE))
def test_cms_verify_token_v3_scoped(self):
cms_content = cms.token_to_cms(self.examples.SIGNED_v3_TOKEN_SCOPED)
self.assertTrue(cms.cms_verify(cms_content,
self.examples.SIGNING_CERT_FILE,
self.examples.SIGNING_CA_FILE))
def test_cms_hash_token_no_token_id(self):
token_id = None
self.assertThat(cms.cms_hash_token(token_id), matchers.Is(None))
def test_cms_hash_token_not_pki(self):
"""If the token_id is not a PKI token then it returns the token_id."""
token = 'something'
self.assertFalse(cms.is_asn1_token(token))
self.assertThat(cms.cms_hash_token(token), matchers.Is(token))
def test_cms_hash_token_default_md5(self):
"""The default hash method is md5."""
token = self.examples.SIGNED_TOKEN_SCOPED
token_id_default = cms.cms_hash_token(token)
token_id_md5 = cms.cms_hash_token(token, mode='md5')
self.assertThat(token_id_default, matchers.Equals(token_id_md5))
# md5 hash is 32 chars.
self.assertThat(token_id_default, matchers.HasLength(32))
def test_cms_hash_token_sha256(self):
"""Can also hash with sha256."""
token = self.examples.SIGNED_TOKEN_SCOPED
token_id = cms.cms_hash_token(token, mode='sha256')
# sha256 hash is 64 chars.
self.assertThat(token_id, matchers.HasLength(64))
def load_tests(loader, tests, pattern):
return testresources.OptimisingTestSuite(tests)
| apache-2.0 | -8,033,659,842,411,192,000 | 40.364865 | 78 | 0.601764 | false |
rmed/textventures | src/textventures/instances/key_navigation.py | 1 | 4144 | # -*- coding: utf-8 -*-
# This file is part of TextVentures - https://github.com/RMed/textventures
#
# Copyright (C) 2013 Rafael Medina García <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import menu, sys
class Listener:
"""Gets user input for navigation."""
def __init__(self):
# Check for Windows platform
if sys.platform.startswith('win'):
import msvcrt
# Check for UNIX platforms
else:
import tty
def __call__(self):
# Windows
if sys.platform.startswith('win'):
import msvcrt
# Save character
char = msvcrt.getch()
# UNIX
else:
import tty, termios
# Read character
fd = sys.stdin.fileno()
attr = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
char = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, attr)
# Return character
return char
class Action:
"""Check the input character and act accordingly."""
def __init__(self, input_char, action_type):
"""Arguments:
input_char -- pressed character
action_type -- type of the action (menu, load, etc)
"""
self.char = input_char.lower()
self.action = action_type
def __call__(self):
# Check the action type
if self.action == 'main':
# Main menu
if self.char == 'n':
# New game menu
menu.newgame_menu()
elif self.char == 'l':
# Load game menu
menu.load_menu()
elif self.char == 'o':
# Options menu
menu.options_menu()
elif self.char == 'h':
# Help menu
menu.help_menu()
elif self.char == 'a':
# About menu
menu.about_menu()
elif self.char == 'e':
# Exit program
sys.exit()
elif self.action == 'load':
# Load menu
if self.char == 'b':
# Back to main menu
menu.main_menu()
elif self.char == 'c':
# Choose game
return self.char
elif self.action == 'options':
# Load menu
if self.char == 'b':
# Back to main menu
menu.main_menu()
elif self.char == 'c':
# Choose language
return self.char
elif self.action == 'new':
# New game menu
if self.char == 'b':
# Back to main menu
menu.main_menu()
elif self.char == 'c':
# Choose game
return self.char
elif self.action == 'help':
# Help menu
if self.char == 'b':
# Back to main menu
menu.main_menu()
elif self.action == 'about':
# About menu
if self.char == 'l':
menu.show_license()
elif self.char == 'b':
# Back to main menu
menu.main_menu()
elif self.action == 'license':
# License
if self.char == 'b':
# Back to About menu
menu.about_menu()
| gpl-2.0 | 5,757,127,063,372,303,000 | 30.150376 | 74 | 0.502052 | false |
banansson/cask | cask.py | 1 | 3281 | #!/usr/bin/python
import sys
import argparse
from os import path
from src.bag import Bag
from src.package import Package
from src.task import Task
from src.message import Message
from src.application import Application
from src.application_info import ApplicationInfo
from src.bootstrap import Bootstrap
from src import utils
def run(argv):
default_packs_dir = "~/.config/cask/packs"
default_target_dir = "~"
parser = argparse.ArgumentParser()
actions = parser.add_mutually_exclusive_group()
actions.add_argument('--version', action='store_true',
help='Display version')
actions.add_argument('--bootstrap', action='store_true',
help='run bootstrap test')
parser.add_argument('command', nargs='?', help='Command to run: list, query, install')
parser.add_argument('-v', '--verbose', action='count', default=0,
help='be verbose')
parser.add_argument('-d', '--dryrun', action='store_true',
help='run in test mode, nothing is installed')
parser.add_argument('-s', '--source', action='store',
default=default_packs_dir,
help='override directory in which to look for packages')
parser.add_argument('-t', '--target', action='store',
default=default_target_dir,
help='override directory in which to install packages')
parser.add_argument('package', nargs='?', help='Name of package')
args = parser.parse_args()
verbose = args.verbose
message = Message(sys.stdout, verbose > 0)
if args.bootstrap:
bootstrap = Bootstrap()
if args.verbose:
bootstrap.verbose(message)
else:
verifications = bootstrap.verify_all()
if not verifications[0]:
        message.info('Bootstrap verification failed! Use the verbose flag for more detailed output')
message.major('Errors:')
for error in verifications[1]:
message.minor(error)
else:
        message.info('Bootstrap verification succeeded')
return 0
appinfo = ApplicationInfo()
if args.version:
message.info(appinfo.name())
return 0
if not(args.command or args.package):
message.info("No package specified, use -h or --help for help. Listing of")
message.info("all packages can be done using the 'list' argument.")
return 0
(valid, source) = utils.try_lookup_dir(args.source)
if not valid:
message.error("No such directory: %s" % source)
return 0
message.plain("Looking for packages in: %s" % source)
target = utils.lookup_dir(args.target)
bag = Bag(path.abspath(source))
app = Application(bag, message, args)
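  # Dispatch table: each command name maps to a small adapter that forwards
  # the parsed arguments to the corresponding Application method.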
commands = {}
commands['list'] = lambda bag, message, args: app.list(verbose)
commands['query'] = lambda bag, message, args: app.query(args.package, target)
commands['install'] = lambda bag, message, args: app.install(args.package, target, args.dryrun)
  if not args.command:
message.info("No action specified, use -h or --help for help.")
return 0
cmd = args.command
if cmd not in commands:
message.info('No such command: {:s}'.format(cmd))
return 0
commands[cmd](bag, message, args)
return 0
if __name__ == '__main__':
code = run(sys.argv)
exit(code)
| mit | 1,634,004,693,254,727,000 | 29.663551 | 97 | 0.650716 | false |
singingwolfboy/webhookdb | docs/conf.py | 1 | 7995 | # -*- coding: utf-8 -*-
#
# WebhookDB documentation build configuration file, created by
# sphinx-quickstart on Wed Feb 25 10:08:26 2015.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinxcontrib.autohttp.flask',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'WebhookDB'
copyright = u'2015, David Baumgold'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'WebhookDBdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'WebhookDB.tex', u'WebhookDB Documentation',
u'David Baumgold', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'webhookdb', u'WebhookDB Documentation',
[u'David Baumgold'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'WebhookDB', u'WebhookDB Documentation',
u'David Baumgold', 'WebhookDB', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
intersphinx_mapping = {
'celery': ('http://docs.celeryproject.org/en/latest/', None),
'flask': ('http://flask.pocoo.org/docs/', None),
}
| agpl-3.0 | -2,868,988,720,259,349,000 | 30.85259 | 80 | 0.702564 | false |
batermj/algorithm-challenger | code-analysis/programming_anguage/python/source_codes/Python3.8.0/Python-3.8.0/Tools/scripts/pep384_macrocheck.py | 4 | 4720 | """
pep384_macrocheck.py
This program tries to locate errors in the relevant Python header
files where macros access type fields when they are reachable from
the limited API.
The idea is to search for macros with the string "->tp_" in them.
When the macro name does not begin with an underscore,
then we have found a dormant error.
Christian Tismer
2018-06-02
"""
import sys
import os
import re
DEBUG = False
def dprint(*args, **kw):
if DEBUG:
print(*args, **kw)
def parse_headerfiles(startpath):
"""
    Scan all header files which are reachable from Python.h
"""
search = "Python.h"
name = os.path.join(startpath, search)
if not os.path.exists(name):
raise ValueError("file {} was not found in {}\n"
"Please give the path to Python's include directory."
.format(search, startpath))
errors = 0
with open(name) as python_h:
while True:
line = python_h.readline()
if not line:
break
found = re.match(r'^\s*#\s*include\s*"(\w+\.h)"', line)
if not found:
continue
include = found.group(1)
dprint("Scanning", include)
name = os.path.join(startpath, include)
if not os.path.exists(name):
name = os.path.join(startpath, "../PC", include)
errors += parse_file(name)
return errors
def ifdef_level_gen():
"""
Scan lines for #ifdef and track the level.
"""
level = 0
ifdef_pattern = r"^\s*#\s*if" # covers ifdef and ifndef as well
endif_pattern = r"^\s*#\s*endif"
while True:
line = yield level
if re.match(ifdef_pattern, line):
level += 1
elif re.match(endif_pattern, line):
level -= 1
def limited_gen():
"""
Scan lines for Py_LIMITED_API yes(1) no(-1) or nothing (0)
"""
limited = [0] # nothing
unlimited_pattern = r"^\s*#\s*ifndef\s+Py_LIMITED_API"
limited_pattern = "|".join([
r"^\s*#\s*ifdef\s+Py_LIMITED_API",
r"^\s*#\s*(el)?if\s+!\s*defined\s*\(\s*Py_LIMITED_API\s*\)\s*\|\|",
r"^\s*#\s*(el)?if\s+defined\s*\(\s*Py_LIMITED_API"
])
else_pattern = r"^\s*#\s*else"
ifdef_level = ifdef_level_gen()
status = next(ifdef_level)
wait_for = -1
while True:
line = yield limited[-1]
new_status = ifdef_level.send(line)
dir = new_status - status
status = new_status
if dir == 1:
if re.match(unlimited_pattern, line):
limited.append(-1)
wait_for = status - 1
elif re.match(limited_pattern, line):
limited.append(1)
wait_for = status - 1
elif dir == -1:
# this must have been an endif
if status == wait_for:
limited.pop()
wait_for = -1
else:
# it could be that we have an elif
if re.match(limited_pattern, line):
limited.append(1)
wait_for = status - 1
elif re.match(else_pattern, line):
limited.append(-limited.pop()) # negate top
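# Both generators above are used as coroutines: parse_file() primes them with
# next() and then feeds one source line at a time via send(), reading back the
# current #ifdef nesting level / Py_LIMITED_API status after each line.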
def parse_file(fname):
errors = 0
with open(fname) as f:
lines = f.readlines()
type_pattern = r"^.*?->\s*tp_"
define_pattern = r"^\s*#\s*define\s+(\w+)"
limited = limited_gen()
status = next(limited)
for nr, line in enumerate(lines):
status = limited.send(line)
line = line.rstrip()
dprint(fname, nr, status, line)
if status != -1:
if re.match(define_pattern, line):
name = re.match(define_pattern, line).group(1)
if not name.startswith("_"):
# found a candidate, check it!
macro = line + "\n"
idx = nr
while line.endswith("\\"):
idx += 1
line = lines[idx].rstrip()
macro += line + "\n"
if re.match(type_pattern, macro, re.DOTALL):
# this type field can reach the limited API
report(fname, nr + 1, macro)
errors += 1
return errors
def report(fname, nr, macro):
f = sys.stderr
print(fname + ":" + str(nr), file=f)
print(macro, file=f)
if __name__ == "__main__":
p = sys.argv[1] if sys.argv[1:] else "../../Include"
errors = parse_headerfiles(p)
if errors:
# somehow it makes sense to raise a TypeError :-)
raise TypeError("These {} locations contradict the limited API."
.format(errors))
| apache-2.0 | -1,668,539,338,901,291,500 | 30.891892 | 75 | 0.523517 | false |
KaiSzuttor/espresso | testsuite/python/rotation.py | 1 | 5271 | import numpy as np
import unittest as ut
try:
import scipy.spatial.transform as sst
except ImportError:
pass
import espressomd.rotation
import unittest_decorators as utx
@utx.skipIfUnmetModuleVersionRequirement('scipy', '>=1.4.0')
class TestRotation(ut.TestCase):
"""
Tests for the rotation utility functions.
"""
def setUp(self):
angle = 2.0 * np.pi * np.random.random()
quat = [np.sin(angle / 2.0), np.sin(angle / 2.0),
np.sin(angle / 2.0), np.cos(angle / 2.0)]
self.rotation = sst.Rotation.from_quat(quat)
def test_quat_from_matrix(self):
"""
Compare the calculated quaternion representation with scipy.
"""
v_x = np.array([1.0, 0.0, 0.0])
rotated_vector_ref = self.rotation.apply(v_x)
quat_from_matrix = espressomd.rotation.matrix_to_quat(
self.rotation.as_matrix())
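        # Note (assumption): matrix_to_quat appears to return a scalar-first
        # quaternion (w, x, y, z); np.roll(..., shift=-1) converts it to the
        # scalar-last (x, y, z, w) order expected by scipy's Rotation.from_quat.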
rotated_vector_matrix = sst.Rotation.from_quat(
np.roll(quat_from_matrix, shift=-1)).apply(v_x)
self.assertAlmostEqual(
np.dot(rotated_vector_ref, rotated_vector_matrix), 1.0)
def test_raise_if_improper(self):
"""
Check that an improper rotation matrix as an argument to
:meth:`espressomd.rotation.matrix_to_quat` raises an exception.
"""
matrix = self.rotation.as_matrix()
matrix[[0, 1], :] = matrix[[1, 0], :]
with self.assertRaises(ValueError):
espressomd.rotation.matrix_to_quat(matrix)
def generate_cuboid_positions(rho, dx, dy, dz):
"""
Generate a list of three dimensional mesh positions.
Parameters
----------
rho : :obj:`float`
Samples per unit length.
dx : :obj:`float`
Range in dimension 0.
dy : :obj:`float`
Range in dimension 1.
dz : :obj:`float`
Range in dimension 2.
Returns
-------
array_like of :obj:`float`
        Three-dimensional mesh positions.
"""
xs = np.linspace(-0.5 * dx, 0.5 * dx, int(rho * dx))
ys = np.linspace(-0.5 * dy, 0.5 * dy, int(rho * dy))
zs = np.linspace(-0.5 * dz, 0.5 * dz, int(rho * dz))
return np.vstack(np.meshgrid(xs, ys, zs)).reshape(3, -1).T
def inertia_tensor_cuboid(mass, dx, dy, dz):
"""
Reference values for the inertia tensor of a cuboid.
Parameters
----------
mass : :obj:`float`
Mass of the cuboid.
dx : :obj:`float`
Extension in dimension 0.
dy : :obj:`float`
Extension in dimension 1.
dz : :obj:`float`
Extension in dimension 2.
Returns
-------
array_like of :obj:`float`
Inertia tensor of the cuboid.
Notes
-----
See wikipedia_.
.. _wikipedia: https://en.wikipedia.org/wiki/List_of_moments_of_inertia#List_of_3D_inertia_tensors
"""
return 1. / 12. * mass * \
np.diag([dy**2.0 + dz**2.0, dx**2.0 + dz**2.0, dx**2.0 + dy**2.0])
class TestInertiaTensor(ut.TestCase):
"""
Tests for the inertia tensor utility functions.
"""
@classmethod
def setUpClass(cls):
cls.dx = 1.32
cls.dy = 2.12
cls.dz = 3.23
rho = 5
cls.samples = generate_cuboid_positions(rho, cls.dx, cls.dy, cls.dz)
cls.N_samples = cls.samples.shape[0]
cls.m = 0.5
cls.masses = np.ones(cls.N_samples) * cls.m / cls.N_samples
def test_inertia_tensor(self):
"""
Compare the calculated inertia tensor of a sampled cuboid with the
respective literature values.
"""
np.testing.assert_almost_equal(espressomd.rotation.inertia_tensor(
self.samples, self.masses), inertia_tensor_cuboid(self.m, self.dx, self.dy, self.dz), decimal=1)
def test_right_handedness_eigenvectormatrix(self):
"""
Check that the eigenvectors form a right-handed basis.
"""
_, eigenvectors = espressomd.rotation.diagonalized_inertia_tensor(
self.samples, self.masses)
for i in range(3):
ev = np.roll(eigenvectors, axis=0, shift=i)
np.testing.assert_allclose(
np.cross(ev[0], ev[1]), ev[2], atol=1e-7)
@utx.skipIfUnmetModuleVersionRequirement('scipy', '>1.2.0')
def test_inertia_tensor_rotated_cuboid(self):
"""
        Rotate the samples and check that the principal axes returned by the
        utility function correspond to the rotation matrix.
"""
angle = 2.0 * np.pi * np.random.random()
quat = [np.sin(angle / 2.0), np.sin(angle / 2.0),
np.sin(angle / 2.0), np.cos(angle / 2.0)]
rotation = sst.Rotation.from_quat(quat)
rotated_samples = rotation.apply(self.samples)
_, eigenvectors = espressomd.rotation.diagonalized_inertia_tensor(
rotated_samples, self.masses)
rotated_basis = rotation.apply(np.identity(3))
for i in range(3):
# because there is no particular order in the eigenvalues
# the corresponding eigenvectors are either (anti-) parallel or
# perpendicular to the rotated basis
self.assertAlmostEqual(
abs(abs(np.dot(rotated_basis[i], eigenvectors[i])) - 0.5) - 0.5, 0.0)
if __name__ == "__main__":
ut.main()
| gpl-3.0 | 711,070,109,601,223,000 | 29.824561 | 108 | 0.589262 | false |
yongfuyang/vnpy | vn.trader/ctaAlgo/ctaBase.py | 1 | 5912 | # encoding: UTF-8
'''
This file contains the basic settings, classes and constants used by the CTA module.
'''
from __future__ import division
# Add the vn.trader root directory to the Python path
import sys
sys.path.append('..')
# Constant definitions
# Trade direction types used by the CTA engine
CTAORDER_BUY = u'买开'  # buy to open (open long)
CTAORDER_SELL = u'卖平'  # sell to close (close long)
CTAORDER_SELLTODAY = u'卖平今'  # sell to close today's position
CTAORDER_SELLYESTERDAY = u'卖平昨'  # sell to close yesterday's position
CTAORDER_SHORT = u'卖开'  # sell to open (open short)
CTAORDER_COVER = u'买平'  # buy to close (cover short)
CTAORDER_COVERTODAY = u'买今平'  # buy to close today's position
CTAORDER_COVERYESTERDAY = u'买平昨'  # buy to close yesterday's position
DIRECTION_LONG = u'多'  # long
DIRECTION_SHORT = u'空'  # short
# Local stop order statuses
STOPORDER_WAITING = u'等待中'  # waiting
STOPORDER_CANCELLED = u'已撤销'  # cancelled
STOPORDER_TRIGGERED = u'已触发'  # triggered
# Local stop order prefix
STOPORDERPREFIX = 'CtaStopOrder.'
# Database names
SETTING_DB_NAME = 'VnTrader_Setting_Db'
POSITION_DB_NAME = 'VnTrader_Position_Db'
BARSIZE_DICT = {}
BARSIZE_DICT = {
0 : 'tick',
1 : '1 secs',
2 : '5 secs',
3 : '15 secs',
4 : '30 secs',
5 : '1 min',
6 : '2 mins',
7 : '3 min',
8 : '5 mins',
9 : '15 mins',
10 : '30 mins',
11 : '1 hour',
12 : '1 day'
}
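# Note: these bar-size strings appear to match the barSizeSetting values used
# by the Interactive Brokers API (assumption based on the string format).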
# Cache tables used
# Temporary variables are keyed by barSize
BARSIZE_DFNAME_DICT = {}
BARSIZE_DFNAME_DICT = {
0 : 'df_tick',
1 : 'df_S_Bar',
2 : 'df_S5_Bar',
3 : 'df_S15_Bar',
4 : 'df_S30_Bar',
5 : 'df_M1_Bar',
6 : 'df_M2_Bar',
7 : 'df_M3_Bar',
8 : 'df_M5_Bar',
9 : 'df_M15_Bar',
10 : 'df_M30_Bar',
11 : 'df_H_Bar',
12 : 'df_D_Bar'
}
# Mapping from BARSIZE to local database names
# The database names must stay consistent with ctaBase
BARSIZE_DBNAME_DICT = {}
BARSIZE_DBNAME_DICT = {
0:'VnTrader_Tick_Db',
5:'VnTrader_1Min_Db',
8:'VnTrader_5Min_Db',
9: 'VnTrader_15Min_Db',
10: 'VnTrader_30Min_Db',
11: 'VnTrader_Hour_Db',
12: 'VnTrader_Daily_Db'
}
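# Example lookup: BARSIZE_DBNAME_DICT[5] -> 'VnTrader_1Min_Db', i.e. 1-minute
# bars are stored in the 1-minute database.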
# Database names
SETTING_DB_NAME = 'VnTrader_Setting_Db'
TICK_DB_NAME = 'VnTrader_Tick_Db'
DAILY_DB_NAME = 'VnTrader_Daily_Db'
MINUTE_DB_NAME = 'VnTrader_1Min_Db'  # minute database name, original name: 'VnTrader_1Min_Db'
# Added by the author
HOUR_DB_NAME = 'VnTrader_Hour_Db'
MINUTE5_DB_NAME = 'VnTrader_5Min_Db'
MINUTE15_DB_NAME = 'VnTrader_15Min_Db'
MINUTE30_DB_NAME = 'VnTrader_30Min_Db'
# Engine type, used to distinguish the strategy's runtime environment
ENGINETYPE_BACKTESTING = 'backtesting'  # backtesting
ENGINETYPE_TRADING = 'trading'  # live trading
# Data class definitions used by the CTA engine
from vtConstant import EMPTY_UNICODE, EMPTY_STRING, EMPTY_FLOAT, EMPTY_INT
########################################################################
class StopOrder(object):
"""本地停止单"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
self.vtSymbol = EMPTY_STRING
self.orderType = EMPTY_UNICODE
self.direction = EMPTY_UNICODE
self.offset = EMPTY_UNICODE
self.price = EMPTY_FLOAT
self.volume = EMPTY_INT
        self.strategy = None              # strategy object that placed the stop order
        self.stopOrderID = EMPTY_STRING   # local ID of the stop order
        self.status = EMPTY_STRING        # stop order status
########################################################################
class CtaBarData(object):
"""K线数据"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
        self.vtSymbol = EMPTY_STRING      # vt system symbol
        self.symbol = EMPTY_STRING        # symbol
        self.exchange = EMPTY_STRING      # exchange
        self.open = EMPTY_FLOAT           # OHLC
        self.high = EMPTY_FLOAT
        self.low = EMPTY_FLOAT
        self.close = EMPTY_FLOAT
        self.date = EMPTY_STRING          # bar start date
        self.time = EMPTY_STRING          # bar start time
        self.datetime = None              # Python datetime object
        self.volume = EMPTY_INT           # volume
        self.openInterest = EMPTY_INT     # open interest
########################################################################
class CtaTickData(object):
"""Tick数据"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
        self.vtSymbol = EMPTY_STRING      # vt system symbol
        self.symbol = EMPTY_STRING        # contract symbol
        self.exchange = EMPTY_STRING      # exchange code
        # Trade data
        self.lastPrice = EMPTY_FLOAT      # last traded price
        self.volume = EMPTY_INT           # last traded volume
        self.openInterest = EMPTY_INT     # open interest
        self.upperLimit = EMPTY_FLOAT     # upper price limit
        self.lowerLimit = EMPTY_FLOAT     # lower price limit
        # Tick timestamp
        self.date = EMPTY_STRING          # date
        self.time = EMPTY_STRING          # time
        self.datetime = None              # Python datetime object
        # Five-level order book quotes
self.bidPrice1 = EMPTY_FLOAT
self.bidPrice2 = EMPTY_FLOAT
self.bidPrice3 = EMPTY_FLOAT
self.bidPrice4 = EMPTY_FLOAT
self.bidPrice5 = EMPTY_FLOAT
self.askPrice1 = EMPTY_FLOAT
self.askPrice2 = EMPTY_FLOAT
self.askPrice3 = EMPTY_FLOAT
self.askPrice4 = EMPTY_FLOAT
self.askPrice5 = EMPTY_FLOAT
self.bidVolume1 = EMPTY_INT
self.bidVolume2 = EMPTY_INT
self.bidVolume3 = EMPTY_INT
self.bidVolume4 = EMPTY_INT
self.bidVolume5 = EMPTY_INT
self.askVolume1 = EMPTY_INT
self.askVolume2 = EMPTY_INT
self.askVolume3 = EMPTY_INT
self.askVolume4 = EMPTY_INT
        self.askVolume5 = EMPTY_INT
 | mit | 8,423,452,578,316,974,000 | 25.412935 | 79 | 0.514883 | false |