repo_name (stringlengths 6-100) | path (stringlengths 4-294) | copies (stringlengths 1-5) | size (stringlengths 4-6) | content (stringlengths 606-896k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
BenHenning/oppia
|
core/controllers/profile_test.py
|
14
|
9059
|
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the profile page."""
__author__ = 'Sean Lip'
from core.domain import exp_services
from core.domain import user_services
from core.tests import test_utils
import feconf
class SignupTest(test_utils.GenericTestBase):
def test_signup_page_does_not_have_top_right_menu(self):
self.login(self.EDITOR_EMAIL)
response = self.testapp.get(feconf.SIGNUP_URL)
self.assertEqual(response.status_int, 200)
response.mustcontain(no=['Logout', 'Sign in'])
self.logout()
def test_going_somewhere_else_while_signing_in_logs_user_out(self):
exp_services.load_demo('0')
self.login(self.EDITOR_EMAIL)
response = self.testapp.get(feconf.SIGNUP_URL)
self.assertEqual(response.status_int, 200)
response = self.testapp.get('/create/0')
self.assertEqual(response.status_int, 302)
self.assertIn('Logout', response.headers['location'])
self.assertIn('create', response.headers['location'])
self.logout()
def test_accepting_terms_is_handled_correctly(self):
self.login(self.EDITOR_EMAIL)
response = self.testapp.get(feconf.SIGNUP_URL)
csrf_token = self.get_csrf_token_from_response(response)
response_dict = self.post_json(
feconf.SIGNUP_DATA_URL, {'agreed_to_terms': False},
csrf_token=csrf_token, expect_errors=True, expected_status_int=400)
self.assertEqual(response_dict['code'], 400)
self.assertIn('you will need to accept', response_dict['error'])
response_dict = self.post_json(
feconf.SIGNUP_DATA_URL,
{'agreed_to_terms': 'Hasta la vista!'},
csrf_token=csrf_token, expect_errors=True, expected_status_int=400)
self.assertEqual(response_dict['code'], 400)
self.assertIn('you will need to accept', response_dict['error'])
self.post_json(
feconf.SIGNUP_DATA_URL,
{'agreed_to_terms': True, 'username': 'myusername'},
csrf_token=csrf_token)
self.logout()
def test_username_is_handled_correctly(self):
self.login(self.EDITOR_EMAIL)
response = self.testapp.get(feconf.SIGNUP_URL)
csrf_token = self.get_csrf_token_from_response(response)
response_dict = self.post_json(
feconf.SIGNUP_DATA_URL, {'agreed_to_terms': True},
csrf_token=csrf_token, expect_errors=True, expected_status_int=400)
self.assertEqual(response_dict['code'], 400)
self.assertIn('Empty username supplied', response_dict['error'])
response_dict = self.post_json(
feconf.SIGNUP_DATA_URL,
{'username': '', 'agreed_to_terms': True},
csrf_token=csrf_token, expect_errors=True, expected_status_int=400)
self.assertEqual(response_dict['code'], 400)
self.assertIn('Empty username supplied', response_dict['error'])
response_dict = self.post_json(
feconf.SIGNUP_DATA_URL,
{'username': '!a!', 'agreed_to_terms': True},
csrf_token=csrf_token, expect_errors=True, expected_status_int=400)
self.assertEqual(response_dict['code'], 400)
self.assertIn(
'can only have alphanumeric characters', response_dict['error'])
response_dict = self.post_json(
feconf.SIGNUP_DATA_URL,
{'username': self.UNICODE_TEST_STRING, 'agreed_to_terms': True},
csrf_token=csrf_token, expect_errors=True, expected_status_int=400)
self.assertEqual(response_dict['code'], 400)
self.assertIn(
'can only have alphanumeric characters', response_dict['error'])
response_dict = self.post_json(
feconf.SIGNUP_DATA_URL,
{'username': 'abcde', 'agreed_to_terms': True},
csrf_token=csrf_token)
self.logout()
class UsernameCheckHandlerTests(test_utils.GenericTestBase):
def test_username_check(self):
self.signup('[email protected]', username='abc')
self.login(self.EDITOR_EMAIL)
response = self.testapp.get(feconf.SIGNUP_URL)
csrf_token = self.get_csrf_token_from_response(response)
response_dict = self.post_json(
feconf.USERNAME_CHECK_DATA_URL, {'username': 'abc'},
csrf_token=csrf_token)
self.assertEqual(response_dict, {
'username_is_taken': True
})
response_dict = self.post_json(
feconf.USERNAME_CHECK_DATA_URL, {'username': 'def'},
csrf_token=csrf_token)
self.assertEqual(response_dict, {
'username_is_taken': False
})
response_dict = self.post_json(
feconf.USERNAME_CHECK_DATA_URL, {'username': '!!!INVALID!!!'},
csrf_token=csrf_token, expect_errors=True, expected_status_int=400)
self.assertEqual(response_dict['code'], 400)
self.assertIn(
'can only have alphanumeric characters', response_dict['error'])
response_dict = self.post_json(
feconf.USERNAME_CHECK_DATA_URL,
{'username': self.UNICODE_TEST_STRING},
csrf_token=csrf_token, expect_errors=True, expected_status_int=400)
self.assertEqual(response_dict['code'], 400)
self.assertIn(
'can only have alphanumeric characters', response_dict['error'])
self.logout()
class EmailPreferencesTests(test_utils.GenericTestBase):
def test_user_not_setting_email_prefs_on_signup(self):
self.login(self.EDITOR_EMAIL)
response = self.testapp.get(feconf.SIGNUP_URL)
csrf_token = self.get_csrf_token_from_response(response)
self.post_json(
feconf.SIGNUP_DATA_URL,
{'username': 'abc', 'agreed_to_terms': True},
csrf_token=csrf_token)
# The email update preference should be whatever the setting in feconf
# is.
self.EDITOR_ID = self.get_user_id_from_email(self.EDITOR_EMAIL)
with self.swap(feconf, 'DEFAULT_EMAIL_UPDATES_PREFERENCE', True):
self.assertEqual(
user_services.get_email_preferences(self.EDITOR_ID),
{'can_receive_email_updates': True})
with self.swap(feconf, 'DEFAULT_EMAIL_UPDATES_PREFERENCE', False):
self.assertEqual(
user_services.get_email_preferences(self.EDITOR_ID),
{'can_receive_email_updates': False})
def test_user_allowing_emails_on_signup(self):
self.login(self.EDITOR_EMAIL)
response = self.testapp.get(feconf.SIGNUP_URL)
csrf_token = self.get_csrf_token_from_response(response)
self.post_json(
feconf.SIGNUP_DATA_URL,
{'username': 'abc', 'agreed_to_terms': True,
'can_receive_email_updates': True},
csrf_token=csrf_token)
# The email update preference should be True in all cases.
self.EDITOR_ID = self.get_user_id_from_email(self.EDITOR_EMAIL)
with self.swap(feconf, 'DEFAULT_EMAIL_UPDATES_PREFERENCE', True):
self.assertEqual(
user_services.get_email_preferences(self.EDITOR_ID),
{'can_receive_email_updates': True})
with self.swap(feconf, 'DEFAULT_EMAIL_UPDATES_PREFERENCE', False):
self.assertEqual(
user_services.get_email_preferences(self.EDITOR_ID),
{'can_receive_email_updates': True})
def test_user_disallowing_emails_on_signup(self):
self.login(self.EDITOR_EMAIL)
response = self.testapp.get(feconf.SIGNUP_URL)
csrf_token = self.get_csrf_token_from_response(response)
self.post_json(
feconf.SIGNUP_DATA_URL,
{'username': 'abc', 'agreed_to_terms': True,
'can_receive_email_updates': False},
csrf_token=csrf_token)
# The email update preference should be False in all cases.
self.EDITOR_ID = self.get_user_id_from_email(self.EDITOR_EMAIL)
with self.swap(feconf, 'DEFAULT_EMAIL_UPDATES_PREFERENCE', True):
self.assertEqual(
user_services.get_email_preferences(self.EDITOR_ID),
{'can_receive_email_updates': False})
with self.swap(feconf, 'DEFAULT_EMAIL_UPDATES_PREFERENCE', False):
self.assertEqual(
user_services.get_email_preferences(self.EDITOR_ID),
{'can_receive_email_updates': False})
|
apache-2.0
|
Maspear/odoo
|
addons/website_certification/__openerp__.py
|
320
|
1562
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Certified People',
'category': 'Website',
'website': 'https://www.odoo.com/page/website-builder',
'summary': 'Display your network of certified people on your website',
'version': '1.0',
'author': 'OpenERP S.A.',
'depends': ['marketing', 'website'],
'description': """
Display your network of certified people on your website
""",
'data': [
'security/ir.model.access.csv',
'views/website_certification_views.xml',
'views/website_certification_templates.xml',
],
'installable': True,
}
|
agpl-3.0
|
NazarethCollege/heweb2017-devops-presentation
|
sites/tweetheat/src/backend/vendor/src/github.com/youtube/vitess/py/vtdb/dbapi.py
|
12
|
1628
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from vtdb import dbexceptions
# A simple class to trap and re-export only variables referenced from
# the sql statement since bind dictionaries can be *very* noisy. This
# is a by-product of converting the DB-API %(name)s syntax to our
# :name syntax.
class BindVarsProxy(object):
def __init__(self, bind_vars):
self.bind_vars = bind_vars
self.accessed_keys = set()
def __getitem__(self, name):
var = self.bind_vars[name]
self.accessed_keys.add(name)
if isinstance(var, (list, set, tuple)):
return '::%s' % name
return ':%s' % name
def export_bind_vars(self):
return dict([(k, self.bind_vars[k]) for k in self.accessed_keys])
# convert bind style from %(name)s to :name and export only the
# variables bound.
def prepare_query_bind_vars(query, bind_vars):
bind_vars_proxy = BindVarsProxy(bind_vars)
try:
query %= bind_vars_proxy
except KeyError as e:
raise dbexceptions.InterfaceError(e[0], query, bind_vars)
return query, bind_vars_proxy.export_bind_vars()
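A minimal usage sketch (editor-added, not part of the original vitess file) showing how prepare_query_bind_vars rewrites DB-API style placeholders; the query text and bind values below are made-up examples:
# Hypothetical example: %(name)s placeholders become :name, and only the
# variables the query actually references are exported.
query, bound = prepare_query_bind_vars(
"SELECT * FROM users WHERE id = %(user_id)s",
{"user_id": 42, "unused": "never referenced"})
# query == "SELECT * FROM users WHERE id = :user_id"
# bound == {"user_id": 42}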
|
mit
|
Eficent/odoomrp-wip
|
mrp_bom_component_change/models/mrp_bom_change.py
|
10
|
3058
|
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
from openerp import models, fields, api, exceptions, _
from datetime import datetime as dt
class MrpBomChange(models.Model):
_name = 'mrp.bom.change'
_description = 'Mrp BoM Component Change'
@api.one
@api.depends('old_component')
def _calc_boms(self):
self.boms = [(6, 0, [])]
if self.old_component:
for bom in self.env['mrp.bom'].search([]):
bom_lines = bom.bom_line_ids.filtered(
lambda x: x.product_id.id == self.old_component.id)
if bom_lines:
self.boms = [(4, bom.id)]
name = fields.Char('Name', required=True)
new_component = fields.Many2one('product.product', 'New Component',
required=True)
old_component = fields.Many2one('product.product', 'Old Component',
required=True)
create_new_version = fields.Boolean(
string="Create new BoM version", help='Check this field if you want to'
' create a new version of the BOM before modifying the component')
boms = fields.Many2many(
comodel_name='mrp.bom',
relation='rel_mrp_bom_change', column1='bom_change_id',
column2='bom_id', string='BoMs', copy=False, store=True, readonly=True,
compute='_calc_boms')
date = fields.Date('Change Date', readonly=True)
user = fields.Many2one('res.users', 'Changed By', readonly=True)
reason = fields.Char('Reason')
@api.multi
def do_component_change(self):
self.ensure_one()
if not self.old_component or not self.new_component:
raise exceptions.Warning(_("No components selected!"))
if not self.boms:
raise exceptions.Warning(_("There isn't any BoM for the selected "
"component"))
for bom in self.boms:
bom_lines = bom.bom_line_ids.filtered(
lambda x: x.product_id.id == self.old_component.id)
if self.create_new_version:
new_bom = bom._copy_bom()
bom.button_historical()
new_bom.button_activate()
self.boms = [(3, bom.id)]
self.boms = [(4, new_bom.id)]
bom_lines = new_bom.bom_line_ids.filtered(
lambda x: x.product_id.id == self.old_component.id)
bom_lines.write({'product_id': self.new_component.id})
self.write({'date': dt.now(), 'user': self.env.uid})
return {'name': _('Bill of Material'),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'mrp.bom',
'type': 'ir.actions.act_window',
'domain': [('id', 'in', self.boms.mapped('id'))]
}
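Editor's note (not part of the original module): the tuples written to self.boms above are the standard OpenERP/Odoo x2many write commands, which the ORM interprets as follows.
# (6, 0, ids)  replace the whole relation with the given list of ids
# (4, id)      link an existing record into the relation
# (3, id)      remove a record from the relation without deleting it
# So _calc_boms first clears the relation and then links every BoM that
# contains the old component, while do_component_change swaps a historical
# BoM for its newly created version using (3, ...) followed by (4, ...).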
|
agpl-3.0
|
WholeGrainGoats/servo
|
tests/unit/net/cookie_http_state_utils.py
|
111
|
5965
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import subprocess
import tempfile
REPO = "https://github.com/abarth/http-state.git"
TEST_FILE = "cookie_http_state.rs"
DOMAIN = "http://home.example.org:8888"
RUST_FN = """
#[test]{should_panic}
fn test_{name}() {{
let r = run("{set_location}",
{set_cookies},
"{location}");
assert_eq!(&r, "{expect}");
}}
"""
SET_COOKIES_INDENT = 18
SHOULD_PANIC = "\n#[should_panic] // Look at cookie_http_state_utils.py if this test fails"
# These tests should PASS, but until the fixes land in servo, keep them failing
FAILING_TESTS = [
"0003", # Waiting for a way to clean expired cookies
"0006", # Waiting for a way to clean expired cookies
"mozilla0001", # Waiting for a way to clean expired cookies
"mozilla0002", # Waiting for a way to clean expired cookies
"mozilla0003", # Waiting for a way to clean expired cookies
"mozilla0005", # Waiting for a way to clean expired cookies
"mozilla0007", # Waiting for a way to clean expired cookies
"mozilla0009", # Waiting for a way to clean expired cookies
"mozilla0010", # Waiting for a way to clean expired cookies
"mozilla0013", # Waiting for a way to clean expired cookies
]
def list_tests(dir):
suffix = "-test"
def keep(name):
return name.endswith(suffix) and not name.startswith("disabled")
tests = [name[:-len(suffix)] for name in os.listdir(dir) if keep(name)]
tests.sort()
return tests
def escape(s):
""" Escape the string `s` so that it can be parsed by rust as a valid
UTF-8 string.
We can't use only `encode("unicode_escape")`, as it produces things that
rust does not accept ("\\xbf" and "\\u6265", for example). So we manually
convert every character whose code point is 128 or greater to
\\u{code_point}.
All other characters are encoded with "unicode_escape" to get escape
sequences ("\\r", for example), except for `"`, which we escape explicitly
because our string will be quoted by double-quotes.
Lines are also limited in size, so the string is split every 70 characters
(which leaves room for indentation).
"""
res = ""
last_split = 0
for c in s:
if len(res) - last_split > 70:
res += "\\\n"
last_split = len(res)
o = ord(c)
if o == 34:
res += "\\\""
continue
if o >= 128:
res += "\\u{" + hex(o)[2:] + "}"
else:
res += c.encode("unicode_escape")
return res
def format_slice_cookies(cookies):
esc_cookies = ['"%s"' % escape(c) for c in cookies]
if sum(len(s) for s in esc_cookies) < 80:
sep = ", "
else:
sep = ",\n" + " " * SET_COOKIES_INDENT
return "&[" + sep.join(esc_cookies) + "]"
def generate_code_for_test(test_dir, name):
if name in FAILING_TESTS:
should_panic = SHOULD_PANIC
else:
should_panic = ""
test_file = os.path.join(test_dir, name + "-test")
expect_file = os.path.join(test_dir, name + "-expected")
set_cookies = []
set_location = DOMAIN + "/cookie-parser?" + name
expect = ""
location = DOMAIN + "/cookie-parser-result?" + name
with open(test_file) as fo:
for line in fo:
line = line.decode("utf-8").rstrip()
prefix = "Set-Cookie: "
if line.startswith(prefix):
set_cookies.append(line[len(prefix):])
prefix = "Location: "
if line.startswith(prefix):
location = line[len(prefix):]
if location.startswith("/"):
location = DOMAIN + location
with open(expect_file) as fo:
for line in fo:
line = line.decode("utf-8").rstrip()
prefix = "Cookie: "
if line.startswith(prefix):
expect = line[len(prefix):]
return RUST_FN.format(name=name.replace('-', '_'),
set_location=escape(set_location),
set_cookies=format_slice_cookies(set_cookies),
should_panic=should_panic,
location=escape(location),
expect=escape(expect))
def update_test_file(cachedir):
workdir = os.path.dirname(os.path.realpath(__file__))
test_file = os.path.join(workdir, TEST_FILE)
# Create the cache dir
if not os.path.isdir(cachedir):
os.makedirs(cachedir)
# Clone or update the repo
repo_dir = os.path.join(cachedir, "http-state")
if os.path.isdir(repo_dir):
args = ["git", "pull", "-f"]
process = subprocess.Popen(args, cwd=repo_dir)
if process.wait() != 0:
print("failed to update the http-state git repo")
return 1
else:
args = ["git", "clone", REPO, repo_dir]
process = subprocess.Popen(args)
if process.wait() != 0:
print("failed to clone the http-state git repo")
return 1
# Truncate the unit test file to remove all existing tests
with open(test_file, "r+") as fo:
while True:
line = fo.readline()
if line.strip() == "// Test listing":
fo.truncate()
fo.flush()
break
if line == "":
print("Failed to find listing delimiter on unit test file")
return 1
# Append all tests to unit test file
tests_dir = os.path.join(repo_dir, "tests", "data", "parser")
with open(test_file, "a") as fo:
for test in list_tests(tests_dir):
fo.write(generate_code_for_test(tests_dir, test).encode("utf-8"))
return 0
if __name__ == "__main__":
update_test_file(tempfile.gettempdir())
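A brief illustration (editor-added, not part of the original script) of what escape() produces, assuming the Python 2 string semantics the script is written for:
# escape(u'say "hi"\r')  ->  'say \\"hi\\"\\r'   (quotes and control chars escaped)
# escape(u'\u00e9')      ->  '\\u{e9}'           (code points >= 128 become \u{...})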
|
mpl-2.0
|
manuelm/pyload
|
module/plugins/hooks/WindowsPhoneNotify.py
|
5
|
2885
|
# -*- coding: utf-8 -*-
import httplib
import time
from module.plugins.internal.Notifier import Notifier
class WindowsPhoneNotify(Notifier):
__name__ = "WindowsPhoneNotify"
__type__ = "hook"
__version__ = "0.17"
__status__ = "testing"
__config__ = [("activated" , "bool", "Activated" , False),
("pushid" , "str" , "Push ID" , "" ),
("pushurl" , "str" , "Push url" , "" ),
("captcha" , "bool", "Notify captcha request" , True ),
("reconnection" , "bool", "Notify reconnection request" , False),
("downloadfinished", "bool", "Notify download finished" , True ),
("downloadfailed" , "bool", "Notify download failed" , True ),
("packagefinished" , "bool", "Notify package finished" , True ),
("packagefailed" , "bool", "Notify package failed" , True ),
("update" , "bool", "Notify pyLoad update" , False),
("exit" , "bool", "Notify pyLoad shutdown/restart" , False),
("sendinterval" , "int" , "Interval in seconds between notifications", 1 ),
("sendpermin" , "int" , "Max notifications per minute" , 60 ),
("ignoreclient" , "bool", "Send notifications if client is connected", True )]
__description__ = """Send push notifications to Windows Phone"""
__license__ = "GPLv3"
__authors__ = [("Andy Voigt" , "[email protected]"),
("Walter Purcaro", "[email protected]" )]
def get_key(self):
return self.config.get('pushid'), self.config.get('pushurl')
def format_request(self, msg):
return ("<?xml version='1.0' encoding='utf-8'?> <wp:Notification xmlns:wp='WPNotification'> "
"<wp:Toast> <wp:Text1>pyLoad</wp:Text1> <wp:Text2>%s</wp:Text2> "
"</wp:Toast> </wp:Notification>" % msg)
def send(self, event, msg, key):
id, url = key
request = self.format_request("%s: %s" % (event, msg) if msg else event)
webservice = httplib.HTTP(url)
webservice.putrequest("POST", id)
webservice.putheader("Host", url)
webservice.putheader("Content-type", "text/xml")
webservice.putheader("X-NotificationClass", "2")
webservice.putheader("X-WindowsPhone-Target", "toast")
webservice.putheader("Content-length", "%d" % len(request))
webservice.endheaders()
webservice.send(request)
webservice.close()
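An editor-added sketch (not part of the original plugin) of the MPNS toast payload that send() posts, with the %s placeholder of format_request filled in for a hypothetical message:
# format_request("Download finished: example.zip") returns (as one line):
# <?xml version='1.0' encoding='utf-8'?> <wp:Notification xmlns:wp='WPNotification'>
# <wp:Toast> <wp:Text1>pyLoad</wp:Text1> <wp:Text2>Download finished: example.zip</wp:Text2>
# </wp:Toast> </wp:Notification>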
|
gpl-3.0
|
GustavoRD78/78Kernel-ZL-283
|
Documentation/target/tcm_mod_builder.py
|
4981
|
41422
|
#!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: [email protected]
#
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
print msg
sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
if os.path.isdir(fabric_mod_dir_var) == True:
return 1
print "Creating fabric_mod_dir: " + fabric_mod_dir_var
ret = os.mkdir(fabric_mod_dir_var)
if ret:
tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
buf += " u64 nport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* FC lport target portal group tag for TCM */\n"
buf += " u16 lport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
buf += " struct " + fabric_mod_name + "_lport *lport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_lport {\n"
buf += " /* SCSI protocol the lport is providing */\n"
buf += " u8 lport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
buf += " u64 lport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
buf += " struct se_wwn lport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "lport"
fabric_mod_init_port = "nport"
return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n"
buf += " u64 iport_wwpn;\n"
buf += " /* ASCII formatted WWPN for Sas Initiator port */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* SAS port target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
buf += " u64 tport_wwpn;\n"
buf += " /* ASCII formatted WWPN for SAS Target port */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* ASCII formatted InitiatorName */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* iSCSI target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* ASCII formatted TargetName for IQN */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
if proto_ident == "FC":
tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "SAS":
tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "iSCSI":
tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
else:
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#include <linux/module.h>\n"
buf += "#include <linux/moduleparam.h>\n"
buf += "#include <linux/version.h>\n"
buf += "#include <generated/utsrelease.h>\n"
buf += "#include <linux/utsname.h>\n"
buf += "#include <linux/init.h>\n"
buf += "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
buf += "#include <target/target_core_configfs.h>\n"
buf += "#include <target/configfs_macros.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n"
buf += " u32 nexus_depth;\n\n"
buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n"
buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
buf += " if (!se_nacl_new)\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
buf += " nexus_depth = 1;\n"
buf += " /*\n"
buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
buf += " * when converting a NodeACL from demo mode -> explict\n"
buf += " */\n"
buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
buf += " name, nexus_depth);\n"
buf += " if (IS_ERR(se_nacl)) {\n"
buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
buf += " return se_nacl;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
buf += " */\n"
buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return se_nacl;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
buf += " struct se_wwn *wwn,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
buf += " unsigned long tpgt;\n"
buf += " int ret;\n\n"
buf += " if (strstr(name, \"tpgt_\") != name)\n"
buf += " return ERR_PTR(-EINVAL);\n"
buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
buf += " if (!tpg) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
buf += " &tpg->se_tpg, (void *)tpg,\n"
buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
buf += " }\n"
buf += " return &tpg->se_tpg;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
buf += " core_tpg_deregister(se_tpg);\n"
buf += " kfree(tpg);\n"
buf += "}\n\n"
buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n\n"
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
buf += " if (!" + fabric_mod_port + ") {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
buf += " kfree(" + fabric_mod_port + ");\n"
buf += "}\n\n"
buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " char *page)\n"
buf += "{\n"
buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += "}\n\n"
buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
buf += " NULL,\n"
buf += "};\n\n"
buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n"
buf += " .set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n"
buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += " .fabric_post_link = NULL,\n"
buf += " .fabric_pre_unlink = NULL,\n"
buf += " .fabric_make_np = NULL,\n"
buf += " .fabric_drop_np = NULL,\n"
buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
buf += "};\n\n"
buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
buf += "{\n"
buf += " struct target_fabric_configfs *fabric;\n"
buf += " int ret;\n\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += " /*\n"
buf += " * Register the top level struct config_item_type with TCM core\n"
buf += " */\n"
buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
buf += " if (IS_ERR(fabric)) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
buf += " return PTR_ERR(fabric);\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
buf += " */\n"
buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
buf += " /*\n"
buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
buf += " */\n"
buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
buf += " /*\n"
buf += " * Register the fabric for use within TCM\n"
buf += " */\n"
buf += " ret = target_fabric_configfs_register(fabric);\n"
buf += " if (ret < 0) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
buf += " return ret;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup our local pointer to *fabric\n"
buf += " */\n"
buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n"
buf += "{\n"
buf += " if (!" + fabric_mod_name + "_fabric_configfs)\n"
buf += " return;\n\n"
buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
buf += " int ret;\n\n"
buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
buf += " if (ret < 0)\n"
buf += " return ret;\n\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " " + fabric_mod_name + "_deregister_configfs();\n"
buf += "};\n\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_scan_fabric_ops(tcm_dir):
fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
process_fo = 0;
p = open(fabric_ops_api, 'r')
line = p.readline()
while line:
if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
line = p.readline()
continue
if process_fo == 0:
process_fo = 1;
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
continue
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
p.close()
return
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
bufi = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
print "Writing file: " + fi
pi = open(fi, 'w')
if not pi:
tcm_mod_err("Unable to open file: " + fi)
buf = "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/list.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi.h>\n"
buf += "#include <scsi/scsi_host.h>\n"
buf += "#include <scsi/scsi_device.h>\n"
buf += "#include <scsi/scsi_cmnd.h>\n"
buf += "#include <scsi/libfc.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_configfs.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
total_fabric_ops = len(fabric_ops)
i = 0
while i < total_fabric_ops:
fo = fabric_ops[i]
i += 1
# print "fabric_ops: " + fo
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
buf += " return \"" + fabric_mod_name[4:] + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
if re.search('get_fabric_proto_ident', fo):
buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " u8 proto_id;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return proto_id;\n"
buf += "}\n\n"
bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
if re.search('get_wwn', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
if re.search('get_tag', fo):
buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
if re.search('get_default_depth', fo):
buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
if re.search('get_pr_transport_id\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code,\n"
buf += " unsigned char *buf)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *, unsigned char *);\n"
if re.search('get_pr_transport_id_len\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *);\n"
if re.search('parse_pr_out_transport_id\)\(', fo):
buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " const char *buf,\n"
buf += " u32 *out_tid_len,\n"
buf += " char **port_nexus_ptr)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " char *tid = NULL;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
buf += " }\n\n"
buf += " return tid;\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
bufi += " const char *, u32 *, char **);\n"
if re.search('alloc_fabric_acl\)\(', fo):
buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
buf += " if (!nacl) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
buf += " return NULL;\n"
buf += " }\n\n"
buf += " return &nacl->se_node_acl;\n"
buf += "}\n\n"
bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
if re.search('release_fabric_acl\)\(', fo):
buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
bufi += " struct se_node_acl *);\n"
if re.search('tpg_get_inst_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
if re.search('\*release_cmd\)\(', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
if re.search('shutdown_session\)\(', fo):
buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
if re.search('close_session\)\(', fo):
buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
if re.search('stop_session\)\(', fo):
buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
if re.search('fall_back_to_erl0\)\(', fo):
buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
if re.search('sess_logged_in\)\(', fo):
buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
if re.search('write_pending\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
if re.search('write_pending_status\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
if re.search('set_default_node_attributes\)\(', fo):
buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
if re.search('get_task_tag\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
if re.search('get_cmd_state\)\(', fo):
buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
if re.search('queue_status\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
if re.search('get_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n"
if re.search('set_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n"
if re.search('is_state_remove\)\(', fo):
buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
ret = pi.write(bufi)
if ret:
tcm_mod_err("Unable to write fi: " + fi)
pi.close()
return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Makefile"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
buf += " " + fabric_mod_name + "_configfs.o\n"
buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
kbuild = tcm_dir + "/drivers/target/Makefile"
f = open(kbuild, 'a')
f.write(buf)
f.close()
return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
kconfig = tcm_dir + "/drivers/target/Kconfig"
f = open(kconfig, 'a')
f.write(buf)
f.close()
return
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
tcm_dir = os.getcwd()
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
mandatories = ['modname', 'protoident']
for m in mandatories:
if not opts.__dict__[m]:
print "mandatory option is missing\n"
parser.print_help()
exit(-1)
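# Hedged usage sketch (not part of the original script): with the -m/--modulename and
# -p/--protoident options defined above, an invocation could look like
#   python <this_script>.py -m tcm_fakefabric -p iSCSI
# where the script and fabric module names are placeholders, and proto_ident must be
# one of "FC", "SAS" or "iSCSI" as checked in main().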
if __name__ == "__main__":
main(str(opts.modname), opts.protoident)
|
gpl-2.0
|
krishna-pandey-git/django
|
tests/field_deconstruction/tests.py
|
69
|
18115
|
from __future__ import unicode_literals
from django.db import models
from django.test import SimpleTestCase, override_settings
from django.utils import six
class FieldDeconstructionTests(SimpleTestCase):
"""
Tests the deconstruct() method on all core fields.
"""
def test_name(self):
"""
Tests the outputting of the correct name if assigned one.
"""
# First try using a "normal" field
field = models.CharField(max_length=65)
name, path, args, kwargs = field.deconstruct()
self.assertIsNone(name)
field.set_attributes_from_name("is_awesome_test")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(name, "is_awesome_test")
self.assertIsInstance(name, six.text_type)
# Now try with a ForeignKey
field = models.ForeignKey("some_fake.ModelName", models.CASCADE)
name, path, args, kwargs = field.deconstruct()
self.assertIsNone(name)
field.set_attributes_from_name("author")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(name, "author")
def test_auto_field(self):
field = models.AutoField(primary_key=True)
field.set_attributes_from_name("id")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.AutoField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"primary_key": True})
def test_big_integer_field(self):
field = models.BigIntegerField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.BigIntegerField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_boolean_field(self):
field = models.BooleanField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.BooleanField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
field = models.BooleanField(default=True)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.BooleanField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"default": True})
def test_char_field(self):
field = models.CharField(max_length=65)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.CharField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"max_length": 65})
field = models.CharField(max_length=65, null=True, blank=True)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.CharField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"max_length": 65, "null": True, "blank": True})
def test_char_field_choices(self):
field = models.CharField(max_length=1, choices=(("A", "One"), ("B", "Two")))
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.CharField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"choices": [("A", "One"), ("B", "Two")], "max_length": 1})
def test_csi_field(self):
field = models.CommaSeparatedIntegerField(max_length=100)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.CommaSeparatedIntegerField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"max_length": 100})
def test_date_field(self):
field = models.DateField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.DateField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
field = models.DateField(auto_now=True)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.DateField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"auto_now": True})
def test_datetime_field(self):
field = models.DateTimeField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.DateTimeField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
field = models.DateTimeField(auto_now_add=True)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.DateTimeField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"auto_now_add": True})
# Bug #21785
field = models.DateTimeField(auto_now=True, auto_now_add=True)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.DateTimeField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"auto_now_add": True, "auto_now": True})
def test_decimal_field(self):
field = models.DecimalField(max_digits=5, decimal_places=2)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.DecimalField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"max_digits": 5, "decimal_places": 2})
def test_decimal_field_0_decimal_places(self):
"""
A DecimalField with decimal_places=0 should work (#22272).
"""
field = models.DecimalField(max_digits=5, decimal_places=0)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.DecimalField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"max_digits": 5, "decimal_places": 0})
def test_email_field(self):
field = models.EmailField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.EmailField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"max_length": 254})
field = models.EmailField(max_length=255)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.EmailField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"max_length": 255})
def test_file_field(self):
field = models.FileField(upload_to="foo/bar")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.FileField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"upload_to": "foo/bar"})
# Test max_length
field = models.FileField(upload_to="foo/bar", max_length=200)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.FileField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"upload_to": "foo/bar", "max_length": 200})
def test_file_path_field(self):
field = models.FilePathField(match=".*\.txt$")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.FilePathField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"match": ".*\.txt$"})
field = models.FilePathField(recursive=True, allow_folders=True, max_length=123)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.FilePathField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"recursive": True, "allow_folders": True, "max_length": 123})
def test_float_field(self):
field = models.FloatField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.FloatField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_foreign_key(self):
# Test basic pointing
from django.contrib.auth.models import Permission
field = models.ForeignKey("auth.Permission", models.CASCADE)
field.remote_field.model = Permission
field.remote_field.field_name = "id"
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ForeignKey")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.Permission", "on_delete": models.CASCADE})
self.assertFalse(hasattr(kwargs['to'], "setting_name"))
# Test swap detection for swappable model
field = models.ForeignKey("auth.User", models.CASCADE)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ForeignKey")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.User", "on_delete": models.CASCADE})
self.assertEqual(kwargs['to'].setting_name, "AUTH_USER_MODEL")
# Test nonexistent (for now) model
field = models.ForeignKey("something.Else", models.CASCADE)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ForeignKey")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "something.Else", "on_delete": models.CASCADE})
# Test on_delete
field = models.ForeignKey("auth.User", models.SET_NULL)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ForeignKey")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.User", "on_delete": models.SET_NULL})
# Test to_field preservation
field = models.ForeignKey("auth.Permission", models.CASCADE, to_field="foobar")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ForeignKey")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.Permission", "to_field": "foobar", "on_delete": models.CASCADE})
# Test related_name preservation
field = models.ForeignKey("auth.Permission", models.CASCADE, related_name="foobar")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ForeignKey")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.Permission", "related_name": "foobar", "on_delete": models.CASCADE})
@override_settings(AUTH_USER_MODEL="auth.Permission")
def test_foreign_key_swapped(self):
# It doesn't matter that we swapped out user for permission;
# there's no validation. We just want to check the setting stuff works.
field = models.ForeignKey("auth.Permission", models.CASCADE)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ForeignKey")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.Permission", "on_delete": models.CASCADE})
self.assertEqual(kwargs['to'].setting_name, "AUTH_USER_MODEL")
def test_image_field(self):
field = models.ImageField(upload_to="foo/barness", width_field="width", height_field="height")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ImageField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"upload_to": "foo/barness", "width_field": "width", "height_field": "height"})
def test_integer_field(self):
field = models.IntegerField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.IntegerField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_ip_address_field(self):
field = models.IPAddressField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.IPAddressField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_generic_ip_address_field(self):
field = models.GenericIPAddressField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.GenericIPAddressField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
field = models.GenericIPAddressField(protocol="IPv6")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.GenericIPAddressField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"protocol": "IPv6"})
def test_many_to_many_field(self):
# Test normal
field = models.ManyToManyField("auth.Permission")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ManyToManyField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.Permission"})
self.assertFalse(hasattr(kwargs['to'], "setting_name"))
# Test swappable
field = models.ManyToManyField("auth.User")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ManyToManyField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.User"})
self.assertEqual(kwargs['to'].setting_name, "AUTH_USER_MODEL")
# Test through
field = models.ManyToManyField("auth.Permission", through="auth.Group")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ManyToManyField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.Permission", "through": "auth.Group"})
# Test custom db_table
field = models.ManyToManyField("auth.Permission", db_table="custom_table")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ManyToManyField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.Permission", "db_table": "custom_table"})
# Test related_name
field = models.ManyToManyField("auth.Permission", related_name="custom_table")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ManyToManyField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.Permission", "related_name": "custom_table"})
@override_settings(AUTH_USER_MODEL="auth.Permission")
def test_many_to_many_field_swapped(self):
# It doesn't matter that we swapped out user for permission;
# there's no validation. We just want to check the setting stuff works.
field = models.ManyToManyField("auth.Permission")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ManyToManyField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.Permission"})
self.assertEqual(kwargs['to'].setting_name, "AUTH_USER_MODEL")
def test_null_boolean_field(self):
field = models.NullBooleanField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.NullBooleanField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_positive_integer_field(self):
field = models.PositiveIntegerField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.PositiveIntegerField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_positive_small_integer_field(self):
field = models.PositiveSmallIntegerField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.PositiveSmallIntegerField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_slug_field(self):
field = models.SlugField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.SlugField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
field = models.SlugField(db_index=False, max_length=231)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.SlugField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"db_index": False, "max_length": 231})
def test_small_integer_field(self):
field = models.SmallIntegerField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.SmallIntegerField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_text_field(self):
field = models.TextField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.TextField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_time_field(self):
field = models.TimeField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.TimeField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
field = models.TimeField(auto_now=True)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(args, [])
self.assertEqual(kwargs, {'auto_now': True})
field = models.TimeField(auto_now_add=True)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(args, [])
self.assertEqual(kwargs, {'auto_now_add': True})
def test_url_field(self):
field = models.URLField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.URLField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
field = models.URLField(max_length=231)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.URLField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"max_length": 231})
def test_binary_field(self):
field = models.BinaryField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.BinaryField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
|
bsd-3-clause
|
krishna-pandey-git/django
|
tests/template_tests/syntax_tests/test_if_changed.py
|
162
|
10422
|
from django.template import Context, Engine
from django.test import SimpleTestCase
from ..utils import setup
class IfChangedTagTests(SimpleTestCase):
libraries = {'custom': 'template_tests.templatetags.custom'}
@setup({'ifchanged01': '{% for n in num %}{% ifchanged %}{{ n }}{% endifchanged %}{% endfor %}'})
def test_ifchanged01(self):
output = self.engine.render_to_string('ifchanged01', {'num': (1, 2, 3)})
self.assertEqual(output, '123')
@setup({'ifchanged02': '{% for n in num %}{% ifchanged %}{{ n }}{% endifchanged %}{% endfor %}'})
def test_ifchanged02(self):
output = self.engine.render_to_string('ifchanged02', {'num': (1, 1, 3)})
self.assertEqual(output, '13')
@setup({'ifchanged03': '{% for n in num %}{% ifchanged %}{{ n }}{% endifchanged %}{% endfor %}'})
def test_ifchanged03(self):
output = self.engine.render_to_string('ifchanged03', {'num': (1, 1, 1)})
self.assertEqual(output, '1')
@setup({'ifchanged04': '{% for n in num %}{% ifchanged %}{{ n }}{% endifchanged %}'
'{% for x in numx %}{% ifchanged %}{{ x }}{% endifchanged %}'
'{% endfor %}{% endfor %}'})
def test_ifchanged04(self):
output = self.engine.render_to_string('ifchanged04', {'num': (1, 2, 3), 'numx': (2, 2, 2)})
self.assertEqual(output, '122232')
@setup({'ifchanged05': '{% for n in num %}{% ifchanged %}{{ n }}{% endifchanged %}'
'{% for x in numx %}{% ifchanged %}{{ x }}{% endifchanged %}'
'{% endfor %}{% endfor %}'})
def test_ifchanged05(self):
output = self.engine.render_to_string('ifchanged05', {'num': (1, 1, 1), 'numx': (1, 2, 3)})
self.assertEqual(output, '1123123123')
@setup({'ifchanged06': '{% for n in num %}{% ifchanged %}{{ n }}{% endifchanged %}'
'{% for x in numx %}{% ifchanged %}{{ x }}{% endifchanged %}'
'{% endfor %}{% endfor %}'})
def test_ifchanged06(self):
output = self.engine.render_to_string('ifchanged06', {'num': (1, 1, 1), 'numx': (2, 2, 2)})
self.assertEqual(output, '1222')
@setup({'ifchanged07': '{% for n in num %}{% ifchanged %}{{ n }}{% endifchanged %}'
'{% for x in numx %}{% ifchanged %}{{ x }}{% endifchanged %}'
'{% for y in numy %}{% ifchanged %}{{ y }}{% endifchanged %}'
'{% endfor %}{% endfor %}{% endfor %}'})
def test_ifchanged07(self):
output = self.engine.render_to_string('ifchanged07', {'num': (1, 1, 1), 'numx': (2, 2, 2), 'numy': (3, 3, 3)})
self.assertEqual(output, '1233323332333')
@setup({'ifchanged08': '{% for data in datalist %}{% for c,d in data %}'
'{% if c %}{% ifchanged %}{{ d }}{% endifchanged %}'
'{% endif %}{% endfor %}{% endfor %}'})
def test_ifchanged08(self):
output = self.engine.render_to_string('ifchanged08', {'datalist': [
[(1, 'a'), (1, 'a'), (0, 'b'), (1, 'c')],
[(0, 'a'), (1, 'c'), (1, 'd'), (1, 'd'), (0, 'e')]
]})
self.assertEqual(output, 'accd')
@setup({'ifchanged-param01': '{% for n in num %}{% ifchanged n %}..{% endifchanged %}'
'{{ n }}{% endfor %}'})
def test_ifchanged_param01(self):
"""
Test one parameter given to ifchanged.
"""
output = self.engine.render_to_string('ifchanged-param01', {'num': (1, 2, 3)})
self.assertEqual(output, '..1..2..3')
@setup({'ifchanged-param02': '{% for n in num %}{% for x in numx %}{% ifchanged n %}..{% endifchanged %}'
'{{ x }}{% endfor %}{% endfor %}'})
def test_ifchanged_param02(self):
output = self.engine.render_to_string('ifchanged-param02', {'num': (1, 2, 3), 'numx': (5, 6, 7)})
self.assertEqual(output, '..567..567..567')
@setup({'ifchanged-param03': '{% for n in num %}{{ n }}{% for x in numx %}'
'{% ifchanged x n %}{{ x }}{% endifchanged %}'
'{% endfor %}{% endfor %}'})
def test_ifchanged_param03(self):
"""
Test multiple parameters to ifchanged.
"""
output = self.engine.render_to_string('ifchanged-param03', {'num': (1, 1, 2), 'numx': (5, 6, 6)})
self.assertEqual(output, '156156256')
@setup({'ifchanged-param04': '{% for d in days %}{% ifchanged %}{{ d.day }}{% endifchanged %}'
'{% for h in d.hours %}{% ifchanged d h %}{{ h }}{% endifchanged %}'
'{% endfor %}{% endfor %}'})
def test_ifchanged_param04(self):
"""
Test a date+hour like construct, where the hour of the last day is
the same but the date had changed, so print the hour anyway.
"""
output = self.engine.render_to_string(
'ifchanged-param04',
{'days': [{'hours': [1, 2, 3], 'day': 1}, {'hours': [3], 'day': 2}]},
)
self.assertEqual(output, '112323')
@setup({'ifchanged-param05': '{% for d in days %}{% ifchanged d.day %}{{ d.day }}{% endifchanged %}'
'{% for h in d.hours %}{% ifchanged d.day h %}{{ h }}{% endifchanged %}'
'{% endfor %}{% endfor %}'})
def test_ifchanged_param05(self):
"""
Logically the same as above, just written with explicit ifchanged
for the day.
"""
output = self.engine.render_to_string(
'ifchanged-param05',
{'days': [{'hours': [1, 2, 3], 'day': 1}, {'hours': [3], 'day': 2}]},
)
self.assertEqual(output, '112323')
@setup({'ifchanged-else01': '{% for id in ids %}{{ id }}'
'{% ifchanged id %}-first{% else %}-other{% endifchanged %}'
',{% endfor %}'})
def test_ifchanged_else01(self):
"""
Test the else clause of ifchanged.
"""
output = self.engine.render_to_string('ifchanged-else01', {'ids': [1, 1, 2, 2, 2, 3]})
self.assertEqual(output, '1-first,1-other,2-first,2-other,2-other,3-first,')
@setup({'ifchanged-else02': '{% for id in ids %}{{ id }}-'
'{% ifchanged id %}{% cycle "red" "blue" %}{% else %}grey{% endifchanged %}'
',{% endfor %}'})
def test_ifchanged_else02(self):
output = self.engine.render_to_string('ifchanged-else02', {'ids': [1, 1, 2, 2, 2, 3]})
self.assertEqual(output, '1-red,1-grey,2-blue,2-grey,2-grey,3-red,')
@setup({'ifchanged-else03': '{% for id in ids %}{{ id }}'
'{% ifchanged id %}-{% cycle "red" "blue" %}{% else %}{% endifchanged %}'
',{% endfor %}'})
def test_ifchanged_else03(self):
output = self.engine.render_to_string('ifchanged-else03', {'ids': [1, 1, 2, 2, 2, 3]})
self.assertEqual(output, '1-red,1,2-blue,2,2,3-red,')
@setup({'ifchanged-else04': '{% for id in ids %}'
'{% ifchanged %}***{{ id }}*{% else %}...{% endifchanged %}'
'{{ forloop.counter }}{% endfor %}'})
def test_ifchanged_else04(self):
output = self.engine.render_to_string('ifchanged-else04', {'ids': [1, 1, 2, 2, 2, 3, 4]})
self.assertEqual(output, '***1*1...2***2*3...4...5***3*6***4*7')
@setup({'ifchanged-filter-ws': '{% load custom %}{% for n in num %}'
'{% ifchanged n|noop:"x y" %}..{% endifchanged %}{{ n }}'
'{% endfor %}'})
def test_ifchanged_filter_ws(self):
"""
Test whitespace in filter arguments
"""
output = self.engine.render_to_string('ifchanged-filter-ws', {'num': (1, 2, 3)})
self.assertEqual(output, '..1..2..3')
class IfChangedTests(SimpleTestCase):
@classmethod
def setUpClass(cls):
cls.engine = Engine()
super(IfChangedTests, cls).setUpClass()
def test_ifchanged_concurrency(self):
"""
#15849 -- ifchanged should be thread-safe.
"""
template = self.engine.from_string(
'[0{% for x in foo %},{% with var=get_value %}{% ifchanged %}'
'{{ var }}{% endifchanged %}{% endwith %}{% endfor %}]'
)
# Using generator to mimic concurrency.
# The generator is not passed to the 'for' loop, because the loop does a list(values);
# instead, gen.next() is called in the template to control the generator.
def gen():
yield 1
yield 2
# Simulate that another thread is now rendering.
# If the IfChangedNode stored state on 'self', it would stay at '3' and skip the last yielded value below.
iter2 = iter([1, 2, 3])
output2 = template.render(Context({'foo': range(3), 'get_value': lambda: next(iter2)}))
self.assertEqual(output2, '[0,1,2,3]', 'Expected [0,1,2,3] in second parallel template, got {}'.format(output2))
yield 3
gen1 = gen()
output1 = template.render(Context({'foo': range(3), 'get_value': lambda: next(gen1)}))
self.assertEqual(output1, '[0,1,2,3]', 'Expected [0,1,2,3] in first template, got {}'.format(output1))
def test_ifchanged_render_once(self):
"""
#19890. The content of ifchanged template tag was rendered twice.
"""
template = self.engine.from_string('{% ifchanged %}{% cycle "1st time" "2nd time" %}{% endifchanged %}')
output = template.render(Context({}))
self.assertEqual(output, '1st time')
def test_include(self):
"""
#23516 -- This works as a regression test only if the cached loader
isn't used. Hence we don't use the @setup decorator.
"""
engine = Engine(loaders=[
('django.template.loaders.locmem.Loader', {
'template': '{% for x in vars %}{% include "include" %}{% endfor %}',
'include': '{% ifchanged %}{{ x }}{% endifchanged %}',
}),
])
output = engine.render_to_string('template', dict(vars=[1, 1, 2, 2, 3, 3]))
self.assertEqual(output, "123")
|
bsd-3-clause
|
h3biomed/ansible
|
test/units/modules/network/nxos/test_nxos_bgp_af.py
|
38
|
4391
|
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.nxos import nxos_bgp_af
from .nxos_module import TestNxosModule, load_fixture, set_module_args
class TestNxosBgpAfModule(TestNxosModule):
module = nxos_bgp_af
def setUp(self):
super(TestNxosBgpAfModule, self).setUp()
self.mock_load_config = patch('ansible.modules.network.nxos.nxos_bgp_af.load_config')
self.load_config = self.mock_load_config.start()
self.mock_get_config = patch('ansible.modules.network.nxos.nxos_bgp_af.get_config')
self.get_config = self.mock_get_config.start()
def tearDown(self):
super(TestNxosBgpAfModule, self).tearDown()
self.mock_load_config.stop()
self.mock_get_config.stop()
def load_fixtures(self, commands=None, device=''):
self.get_config.return_value = load_fixture('nxos_bgp', 'config.cfg')
self.load_config.return_value = None
def test_nxos_bgp_af(self):
set_module_args(dict(asn=65535, afi='ipv4', safi='unicast'))
self.execute_module(
changed=True, sort=False,
commands=['router bgp 65535', 'address-family ipv4 unicast']
)
def test_nxos_bgp_af_vrf(self):
set_module_args(dict(asn=65535, vrf='test', afi='ipv4', safi='unicast'))
self.execute_module(
changed=True, sort=False,
commands=['router bgp 65535', 'vrf test', 'address-family ipv4 unicast']
)
def test_nxos_bgp_af_vrf_exists(self):
set_module_args(dict(asn=65535, vrf='test2', afi='ipv4', safi='unicast'))
self.execute_module(changed=False, commands=[])
def test_nxos_bgp_af_dampening_routemap(self):
set_module_args(dict(asn=65535, afi='ipv4', safi='unicast',
dampening_routemap='route-map-a'))
self.execute_module(
changed=True,
commands=['router bgp 65535', 'address-family ipv4 unicast',
'dampening route-map route-map-a']
)
def test_nxos_bgp_af_dampening_manual(self):
set_module_args(dict(asn=65535, afi='ipv4', safi='unicast',
dampening_half_time=5, dampening_suppress_time=2000,
dampening_reuse_time=1900, dampening_max_suppress_time=10))
self.execute_module(
changed=True,
commands=['router bgp 65535', 'address-family ipv4 unicast',
'dampening 5 1900 2000 10']
)
def test_nxos_bgp_af_dampening_mix(self):
set_module_args(dict(asn=65535, afi='ipv4', safi='unicast',
dampening_routemap='route-map-a',
dampening_half_time=5, dampening_suppress_time=2000,
dampening_reuse_time=1900, dampening_max_suppress_time=10))
result = self.execute_module(failed=True)
self.assertEqual(result['msg'], 'parameters are mutually exclusive: dampening_routemap|dampening_half_time, '
'dampening_routemap|dampening_suppress_time, dampening_routemap|dampening_reuse_time, '
'dampening_routemap|dampening_max_suppress_time')
def test_nxos_bgp_af_client(self):
set_module_args(dict(asn=65535, afi='ipv4', safi='unicast',
client_to_client=False))
self.execute_module(
changed=True,
commands=['router bgp 65535', 'address-family ipv4 unicast',
'no client-to-client reflection']
)
|
gpl-3.0
|
DistributedSystemsGroup/YELP-DS
|
Blending.py
|
2
|
2128
|
#!/usr/bin/env python
# encoding: utf-8
"""
This code implements review text classification using Support Vector Machine, Support Vector Regression,
Decision Tree and Random Forest; the evaluation function is implemented as well.
"""
from time import gmtime, strftime
from sklearn import ensemble, svm
import Scikit_Classification as sc
features = []
labels = []
def main():
starttime = strftime("%Y-%m-%d %H:%M:%S",gmtime())
config = {}
execfile("params.conf", config)
inputfile = config["histogram_dataset"]
trainingSamples = config["trainingSamples"]
testingSamples = config["testingSamples"]
numberOfSamples = trainingSamples + testingSamples
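# Hedged sketch of the params.conf this script expects (key names are taken from the
# config lookups above; the values shown are placeholders, not the project's real data):
#   histogram_dataset = "data/histogram_features.csv"
#   trainingSamples = 1500
#   testingSamples = 500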
rf_selectedFeatures = "all"
svm_selectedFeatures = [20, 21, 22, 23, 24]
rf_features, rf_labels = sc.Data_Preparation(inputfile, rf_selectedFeatures)
svm_features, svm_labels = sc.Data_Preparation(inputfile, svm_selectedFeatures)
Scikit_RandomForest_Model = ensemble.RandomForestClassifier(n_estimators=510, criterion='gini', max_depth=7,
min_samples_split=2, min_samples_leaf=1, max_features='sqrt',
bootstrap=True, oob_score=False, n_jobs=-1, random_state=None, verbose=0,
min_density=None, compute_importances=None)
Scikit_SVM_Model = svm.SVC(C=1.0, kernel='rbf', degree=3, gamma=0.0, coef0=0.0, shrinking=True, probability=True, tol=0.001, cache_size=200, class_weight=None, verbose=False, max_iter=-1, random_state=None)
accuracy, testing_Labels, predict_Labels = sc.Classification_Blending(Scikit_RandomForest_Model, rf_features, rf_labels, Scikit_SVM_Model, svm_features, svm_labels, trainingSamples, testingSamples)
sc.Result_Evaluation('data/evaluation_result/evaluation_Blending.txt', accuracy, testing_Labels, predict_Labels)
endtime = strftime("%Y-%m-%d %H:%M:%S",gmtime())
print(starttime)
print(endtime)
if __name__ == "__main__":
main()
|
apache-2.0
|
adrianholovaty/django
|
tests/regressiontests/localflavor/se/tests.py
|
33
|
6441
|
# -*- coding: utf-8 -*-
from django.contrib.localflavor.se.forms import (SECountySelect,
SEOrganisationNumberField, SEPersonalIdentityNumberField,
SEPostalCodeField)
import datetime
from django.test import SimpleTestCase
class SELocalFlavorTests(SimpleTestCase):
def setUp(self):
# Mocking datetime.date to make sure
# localflavor.se.utils.validate_id_birthday works
class MockDate(datetime.date):
def today(cls):
return datetime.date(2008, 5, 14)
today = classmethod(today)
self._olddate = datetime.date
datetime.date = MockDate
def tearDown(self):
datetime.date = self._olddate
def test_SECountySelect(self):
f = SECountySelect()
out = u'''<select name="swedish_county">
<option value="AB">Stockholm</option>
<option value="AC">V\xe4sterbotten</option>
<option value="BD">Norrbotten</option>
<option value="C">Uppsala</option>
<option value="D">S\xf6dermanland</option>
<option value="E" selected="selected">\xd6sterg\xf6tland</option>
<option value="F">J\xf6nk\xf6ping</option>
<option value="G">Kronoberg</option>
<option value="H">Kalmar</option>
<option value="I">Gotland</option>
<option value="K">Blekinge</option>
<option value="M">Sk\xe5ne</option>
<option value="N">Halland</option>
<option value="O">V\xe4stra G\xf6taland</option>
<option value="S">V\xe4rmland</option>
<option value="T">\xd6rebro</option>
<option value="U">V\xe4stmanland</option>
<option value="W">Dalarna</option>
<option value="X">G\xe4vleborg</option>
<option value="Y">V\xe4sternorrland</option>
<option value="Z">J\xe4mtland</option>
</select>'''
self.assertHTMLEqual(f.render('swedish_county', 'E'), out)
def test_SEOrganizationNumberField(self):
error_invalid = [u'Enter a valid Swedish organisation number.']
valid = {
'870512-1989': '198705121989',
'19870512-1989': '198705121989',
'870512-2128': '198705122128',
'081015-6315': '190810156315',
'081015+6315': '180810156315',
'0810156315': '190810156315',
# Test some different organisation numbers
# IKEA Linköping
'556074-7569': '5560747569',
# Volvo Personvagnar
'556074-3089': '5560743089',
# LJS (organisation)
'822001-5476': '8220015476',
# LJS (organisation)
'8220015476': '8220015476',
# Katedralskolan Linköping (school)
'2120000449': '2120000449',
# Faux organisation number, which tests that the checksum can be 0
'232518-5060': '2325185060',
}
invalid = {
# Ordinary personal identity numbers for sole proprietors
# The same rules as for SEPersonalIdentityField applies here
'081015 6315': error_invalid,
'950231-4496': error_invalid,
'6914104499': error_invalid,
'950d314496': error_invalid,
'invalid!!!': error_invalid,
'870514-1111': error_invalid,
# Co-ordination number checking
# Co-ordination numbers are not valid organisation numbers
'870574-1315': error_invalid,
'870573-1311': error_invalid,
# Volvo Personvagnar, bad format
'556074+3089': error_invalid,
# Invalid checksum
'2120000441': error_invalid,
# Valid checksum but invalid organisation type
'1120000441': error_invalid,
}
self.assertFieldOutput(SEOrganisationNumberField, valid, invalid)
def test_SEPersonalIdentityNumberField(self):
error_invalid = [u'Enter a valid Swedish personal identity number.']
error_coord = [u'Co-ordination numbers are not allowed.']
valid = {
'870512-1989': '198705121989',
'870512-2128': '198705122128',
'19870512-1989': '198705121989',
'198705121989': '198705121989',
'081015-6315': '190810156315',
'0810156315': '190810156315',
# This is a "special-case" in the checksum calculation,
# where the sum is divisible by 10 (the checksum digit == 0)
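# Worked example of that special case (illustrative, assuming the usual mod-10/Luhn
# weighting 2,1,2,... over the nine digits 870514106): the weighted digit sums are
# 7+7+0+5+2+4+2+0+3 = 30, so the check digit is (10 - 30 % 10) % 10 == 0.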
'8705141060': '198705141060',
# + means that the person is older than 100 years
'081015+6315': '180810156315',
# Co-ordination number checking
'870574-1315': '198705741315',
'870574+1315': '188705741315',
'198705741315': '198705741315',
}
invalid = {
'081015 6315': error_invalid,
'950d314496': error_invalid,
'invalid!!!': error_invalid,
# Invalid dates
# February 31st does not exist
'950231-4496': error_invalid,
# Month 14 does not exist
'6914104499': error_invalid,
# There are no Swedish personal id numbers where year < 1800
'17430309-7135': error_invalid,
# Invalid checksum
'870514-1111': error_invalid,
# Co-ordination number with bad checksum
'870573-1311': error_invalid,
}
self.assertFieldOutput(SEPersonalIdentityNumberField, valid, invalid)
valid = {}
invalid = {
# Check valid co-ordination numbers that should not be accepted
# because of coordination_number=False
'870574-1315': error_coord,
'870574+1315': error_coord,
'8705741315': error_coord,
# Invalid co-ordination numbers should be treated as invalid, and not
# as co-ordination numbers
'870573-1311': error_invalid,
}
kwargs = {'coordination_number': False,}
self.assertFieldOutput(SEPersonalIdentityNumberField, valid, invalid,
field_kwargs=kwargs)
def test_SEPostalCodeField(self):
error_format = [u'Enter a Swedish postal code in the format XXXXX.']
valid = {
'589 37': '58937',
'58937': '58937',
}
invalid = {
'abcasfassadf': error_format,
# Only one space is allowed for separation
'589  37': error_format,
# The postal code must not start with 0
'01234': error_format,
}
self.assertFieldOutput(SEPostalCodeField, valid, invalid)
|
bsd-3-clause
|
VeritasOS/cloud-custodian
|
c7n/resources/shield.py
|
2
|
1371
|
# Copyright 2016 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from c7n.manager import resources
from c7n.query import QueryResourceManager
@resources.register('shield-protection')
class ShieldProtection(QueryResourceManager):
class resource_type(object):
service = 'shield'
enum_spec = ('list_protections', 'Protections', None)
id = 'Id'
name = 'Name'
dimension = None
@resources.register('shield-attack')
class ShieldAttack(QueryResourceManager):
class resource_type(object):
service = 'shield'
enum_spec = ('list_attacks', 'Attacks', None)
detail_spec = ('describe_attack', 'AttackId', 'AttackId', 'Attack')
id = 'AttackId'
date = 'StartTime'
dimension = None
|
apache-2.0
|
mlperf/training_results_v0.7
|
Google/benchmarks/maskrcnn/implementations/maskrcnn-research-TF-tpu-v3-1024/eval_multiprocess.py
|
2
|
4040
|
# Copyright 2018 Google. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Use multiprocess to perform COCO metric evaluation.
"""
# copybara:insert import multiprocessing
from REDACTED.mask_rcnn import mask_rcnn_params
from REDACTED.mask_rcnn import segm_utils
# copybara:strip_begin
from REDACTED.REDACTED.multiprocessing import REDACTEDprocess
# copybara:strip_end
# copybara:strip_begin
def REDACTED_post_processing():
"""REDACTED batch-processes the predictions."""
q_in, q_out = REDACTEDprocess.get_user_data()
post_processing(q_in, q_out)
# copybara:strip_end
def post_processing(q_in, q_out):
"""Batch-processes the predictions."""
boxes, masks, image_info = q_in.get()
while boxes is not None:
detections = []
segmentations = []
for i, box in enumerate(boxes):
# Slice out the padding data where score is zero
num = max(1, sum(box[:, 5] > 0))
box = box[:num, :]
segms = segm_utils.segm_results(
masks[i], box[:, 1:5], int(image_info[i][3]), int(image_info[i][4]))
detections.extend(box)
segmentations.append(segms)
q_out.put((detections, segmentations))
boxes, masks, image_info = q_in.get()
# signal the parent process that we have completed all work.
q_out.put((None, None))
def update_eval_metric(q_out, eval_metric, exited_process):
detections, segmentations = q_out.get()
if detections is None and segmentations is None:
exited_process += 1
else:
eval_metric.update(detections, segmentations)
return exited_process
def eval_multiprocessing(predictions,
eval_metric,
eval_worker_count,
queue_size=mask_rcnn_params.QUEUE_SIZE):
"""Enables multiprocessing to update eval metrics."""
# copybara:strip_begin
q_in, q_out = REDACTEDprocess.get_user_data()
processes = [
REDACTEDprocess.Process(target=REDACTED_post_processing)
for _ in range(eval_worker_count)
]
# copybara:strip_end_and_replace_begin
# q_in = multiprocessing.Queue(maxsize=queue_size)
# q_out = multiprocessing.Queue(maxsize=queue_size)
# processes = [
# multiprocessing.Process(target=post_processing, args=(q_in, q_out))
# for _ in range(eval_worker_count)
# ]
# copybara:replace_end
for p in processes:
p.start()
# TODO(b/129410706): investigate whether threading improves speed.
# Every predictor.next() gets a batch of prediction (a dictionary).
exited_process = 0
samples = len(predictions['detections']) // eval_worker_count
for i in range(eval_worker_count):
while q_in.full() or q_out.qsize() > queue_size // 4:
exited_process = update_eval_metric(q_out, eval_metric, exited_process)
q_in.put((predictions['detections'][i * samples:(i + 1) * samples],
predictions['mask_outputs'][i * samples:(i + 1) * samples],
predictions['image_info'][i * samples:(i + 1) * samples]))
# Adds empty items to signal the children to quit.
for _ in processes:
q_in.put((None, None, None))
# Cleans up q_out and waits for all the processes to finish work.
while not q_out.empty() or exited_process < eval_worker_count:
exited_process = update_eval_metric(q_out, eval_metric, exited_process)
for p in processes:
# actively terminate all processes (to work around the multiprocessing
# deadlock issue in Cloud)
# copybara:insert p.terminate()
p.join()
|
apache-2.0
|
artiya4u/thefuck
|
tests/rules/test_git_add.py
|
20
|
1461
|
import pytest
from thefuck.rules.git_add import match, get_new_command
from tests.utils import Command
@pytest.fixture
def did_not_match(target, did_you_forget=True):
error = ("error: pathspec '{}' did not match any "
"file(s) known to git.".format(target))
if did_you_forget:
error = ("{}\nDid you forget to 'git add'?'".format(error))
return error
@pytest.mark.parametrize('command', [
Command(script='git submodule update unknown',
stderr=did_not_match('unknown')),
Command(script='git commit unknown',
stderr=did_not_match('unknown'))]) # Older versions of Git
def test_match(command):
assert match(command, None)
@pytest.mark.parametrize('command', [
Command(script='git submodule update known', stderr=('')),
Command(script='git commit known', stderr=('')),
Command(script='git commit unknown', # Newer versions of Git
stderr=did_not_match('unknown', False))])
def test_not_match(command):
assert not match(command, None)
@pytest.mark.parametrize('command, new_command', [
(Command('git submodule update unknown', stderr=did_not_match('unknown')),
'git add -- unknown && git submodule update unknown'),
(Command('git commit unknown', stderr=did_not_match('unknown')), # Old Git
'git add -- unknown && git commit unknown')])
def test_get_new_command(command, new_command):
assert get_new_command(command, None) == new_command
|
mit
|
partofthething/home-assistant
|
homeassistant/components/xiaomi_miio/air_quality.py
|
7
|
7075
|
"""Support for Xiaomi Mi Air Quality Monitor (PM2.5)."""
import logging
from miio import AirQualityMonitor, Device, DeviceException
import voluptuous as vol
from homeassistant.components.air_quality import PLATFORM_SCHEMA, AirQualityEntity
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_TOKEN
from homeassistant.exceptions import NoEntitySpecifiedError, PlatformNotReady
import homeassistant.helpers.config_validation as cv
from .const import (
MODEL_AIRQUALITYMONITOR_B1,
MODEL_AIRQUALITYMONITOR_S1,
MODEL_AIRQUALITYMONITOR_V1,
)
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "Xiaomi Miio Air Quality Monitor"
ATTR_CO2E = "carbon_dioxide_equivalent"
ATTR_TVOC = "total_volatile_organic_compounds"
ATTR_TEMP = "temperature"
ATTR_HUM = "humidity"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_TOKEN): vol.All(cv.string, vol.Length(min=32, max=32)),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
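# Hedged configuration sketch (for illustration only; the keys follow the schema above,
# the platform key is assumed from the component path, and host/token/name are placeholders):
#
#   air_quality:
#     - platform: xiaomi_miio
#       host: 192.168.1.10
#       token: !secret xiaomi_airmonitor_token
#       name: Bedroom Air Monitor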
PROP_TO_ATTR = {
"carbon_dioxide_equivalent": ATTR_CO2E,
"total_volatile_organic_compounds": ATTR_TVOC,
"temperature": ATTR_TEMP,
"humidity": ATTR_HUM,
}
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the sensor from config."""
host = config[CONF_HOST]
token = config[CONF_TOKEN]
name = config[CONF_NAME]
_LOGGER.info("Initializing with host %s (token %s...)", host, token[:5])
miio_device = Device(host, token)
try:
device_info = await hass.async_add_executor_job(miio_device.info)
except DeviceException as ex:
raise PlatformNotReady from ex
model = device_info.model
unique_id = f"{model}-{device_info.mac_address}"
_LOGGER.debug(
"%s %s %s detected",
model,
device_info.firmware_version,
device_info.hardware_version,
)
device = AirQualityMonitor(host, token, model=model)
if model == MODEL_AIRQUALITYMONITOR_S1:
entity = AirMonitorS1(name, device, unique_id)
elif model == MODEL_AIRQUALITYMONITOR_B1:
entity = AirMonitorB1(name, device, unique_id)
elif model == MODEL_AIRQUALITYMONITOR_V1:
entity = AirMonitorV1(name, device, unique_id)
else:
raise NoEntitySpecifiedError(f"Not support for entity {unique_id}")
async_add_entities([entity], update_before_add=True)
class AirMonitorB1(AirQualityEntity):
"""Air Quality class for Xiaomi cgllc.airmonitor.b1 device."""
def __init__(self, name, device, unique_id):
"""Initialize the entity."""
self._name = name
self._device = device
self._unique_id = unique_id
self._icon = "mdi:cloud"
self._available = None
self._air_quality_index = None
self._carbon_dioxide = None
self._carbon_dioxide_equivalent = None
self._particulate_matter_2_5 = None
self._total_volatile_organic_compounds = None
self._temperature = None
self._humidity = None
async def async_update(self):
"""Fetch state from the miio device."""
try:
state = await self.hass.async_add_executor_job(self._device.status)
_LOGGER.debug("Got new state: %s", state)
self._carbon_dioxide_equivalent = state.co2e
self._particulate_matter_2_5 = round(state.pm25, 1)
self._total_volatile_organic_compounds = round(state.tvoc, 3)
self._temperature = round(state.temperature, 2)
self._humidity = round(state.humidity, 2)
self._available = True
except DeviceException as ex:
self._available = False
_LOGGER.error("Got exception while fetching the state: %s", ex)
@property
def name(self):
"""Return the name of this entity, if any."""
return self._name
@property
def icon(self):
"""Return the icon to use for device if any."""
return self._icon
@property
def available(self):
"""Return true when state is known."""
return self._available
@property
def unique_id(self):
"""Return the unique ID."""
return self._unique_id
@property
def air_quality_index(self):
"""Return the Air Quality Index (AQI)."""
return self._air_quality_index
@property
def carbon_dioxide(self):
"""Return the CO2 (carbon dioxide) level."""
return self._carbon_dioxide
@property
def carbon_dioxide_equivalent(self):
"""Return the CO2e (carbon dioxide equivalent) level."""
return self._carbon_dioxide_equivalent
@property
def particulate_matter_2_5(self):
"""Return the particulate matter 2.5 level."""
return self._particulate_matter_2_5
@property
def total_volatile_organic_compounds(self):
"""Return the total volatile organic compounds."""
return self._total_volatile_organic_compounds
@property
def temperature(self):
"""Return the current temperature."""
return self._temperature
@property
def humidity(self):
"""Return the current humidity."""
return self._humidity
@property
def device_state_attributes(self):
"""Return the state attributes."""
data = {}
for prop, attr in PROP_TO_ATTR.items():
value = getattr(self, prop)
if value is not None:
data[attr] = value
return data
class AirMonitorS1(AirMonitorB1):
"""Air Quality class for Xiaomi cgllc.airmonitor.s1 device."""
async def async_update(self):
"""Fetch state from the miio device."""
try:
state = await self.hass.async_add_executor_job(self._device.status)
_LOGGER.debug("Got new state: %s", state)
self._carbon_dioxide = state.co2
self._particulate_matter_2_5 = state.pm25
self._total_volatile_organic_compounds = state.tvoc
self._temperature = state.temperature
self._humidity = state.humidity
self._available = True
except DeviceException as ex:
if self._available:
self._available = False
_LOGGER.error("Got exception while fetching the state: %s", ex)
class AirMonitorV1(AirMonitorB1):
"""Air Quality class for Xiaomi cgllc.airmonitor.s1 device."""
async def async_update(self):
"""Fetch state from the miio device."""
try:
state = await self.hass.async_add_executor_job(self._device.status)
_LOGGER.debug("Got new state: %s", state)
self._air_quality_index = state.aqi
self._available = True
except DeviceException as ex:
if self._available:
self._available = False
_LOGGER.error("Got exception while fetching the state: %s", ex)
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return None
|
mit
|
ElvisLouis/code
|
work/ML/tensorflow/practice/word2vec.py
|
1
|
19527
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-threaded word2vec mini-batched skip-gram model.
Trains the model described in:
(Mikolov et al.) Efficient Estimation of Word Representations in Vector Space
ICLR 2013.
http://arxiv.org/abs/1301.3781
This model does traditional minibatching.
The key ops used are:
* placeholder for feeding in tensors for each example.
* embedding_lookup for fetching rows from the embedding matrix.
* sigmoid_cross_entropy_with_logits to calculate the loss.
* GradientDescentOptimizer for optimizing the loss.
* skipgram custom op that does input processing.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import threading
import time
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
import tensorflow as tf
from tensorflow.models.embedding import gen_word2vec as word2vec
flags = tf.app.flags
flags.DEFINE_string("save_path", None, "Directory to write the model and "
"training summaries.")
flags.DEFINE_string("train_data", None, "Training text file. "
"E.g., unzipped file http://mattmahoney.net/dc/text8.zip.")
flags.DEFINE_string(
"eval_data", None, "File consisting of analogies of four tokens."
"embedding 2 - embedding 1 + embedding 3 should be close "
"to embedding 4."
"See README.md for how to get 'questions-words.txt'.")
flags.DEFINE_integer("embedding_size", 200, "The embedding dimension size.")
flags.DEFINE_integer(
"epochs_to_train", 15,
"Number of epochs to train. Each epoch processes the training data once "
"completely.")
flags.DEFINE_float("learning_rate", 0.2, "Initial learning rate.")
flags.DEFINE_integer("num_neg_samples", 100,
"Negative samples per training example.")
flags.DEFINE_integer("batch_size", 16,
"Number of training examples processed per step "
"(size of a minibatch).")
flags.DEFINE_integer("concurrent_steps", 12,
"The number of concurrent training steps.")
flags.DEFINE_integer("window_size", 5,
"The number of words to predict to the left and right "
"of the target word.")
flags.DEFINE_integer("min_count", 5,
"The minimum number of word occurrences for it to be "
"included in the vocabulary.")
flags.DEFINE_float("subsample", 1e-3,
"Subsample threshold for word occurrence. Words that appear "
"with higher frequency will be randomly down-sampled. Set "
"to 0 to disable.")
flags.DEFINE_boolean(
"interactive", False,
"If true, enters an IPython interactive session to play with the trained "
"model. E.g., try model.analogy(b'france', b'paris', b'russia') and "
"model.nearby([b'proton', b'elephant', b'maxwell'])")
flags.DEFINE_integer("statistics_interval", 5,
"Print statistics every n seconds.")
flags.DEFINE_integer("summary_interval", 5,
"Save training summary to file every n seconds (rounded "
"up to statistics interval).")
flags.DEFINE_integer("checkpoint_interval", 600,
"Checkpoint the model (i.e. save the parameters) every n "
"seconds (rounded up to statistics interval).")
FLAGS = flags.FLAGS
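# Hedged invocation sketch (flag names are those defined above; the data paths are
# placeholders based on the flag help text, not verified files):
#   python word2vec.py --train_data=text8 --eval_data=questions-words.txt \
#       --save_path=/tmp/word2vec --epochs_to_train=15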
class Options(object):
"""Options used by our word2vec model."""
def __init__(self):
# Model options.
# Embedding dimension.
self.emb_dim = FLAGS.embedding_size
# Training options.
# The training text file.
self.train_data = FLAGS.train_data
# Number of negative samples per example.
self.num_samples = FLAGS.num_neg_samples
# The initial learning rate.
self.learning_rate = FLAGS.learning_rate
# Number of epochs to train. After this many epochs, the learning
# rate decays linearly to zero and the training stops.
self.epochs_to_train = FLAGS.epochs_to_train
# Concurrent training steps.
self.concurrent_steps = FLAGS.concurrent_steps
# Number of examples for one training step.
self.batch_size = FLAGS.batch_size
# The number of words to predict to the left and right of the target word.
self.window_size = FLAGS.window_size
# The minimum number of word occurrences for it to be included in the
# vocabulary.
self.min_count = FLAGS.min_count
# Subsampling threshold for word occurrence.
self.subsample = FLAGS.subsample
# How often to print statistics.
self.statistics_interval = FLAGS.statistics_interval
# How often to write to the summary file (rounds up to the nearest
# statistics_interval).
self.summary_interval = FLAGS.summary_interval
# How often to write checkpoints (rounds up to the nearest statistics
# interval).
self.checkpoint_interval = FLAGS.checkpoint_interval
# Where to write out summaries.
self.save_path = FLAGS.save_path
# Eval options.
# The text file for eval.
self.eval_data = FLAGS.eval_data
class Word2Vec(object):
"""Word2Vec model (Skipgram)."""
def __init__(self, options, session):
self._options = options
self._session = session
self._word2id = {}
self._id2word = []
self.build_graph()
self.build_eval_graph()
self.save_vocab()
def read_analogies(self):
"""Reads through the analogy question file.
Returns:
questions: a [n, 4] numpy array containing the analogy question's
word ids.
questions_skipped: questions skipped due to unknown words.
"""
questions = []
questions_skipped = 0
with open(self._options.eval_data, "rb") as analogy_f:
for line in analogy_f:
if line.startswith(b":"): # Skip comments.
continue
words = line.strip().lower().split(b" ")
ids = [self._word2id.get(w.strip()) for w in words]
if None in ids or len(ids) != 4:
questions_skipped += 1
else:
questions.append(np.array(ids))
print("Eval analogy file: ", self._options.eval_data)
print("Questions: ", len(questions))
print("Skipped: ", questions_skipped)
self._analogy_questions = np.array(questions, dtype=np.int32)
def forward(self, examples, labels):
"""Build the graph for the forward pass."""
opts = self._options
# Declare all variables we need.
# Embedding: [vocab_size, emb_dim]
init_width = 0.5 / opts.emb_dim
emb = tf.Variable(
tf.random_uniform(
[opts.vocab_size, opts.emb_dim], -init_width, init_width),
name="emb")
self._emb = emb
# Softmax weight: [vocab_size, emb_dim]. Transposed.
sm_w_t = tf.Variable(
tf.zeros([opts.vocab_size, opts.emb_dim]),
name="sm_w_t")
# Softmax bias: [emb_dim].
sm_b = tf.Variable(tf.zeros([opts.vocab_size]), name="sm_b")
# Global step: scalar, i.e., shape [].
self.global_step = tf.Variable(0, name="global_step")
# Nodes to compute the nce loss w/ candidate sampling.
labels_matrix = tf.reshape(
tf.cast(labels,
dtype=tf.int64),
[opts.batch_size, 1])
# Negative sampling.
sampled_ids, _, _ = (tf.nn.fixed_unigram_candidate_sampler(
true_classes=labels_matrix,
num_true=1,
num_sampled=opts.num_samples,
unique=True,
range_max=opts.vocab_size,
distortion=0.75,
unigrams=opts.vocab_counts.tolist()))
# Embeddings for examples: [batch_size, emb_dim]
example_emb = tf.nn.embedding_lookup(emb, examples)
# Weights for labels: [batch_size, emb_dim]
true_w = tf.nn.embedding_lookup(sm_w_t, labels)
# Biases for labels: [batch_size, 1]
true_b = tf.nn.embedding_lookup(sm_b, labels)
# Weights for sampled ids: [num_sampled, emb_dim]
sampled_w = tf.nn.embedding_lookup(sm_w_t, sampled_ids)
# Biases for sampled ids: [num_sampled, 1]
sampled_b = tf.nn.embedding_lookup(sm_b, sampled_ids)
# True logits: [batch_size, 1]
true_logits = tf.reduce_sum(tf.mul(example_emb, true_w), 1) + true_b
# Sampled logits: [batch_size, num_sampled]
# We replicate sampled noise labels for all examples in the batch
# using the matmul.
sampled_b_vec = tf.reshape(sampled_b, [opts.num_samples])
sampled_logits = tf.matmul(example_emb,
sampled_w,
transpose_b=True) + sampled_b_vec
return true_logits, sampled_logits
def nce_loss(self, true_logits, sampled_logits):
"""Build the graph for the NCE loss."""
# cross-entropy(logits, labels)
opts = self._options
true_xent = tf.nn.sigmoid_cross_entropy_with_logits(
true_logits, tf.ones_like(true_logits))
sampled_xent = tf.nn.sigmoid_cross_entropy_with_logits(
sampled_logits, tf.zeros_like(sampled_logits))
# NCE-loss is the sum of the true and noise (sampled words)
# contributions, averaged over the batch.
nce_loss_tensor = (tf.reduce_sum(true_xent) +
tf.reduce_sum(sampled_xent)) / opts.batch_size
return nce_loss_tensor
def optimize(self, loss):
"""Build the graph to optimize the loss function."""
# Optimizer nodes.
# Linear learning rate decay.
opts = self._options
words_to_train = float(opts.words_per_epoch * opts.epochs_to_train)
lr = opts.learning_rate * tf.maximum(
0.0001, 1.0 - tf.cast(self._words, tf.float32) / words_to_train)
self._lr = lr
optimizer = tf.train.GradientDescentOptimizer(lr)
train = optimizer.minimize(loss,
global_step=self.global_step,
gate_gradients=optimizer.GATE_NONE)
self._train = train
def build_eval_graph(self):
"""Build the eval graph."""
# Eval graph
# Each analogy task is to predict the 4th word (d) given three
# words: a, b, c. E.g., a=italy, b=rome, c=france, we should
# predict d=paris.
# The eval feeds three vectors of word ids for a, b, c, each of
# which is of size N, where N is the number of analogies we want to
# evaluate in one batch.
analogy_a = tf.placeholder(dtype=tf.int32) # [N]
analogy_b = tf.placeholder(dtype=tf.int32) # [N]
analogy_c = tf.placeholder(dtype=tf.int32) # [N]
# Normalized word embeddings of shape [vocab_size, emb_dim].
nemb = tf.nn.l2_normalize(self._emb, 1)
# Each row of a_emb, b_emb, c_emb is a word's embedding vector.
# They all have the shape [N, emb_dim]
a_emb = tf.gather(nemb, analogy_a) # a's embs
b_emb = tf.gather(nemb, analogy_b) # b's embs
c_emb = tf.gather(nemb, analogy_c) # c's embs
    # We expect that d's embedding vector on the unit hyper-sphere is
# near: c_emb + (b_emb - a_emb), which has the shape [N, emb_dim].
target = c_emb + (b_emb - a_emb)
# Compute cosine distance between each pair of target and vocab.
# dist has shape [N, vocab_size].
dist = tf.matmul(target, nemb, transpose_b=True)
# For each question (row in dist), find the top 4 words.
_, pred_idx = tf.nn.top_k(dist, 4)
# Nodes for computing neighbors for a given word according to
# their cosine distance.
nearby_word = tf.placeholder(dtype=tf.int32) # word id
nearby_emb = tf.gather(nemb, nearby_word)
nearby_dist = tf.matmul(nearby_emb, nemb, transpose_b=True)
nearby_val, nearby_idx = tf.nn.top_k(nearby_dist,
min(1000, self._options.vocab_size))
# Nodes in the construct graph which are used by training and
# evaluation to run/feed/fetch.
self._analogy_a = analogy_a
self._analogy_b = analogy_b
self._analogy_c = analogy_c
self._analogy_pred_idx = pred_idx
self._nearby_word = nearby_word
self._nearby_val = nearby_val
self._nearby_idx = nearby_idx
def build_graph(self):
"""Build the graph for the full model."""
opts = self._options
# The training data. A text file.
(words, counts, words_per_epoch, self._epoch, self._words, examples,
labels) = word2vec.skipgram(filename=opts.train_data,
batch_size=opts.batch_size,
window_size=opts.window_size,
min_count=opts.min_count,
subsample=opts.subsample)
(opts.vocab_words, opts.vocab_counts,
opts.words_per_epoch) = self._session.run([words, counts, words_per_epoch])
opts.vocab_size = len(opts.vocab_words)
print("Data file: ", opts.train_data)
print("Vocab size: ", opts.vocab_size - 1, " + UNK")
print("Words per epoch: ", opts.words_per_epoch)
self._examples = examples
self._labels = labels
self._id2word = opts.vocab_words
for i, w in enumerate(self._id2word):
self._word2id[w] = i
true_logits, sampled_logits = self.forward(examples, labels)
loss = self.nce_loss(true_logits, sampled_logits)
tf.scalar_summary("NCE loss", loss)
self._loss = loss
self.optimize(loss)
# Properly initialize all variables.
tf.initialize_all_variables().run()
self.saver = tf.train.Saver()
def save_vocab(self):
"""Save the vocabulary to a file so the model can be reloaded."""
opts = self._options
with open(os.path.join(opts.save_path, "vocab.txt"), "w") as f:
for i in xrange(opts.vocab_size):
vocab_word = tf.compat.as_text(opts.vocab_words[i]).encode("utf-8")
f.write("%s %d\n" % (vocab_word,
opts.vocab_counts[i]))
def _train_thread_body(self):
initial_epoch, = self._session.run([self._epoch])
while True:
_, epoch = self._session.run([self._train, self._epoch])
if epoch != initial_epoch:
break
def train(self):
"""Train the model."""
opts = self._options
initial_epoch, initial_words = self._session.run([self._epoch, self._words])
summary_op = tf.merge_all_summaries()
summary_writer = tf.train.SummaryWriter(opts.save_path, self._session.graph)
workers = []
for _ in xrange(opts.concurrent_steps):
t = threading.Thread(target=self._train_thread_body)
t.start()
workers.append(t)
last_words, last_time, last_summary_time = initial_words, time.time(), 0
last_checkpoint_time = 0
while True:
      time.sleep(opts.statistics_interval)  # Reports our progress once in a while.
(epoch, step, loss, words, lr) = self._session.run(
[self._epoch, self.global_step, self._loss, self._words, self._lr])
now = time.time()
last_words, last_time, rate = words, now, (words - last_words) / (
now - last_time)
print("Epoch %4d Step %8d: lr = %5.3f loss = %6.2f words/sec = %8.0f\r" %
(epoch, step, lr, loss, rate), end="")
sys.stdout.flush()
if now - last_summary_time > opts.summary_interval:
summary_str = self._session.run(summary_op)
summary_writer.add_summary(summary_str, step)
last_summary_time = now
if now - last_checkpoint_time > opts.checkpoint_interval:
self.saver.save(self._session,
os.path.join(opts.save_path, "model.ckpt"),
global_step=step.astype(int))
last_checkpoint_time = now
if epoch != initial_epoch:
break
for t in workers:
t.join()
return epoch
def _predict(self, analogy):
"""Predict the top 4 answers for analogy questions."""
idx, = self._session.run([self._analogy_pred_idx], {
self._analogy_a: analogy[:, 0],
self._analogy_b: analogy[:, 1],
self._analogy_c: analogy[:, 2]
})
return idx
def eval(self):
"""Evaluate analogy questions and reports accuracy."""
# How many questions we get right at precision@1.
correct = 0
try:
total = self._analogy_questions.shape[0]
except AttributeError as e:
raise AttributeError("Need to read analogy questions.")
start = 0
while start < total:
limit = start + 2500
sub = self._analogy_questions[start:limit, :]
idx = self._predict(sub)
start = limit
for question in xrange(sub.shape[0]):
for j in xrange(4):
if idx[question, j] == sub[question, 3]:
# Bingo! We predicted correctly. E.g., [italy, rome, france, paris].
correct += 1
break
elif idx[question, j] in sub[question, :3]:
# We need to skip words already in the question.
continue
else:
# The correct label is not the precision@1
break
print()
print("Eval %4d/%d accuracy = %4.1f%%" % (correct, total,
correct * 100.0 / total))
def analogy(self, w0, w1, w2):
"""Predict word w3 as in w0:w1 vs w2:w3."""
wid = np.array([[self._word2id.get(w, 0) for w in [w0, w1, w2]]])
idx = self._predict(wid)
for c in [self._id2word[i] for i in idx[0, :]]:
if c not in [w0, w1, w2]:
return c
return "unknown"
def nearby(self, words, num=20):
"""Prints out nearby words given a list of words."""
ids = np.array([self._word2id.get(x, 0) for x in words])
vals, idx = self._session.run(
[self._nearby_val, self._nearby_idx], {self._nearby_word: ids})
for i in xrange(len(words)):
print("\n%s\n=====================================" % (words[i]))
for (neighbor, distance) in zip(idx[i, :num], vals[i, :num]):
print("%-20s %6.4f" % (self._id2word[neighbor], distance))
def _start_shell(local_ns=None):
# An interactive shell is useful for debugging/development.
import IPython
user_ns = {}
if local_ns:
user_ns.update(local_ns)
user_ns.update(globals())
IPython.start_ipython(argv=[], user_ns=user_ns)
def main(_):
"""Train a word2vec model."""
if not FLAGS.train_data or not FLAGS.eval_data or not FLAGS.save_path:
print("--train_data --eval_data and --save_path must be specified.")
sys.exit(1)
opts = Options()
with tf.Graph().as_default(), tf.Session() as session:
with tf.device("/cpu:0"):
model = Word2Vec(opts, session)
model.read_analogies() # Read analogy questions
for _ in xrange(opts.epochs_to_train):
model.train() # Process one epoch
model.eval() # Eval analogies.
# Perform a final save.
model.saver.save(session,
os.path.join(opts.save_path, "model.ckpt"),
global_step=model.global_step)
if FLAGS.interactive:
# E.g.,
# [0]: model.analogy(b'france', b'paris', b'russia')
# [1]: model.nearby([b'proton', b'elephant', b'maxwell'])
_start_shell(locals())
if __name__ == "__main__":
tf.app.run()
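

# A minimal NumPy-only sketch of the objective that Word2Vec.nce_loss() builds
# with TensorFlow ops above: sigmoid cross-entropy of the true logits against
# a target of 1 and of the sampled (noise) logits against 0, summed and then
# averaged over the batch. The function name and inputs here are illustrative.
def _nce_loss_reference(true_logits, sampled_logits):
  true_logits = np.asarray(true_logits, dtype=np.float64)
  sampled_logits = np.asarray(sampled_logits, dtype=np.float64)
  # -log(sigmoid(x)) for true pairs (label 1).
  true_xent = np.log1p(np.exp(-true_logits))
  # -log(1 - sigmoid(x)) for sampled noise pairs (label 0).
  sampled_xent = np.log1p(np.exp(sampled_logits))
  return (true_xent.sum() + sampled_xent.sum()) / true_logits.shape[0]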
|
gpl-2.0
|
codexns/sublime-coverage
|
st2_windows_x64/coverage/config.py
|
16
|
12628
|
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""Config file for coverage.py"""
import collections
import os
import re
import sys
from coverage.backward import configparser, iitems, string_class
from coverage.misc import CoverageException
class HandyConfigParser(configparser.RawConfigParser):
"""Our specialization of ConfigParser."""
def __init__(self, section_prefix):
configparser.RawConfigParser.__init__(self)
self.section_prefix = section_prefix
def read(self, filename):
"""Read a file name as UTF-8 configuration data."""
kwargs = {}
if sys.version_info >= (3, 2):
kwargs['encoding'] = "utf-8"
return configparser.RawConfigParser.read(self, filename, **kwargs)
def has_option(self, section, option):
section = self.section_prefix + section
return configparser.RawConfigParser.has_option(self, section, option)
def has_section(self, section):
section = self.section_prefix + section
return configparser.RawConfigParser.has_section(self, section)
def options(self, section):
section = self.section_prefix + section
return configparser.RawConfigParser.options(self, section)
def get_section(self, section):
"""Get the contents of a section, as a dictionary."""
d = {}
for opt in self.options(section):
d[opt] = self.get(section, opt)
return d
def get(self, section, *args, **kwargs):
"""Get a value, replacing environment variables also.
The arguments are the same as `RawConfigParser.get`, but in the found
value, ``$WORD`` or ``${WORD}`` are replaced by the value of the
environment variable ``WORD``.
Returns the finished value.
"""
section = self.section_prefix + section
v = configparser.RawConfigParser.get(self, section, *args, **kwargs)
def dollar_replace(m):
"""Called for each $replacement."""
# Only one of the groups will have matched, just get its text.
word = next(w for w in m.groups() if w is not None) # pragma: part covered
if word == "$":
return "$"
else:
return os.environ.get(word, '')
dollar_pattern = r"""(?x) # Use extended regex syntax
\$(?: # A dollar sign, then
(?P<v1>\w+) | # a plain word,
{(?P<v2>\w+)} | # or a {-wrapped word,
(?P<char>[$]) # or a dollar sign.
)
"""
v = re.sub(dollar_pattern, dollar_replace, v)
return v
def getlist(self, section, option):
"""Read a list of strings.
The value of `section` and `option` is treated as a comma- and newline-
separated list of strings. Each value is stripped of whitespace.
Returns the list of strings.
"""
value_list = self.get(section, option)
values = []
for value_line in value_list.split('\n'):
for value in value_line.split(','):
value = value.strip()
if value:
values.append(value)
return values
def getregexlist(self, section, option):
"""Read a list of full-line regexes.
The value of `section` and `option` is treated as a newline-separated
list of regexes. Each value is stripped of whitespace.
Returns the list of strings.
"""
line_list = self.get(section, option)
value_list = []
for value in line_list.splitlines():
value = value.strip()
try:
re.compile(value)
except re.error as e:
raise CoverageException(
"Invalid [%s].%s value %r: %s" % (section, option, value, e)
)
if value:
value_list.append(value)
return value_list
# The default line exclusion regexes.
DEFAULT_EXCLUDE = [
r'(?i)#\s*pragma[:\s]?\s*no\s*cover',
]
# The default partial branch regexes, to be modified by the user.
DEFAULT_PARTIAL = [
r'(?i)#\s*pragma[:\s]?\s*no\s*branch',
]
# The default partial branch regexes, based on Python semantics.
# These are any Python branching constructs that can't actually execute all
# their branches.
DEFAULT_PARTIAL_ALWAYS = [
'while (True|1|False|0):',
'if (True|1|False|0):',
]
class CoverageConfig(object):
"""Coverage.py configuration.
The attributes of this class are the various settings that control the
operation of coverage.py.
"""
def __init__(self):
"""Initialize the configuration attributes to their defaults."""
# Metadata about the config.
self.attempted_config_files = []
self.config_files = []
# Defaults for [run]
self.branch = False
self.concurrency = None
self.cover_pylib = False
self.data_file = ".coverage"
self.debug = []
self.note = None
self.parallel = False
self.plugins = []
self.source = None
self.timid = False
# Defaults for [report]
self.exclude_list = DEFAULT_EXCLUDE[:]
self.fail_under = 0
self.ignore_errors = False
self.include = None
self.omit = None
self.partial_always_list = DEFAULT_PARTIAL_ALWAYS[:]
self.partial_list = DEFAULT_PARTIAL[:]
self.precision = 0
self.show_missing = False
self.skip_covered = False
# Defaults for [html]
self.extra_css = None
self.html_dir = "htmlcov"
self.html_title = "Coverage report"
# Defaults for [xml]
self.xml_output = "coverage.xml"
self.xml_package_depth = 99
# Defaults for [paths]
self.paths = {}
# Options for plugins
self.plugin_options = {}
MUST_BE_LIST = ["omit", "include", "debug", "plugins"]
def from_args(self, **kwargs):
"""Read config values from `kwargs`."""
for k, v in iitems(kwargs):
if v is not None:
if k in self.MUST_BE_LIST and isinstance(v, string_class):
v = [v]
setattr(self, k, v)
def from_file(self, filename, section_prefix=""):
"""Read configuration from a .rc file.
`filename` is a file name to read.
Returns True or False, whether the file could be read.
"""
self.attempted_config_files.append(filename)
cp = HandyConfigParser(section_prefix)
try:
files_read = cp.read(filename)
except configparser.Error as err:
raise CoverageException("Couldn't read config file %s: %s" % (filename, err))
if not files_read:
return False
self.config_files.extend(files_read)
try:
for option_spec in self.CONFIG_FILE_OPTIONS:
self._set_attr_from_config_option(cp, *option_spec)
except ValueError as err:
raise CoverageException("Couldn't read config file %s: %s" % (filename, err))
# Check that there are no unrecognized options.
all_options = collections.defaultdict(set)
for option_spec in self.CONFIG_FILE_OPTIONS:
section, option = option_spec[1].split(":")
all_options[section].add(option)
for section, options in iitems(all_options):
if cp.has_section(section):
for unknown in set(cp.options(section)) - options:
if section_prefix:
section = section_prefix + section
raise CoverageException(
"Unrecognized option '[%s] %s=' in config file %s" % (
section, unknown, filename
)
)
# [paths] is special
if cp.has_section('paths'):
for option in cp.options('paths'):
self.paths[option] = cp.getlist('paths', option)
# plugins can have options
for plugin in self.plugins:
if cp.has_section(plugin):
self.plugin_options[plugin] = cp.get_section(plugin)
return True
CONFIG_FILE_OPTIONS = [
# These are *args for _set_attr_from_config_option:
# (attr, where, type_="")
#
# attr is the attribute to set on the CoverageConfig object.
# where is the section:name to read from the configuration file.
# type_ is the optional type to apply, by using .getTYPE to read the
# configuration value from the file.
# [run]
('branch', 'run:branch', 'boolean'),
('concurrency', 'run:concurrency'),
('cover_pylib', 'run:cover_pylib', 'boolean'),
('data_file', 'run:data_file'),
('debug', 'run:debug', 'list'),
('include', 'run:include', 'list'),
('note', 'run:note'),
('omit', 'run:omit', 'list'),
('parallel', 'run:parallel', 'boolean'),
('plugins', 'run:plugins', 'list'),
('source', 'run:source', 'list'),
('timid', 'run:timid', 'boolean'),
# [report]
('exclude_list', 'report:exclude_lines', 'regexlist'),
('fail_under', 'report:fail_under', 'int'),
('ignore_errors', 'report:ignore_errors', 'boolean'),
('include', 'report:include', 'list'),
('omit', 'report:omit', 'list'),
('partial_always_list', 'report:partial_branches_always', 'regexlist'),
('partial_list', 'report:partial_branches', 'regexlist'),
('precision', 'report:precision', 'int'),
('show_missing', 'report:show_missing', 'boolean'),
('skip_covered', 'report:skip_covered', 'boolean'),
# [html]
('extra_css', 'html:extra_css'),
('html_dir', 'html:directory'),
('html_title', 'html:title'),
# [xml]
('xml_output', 'xml:output'),
('xml_package_depth', 'xml:package_depth', 'int'),
]
def _set_attr_from_config_option(self, cp, attr, where, type_=''):
"""Set an attribute on self if it exists in the ConfigParser."""
section, option = where.split(":")
if cp.has_option(section, option):
method = getattr(cp, 'get' + type_)
setattr(self, attr, method(section, option))
def get_plugin_options(self, plugin):
"""Get a dictionary of options for the plugin named `plugin`."""
return self.plugin_options.get(plugin, {})
def set_option(self, option_name, value):
"""Set an option in the configuration.
`option_name` is a colon-separated string indicating the section and
option name. For example, the ``branch`` option in the ``[run]``
section of the config file would be indicated with `"run:branch"`.
`value` is the new value for the option.
"""
# Check all the hard-coded options.
for option_spec in self.CONFIG_FILE_OPTIONS:
attr, where = option_spec[:2]
if where == option_name:
setattr(self, attr, value)
return
# See if it's a plugin option.
plugin_name, _, key = option_name.partition(":")
if key and plugin_name in self.plugins:
self.plugin_options.setdefault(plugin_name, {})[key] = value
return
# If we get here, we didn't find the option.
raise CoverageException("No such option: %r" % option_name)
def get_option(self, option_name):
"""Get an option from the configuration.
`option_name` is a colon-separated string indicating the section and
option name. For example, the ``branch`` option in the ``[run]``
section of the config file would be indicated with `"run:branch"`.
Returns the value of the option.
"""
# Check all the hard-coded options.
for option_spec in self.CONFIG_FILE_OPTIONS:
attr, where = option_spec[:2]
if where == option_name:
return getattr(self, attr)
# See if it's a plugin option.
plugin_name, _, key = option_name.partition(":")
if key and plugin_name in self.plugins:
return self.plugin_options.get(plugin_name, {}).get(key)
# If we get here, we didn't find the option.
raise CoverageException("No such option: %r" % option_name)
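

# A small, hypothetical sketch of how the pieces above fit together; the file
# names and option values are illustrative only. Values read through
# HandyConfigParser.get() also have $WORD / ${WORD} expanded from the
# environment.
def _example_config_usage():
    config = CoverageConfig()
    # Keyword arguments win over defaults; list-valued options such as "omit"
    # accept a bare string and are coerced to a one-element list.
    config.from_args(branch=True, omit="tests/*")
    # Prefixed sections (e.g. "[coverage:run]" inside setup.cfg) are read by
    # passing section_prefix to from_file().
    config.from_file("setup.cfg", section_prefix="coverage:")
    # Options can also be read and written programmatically by "section:name".
    config.set_option("report:precision", 2)
    return config.get_option("report:precision")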
|
apache-2.0
|
yglazko/socorro
|
webapp-django/crashstats/auth/management/commands/makesuperuser.py
|
11
|
1163
|
import sys
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand, CommandError
def get_input(text):
return raw_input(text).strip()
class Command(BaseCommand):
help = 'emailaddress [, otheremailaddress, ...]'
def handle(self, *args, **options):
if not args:
emails = [get_input('Email address: ').strip()]
else:
emails = args[0].split()
if not [x for x in emails if x.strip()]:
raise CommandError('Must supply at least one email address')
for email in emails:
try:
user = User.objects.get(email__iexact=email)
except User.DoesNotExist:
                print >> sys.stderr, "No user with the email %s" % (email,)
break
if user.is_superuser:
print >> sys.stdout, (
'%s was already a superuser' % (user.email,)
)
else:
user.is_superuser = True
user.save()
print >> sys.stdout, (
'%s is now a superuser' % (user.email,)
)
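
# Typical invocations (the addresses are placeholders): the command accepts a
# single space-separated argument of email addresses, or prompts for one when
# no argument is given.
#
#   ./manage.py makesuperuser "[email protected] [email protected]"
#   ./manage.py makesuperuser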
|
mpl-2.0
|
basepi/hubble
|
hubblestack/files/hubblestack_nova/pkgng_audit.py
|
2
|
1111
|
# -*- encoding: utf-8 -*-
'''
Hubble Nova plugin for FreeBSD pkgng audit
'''
from __future__ import absolute_import
import logging
log = logging.getLogger(__name__)
def __virtual__():
if 'FreeBSD' not in __grains__['os']:
return False, 'This audit module only runs on FreeBSD'
return True
def audit(data_list, tags, labels, debug=False, **kwargs):
'''
Run the pkg.audit command
'''
ret = {'Success': [], 'Failure': []}
__tags__ = []
for profile, data in data_list:
if 'pkgng_audit' in data:
__tags__ = ['pkgng_audit']
break
if debug:
log.debug('pkgng audit __tags__:')
log.debug(__tags__)
if not __tags__:
# No yaml data found, don't do any work
return ret
salt_ret = __salt__['pkg.audit']()
results = {'pkgng_audit': {'result': salt_ret}}
    results['pkgng_audit']['nova_profile'] = profile
    # 'verbose' is not a declared parameter of audit(); read it from kwargs.
    if not kwargs.get('verbose', False):
results = salt_ret
if '0 problem(s)' not in salt_ret:
ret['Failure'].append(results)
else:
ret['Success'].append(results)
return ret
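
# Hypothetical shape of the value returned by audit() above when verbose
# results are kept (the profile name and pkg.audit output are placeholders):
#
#   {'Failure': [],
#    'Success': [{'pkgng_audit': {'result': '... 0 problem(s) ...',
#                                 'nova_profile': '<profile name>'}}]}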
|
apache-2.0
|
skyddv/neutron
|
neutron/objects/qos/rule.py
|
11
|
2193
|
# Copyright 2015 Huawei Technologies India Pvt Ltd, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import sys
from oslo_versionedobjects import base as obj_base
from oslo_versionedobjects import fields as obj_fields
import six
from neutron.common import utils
from neutron.db import api as db_api
from neutron.db.qos import models as qos_db_model
from neutron.objects import base
from neutron.services.qos import qos_consts
def get_rules(context, qos_policy_id):
all_rules = []
with db_api.autonested_transaction(context.session):
for rule_type in qos_consts.VALID_RULE_TYPES:
rule_cls_name = 'Qos%sRule' % utils.camelize(rule_type)
rule_cls = getattr(sys.modules[__name__], rule_cls_name)
rules = rule_cls.get_objects(context, qos_policy_id=qos_policy_id)
all_rules.extend(rules)
return all_rules
@six.add_metaclass(abc.ABCMeta)
class QosRule(base.NeutronDbObject):
fields = {
'id': obj_fields.UUIDField(),
'qos_policy_id': obj_fields.UUIDField()
}
fields_no_update = ['id', 'qos_policy_id']
# should be redefined in subclasses
rule_type = None
def to_dict(self):
dict_ = super(QosRule, self).to_dict()
dict_['type'] = self.rule_type
return dict_
@obj_base.VersionedObjectRegistry.register
class QosBandwidthLimitRule(QosRule):
db_model = qos_db_model.QosBandwidthLimitRule
fields = {
'max_kbps': obj_fields.IntegerField(nullable=True),
'max_burst_kbps': obj_fields.IntegerField(nullable=True)
}
rule_type = qos_consts.RULE_TYPE_BANDWIDTH_LIMIT
|
apache-2.0
|
eXistenZNL/SickRage
|
lib/guessit/date.py
|
33
|
4436
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2013 Nicolas Wack <[email protected]>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function, unicode_literals
import datetime
import re
from dateutil import parser
_dsep = r'[-/ \.]'
_dsep_bis = r'[-/ \.x]'
date_regexps = [
re.compile('%s(\d{8})%s' % (_dsep, _dsep), re.IGNORECASE),
re.compile('%s(\d{6})%s' % (_dsep, _dsep), re.IGNORECASE),
re.compile('[^\d](\d{2})%s(\d{1,2})%s(\d{1,2})[^\d]' % (_dsep, _dsep), re.IGNORECASE),
re.compile('[^\d](\d{1,2})%s(\d{1,2})%s(\d{2})[^\d]' % (_dsep, _dsep), re.IGNORECASE),
re.compile('[^\d](\d{4})%s(\d{1,2})%s(\d{1,2})[^\d]' % (_dsep_bis, _dsep), re.IGNORECASE),
re.compile('[^\d](\d{1,2})%s(\d{1,2})%s(\d{4})[^\d]' % (_dsep, _dsep_bis), re.IGNORECASE),
re.compile('[^\d](\d{1,2}(?:st|nd|rd|th)?%s(?:[a-z]{3,10})%s\d{4})[^\d]' % (_dsep, _dsep), re.IGNORECASE)]
def valid_year(year, today=None):
"""Check if number is a valid year"""
if not today:
today = datetime.date.today()
return 1920 < year < today.year + 5
def search_year(string):
"""Looks for year patterns, and if found return the year and group span.
Assumes there are sentinels at the beginning and end of the string that
always allow matching a non-digit delimiting the date.
Note this only looks for valid production years, that is between 1920
and now + 5 years, so for instance 2000 would be returned as a valid
year but 1492 would not.
>>> search_year(' in the year 2000... ')
(2000, (13, 17))
>>> search_year(' they arrived in 1492. ')
(None, None)
"""
match = re.search(r'[^0-9]([0-9]{4})[^0-9]', string)
if match:
year = int(match.group(1))
if valid_year(year):
return year, match.span(1)
return None, None
def search_date(string, year_first=None, day_first=True):
"""Looks for date patterns, and if found return the date and group span.
Assumes there are sentinels at the beginning and end of the string that
always allow matching a non-digit delimiting the date.
Year can be defined on two digit only. It will return the nearest possible
date from today.
>>> search_date(' This happened on 2002-04-22. ')
(datetime.date(2002, 4, 22), (18, 28))
>>> search_date(' And this on 17-06-1998. ')
(datetime.date(1998, 6, 17), (13, 23))
>>> search_date(' no date in here ')
(None, None)
"""
start, end = None, None
match = None
for date_re in date_regexps:
s = date_re.search(string)
if s and (match is None or s.end() - s.start() > len(match)):
start, end = s.start(), s.end()
if date_re.groups:
match = '-'.join(s.groups())
else:
match = s.group()
if match is None:
return None, None
today = datetime.date.today()
# If day_first/year_first is undefined, parse is made using both possible values.
yearfirst_opts = [False, True]
if year_first is not None:
yearfirst_opts = [year_first]
dayfirst_opts = [True, False]
if day_first is not None:
dayfirst_opts = [day_first]
kwargs_list = ({'dayfirst': d, 'yearfirst': y} for d in dayfirst_opts for y in yearfirst_opts)
for kwargs in kwargs_list:
try:
date = parser.parse(match, **kwargs)
except (ValueError, TypeError) as e: #see https://bugs.launchpad.net/dateutil/+bug/1247643
date = None
pass
# check date plausibility
if date and valid_year(date.year, today=today):
return date.date(), (start+1, end-1) #compensate for sentinels
return None, None
|
gpl-3.0
|
almeidapaulopt/frappe
|
frappe/utils/verified_command.py
|
9
|
2216
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import hmac
from six.moves.urllib.parse import urlencode
from frappe import _
import frappe
import frappe.utils
from six import string_types
def get_signed_params(params):
"""Sign a url by appending `&_signature=xxxxx` to given params (string or dict).
:param params: String or dict of parameters."""
if not isinstance(params, string_types):
params = urlencode(params)
signature = hmac.new(params.encode())
signature.update(get_secret().encode())
return params + "&_signature=" + signature.hexdigest()
def get_secret():
return frappe.local.conf.get("secret") or str(frappe.db.get_value("User", "Administrator", "creation"))
def verify_request():
"""Verify if the incoming signed request if it is correct."""
query_string = frappe.local.flags.signed_query_string or \
		getattr(frappe.request, 'query_string', None)
valid = False
if '&_signature=' in query_string:
params, signature = query_string.split("&_signature=")
given_signature = hmac.new(params.encode("utf-8"))
given_signature.update(get_secret().encode())
valid = signature == given_signature.hexdigest()
if not valid:
frappe.respond_as_web_page(_("Invalid Link"),
_("This link is invalid or expired. Please make sure you have pasted correctly."))
return valid
def get_url(cmd, params, nonce=None, secret=None):
if not nonce:
nonce = params
signature = get_signature(params, nonce, secret)
params['signature'] = signature
return frappe.utils.get_url("".join(['api/method/', cmd, '?', urlencode(params)]))
def get_signature(params, nonce, secret=None):
params = "".join((frappe.utils.cstr(p) for p in params.values()))
if not secret:
secret = frappe.local.conf.get("secret") or "secret"
signature = hmac.new(str(nonce))
signature.update(secret)
signature.update(params)
return signature.hexdigest()
def verify_using_doc(doc, signature, cmd):
params = doc.get_signature_params()
return signature == get_signature(params, doc.get_nonce())
def get_url_using_doc(doc, cmd):
params = doc.get_signature_params()
return get_url(cmd, params, doc.get_nonce())
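
# A hypothetical round trip through the helpers above (the method path and
# parameters are placeholders): the sender appends an HMAC of the query
# string, and verify_request() recomputes the digest over everything before
# "&_signature=" and compares.
#
#   signed = get_signed_params({"email": "[email protected]"})
#   url = frappe.utils.get_url("api/method/myapp.api.confirm?" + signed)
#   # ...later, inside the whitelisted method handling that request:
#   if not verify_request():
#       return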
|
mit
|
mayavanand/RMMAFinalProject
|
azimuth/model_comparison.py
|
1
|
31399
|
import predict as pd
import copy
import os
import numpy as np
import util
import shutil
import pickle
import pylab as plt
import pandas
import local_multiprocessing
import load_data
import features.featurization as feat
def check_feature_set_dims(feature_sets):
F2 = None
for set in feature_sets.keys():
F = feature_sets[set].shape[0]
        if F2 is None: F2 = F
assert F == F2, "not same # individuals for feature %s" % set
assert feature_sets !={}, "features are empty, check learn_options"
def set_target(learn_options, classification):
assert 'target_name' not in learn_options.keys() or learn_options['target_name'] is not None, "changed it to be automatically set here"
if not classification:
learn_options["target_name"] = learn_options['rank-transformed target name']
learn_options["training_metric"] = 'spearmanr'
learn_options['ground_truth_label'] = learn_options['target_name']
else:
learn_options["target_name"] = learn_options['binary target name']
learn_options["training_metric"] = 'AUC'
learn_options['ground_truth_label'] = learn_options['binary target name']
if learn_options["V"]==3:
        assert learn_options['target_name']=='score_drug_gene_rank' or learn_options['target_name']=='score_drug_gene_threshold', "cannot use raw scores when merging data"
        assert learn_options["ground_truth_label"]=='score_drug_gene_rank' or learn_options["ground_truth_label"]=='score_drug_gene_threshold', "cannot use raw scores when merging data"
return learn_options
def GP_setup(learn_options, likelihood='gaussian', degree=3, set_target_fn=set_target):
learn_options["method"] = "GPy"
learn_options['kernel degree'] = degree
if likelihood == 'warped':
learn_options['warpedGP'] = True
else:
learn_options['warpedGP'] = False
learn_options = set_target_fn(learn_options, classification=False)
return learn_options
def SVC_setup(learn_options, likelihood='gaussian', degree=3, set_target_fn=set_target):
learn_options["method"] = "SVC"
learn_options = set_target_fn(learn_options, classification=True)
return learn_options
def L1_setup(learn_options, set_target_fn=set_target):
learn_options = set_target_fn(learn_options, classification=False)
learn_options["method"] = "linreg"
learn_options["penalty"] = "L1"
learn_options["feature_select"] = False
if "alpha" not in learn_options.keys():
learn_options["alpha"] = np.array([1e-6*pow(1.3,x) for x in range(0,100)])
learn_options["loss"] = "squared"
return learn_options
def L2_setup(learn_options, set_target_fn=set_target):
learn_options = set_target_fn(learn_options, classification=False)
learn_options["method"] = "linreg"
learn_options["penalty"] = "L2"
learn_options["feature_select"] = False
if "alpha" not in learn_options.keys():
learn_options["alpha"] = np.array([1e-6*pow(1.3,x) for x in range(0,100)])
learn_options["loss"] = "squared"
return learn_options
def mean_setup(learn_options, set_target_fn=set_target):
learn_options = set_target_fn(learn_options, classification=False)
learn_options['method'] = 'mean'
return learn_options
def random_setup(learn_options, set_target_fn=set_target):
learn_options = set_target_fn(learn_options, classification=False)
learn_options['method'] = 'random'
return learn_options
def elasticnet_setup(learn_options, set_target_fn=set_target):
learn_options = set_target_fn(learn_options, classification=False)
learn_options["method"] = "linreg"
learn_options["penalty"] = "EN"
learn_options["feature_select"] = False
learn_options["loss"] = "squared"
if "alpha" not in learn_options.keys():
learn_options["alpha"] = np.array([1e-5*pow(2,x) for x in range(0,30)])
return learn_options
def DNN_setup(learn_options, set_target_fn=set_target):
learn_options = set_target_fn(learn_options, classification=False)
learn_options['method'] = 'DNN'
learn_options['DNN target variable'] = 'score'#'score_drug_gene_quantized'
# learn_options['DNN architecture'] = (119, 10, 10, 10, 2)
return learn_options
def RF_setup(learn_options, set_target_fn=set_target):
learn_options = set_target_fn(learn_options, classification=False)
learn_options['method'] = 'RandomForestRegressor'
return learn_options
def doench_setup(learn_options, set_target_fn=set_target):
learn_options = set_target_fn(learn_options, classification=True)
learn_options['method'] = 'doench'
return learn_options
def sgrna_from_doench_setup(learn_options, set_target_fn=set_target):
learn_options = set_target_fn(learn_options, classification=False)
learn_options['method'] = 'sgrna_from_doench'
return learn_options
def linreg_setup(learn_options, set_target_fn=set_target):
learn_options["method"] = "linreg"
learn_options["penalty"] = "L1"
learn_options["feature_select"] = False
if "alpha" not in learn_options.keys():
learn_options["alpha"] = np.array([0.0])
learn_options["loss"] = "squared"
learn_options = set_target_fn(learn_options, classification=False)
return learn_options
def logregL1_setup(learn_options, set_target_fn=set_target):
learn_options = set_target_fn(learn_options, classification=True)
learn_options["method"] = "logregL1"
learn_options["penalty"] = "L1"
learn_options["feature_select"] = False
if "alpha" not in learn_options.keys():
learn_options["alpha"] = np.array([1e-6*pow(1.3,x) for x in range(0,100)])
return learn_options
def LASSOs_ensemble_setup(learn_options, set_target_fn=set_target):
learn_options = set_target_fn(learn_options, classification=False)
learn_options["method"] = "lasso_ensemble"
learn_options["penalty"] = "L1"
learn_options["feature_select"] = False
if "alpha" not in learn_options.keys():
learn_options["alpha"] = np.array([1e-6*pow(1.3,x) for x in range(0,100)])
learn_options["loss"] = "squared"
return learn_options
def xu_et_al_setup(learn_options, set_target_fn=set_target):
learn_options = set_target_fn(learn_options, classification=True)
learn_options["method"] = "xu_et_al"
return learn_options
def adaboost_setup(learn_options, num_estimators=100, max_depth=3, learning_rate=0.1, set_target_fn=set_target, model="AdaBoost"):
"""
"""
learn_options = set_target_fn(learn_options, classification=False)
if model=="AdaBoost":
learn_options['method'] = "AdaBoostRegressor"
elif model=="AdaBoostClassifier":
learn_options['method'] = "AdaBoostClassifier"
else:
raise Exception("model must be either AdaBoost or AdaBoost Classifier")
learn_options['adaboost_version'] = 'python' # "R" or "python"
if 'adaboost_loss' not in learn_options.keys() and model=="AdaBoostRegressor":
learn_options['adaboost_loss'] = 'ls' # alternatives: "lad", "huber", "quantile", see scikit docs for details
if 'adaboost_alpha' not in learn_options.keys():
learn_options['adaboost_alpha'] = 0.5 # this parameter is only used by the huber and quantile loss functions.
if not learn_options['adaboost_CV']:
learn_options['adaboost_learning_rate'] = learning_rate
learn_options['adaboost_n_estimators'] = num_estimators
learn_options['adaboost_max_depth'] = max_depth
else:
learn_options['adaboost_n_estimators'] = num_estimators
return learn_options
def shared_setup(learn_options, order, test):
if 'num_proc' not in learn_options.keys():
learn_options['num_proc'] = None
if 'num_thread_per_proc' not in learn_options.keys():
learn_options['num_thread_per_proc'] = None
num_proc = local_multiprocessing.configure(TEST=test, num_proc=learn_options["num_proc"],
num_thread_per_proc=learn_options["num_thread_per_proc"])
learn_options["num_proc"] = num_proc
learn_options["order"] = order # gets used many places in code, not just here
if "cv" not in learn_options.keys():
# if no CV preference is specified, use leave-one-gene-out
learn_options["cv"] = "gene"
if "normalize_features" not in learn_options.keys():
# if no CV preference is specified, use leave-one-gene-out
learn_options["normalize_features"] = True
if "weighted" not in learn_options.keys():
learn_options['weighted'] = None
if "all pairs" not in learn_options.keys():
learn_options["all pairs"] = False
if "include_known_pairs" not in learn_options.keys():
learn_options["include_known_pairs"] = False
if "include_gene_guide_feature" not in learn_options.keys():
learn_options["include_gene_guide_feature"] = 0 #used as window size, so 0 is none
#these should default to true to match experiments before they were options:
if "gc_features" not in learn_options.keys():
learn_options["gc_features"] = True
if "nuc_features" not in learn_options.keys():
learn_options["nuc_features"] = True
if 'train_genes' not in learn_options.keys():
learn_options["train_genes"] = None
if 'test_genes' not in learn_options.keys():
learn_options["test_genes"] = None
if "num_proc" not in learn_options:
learn_options["num_proc"] = None
if "num_thread_per_proc" not in learn_options:
learn_options["num_thread_per_proc"] = None
if 'seed' not in learn_options:
learn_options['seed'] = 1
if "flipV1target" not in learn_options:
learn_options["flipV1target"] = False
if 'num_genes_remove_train' not in learn_options:
learn_options['num_genes_remove_train'] = None
if "include_microhomology" not in learn_options:
learn_options["include_microhomology"] = False
if "algorithm_hyperparam_search" not in learn_options:
learn_options["algorithm_hyperparam_search"] = "grid" # other options is bo for bayesian optimization
return num_proc
def setup(test=False, order=1, learn_options=None, data_file=None, pam_audit=True, length_audit=True):
num_proc = shared_setup(learn_options, order, test)
assert "testing_non_binary_target_name" in learn_options.keys(), "need this in order to get metrics, though used to be not needed, so you may newly see this error"
if learn_options["testing_non_binary_target_name"] not in ['ranks', 'raw', 'thrs']:
        raise Exception('learn_options["testing_non_binary_target_name"] must be in ["ranks", "raw", "thrs"]')
Xdf, Y, gene_position, target_genes = load_data.from_file(data_file, learn_options)
learn_options['all_genes'] = target_genes
if test:
learn_options["order"] = 1
if 'convert_30mer_to_31mer' in learn_options and learn_options['convert_30mer_to_31mer'] is True:
print "WARNING!!! converting 30 mer to 31 mer (and then cutting off first nucleotide to go back to 30mer with a right shift)"
for i in range(Xdf.shape[0]):
Xdf['30mer'].iloc[i] = util.convert_to_thirty_one(Xdf.iloc[i]["30mer"], Xdf.index.values[i][1], Xdf.iloc[i]["Strand"])
# to_keep = Xdf['30mer'].isnull() == False
# Xdf = Xdf[to_keep]
# gene_position = gene_position[to_keep]
# Y = Y[to_keep]
Xdf["30mer"] = Xdf["30mer"].apply(lambda x: x[1:]) # chop the first nucleotide
if learn_options.has_key('left_right_guide_ind') and learn_options['left_right_guide_ind'] is not None:
seq_start, seq_end, expected_length = learn_options['left_right_guide_ind']
Xdf['30mer'] = Xdf['30mer'].apply(lambda seq: seq[seq_start:seq_end])
feature_sets = feat.featurize_data(Xdf, learn_options, Y, gene_position, pam_audit=pam_audit, length_audit=length_audit)
np.random.seed(learn_options['seed'])
return Y, feature_sets, target_genes, learn_options, num_proc
def run_models(models, orders, GP_likelihoods=['gaussian', 'warped'], WD_kernel_degrees=[3],
adaboost_learning_rates=[0.1], adaboost_num_estimators=[100], adaboost_max_depths=[3],
learn_options_set=None, test=False, CV=True, setup_function=setup, set_target_fn=set_target, pam_audit=True, length_audit=True):
'''
    CV is set to False when you want to train a final model and not cross-validate,
    but execution still goes through what looks like CV code.
'''
results = {}
assert learn_options_set is not None, "need to specify learn_options_set"
all_learn_options = {}
#shorten so easier to display on graphs
feat_models_short = {'L1':"L1", 'L2':"L2", 'elasticnet':"EN", 'linreg':"LR",
'RandomForest': "RF",
'AdaBoost':"AB", 'AdaBoostClassifier':"ABClass", 'doench': 'doench',
"logregL1": "logregL1", "sgrna_from_doench":"sgrna_from_doench", 'SVC': 'SVC', 'xu_et_al': 'xu_et_al'}
if not CV:
print "Received option CV=False, so I'm training using all of the data"
assert len(learn_options_set.keys()) == 1, "when CV is False, only 1 set of learn options is allowed"
assert len(models) == 1, "when CV is False, only 1 model is allowed"
for learn_options_str in learn_options_set.keys():
# these options get augmented in setup
partial_learn_opt = learn_options_set[learn_options_str]
# if the model requires encoded features
for model in models:
# models requiring explicit featurization
if model in feat_models_short.keys():
for order in orders:
print "running %s, order %d for %s" % (model, order, learn_options_str)
                    Y, feature_sets, target_genes, learn_options, num_proc = setup_function(test=test, order=order, learn_options=partial_learn_opt, pam_audit=pam_audit, length_audit=length_audit) # TODO precompute features for all orders, as this is repeated for each model
if model == 'L1':
learn_options_model = L1_setup(copy.deepcopy(learn_options), set_target_fn=set_target_fn)
elif model == 'L2':
learn_options_model = L2_setup(copy.deepcopy(learn_options), set_target_fn=set_target_fn)
elif model == 'elasticnet':
learn_options_model = elasticnet_setup(copy.deepcopy(learn_options), set_target_fn=set_target_fn)
elif model == 'linreg':
learn_options_model = linreg_setup(copy.deepcopy(learn_options), set_target_fn=set_target_fn)
elif model == "logregL1":
learn_options_model = logregL1_setup(copy.deepcopy(learn_options), set_target_fn=set_target_fn)
elif model == 'RandomForest':
learn_options_model = RF_setup(copy.deepcopy(learn_options), set_target_fn=set_target_fn)
elif model == 'SVC':
learn_options_model = SVC_setup(copy.deepcopy(learn_options), set_target_fn=set_target_fn)
elif model == 'doench':
learn_options_model = doench_setup(copy.deepcopy(learn_options), set_target_fn=set_target_fn)
elif model == 'sgrna_from_doench':
learn_options_model = sgrna_from_doench_setup(copy.deepcopy(learn_options), set_target_fn=set_target_fn)
elif model == 'xu_et_al':
learn_options_model = xu_et_al_setup(copy.deepcopy(learn_options), set_target_fn=set_target_fn)
                    elif model in ('AdaBoost', 'AdaBoostClassifier'):
for learning_rate in adaboost_learning_rates:
for num_estimators in adaboost_num_estimators:
for max_depth in adaboost_max_depths:
learn_options_model = adaboost_setup(copy.deepcopy(learn_options), learning_rate=learning_rate, num_estimators=num_estimators, max_depth=max_depth, set_target_fn=set_target_fn, model=model)
model_string = feat_models_short[model] + '_or%d_md%d_lr%.2f_n%d_%s' % (learn_options_set[learn_options_str]["order"], max_depth, learning_rate, num_estimators, learn_options_str)
if model != 'AdaBoost':
model_string = feat_models_short[model] + '_ord%d_%s' % (learn_options_set[learn_options_str]["order"], learn_options_str)
results[model_string] = pd.cross_validate(Y, feature_sets, learn_options=learn_options_model, TEST=test, CV=CV)
all_learn_options[model_string] = learn_options_model
# if the model doesn't require explicit featurization
else:
                assert setup_function == setup, "not yet modified to handle this"
print "running %s for %s" % (model, learn_options_str)
Y, feature_sets, target_genes, learn_options, num_proc = setup(test=test, order=1, learn_options=partial_learn_opt, pam_audit=pam_audit, length_audit=length_audit)
if model == 'mean':
learn_options_model = mean_setup(copy.deepcopy(learn_options))
elif model == 'random':
learn_options_model = random_setup(copy.deepcopy(learn_options))
elif model == 'DNN':
learn_options_model = DNN_setup(copy.deepcopy(learn_options))
elif model == 'GP':
for likelihood in GP_likelihoods:
for degree in WD_kernel_degrees:
learn_options_model = GP_setup(copy.deepcopy(learn_options), likelihood=likelihood, degree=degree)
model_string = '%s_%s_degree%d_%s' % (model, likelihood, degree, learn_options_str)
results[model_string] = pd.cross_validate(Y, feature_sets, learn_options=learn_options_model,TEST=test, CV=CV)
else:
raise NotImplementedError("model %s not supported" % model)
# "GP" already calls pd.cross_validate() and has its own model_string, so skip this.
if model != "GP":
model_string = model + '_%s' % learn_options_str
results[model_string] = pd.cross_validate(Y, feature_sets, learn_options=learn_options_model, TEST=test, CV=CV)
all_learn_options[model_string] = learn_options_model
return results, all_learn_options
def pickle_runner_results(exp_name, results, all_learn_options, relpath="/../" + "results"):
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath) + relpath
if not os.path.exists(dname):
os.makedirs(dname)
print "Created directory: %s" % str(dname)
if exp_name is None:
exp_name = results.keys()[0]
myfile = dname+'/'+ exp_name + '.pickle'
with open(myfile, 'wb') as f:
print "writing results to %s" % myfile
pickle.dump((results, all_learn_options), f, -1)
def runner(models, learn_options, GP_likelihoods=None, orders=None, WD_kernel_degrees=None, where='local', cluster_user='fusi', cluster='RR1-N13-09-H44', test=False, exp_name = None, **kwargs):
if where == 'local':
results, all_learn_options = run_models(models, orders=orders, GP_likelihoods=GP_likelihoods, learn_options_set=learn_options, WD_kernel_degrees=WD_kernel_degrees, test=test, **kwargs)
all_metrics, gene_names = util.get_all_metrics(results, learn_options)
util.plot_all_metrics(all_metrics, gene_names, all_learn_options, save=True)
# for non-local (i.e. cluster), the comparable code is in cli_run_model.py
pickle_runner_results(exp_name, results, all_learn_options)
return results, all_learn_options, all_metrics, gene_names
elif where == 'cluster':
import cluster_job
# create random cluster directory, dump learn options, and create cluster file
tempdir, user, clust_filename = cluster_job.create(cluster_user, models, orders, WD_kernel_degrees, GP_likelihoods, exp_name=exp_name, learn_options=learn_options, **kwargs)
# raw_input("Submit job to HPC and press any key when it's finished: ")
# util.plot_cluster_results(directory=tempdir)
#stdout = tempdir + r"/stdout"
#stderr = tempdir + r"/stderr"
#if not os.path.exists(stdout): os.makedirs(stdout)
#if not os.path.exists(stderr): os.makedirs(stderr)
return tempdir, clust_filename, user#, stdout, stderr
def save_final_model_V3(filename=None, include_position=True, learn_options=None, short_name='final', pam_audit=True, length_audit=True):
'''
run_models(produce_final_model=True) is what saves the model
'''
test = False
assert filename is not None, "need to provide filename to save final model"
if learn_options is None:
if include_position:
learn_options = {"V": 3,
'train_genes': load_data.get_V3_genes(),
'test_genes': load_data.get_V3_genes(),
"testing_non_binary_target_name": 'ranks',
'include_pi_nuc_feat': True,
"gc_features": True,
"pam_features": True,
"repeat_features": None,
"nuc_features": True,
"include_gene_position": True,
"include_NGGX_interaction": True,
"include_NGGXX_interaction": None,
"include_Tm": True,
"include_strand": False,
"include_gene_feature": False,
"include_gene_guide_feature": 0,
"extra pairs": False,
"weighted": None,
"training_metric": 'spearmanr',
"NDGC_k": 10,
"cv": "gene",
"include_gene_effect": False,
"include_drug": False,
"include_sgRNAscore": False,
'adaboost_loss' : 'ls', # main "ls", alternatives: "lad", "huber", "quantile", see scikit docs for details
'adaboost_alpha': 0.5, # this parameter is only used by the huber and quantile loss functions.
'normalize_features': False,
'adaboost_CV' : False
}
else:
learn_options = {"V": 3,
'train_genes': load_data.get_V3_genes(),
'test_genes': load_data.get_V3_genes(),
"testing_non_binary_target_name": 'ranks',
'include_pi_nuc_feat': True,
"gc_features": True,
"pam_features": True,
"repeat_features": None,
"nuc_features": True,
"include_gene_position": False,
"include_NGGX_interaction": True,
"include_NGGXX_interaction": None,
"include_Tm": True,
"include_strand": False,
"include_gene_feature": False,
"include_gene_guide_feature": 0,
"extra pairs": False,
"weighted": None,
"training_metric": 'spearmanr',
"NDGC_k": 10,
"cv": "gene",
"include_gene_effect": False,
"include_drug": False,
"include_sgRNAscore": False,
'adaboost_loss' : 'ls', # main "ls", alternatives: "lad", "huber", "quantile", see scikit docs for details
'adaboost_alpha': 0.5, # this parameter is only used by the huber and quantile loss functions.
'normalize_features': False,
'adaboost_CV' : False
}
learn_options_set = {short_name: learn_options}
results, all_learn_options = run_models(["AdaBoost"], orders=[2], adaboost_learning_rates=[0.1],
adaboost_max_depths=[3], adaboost_num_estimators=[100],
learn_options_set=learn_options_set,
                                            test=test, CV=False, pam_audit=pam_audit, length_audit=length_audit)
model = results.values()[0][3][0]
with open(filename, 'wb') as f:
pickle.dump((model, learn_options), f, -1)
return model
def predict(seq, aa_cut=-1, percent_peptide=-1, model=None, model_file=None, pam_audit=True, length_audit=False, learn_options_override=None):
"""
if pam_audit==False, then it will not check for GG in the expected position
this is useful if predicting on PAM mismatches, such as with off-target
"""
print "predict function running"
# assert not (model is None and model_file is None), "you have to specify either a model or a model_file"
assert isinstance(seq, (np.ndarray)), "Please ensure seq is a numpy array"
assert len(seq[0]) > 0, "Make sure that seq is not empty"
assert isinstance(seq[0], str), "Please ensure input sequences are in string format, i.e. 'AGAG' rather than ['A' 'G' 'A' 'G'] or alternate representations"
if aa_cut is not None:
assert len(aa_cut) > 0, "Make sure that aa_cut is not empty"
assert isinstance(aa_cut, (np.ndarray)), "Please ensure aa_cut is a numpy array"
assert np.all(np.isreal(aa_cut)), "amino-acid cut position needs to be a real number"
if percent_peptide is not None:
assert len(percent_peptide) > 0, "Make sure that percent_peptide is not empty"
assert isinstance(percent_peptide, (np.ndarray)), "Please ensure percent_peptide is a numpy array"
assert np.all(np.isreal(percent_peptide)), "percent_peptide needs to be a real number"
if model_file is None:
azimuth_saved_model_dir = os.path.join(os.path.dirname(__file__), 'saved_models')
if np.any(percent_peptide == -1) or (percent_peptide is None and aa_cut is None):
print("No model file specified, using V3_model_nopos")
model_name = 'V3_model_nopos.pickle'
else:
print("No model file specified, using V3_model_full")
model_name = 'V3_model_full.pickle'
model_file = os.path.join(azimuth_saved_model_dir, model_name)
if model is None:
with open(model_file, 'rb') as f:
model, learn_options = pickle.load(f)
print model_file
print learn_options
else:
model, learn_options = model
learn_options["V"] = 2
learn_options = override_learn_options(learn_options_override, learn_options)
# Y, feature_sets, target_genes, learn_options, num_proc = setup(test=False, order=2, learn_options=learn_options, data_file=test_filename)
# inputs, dim, dimsum, feature_names = pd.concatenate_feature_sets(feature_sets)
Xdf = pandas.DataFrame(columns=[u'30mer', u'Strand'], data=zip(seq, ['NA' for x in range(len(seq))]))
if np.all(percent_peptide != -1) and (percent_peptide is not None and aa_cut is not None):
gene_position = pandas.DataFrame(columns=[u'Percent Peptide', u'Amino Acid Cut position'], data=zip(percent_peptide, aa_cut))
else:
gene_position = pandas.DataFrame(columns=[u'Percent Peptide', u'Amino Acid Cut position'], data=zip(np.ones(seq.shape[0])*-1, np.ones(seq.shape[0])*-1))
feature_sets = feat.featurize_data(Xdf, learn_options, pandas.DataFrame(), gene_position, pam_audit=pam_audit, length_audit=length_audit)
inputs, dim, dimsum, feature_names = util.concatenate_feature_sets(feature_sets)
# call to scikit-learn, returns a vector of predicted values
preds = model.predict(inputs)
# also check that predictions are not 0/1 from a classifier.predict() (instead of predict_proba() or decision_function())
unique_preds = np.unique(preds)
ok = False
for pr in preds:
if pr not in [0,1]:
ok = True
assert ok, "model returned only 0s and 1s"
return preds
def override_learn_options(learn_options_override, learn_options):
"""
override all keys seen in learn_options_override to alter learn_options
"""
if learn_options_override is not None:
for k in learn_options_override.keys():
learn_options[k] = learn_options_override[k]
return learn_options
def fill_learn_options(learn_options_fill, learn_options):
"""
only fill in keys that are missing form learn_options from learn_options_fill
"""
if learn_options_fill is not None:
for k in learn_options_fill.keys():
if not learn_options.has_key(k):
learn_options[k] = learn_options_fill[k]
return learn_options
def write_results(predictions, file_to_predict):
newfile = file_to_predict.replace(".csv", ".pred.csv")
data = pandas.read_csv(file_to_predict)
data['predictions'] = predictions
data.to_csv(newfile)
print "wrote results to %s" % newfile
return data, newfile
if __name__ == '__main__':
#save_final_model_V3(filename='azimuth/azure_models/V3_model_full.pickle', include_position=True)
save_final_model_V3(filename='saved_models/model_8_nopos.pickle', include_position=False)
save_final_model_V3(filename='saved_models/model_8.pickle', include_position=True)
# predict('GGGCCGCTGTTGCAGGTGGCGGGTAGGATC', 'sense', 1200, 30.3, model_file='../saved_models/final_model_nicolo.pickle')
learn_options = {"V": 3,
"train_genes": load_data.get_V3_genes(),
"test_genes": load_data.get_V3_genes(),
"target_name": 'score_drug_gene_rank',
"testing_non_binary_target_name": 'ranks',
'include_pi_nuc_feat': True,
"gc_features": True,
"pam_features": True,
"repeat_features": True,
"nuc_features": True,
"include_gene_position": True,
"include_NGGX_interaction": None,
"include_NGGXX_interaction": True,
"include_Tm": True,
"include_strand": False,
"include_gene_feature": False,
"include_gene_guide_feature": 0,
"extra pairs": False,
"weighted": None,
"training_metric": 'spearmanr',
"NDGC_k": 10,
"cv": "gene",
"adaboost_loss" : 'ls',
"include_gene_effect": False,
"include_drug": False,
"include_sgRNAscore": False,
'adaboost_loss' : 'ls', # main "ls", alternatives: "lad", "huber", "quantile", see scikit docs for details
'adaboost_alpha': 0.5, # this parameter is only used by the huber and quantile loss functions.
'adaboost_CV' : False
}
learn_options_set = {"post bug fix":learn_options}
#runner(['AdaBoost'], learn_options_set, orders=[2], where='local', adaboost_learning_rates=[0.1], adaboost_max_depths=[3], adaboost_num_estimators=[100], exp_name='post-index-fix')
# #util.feature_importances(results)
|
bsd-3-clause
|
LiveZenLK/CeygateERP
|
addons/account/wizard/account_financial_report.py
|
43
|
2809
|
# -*- coding: utf-8 -*-
from openerp import api, fields, models
class AccountingReport(models.TransientModel):
_name = "accounting.report"
_inherit = "account.common.report"
_description = "Accounting Report"
@api.model
def _get_account_report(self):
reports = []
if self._context.get('active_id'):
menu = self.env['ir.ui.menu'].browse(self._context.get('active_id')).name
reports = self.env['account.financial.report'].search([('name', 'ilike', menu)])
return reports and reports[0] or False
enable_filter = fields.Boolean(string='Enable Comparison')
account_report_id = fields.Many2one('account.financial.report', string='Account Reports', required=True, default=_get_account_report)
label_filter = fields.Char(string='Column Label', help="This label will be displayed on report to show the balance computed for the given comparison filter.")
filter_cmp = fields.Selection([('filter_no', 'No Filters'), ('filter_date', 'Date')], string='Filter by', required=True, default='filter_no')
date_from_cmp = fields.Date(string='Start Date')
date_to_cmp = fields.Date(string='End Date')
debit_credit = fields.Boolean(string='Display Debit/Credit Columns', help="This option allows you to get more details about the way your balances are computed. Because it is space consuming, we do not allow to use it while doing a comparison.")
def _build_comparison_context(self, data):
result = {}
result['journal_ids'] = 'journal_ids' in data['form'] and data['form']['journal_ids'] or False
result['state'] = 'target_move' in data['form'] and data['form']['target_move'] or ''
if data['form']['filter_cmp'] == 'filter_date':
result['date_from'] = data['form']['date_from_cmp']
result['date_to'] = data['form']['date_to_cmp']
result['strict_range'] = True
return result
@api.multi
def check_report(self):
res = super(AccountingReport, self).check_report()
data = {}
data['form'] = self.read(['account_report_id', 'date_from_cmp', 'date_to_cmp', 'journal_ids', 'filter_cmp', 'target_move'])[0]
for field in ['account_report_id']:
if isinstance(data['form'][field], tuple):
data['form'][field] = data['form'][field][0]
comparison_context = self._build_comparison_context(data)
res['data']['form']['comparison_context'] = comparison_context
return res
def _print_report(self, data):
data['form'].update(self.read(['date_from_cmp', 'debit_credit', 'date_to_cmp', 'filter_cmp', 'account_report_id', 'enable_filter', 'label_filter', 'target_move'])[0])
return self.env['report'].get_action(self, 'account.report_financial', data=data)
|
gpl-3.0
|
phoebe-project/phoebe2-docs
|
2.1/tutorials/saving_and_loading.py
|
1
|
2914
|
#!/usr/bin/env python
# coding: utf-8
# Saving and Loading
# ============================
#
# Setup
# -----------------------------
# Let's first make sure we have the latest version of PHOEBE 2.1 installed. (You can comment out this line if you don't use pip for your installation or don't want to update to the latest release).
# In[ ]:
get_ipython().system('pip install -I "phoebe>=2.1,<2.2"')
# As always, let's do imports and initialize a logger and a new bundle. See [Building a System](building_a_system.ipynb) for more details.
# In[1]:
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger(clevel='INFO')
b = phoebe.default_binary()
# Saving a Bundle
# -----------------------
#
#
# In[2]:
b['incl@orbit'] = 56.789
# To save the Bundle to a file, we can call the [save](../api/phoebe.parameters.ParameterSet.save.md) method of the Bundle and pass a filename.
# In[3]:
print b.save('test.phoebe')
# We can now inspect the contents of the created file.
#
# This file is in the JSON-format and is simply a list of dictionaries - where each dictionary represents the attributes of a single Parameter.
#
# You could edit this file in a text-editor - but do be careful if changing any of the tags. For example: if you want to change the component tag of one of your stars, make sure to change ALL instances of the component tag to match (as well as the hierarchy Parameter).
# In[4]:
get_ipython().system('head -n 30 test.phoebe')
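# A minimal, purely illustrative sketch (assuming each Parameter dictionary keeps
# its component tag under a 'component' key and that a component named 'primary'
# exists, as in the default binary): the consistent rename described above could
# also be scripted with the standard json module instead of a text editor. The
# hierarchy Parameter would still need the same treatment.
#
#     import json
#     with open('test.phoebe') as f:
#         params = json.load(f)
#     for p in params:
#         if p.get('component') == 'primary':
#             p['component'] = 'star_a'
#     with open('test_renamed.phoebe', 'w') as f:
#         json.dump(params, f)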
# Loading a Bundle
# ----------------------
# To open an existing Bundle from the file we just created, call [Bundle.open](../api/phoebe.frontend.bundle.Bundle.open.md) and pass the filename.
# In[5]:
b2 = phoebe.Bundle.open('test.phoebe')
# Just to prove this worked, we can check to make sure we retained the changed value of inclination.
# In[6]:
print b2.get_value('incl@orbit')
# Support for Other Codes
# ------------------------------
#
# ### Legacy
#
# Importing from a PHOEBE Legacy file is as simple as passing the filename to [from_legacy](../api/phoebe.frontend.bundle.Bundle.from_legacy.md):
# In[7]:
b = phoebe.Bundle.from_legacy('legacy.phoebe')
# Exporting to a PHOEBE Legacy file is also possible (although note that some parameters don't translate exactly or are not supported in PHOEBE Legacy), via [b.export_legacy](../api/phoebe.frontend.bundle.Bundle.export_legacy.md).
# In[8]:
b.export_legacy('legacy_export.phoebe')
# For the parameters that could not be directly translated, you should see a warning message (if you have warning messages enabled in your logger).
#
# We can now look at the beginning of the saved file and see that it matches the PHOEBE Legacy file-format.
# In[9]:
get_ipython().system('head -n 30 legacy_export.phoebe')
# Next
# ---------
#
# Next up: let's learn all about [constraints](constraints.ipynb)
|
gpl-3.0
|
ivanamihalek/smallmol
|
gmx_lib/solvent_equilibration.py
|
1
|
2572
|
import os, subprocess
import grompp
#########################################
# this is a hack to find all restraint files
# we have available
def itp_files(params):
proc = subprocess.Popen(["bash", "-c", "grep include ../%s/*top | grep -v %s"%(params.rundirs.top_dir,params.physical.forcefield)],
stdout=subprocess.PIPE, stderr=None)
itps = []
for line in proc.stdout.readlines():
itps.append(line.rstrip().replace("#","").replace('"','').replace("include","").replace(" ",""))
return itps
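# A minimal illustrative note (assumed topology content, not from this repo): a
# line such as
#     #include "posre.itp"
# in one of the *top files is stripped of '#', the quotes, 'include' and spaces
# by the loop above, so itp_files() returns entries like ['posre.itp'].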
#########################################
def run(params, stage):
#grompp = generate parametrized topology file (tpr; binary; complete input compiled)
grompp.generate(params, stage, position_restrained_run=True) # use the previous step to construct the tpr file
# change to directory for this stage
currdir = params.rundirs.name[stage]
os.chdir("/".join([params.run_options.workdir, currdir]))
# apparently there is no other way to pass the restraints to mdrun:
# (alternatively I could edit the *top file each time,
# since it contains the line #include "posre.itp")
for itp in itp_files(params):
subprocess.call(["bash", "-c", "ln -sf ../%s/%s %s"%(params.rundirs.top_dir,itp,itp)],
stdout=None, stderr=None)
pdbname = params.run_options.pdb
tprfile_in = pdbname+".pr_input.tpr"
grofile_out = pdbname+".pr_out.gro"
traj_out = pdbname+".pr_out.trr"
edrfile_out = pdbname+".pr_out.edr"
native_log = pdbname+".pr_native.log"
if os.path.exists(grofile_out):
print "\t %s found" % (grofile_out)
return
for infile in [tprfile_in]:
if not os.path.exists(infile):
print "\t in local_energy_minimum.find(%s): %s not found (?)" % (stage, infile)
exit(1)
program = "mdrun" # nt 1; run multiple trajectories instead
# mdrun will produce trajectory and edr (energy) files, whether we ask for it or not,
# so we might just as well name them so we can remove them later
cmdline_args = " -s %s -c %s -nt 4 -o %s -e %s -g %s" % \
(tprfile_in, grofile_out, traj_out, edrfile_out, native_log)
params.command_log.write("in %s:\n" % (currdir))
msg = "solvent equilibration with peptide restrained"
params.gmx_engine.run(program, cmdline_args, msg, params.command_log)
# check for errors
false_alarms = ["masses will be determined based on residue and atom names"]
params.gmx_engine.check_logs_for_error(program, false_alarms)
# check convergence
print "\t ", params.gmx_engine.convergence_line(program)
if os.path.exists(traj_out): os.remove(traj_out)
if os.path.exists(edrfile_out): os.remove(edrfile_out)
|
gpl-2.0
|
robmagee/django-cms
|
cms/test_utils/project/sampleapp/cms_apps.py
|
32
|
1467
|
from cms.app_base import CMSApp
from cms.test_utils.project.sampleapp.cms_menus import SampleAppMenu, StaticMenu3
from cms.apphook_pool import apphook_pool
from django.utils.translation import ugettext_lazy as _
class SampleApp(CMSApp):
name = _("Sample App")
urls = ["cms.test_utils.project.sampleapp.urls"]
menus = [SampleAppMenu]
permissions = True
apphook_pool.register(SampleApp)
class SampleAppWithExcludedPermissions(CMSApp):
name = _("Sample App with excluded permissions")
urls = [
"cms.test_utils.project.sampleapp.urls_excluded"
]
permissions = True
exclude_permissions = ['excluded']
apphook_pool.register(SampleAppWithExcludedPermissions)
class SampleApp2(CMSApp):
name = _("Sample App 2")
urls = ["cms.test_utils.project.sampleapp.urls2"]
menus = [StaticMenu3]
apphook_pool.register(SampleApp2)
class NamespacedApp(CMSApp):
name = _("Namespaced App")
urls = [
"cms.test_utils.project.sampleapp.ns_urls",
"cms.test_utils.project.sampleapp.urls"
]
menus = [SampleAppMenu, StaticMenu3]
app_name = 'namespaced_app_ns'
apphook_pool.register(NamespacedApp)
class ParentApp(CMSApp):
name = _("Parent app")
urls = ["cms.test_utils.project.sampleapp.urls_parentapp"]
apphook_pool.register(ParentApp)
class ChildApp(CMSApp):
name = _("Child app")
urls = ["cms.test_utils.project.sampleapp.urls_childapp"]
apphook_pool.register(ChildApp)
|
bsd-3-clause
|
Distrotech/qtwebkit
|
Tools/Scripts/webkitpy/tool/bot/commitqueuetask.py
|
119
|
3847
|
# Copyright (c) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.tool.bot.patchanalysistask import PatchAnalysisTask, PatchAnalysisTaskDelegate
class CommitQueueTaskDelegate(PatchAnalysisTaskDelegate):
def parent_command(self):
return "commit-queue"
def did_pass_testing_ews(self, patch):
raise NotImplementedError("subclasses must implement")
class CommitQueueTask(PatchAnalysisTask):
def validate(self):
# Bugs might get closed, or patches might be obsoleted or r-'d while the
# commit-queue is processing.
self._patch = self._delegate.refetch_patch(self._patch)
if self._patch.is_obsolete():
return False
if self._patch.bug().is_closed():
return False
if not self._patch.committer():
return False
if self._patch.review() == "-":
return False
return True
def _validate_changelog(self):
return self._run_command([
"validate-changelog",
"--check-oops",
"--non-interactive",
self._patch.id(),
],
"ChangeLog validated",
"ChangeLog did not pass validation")
def _did_pass_tests_recently(self):
if self._delegate.did_pass_testing_ews(self._patch):
return True
return self._test_patch()
def run(self):
if not self.validate():
return False
if not self._clean():
return False
if not self._update():
return False
if not self._apply():
return self.report_failure()
if not self._validate_changelog():
return self.report_failure()
if not self._patch.is_rollout():
if not self._build():
if not self._build_without_patch():
return False
return self.report_failure()
if not self._did_pass_tests_recently():
return False
# Make sure the patch is still valid before landing (e.g., make sure
# no one has set commit-queue- since we started working on the patch.)
if not self.validate():
return False
# FIXME: We should understand why the land failure occurred and retry if possible.
if not self._land():
return self.report_failure()
return True
|
lgpl-3.0
|
joopert/home-assistant
|
homeassistant/components/demo/fan.py
|
2
|
2955
|
"""Demo fan platform that has a fake fan."""
from homeassistant.const import STATE_OFF
from homeassistant.components.fan import (
SPEED_HIGH,
SPEED_LOW,
SPEED_MEDIUM,
SUPPORT_DIRECTION,
SUPPORT_OSCILLATE,
SUPPORT_SET_SPEED,
FanEntity,
)
FULL_SUPPORT = SUPPORT_SET_SPEED | SUPPORT_OSCILLATE | SUPPORT_DIRECTION
LIMITED_SUPPORT = SUPPORT_SET_SPEED
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the demo fan platform."""
async_add_entities(
[
DemoFan(hass, "Living Room Fan", FULL_SUPPORT),
DemoFan(hass, "Ceiling Fan", LIMITED_SUPPORT),
]
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Demo config entry."""
await async_setup_platform(hass, {}, async_add_entities)
class DemoFan(FanEntity):
"""A demonstration fan component."""
def __init__(self, hass, name: str, supported_features: int) -> None:
"""Initialize the entity."""
self.hass = hass
self._supported_features = supported_features
self._speed = STATE_OFF
self.oscillating = None
self._direction = None
self._name = name
if supported_features & SUPPORT_OSCILLATE:
self.oscillating = False
if supported_features & SUPPORT_DIRECTION:
self._direction = "forward"
@property
def name(self) -> str:
"""Get entity name."""
return self._name
@property
def should_poll(self):
"""No polling needed for a demo fan."""
return False
@property
def speed(self) -> str:
"""Return the current speed."""
return self._speed
@property
def speed_list(self) -> list:
"""Get the list of available speeds."""
return [STATE_OFF, SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH]
def turn_on(self, speed: str = None, **kwargs) -> None:
"""Turn on the entity."""
if speed is None:
speed = SPEED_MEDIUM
self.set_speed(speed)
def turn_off(self, **kwargs) -> None:
"""Turn off the entity."""
self.oscillate(False)
self.set_speed(STATE_OFF)
def set_speed(self, speed: str) -> None:
"""Set the speed of the fan."""
self._speed = speed
self.schedule_update_ha_state()
def set_direction(self, direction: str) -> None:
"""Set the direction of the fan."""
self._direction = direction
self.schedule_update_ha_state()
def oscillate(self, oscillating: bool) -> None:
"""Set oscillation."""
self.oscillating = oscillating
self.schedule_update_ha_state()
@property
def current_direction(self) -> str:
"""Fan direction."""
return self._direction
@property
def supported_features(self) -> int:
"""Flag supported features."""
return self._supported_features
|
apache-2.0
|
Distrotech/scons
|
build/scons/build/lib/SCons/Tool/packaging/src_zip.py
|
2
|
1733
|
"""SCons.Tool.Packaging.zip
The zip SRC packager.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/packaging/src_zip.py 2014/01/04 01:12:18 root"
from SCons.Tool.packaging import putintopackageroot
def package(env, target, source, PACKAGEROOT, **kw):
bld = env['BUILDERS']['Zip']
bld.set_suffix('.zip')
target, source = putintopackageroot(target, source, env, PACKAGEROOT, honor_install_location=0)
return bld(env, target, source)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
mit
|
jonhadfield/creds
|
setup.py
|
1
|
2702
|
#!/usr/bin/env python
import os
import re
import sys
# from codecs import open
from setuptools import (setup, find_packages)
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass into py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = []
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errno = pytest.main(self.pytest_args)
sys.exit(errno)
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload -r pypi')
sys.exit()
requires = []
test_requirements = ['pytest>=2.9.2', 'pytest-cov>=2.3.1', 'PyYAML>=3.11', 'boto', 'boto3', 'moto', 'mock']
with open('lib/creds/__init__.py', 'r') as fd:
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
if not version:
raise RuntimeError('Cannot find version information')
readme = open('README.rst').read()
long_description = readme
setup(
name='creds',
version=version,
description='Creds is a library for managing Linux, FreeBSD and OpenBSD user accounts and credentials.',
long_description=long_description,
author='Jon Hadfield',
author_email='[email protected]',
url='http://github.com/jonhadfield/creds',
packages=find_packages('lib'),
package_dir={'': 'lib'},
# package_data={'': ['LICENSE', 'NOTICE'], 'creds': ['*.pem']},
include_package_data=True,
install_requires=requires,
license='MIT',
zip_safe=False,
classifiers=(
'Development Status :: 4 - Beta',
'Intended Audience :: System Administrators',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX :: BSD :: Linux',
'Operating System :: POSIX :: BSD :: FreeBSD',
'Operating System :: POSIX :: BSD :: OpenBSD',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'
),
cmdclass={'test': PyTest},
tests_require=test_requirements,
# extras_require={
# 'security': [],
# },
)
|
mit
|
BizzCloud/PosBox
|
addons/mail/mail_message.py
|
12
|
47038
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import logging
from openerp import tools
from email.header import decode_header
from openerp import SUPERUSER_ID, api
from openerp.osv import osv, orm, fields
from openerp.tools import html_email_clean
from openerp.tools.translate import _
from HTMLParser import HTMLParser
_logger = logging.getLogger(__name__)
try:
from mako.template import Template as MakoTemplate
except ImportError:
_logger.warning("payment_acquirer: mako templates not available, payment acquirer will not work!")
""" Some tools for parsing / creating email fields """
def decode(text):
"""Returns unicode() string conversion of the the given encoded smtp header text"""
if text:
text = decode_header(text.replace('\r', ''))
return ''.join([tools.ustr(x[0], x[1]) for x in text])
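# A minimal illustrative example (assumed header value): decode() unfolds and
# decodes RFC 2047 encoded header text, e.g.
#     decode('=?utf-8?b?UsOpcG9uc2U=?=')  ->  u'Réponse'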
class MLStripper(HTMLParser):
def __init__(self):
self.reset()
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def get_data(self):
return ''.join(self.fed)
def strip_tags(html):
s = MLStripper()
s.feed(html)
return s.get_data()
class mail_message(osv.Model):
""" Messages model: system notification (replacing res.log notifications),
comments (OpenChatter discussion) and incoming emails. """
_name = 'mail.message'
_description = 'Message'
_inherit = ['ir.needaction_mixin']
_order = 'id desc'
_rec_name = 'record_name'
_message_read_limit = 30
_message_read_fields = ['id', 'parent_id', 'model', 'res_id', 'body', 'subject', 'date', 'to_read', 'email_from',
'type', 'vote_user_ids', 'attachment_ids', 'author_id', 'partner_ids', 'record_name']
_message_record_name_length = 18
_message_read_more_limit = 1024
def default_get(self, cr, uid, fields, context=None):
# protection for `default_type` values leaking from menu action context (e.g. for invoices)
if context and context.get('default_type') and context.get('default_type') not in self._columns['type'].selection:
context = dict(context, default_type=None)
return super(mail_message, self).default_get(cr, uid, fields, context=context)
def _get_to_read(self, cr, uid, ids, name, arg, context=None):
""" Compute if the message is unread by the current user. """
res = dict((id, False) for id in ids)
partner_id = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context).partner_id.id
notif_obj = self.pool.get('mail.notification')
notif_ids = notif_obj.search(cr, uid, [
('partner_id', 'in', [partner_id]),
('message_id', 'in', ids),
('is_read', '=', False),
], context=context)
for notif in notif_obj.browse(cr, uid, notif_ids, context=context):
res[notif.message_id.id] = True
return res
def _search_to_read(self, cr, uid, obj, name, domain, context=None):
""" Search for messages to read by the current user. Condition is
inversed because we search unread message on a is_read column. """
return ['&', ('notification_ids.partner_id.user_ids', 'in', [uid]), ('notification_ids.is_read', '=', not domain[0][2])]
def _get_starred(self, cr, uid, ids, name, arg, context=None):
""" Compute if the message is unread by the current user. """
res = dict((id, False) for id in ids)
partner_id = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context).partner_id.id
notif_obj = self.pool.get('mail.notification')
notif_ids = notif_obj.search(cr, uid, [
('partner_id', 'in', [partner_id]),
('message_id', 'in', ids),
('starred', '=', True),
], context=context)
for notif in notif_obj.browse(cr, uid, notif_ids, context=context):
res[notif.message_id.id] = True
return res
def _search_starred(self, cr, uid, obj, name, domain, context=None):
""" Search for starred messages by the current user."""
return ['&', ('notification_ids.partner_id.user_ids', 'in', [uid]), ('notification_ids.starred', '=', domain[0][2])]
_columns = {
'type': fields.selection([
('email', 'Email'),
('comment', 'Comment'),
('notification', 'System notification'),
], 'Type', size=12,
help="Message type: email for email message, notification for system "\
"message, comment for other messages such as user replies"),
'email_from': fields.char('From',
help="Email address of the sender. This field is set when no matching partner is found for incoming emails."),
'reply_to': fields.char('Reply-To',
help='Reply email address. Setting the reply_to bypasses the automatic thread creation.'),
'same_thread': fields.boolean('Same thread',
help='Redirect answers to the same discussion thread.'),
'author_id': fields.many2one('res.partner', 'Author', select=1,
ondelete='set null',
help="Author of the message. If not set, email_from may hold an email address that did not match any partner."),
'author_avatar': fields.related('author_id', 'image_small', type="binary", string="Author's Avatar"),
'partner_ids': fields.many2many('res.partner', string='Recipients'),
'notified_partner_ids': fields.many2many('res.partner', 'mail_notification',
'message_id', 'partner_id', 'Notified partners',
help='Partners that have a notification pushing this message in their mailboxes'),
'attachment_ids': fields.many2many('ir.attachment', 'message_attachment_rel',
'message_id', 'attachment_id', 'Attachments'),
'parent_id': fields.many2one('mail.message', 'Parent Message', select=True,
ondelete='set null', help="Initial thread message."),
'child_ids': fields.one2many('mail.message', 'parent_id', 'Child Messages'),
'model': fields.char('Related Document Model', size=128, select=1),
'res_id': fields.integer('Related Document ID', select=1),
'record_name': fields.char('Message Record Name', help="Name get of the related document."),
'notification_ids': fields.one2many('mail.notification', 'message_id',
string='Notifications', auto_join=True,
help='Technical field holding the message notifications. Use notified_partner_ids to access notified partners.'),
'subject': fields.char('Subject'),
'date': fields.datetime('Date'),
'message_id': fields.char('Message-Id', help='Message unique identifier', select=1, readonly=1, copy=False),
'body': fields.html('Contents', help='Automatically sanitized HTML contents'),
'to_read': fields.function(_get_to_read, fnct_search=_search_to_read,
type='boolean', string='To read',
help='Current user has an unread notification linked to this message'),
'starred': fields.function(_get_starred, fnct_search=_search_starred,
type='boolean', string='Starred',
help='Current user has a starred notification linked to this message'),
'subtype_id': fields.many2one('mail.message.subtype', 'Subtype',
ondelete='set null', select=1,),
'vote_user_ids': fields.many2many('res.users', 'mail_vote',
'message_id', 'user_id', string='Votes',
help='Users that voted for this message'),
'mail_server_id': fields.many2one('ir.mail_server', 'Outgoing mail server', readonly=1),
}
def _needaction_domain_get(self, cr, uid, context=None):
return [('to_read', '=', True)]
def _get_default_from(self, cr, uid, context=None):
this = self.pool.get('res.users').browse(cr, SUPERUSER_ID, uid, context=context)
if this.alias_name and this.alias_domain:
return '%s <%s@%s>' % (this.name, this.alias_name, this.alias_domain)
elif this.email:
return '%s <%s>' % (this.name, this.email)
raise osv.except_osv(_('Invalid Action!'), _("Unable to send email, please configure the sender's email address or alias."))
def _get_default_author(self, cr, uid, context=None):
return self.pool.get('res.users').browse(cr, SUPERUSER_ID, uid, context=context).partner_id.id
_defaults = {
'type': 'email',
'date': fields.datetime.now,
'author_id': lambda self, cr, uid, ctx=None: self._get_default_author(cr, uid, ctx),
'body': '',
'email_from': lambda self, cr, uid, ctx=None: self._get_default_from(cr, uid, ctx),
'same_thread': True,
}
#------------------------------------------------------
# Vote/Like
#------------------------------------------------------
def vote_toggle(self, cr, uid, ids, context=None):
''' Toggles vote. Performed using read to avoid access rights issues.
Done as SUPERUSER_ID because uid may vote for a message he cannot modify. '''
for message in self.read(cr, uid, ids, ['vote_user_ids'], context=context):
new_has_voted = not (uid in message.get('vote_user_ids'))
if new_has_voted:
self.write(cr, SUPERUSER_ID, message.get('id'), {'vote_user_ids': [(4, uid)]}, context=context)
else:
self.write(cr, SUPERUSER_ID, message.get('id'), {'vote_user_ids': [(3, uid)]}, context=context)
return new_has_voted or False
#------------------------------------------------------
# download an attachment
#------------------------------------------------------
def download_attachment(self, cr, uid, id_message, attachment_id, context=None):
""" Return the content of linked attachments. """
# this will fail if you cannot read the message
message_values = self.read(cr, uid, [id_message], ['attachment_ids'], context=context)[0]
if attachment_id in message_values['attachment_ids']:
attachment = self.pool.get('ir.attachment').browse(cr, SUPERUSER_ID, attachment_id, context=context)
if attachment.datas and attachment.datas_fname:
return {
'base64': attachment.datas,
'filename': attachment.datas_fname,
}
return False
#------------------------------------------------------
# Notification API
#------------------------------------------------------
@api.cr_uid_ids_context
def set_message_read(self, cr, uid, msg_ids, read, create_missing=True, context=None):
""" Set messages as (un)read. Technically, the notifications related
to uid are set to (un)read. If for some msg_ids there are missing
notifications (i.e. due to load more or thread parent fetching),
they are created.
:param bool read: set notification as (un)read
:param bool create_missing: create notifications for missing entries
(i.e. when acting on displayed messages not notified)
:return: number of messages marked as read
"""
notification_obj = self.pool.get('mail.notification')
user_pid = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context).partner_id.id
domain = [('partner_id', '=', user_pid), ('message_id', 'in', msg_ids)]
if not create_missing:
domain += [('is_read', '=', not read)]
notif_ids = notification_obj.search(cr, uid, domain, context=context)
# all messages already have notifications: just set them as (un)read
if len(notif_ids) == len(msg_ids) or not create_missing:
notification_obj.write(cr, uid, notif_ids, {'is_read': read}, context=context)
return len(notif_ids)
# some messages do not have notifications: find which ones, create notifications, update read status
notified_msg_ids = [notification.message_id.id for notification in notification_obj.browse(cr, uid, notif_ids, context=context)]
to_create_msg_ids = list(set(msg_ids) - set(notified_msg_ids))
for msg_id in to_create_msg_ids:
notification_obj.create(cr, uid, {'partner_id': user_pid, 'is_read': read, 'message_id': msg_id}, context=context)
notification_obj.write(cr, uid, notif_ids, {'is_read': read}, context=context)
return len(notif_ids)
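# A minimal usage sketch (hypothetical ids, not from the original source): mark
# two displayed messages as read for the current user, creating any missing
# notifications on the fly:
#
#     self.pool['mail.message'].set_message_read(cr, uid, [msg_id_1, msg_id_2],
#                                                read=True, create_missing=True)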
@api.cr_uid_ids_context
def set_message_starred(self, cr, uid, msg_ids, starred, create_missing=True, context=None):
""" Set messages as (un)starred. Technically, the notifications related
to uid are set to (un)starred.
:param bool starred: set notification as (un)starred
:param bool create_missing: create notifications for missing entries
(i.e. when acting on displayed messages not notified)
"""
notification_obj = self.pool.get('mail.notification')
user_pid = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context).partner_id.id
domain = [('partner_id', '=', user_pid), ('message_id', 'in', msg_ids)]
if not create_missing:
domain += [('starred', '=', not starred)]
values = {
'starred': starred
}
if starred:
values['is_read'] = False
notif_ids = notification_obj.search(cr, uid, domain, context=context)
# all messages already have notifications: just set them as (un)starred
if len(notif_ids) == len(msg_ids) or not create_missing:
notification_obj.write(cr, uid, notif_ids, values, context=context)
return starred
# some messages do not have notifications: find which ones, create notifications, update starred status
notified_msg_ids = [notification.message_id.id for notification in notification_obj.browse(cr, uid, notif_ids, context=context)]
to_create_msg_ids = list(set(msg_ids) - set(notified_msg_ids))
for msg_id in to_create_msg_ids:
notification_obj.create(cr, uid, dict(values, partner_id=user_pid, message_id=msg_id), context=context)
notification_obj.write(cr, uid, notif_ids, values, context=context)
return starred
#------------------------------------------------------
# Message loading for web interface
#------------------------------------------------------
def _message_read_dict_postprocess(self, cr, uid, messages, message_tree, context=None):
""" Post-processing on values given by message_read. This method will
handle partners in batch to avoid doing numerous queries.
:param list messages: list of message, as get_dict result
:param dict message_tree: {[msg.id]: msg browse record}
"""
res_partner_obj = self.pool.get('res.partner')
ir_attachment_obj = self.pool.get('ir.attachment')
pid = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context).partner_id.id
# 1. Aggregate partners (author_id and partner_ids) and attachments
partner_ids = set()
attachment_ids = set()
for key, message in message_tree.iteritems():
if message.author_id:
partner_ids |= set([message.author_id.id])
if message.subtype_id and message.notified_partner_ids: # take notified people of message with a subtype
partner_ids |= set([partner.id for partner in message.notified_partner_ids])
elif not message.subtype_id and message.partner_ids: # take specified people of message without a subtype (log)
partner_ids |= set([partner.id for partner in message.partner_ids])
if message.attachment_ids:
attachment_ids |= set([attachment.id for attachment in message.attachment_ids])
# Read partners as SUPERUSER -> display the names like classic m2o even if no access
partners = res_partner_obj.name_get(cr, SUPERUSER_ID, list(partner_ids), context=context)
partner_tree = dict((partner[0], partner) for partner in partners)
# 2. Read attachments as SUPERUSER, because uid could receive a message and attachments for a document it cannot see
attachments = ir_attachment_obj.read(cr, SUPERUSER_ID, list(attachment_ids), ['id', 'datas_fname', 'name', 'file_type_icon'], context=context)
attachments_tree = dict((attachment['id'], {
'id': attachment['id'],
'filename': attachment['datas_fname'],
'name': attachment['name'],
'file_type_icon': attachment['file_type_icon'],
}) for attachment in attachments)
# 3. Update message dictionaries
for message_dict in messages:
message_id = message_dict.get('id')
message = message_tree[message_id]
if message.author_id:
author = partner_tree[message.author_id.id]
else:
author = (0, message.email_from)
partner_ids = []
if message.subtype_id:
partner_ids = [partner_tree[partner.id] for partner in message.notified_partner_ids
if partner.id in partner_tree]
else:
partner_ids = [partner_tree[partner.id] for partner in message.partner_ids
if partner.id in partner_tree]
attachment_ids = []
for attachment in message.attachment_ids:
if attachment.id in attachments_tree:
attachment_ids.append(attachments_tree[attachment.id])
message_dict.update({
'is_author': pid == author[0],
'author_id': author,
'partner_ids': partner_ids,
'attachment_ids': attachment_ids,
'user_pid': pid
})
return True
def _message_read_dict(self, cr, uid, message, parent_id=False, context=None):
""" Return a dict representation of the message. This representation is
used in the JS client code, to display the messages. Partners and
attachments related stuff will be done in post-processing in batch.
:param dict message: mail.message browse record
"""
# private message: no model, no res_id
is_private = False
if not message.model or not message.res_id:
is_private = True
# votes and favorites: res.users ids, no prefetching should be done
vote_nb = len(message.vote_user_ids)
has_voted = uid in [user.id for user in message.vote_user_ids]
try:
if parent_id:
max_length = 300
else:
max_length = 100
body_short = html_email_clean(message.body, remove=False, shorten=True, max_length=max_length)
except Exception:
body_short = '<p><b>Encoding Error : </b><br/>Unable to convert this message (id: %s).</p>' % message.id
_logger.exception(Exception)
return {'id': message.id,
'type': message.type,
'subtype': message.subtype_id.name if message.subtype_id else False,
'body': message.body,
'body_short': body_short,
'model': message.model,
'res_id': message.res_id,
'record_name': message.record_name,
'subject': message.subject,
'date': message.date,
'to_read': message.to_read,
'parent_id': parent_id,
'is_private': is_private,
'author_id': False,
'author_avatar': message.author_avatar,
'is_author': False,
'partner_ids': [],
'vote_nb': vote_nb,
'has_voted': has_voted,
'is_favorite': message.starred,
'attachment_ids': [],
}
def _message_read_add_expandables(self, cr, uid, messages, message_tree, parent_tree,
message_unload_ids=[], thread_level=0, domain=[], parent_id=False, context=None):
""" Create expandables for message_read, to load new messages.
1. get the expandable for new threads
if display is flat (thread_level == 0):
fetch message_ids < min(already displayed ids), because we
want a flat display, ordered by id
else:
fetch message_ids that are not children of already displayed
messages
2. get the expandables for new messages inside threads if display
is not flat
for each thread header, search for its children
for each hole in the child list based on message displayed,
create an expandable
:param list messages: list of message structure for the Chatter
widget to which expandables are added
:param dict message_tree: dict [id]: browse record of this message
:param dict parent_tree: dict [parent_id]: [child_ids]
:param list message_unload_ids: list of message_ids we do not want
to load
:return bool: True
"""
def _get_expandable(domain, message_nb, parent_id, max_limit):
return {
'domain': domain,
'nb_messages': message_nb,
'type': 'expandable',
'parent_id': parent_id,
'max_limit': max_limit,
}
if not messages:
return True
message_ids = sorted(message_tree.keys())
# 1. get the expandable for new threads
if thread_level == 0:
exp_domain = domain + [('id', '<', min(message_unload_ids + message_ids))]
else:
exp_domain = domain + ['!', ('id', 'child_of', message_unload_ids + parent_tree.keys())]
ids = self.search(cr, uid, exp_domain, context=context, limit=1)
if ids:
# inside a thread: prepend
if parent_id:
messages.insert(0, _get_expandable(exp_domain, -1, parent_id, True))
# new threads: append
else:
messages.append(_get_expandable(exp_domain, -1, parent_id, True))
# 2. get the expandables for new messages inside threads if display is not flat
if thread_level == 0:
return True
for message_id in message_ids:
message = message_tree[message_id]
# generate only for thread header messages (TDE note: parent_id may be False if uid cannot see parent_id, seems ok)
if message.parent_id:
continue
# check there are message for expandable
child_ids = set([child.id for child in message.child_ids]) - set(message_unload_ids)
child_ids = sorted(list(child_ids), reverse=True)
if not child_ids:
continue
# make groups of unread messages
id_min, id_max, nb = max(child_ids), 0, 0
for child_id in child_ids:
if not child_id in message_ids:
nb += 1
if id_min > child_id:
id_min = child_id
if id_max < child_id:
id_max = child_id
elif nb > 0:
exp_domain = [('id', '>=', id_min), ('id', '<=', id_max), ('id', 'child_of', message_id)]
idx = [msg.get('id') for msg in messages].index(child_id) + 1
# messages.append(_get_expandable(exp_domain, nb, message_id, False))
messages.insert(idx, _get_expandable(exp_domain, nb, message_id, False))
id_min, id_max, nb = max(child_ids), 0, 0
else:
id_min, id_max, nb = max(child_ids), 0, 0
if nb > 0:
exp_domain = [('id', '>=', id_min), ('id', '<=', id_max), ('id', 'child_of', message_id)]
idx = [msg.get('id') for msg in messages].index(message_id) + 1
# messages.append(_get_expandable(exp_domain, nb, message_id, id_min))
messages.insert(idx, _get_expandable(exp_domain, nb, message_id, False))
return True
@api.cr_uid_context
def message_read(self, cr, uid, ids=None, domain=None, message_unload_ids=None,
thread_level=0, context=None, parent_id=False, limit=None):
""" Read messages from mail.message, and get back a list of structured
messages to be displayed as discussion threads. If IDs is set,
fetch these records. Otherwise use the domain to fetch messages.
After having fetch messages, their ancestors will be added to obtain
well formed threads, if uid has access to them.
After reading the messages, expandable messages are added in the
message list (see ``_message_read_add_expandables``). It consists
in messages holding the 'read more' data: number of messages to
read, domain to apply.
:param list ids: optional IDs to fetch
:param list domain: optional domain for searching ids if ids not set
:param list message_unload_ids: optional ids we do not want to fetch,
because i.e. they are already displayed somewhere
:param int parent_id: context of parent_id
- if parent_id reached when adding ancestors, stop going further
in the ancestor search
- if set in flat mode, ancestor_id is set to parent_id
:param int limit: number of messages to fetch, before adding the
ancestors and expandables
:return list: list of message structure for the Chatter widget
"""
assert thread_level in [0, 1], 'message_read() thread_level should be 0 (flat) or 1 (1 level of thread); given %s.' % thread_level
domain = domain if domain is not None else []
message_unload_ids = message_unload_ids if message_unload_ids is not None else []
if message_unload_ids:
domain += [('id', 'not in', message_unload_ids)]
limit = limit or self._message_read_limit
message_tree = {}
message_list = []
parent_tree = {}
# no specific IDS given: fetch messages according to the domain, add their parents if uid has access to them
if ids is None:
ids = self.search(cr, uid, domain, context=context, limit=limit)
# fetch parent if threaded, sort messages
for message in self.browse(cr, uid, ids, context=context):
message_id = message.id
if message_id in message_tree:
continue
message_tree[message_id] = message
# find parent_id
if thread_level == 0:
tree_parent_id = parent_id
else:
tree_parent_id = message_id
parent = message
while parent.parent_id and parent.parent_id.id != parent_id:
parent = parent.parent_id
tree_parent_id = parent.id
if not parent.id in message_tree:
message_tree[parent.id] = parent
# newest messages first
parent_tree.setdefault(tree_parent_id, [])
if tree_parent_id != message_id:
parent_tree[tree_parent_id].append(self._message_read_dict(cr, uid, message_tree[message_id], parent_id=tree_parent_id, context=context))
if thread_level:
for key, message_id_list in parent_tree.iteritems():
message_id_list.sort(key=lambda item: item['id'])
message_id_list.insert(0, self._message_read_dict(cr, uid, message_tree[key], context=context))
# create final ordered message_list based on parent_tree
parent_list = parent_tree.items()
parent_list = sorted(parent_list, key=lambda item: max([msg.get('id') for msg in item[1]]) if item[1] else item[0], reverse=True)
message_list = [message for (key, msg_list) in parent_list for message in msg_list]
# get the child expandable messages for the tree
self._message_read_dict_postprocess(cr, uid, message_list, message_tree, context=context)
self._message_read_add_expandables(cr, uid, message_list, message_tree, parent_tree,
thread_level=thread_level, message_unload_ids=message_unload_ids, domain=domain, parent_id=parent_id, context=context)
return message_list
#------------------------------------------------------
# mail_message internals
#------------------------------------------------------
def init(self, cr):
cr.execute("""SELECT indexname FROM pg_indexes WHERE indexname = 'mail_message_model_res_id_idx'""")
if not cr.fetchone():
cr.execute("""CREATE INDEX mail_message_model_res_id_idx ON mail_message (model, res_id)""")
def _find_allowed_model_wise(self, cr, uid, doc_model, doc_dict, context=None):
doc_ids = doc_dict.keys()
allowed_doc_ids = self.pool[doc_model].search(cr, uid, [('id', 'in', doc_ids)], context=context)
return set([message_id for allowed_doc_id in allowed_doc_ids for message_id in doc_dict[allowed_doc_id]])
def _find_allowed_doc_ids(self, cr, uid, model_ids, context=None):
model_access_obj = self.pool.get('ir.model.access')
allowed_ids = set()
for doc_model, doc_dict in model_ids.iteritems():
if not model_access_obj.check(cr, uid, doc_model, 'read', False):
continue
allowed_ids |= self._find_allowed_model_wise(cr, uid, doc_model, doc_dict, context=context)
return allowed_ids
def _search(self, cr, uid, args, offset=0, limit=None, order=None,
context=None, count=False, access_rights_uid=None):
""" Override that adds specific access rights of mail.message, to remove
ids uid could not see according to our custom rules. Please refer
to check_access_rule for more details about those rules.
After having received ids of a classic search, keep only:
- if author_id == pid, uid is the author, OR
- a notification (id, pid) exists, uid has been notified, OR
- uid has read access to the related document if model, res_id
- otherwise: remove the id
"""
# Rules do not apply to administrator
if uid == SUPERUSER_ID:
return super(mail_message, self)._search(cr, uid, args, offset=offset, limit=limit, order=order,
context=context, count=count, access_rights_uid=access_rights_uid)
# Perform a super with count as False, to have the ids, not a counter
ids = super(mail_message, self)._search(cr, uid, args, offset=offset, limit=limit, order=order,
context=context, count=False, access_rights_uid=access_rights_uid)
if not ids and count:
return 0
elif not ids:
return ids
pid = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context).partner_id.id
author_ids, partner_ids, allowed_ids = set([]), set([]), set([])
model_ids = {}
messages = super(mail_message, self).read(cr, uid, ids, ['author_id', 'model', 'res_id', 'notified_partner_ids'], context=context)
for message in messages:
if message.get('author_id') and message.get('author_id')[0] == pid:
author_ids.add(message.get('id'))
elif pid in message.get('notified_partner_ids'):
partner_ids.add(message.get('id'))
elif message.get('model') and message.get('res_id'):
model_ids.setdefault(message.get('model'), {}).setdefault(message.get('res_id'), set()).add(message.get('id'))
allowed_ids = self._find_allowed_doc_ids(cr, uid, model_ids, context=context)
final_ids = author_ids | partner_ids | allowed_ids
if count:
return len(final_ids)
else:
# re-construct a list based on ids, because set did not keep the original order
id_list = [id for id in ids if id in final_ids]
return id_list
def check_access_rule(self, cr, uid, ids, operation, context=None):
""" Access rules of mail.message:
- read: if
- author_id == pid, uid is the author, OR
- mail_notification (id, pid) exists, uid has been notified, OR
- uid has read access to the related document if model, res_id
- otherwise: raise
- create: if
- no model, no res_id, I create a private message OR
- pid in message_follower_ids if model, res_id OR
- mail_notification (parent_id.id, pid) exists, uid has been notified of the parent, OR
- uid has write or create access on the related document if model, res_id, OR
- otherwise: raise
- write: if
- author_id == pid, uid is the author, OR
- uid has write or create access on the related document if model, res_id
- otherwise: raise
- unlink: if
- uid has write or create access on the related document if model, res_id
- otherwise: raise
"""
def _generate_model_record_ids(msg_val, msg_ids):
""" :param model_record_ids: {'model': {'res_id': (msg_id, msg_id)}, ... }
:param message_values: {'msg_id': {'model': .., 'res_id': .., 'author_id': ..}}
"""
model_record_ids = {}
for id in msg_ids:
vals = msg_val.get(id, {})
if vals.get('model') and vals.get('res_id'):
model_record_ids.setdefault(vals['model'], set()).add(vals['res_id'])
return model_record_ids
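# A minimal illustrative example (assumed values, not from the original source):
#     msg_val = {1: {'model': 'res.partner', 'res_id': 7, 'author_id': 3},
#                2: {'model': 'res.partner', 'res_id': 9, 'author_id': 3}}
#     _generate_model_record_ids(msg_val, [1, 2])
#     # -> {'res.partner': set([7, 9])}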
if uid == SUPERUSER_ID:
return
if isinstance(ids, (int, long)):
ids = [ids]
not_obj = self.pool.get('mail.notification')
fol_obj = self.pool.get('mail.followers')
partner_id = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=None).partner_id.id
# Read mail_message.ids to have their values
message_values = dict.fromkeys(ids, {})
cr.execute('SELECT DISTINCT id, model, res_id, author_id, parent_id FROM "%s" WHERE id = ANY (%%s)' % self._table, (ids,))
for id, rmod, rid, author_id, parent_id in cr.fetchall():
message_values[id] = {'model': rmod, 'res_id': rid, 'author_id': author_id, 'parent_id': parent_id}
# Author condition (READ, WRITE, CREATE (private)) -> could become an ir.rule ?
author_ids = []
if operation == 'read' or operation == 'write':
author_ids = [mid for mid, message in message_values.iteritems()
if message.get('author_id') and message.get('author_id') == partner_id]
elif operation == 'create':
author_ids = [mid for mid, message in message_values.iteritems()
if not message.get('model') and not message.get('res_id')]
# Parent condition, for create (check for received notifications for the created message parent)
notified_ids = []
if operation == 'create':
parent_ids = [message.get('parent_id') for mid, message in message_values.iteritems()
if message.get('parent_id')]
not_ids = not_obj.search(cr, SUPERUSER_ID, [('message_id.id', 'in', parent_ids), ('partner_id', '=', partner_id)], context=context)
not_parent_ids = [notif.message_id.id for notif in not_obj.browse(cr, SUPERUSER_ID, not_ids, context=context)]
notified_ids += [mid for mid, message in message_values.iteritems()
if message.get('parent_id') in not_parent_ids]
# Notification condition, for read (check for received notifications and create (in message_follower_ids)) -> could become an ir.rule, but not till we do not have a many2one variable field
other_ids = set(ids).difference(set(author_ids), set(notified_ids))
model_record_ids = _generate_model_record_ids(message_values, other_ids)
if operation == 'read':
not_ids = not_obj.search(cr, SUPERUSER_ID, [
('partner_id', '=', partner_id),
('message_id', 'in', ids),
], context=context)
notified_ids = [notification.message_id.id for notification in not_obj.browse(cr, SUPERUSER_ID, not_ids, context=context)]
elif operation == 'create':
for doc_model, doc_ids in model_record_ids.items():
fol_ids = fol_obj.search(cr, SUPERUSER_ID, [
('res_model', '=', doc_model),
('res_id', 'in', list(doc_ids)),
('partner_id', '=', partner_id),
], context=context)
fol_mids = [follower.res_id for follower in fol_obj.browse(cr, SUPERUSER_ID, fol_ids, context=context)]
notified_ids += [mid for mid, message in message_values.iteritems()
if message.get('model') == doc_model and message.get('res_id') in fol_mids]
# CRUD: Access rights related to the document
other_ids = other_ids.difference(set(notified_ids))
model_record_ids = _generate_model_record_ids(message_values, other_ids)
document_related_ids = []
for model, doc_ids in model_record_ids.items():
model_obj = self.pool[model]
mids = model_obj.exists(cr, uid, list(doc_ids))
if hasattr(model_obj, 'check_mail_message_access'):
model_obj.check_mail_message_access(cr, uid, mids, operation, context=context)
else:
self.pool['mail.thread'].check_mail_message_access(cr, uid, mids, operation, model_obj=model_obj, context=context)
document_related_ids += [mid for mid, message in message_values.iteritems()
if message.get('model') == model and message.get('res_id') in mids]
# Calculate remaining ids: if not void, raise an error
other_ids = other_ids.difference(set(document_related_ids))
if not other_ids:
return
raise orm.except_orm(_('Access Denied'),
_('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
(self._description, operation))
def _get_record_name(self, cr, uid, values, context=None):
""" Return the related document name, using name_get. It is done using
SUPERUSER_ID, to be sure to have the record name correctly stored. """
if not values.get('model') or not values.get('res_id') or values['model'] not in self.pool:
return False
return self.pool[values['model']].name_get(cr, SUPERUSER_ID, [values['res_id']], context=context)[0][1]
def _get_reply_to(self, cr, uid, values, context=None):
""" Return a specific reply_to: alias of the document through message_get_reply_to
or take the email_from
"""
model, res_id, email_from = values.get('model'), values.get('res_id'), values.get('email_from')
ctx = dict(context, thread_model=model)
return self.pool['mail.thread'].message_get_reply_to(cr, uid, [res_id], default=email_from, context=ctx)[res_id]
def _get_message_id(self, cr, uid, values, context=None):
if values.get('same_thread', True) is False:
message_id = tools.generate_tracking_message_id('reply_to')
elif values.get('res_id') and values.get('model'):
message_id = tools.generate_tracking_message_id('%(res_id)s-%(model)s' % values)
else:
message_id = tools.generate_tracking_message_id('private')
return message_id
def create(self, cr, uid, values, context=None):
context = dict(context or {})
default_starred = context.pop('default_starred', False)
if 'email_from' not in values: # needed to compute reply_to
values['email_from'] = self._get_default_from(cr, uid, context=context)
if 'message_id' not in values:
values['message_id'] = self._get_message_id(cr, uid, values, context=context)
if 'reply_to' not in values:
values['reply_to'] = self._get_reply_to(cr, uid, values, context=context)
if 'record_name' not in values and 'default_record_name' not in context:
values['record_name'] = self._get_record_name(cr, uid, values, context=context)
newid = super(mail_message, self).create(cr, uid, values, context)
self._notify(cr, uid, newid, context=context,
force_send=context.get('mail_notify_force_send', True),
user_signature=context.get('mail_notify_user_signature', True))
# TDE FIXME: handle default_starred. Why not setting an inv on starred ?
# Because starred will call set_message_starred, that looks for notifications.
# When creating a new mail_message, it will create a notification to a message
# that does not exist, leading to an error (key not existing). Also this
# means unread notifications will be created, yet we cannot assure
# this is what we want.
if default_starred:
self.set_message_starred(cr, uid, [newid], True, context=context)
return newid
def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
""" Override to explicitely call check_access_rule, that is not called
by the ORM. It instead directly fetches ir.rules and apply them. """
self.check_access_rule(cr, uid, ids, 'read', context=context)
res = super(mail_message, self).read(cr, uid, ids, fields=fields, context=context, load=load)
return res
def unlink(self, cr, uid, ids, context=None):
# cascade-delete attachments that are directly attached to the message (should only happen
# for mail.messages that act as parent for a standalone mail.mail record).
self.check_access_rule(cr, uid, ids, 'unlink', context=context)
attachments_to_delete = []
for message in self.browse(cr, uid, ids, context=context):
for attach in message.attachment_ids:
if attach.res_model == self._name and (attach.res_id == message.id or attach.res_id == 0):
attachments_to_delete.append(attach.id)
if attachments_to_delete:
self.pool.get('ir.attachment').unlink(cr, uid, attachments_to_delete, context=context)
return super(mail_message, self).unlink(cr, uid, ids, context=context)
#------------------------------------------------------
# Messaging API
#------------------------------------------------------
def _notify(self, cr, uid, newid, context=None, force_send=False, user_signature=True):
""" Add the related record followers to the destination partner_ids if is not a private message.
Call mail_notification.notify to manage the email sending
"""
notification_obj = self.pool.get('mail.notification')
message = self.browse(cr, uid, newid, context=context)
partners_to_notify = set([])
# all followers of the mail.message document have to be added as partners and notified if a subtype is defined (otherwise: log message)
if message.subtype_id and message.model and message.res_id:
fol_obj = self.pool.get("mail.followers")
# browse as SUPERUSER because rules could restrict the search results
fol_ids = fol_obj.search(
cr, SUPERUSER_ID, [
('res_model', '=', message.model),
('res_id', '=', message.res_id),
], context=context)
partners_to_notify |= set(
fo.partner_id.id for fo in fol_obj.browse(cr, SUPERUSER_ID, fol_ids, context=context)
if message.subtype_id.id in [st.id for st in fo.subtype_ids]
)
# remove me from notified partners, unless the message is written on my own wall
if message.subtype_id and message.author_id and message.model == "res.partner" and message.res_id == message.author_id.id:
partners_to_notify |= set([message.author_id.id])
elif message.author_id:
partners_to_notify -= set([message.author_id.id])
# all partner_ids of the mail.message have to be notified regardless of the above (even the author if explicitly added!)
if message.partner_ids:
partners_to_notify |= set([p.id for p in message.partner_ids])
# notify
notification_obj._notify(
cr, uid, newid, partners_to_notify=list(partners_to_notify), context=context,
force_send=force_send, user_signature=user_signature
)
message.refresh()
        # An error appears when a user receives a notification without notifying
# the parent message -> add a read notification for the parent
if message.parent_id:
# all notified_partner_ids of the mail.message have to be notified for the parented messages
partners_to_parent_notify = set(message.notified_partner_ids).difference(message.parent_id.notified_partner_ids)
for partner in partners_to_parent_notify:
notification_obj.create(cr, uid, {
'message_id': message.parent_id.id,
'partner_id': partner.id,
'is_read': True,
}, context=context)
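# --- Illustrative sketch (not part of the addon) -----------------------------
# The recipient computation performed by _notify() above boils down to plain
# set arithmetic: subscribed followers of the record, minus the author (unless
# the message is posted on the author's own wall), plus every explicitly
# addressed partner. A minimal standalone rendition, assuming plain integer
# partner ids and no ORM involved:
def _notify_recipients_sketch(follower_ids, author_id, explicit_partner_ids,
                              posted_on_own_wall=False):
    recipients = set(follower_ids)
    if posted_on_own_wall:
        # keep the author so that he sees messages posted on his own wall
        recipients.add(author_id)
    elif author_id:
        # do not notify the author about his own message
        recipients.discard(author_id)
    # explicitly addressed partners are always notified, even the author
    recipients |= set(explicit_partner_ids)
    return recipients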
|
agpl-3.0
|
wlof/gameoflife
|
gameoflife/gamenumpy.py
|
1
|
6610
|
# -*- coding: utf-8 -*-
# This file is part of gameoflife.
# Copyright 2015, wlof.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""This module provides a class that implements the Game of Life using
the NumPy and SciPy libraries.
"""
from __future__ import (division, absolute_import, print_function,
unicode_literals)
import numpy as np
from scipy.ndimage.filters import convolve
from gameoflife.gameoflife import GameOfLife, Fate
class BaseGameNumPy(GameOfLife):
"""Base class for both Numpy/SciPy implementations."""
# Weights used for the convolve operation
WEIGHTS = np.array([[1, 1, 1],
[1, 10, 1],
[1, 1, 1]])
def _init(self):
"""Initializes the internal structures used by the implementation."""
# Cells grid. Each item is either 0 for a dead cell or 1 for a live
# one.
self.cells = np.zeros((self.width, self.height), dtype=np.int8)
def populate_random(self, prob=0.5):
"""Populates the grid of cells at random, with specified
probability.
"""
rand = np.random.uniform(0.0, 1.0, (self.width, self.height))
self.cells = np.int8(rand <= prob)
class GameNumPy(BaseGameNumPy):
"""Full-featured NumPy/SciPy-based implementation of the Game of Life."""
def _init(self):
"""Initializes the internal structures used by the implementation."""
super(GameNumPy, self)._init()
# Fates grid. Each item contains the fate of the cell at the location
# for the next generation.
self.fates = np.zeros((self.width, self.height), dtype=np.int8)
self.fates.fill(Fate.StayDead)
# Ages grid. Each item is the number of generations the cell at the
# location has been in its current state (dead or alive).
self.ages = np.zeros((self.width, self.height), dtype=np.int64)
def populate_random(self, prob=0.5):
"""Populates the grid of cells at random, with specified
probability.
"""
super(GameNumPy, self).populate_random(prob)
self._compute_fates()
def _step(self):
"""Computes the next generation of cells based on the current one."""
self._apply_fates()
self._compute_fates()
def fate(self, row, col):
"""Returns the fate of the cell at the specified location."""
line = self.fates.take(row, axis=0, mode='wrap')
fate = line.take(col, mode='wrap')
return fate
def age(self, row, col):
"""Returns the age of a cell, i.e. how many generations it's been in
its current state (dead or alive).
"""
line = self.ages.take(row, axis=0, mode='wrap')
age = line.take(col, mode='wrap')
return age
def _compute_fates(self):
"""Computes the fate of all cells."""
# Compute the convolved matrix of neighbors
con = convolve(self.cells, self.WEIGHTS, mode='wrap')
# Here's the trick: we assigned 10 to the central element of the
# weights kernel. Therefore, currently dead cells will have a value
# of 0-8 in the convolved matrix, and currently live cells will have
# a value of 10-18 (depending on the number of neighbors).
# Reset the fates grid
self.fates.fill(Fate.StayDead)
# Dead cells with exactly 3 neighbors will be born
self.fates[con == 3] = Fate.Birth
# Live cells with less than 2 neighbors will die by isolation
self.fates[(con >= 10) & (con < 12)] = Fate.DeathByIsolation
# Live cells with 2 or 3 neighbors survive
self.fates[(con == 12) | (con == 13)] = Fate.Survive
# Live cells with more than 3 neighbors die by overcrowding
self.fates[con > 13] = Fate.DeathByOvercrowding
def _apply_fates(self):
"""Applies the fates to all cells."""
# The new cells grid has live cells for every "birth" or "survive"
# fates, and dead cells for everything else
new_cells = np.zeros((self.width, self.height), dtype=np.int8)
new_cells[(self.fates == Fate.Birth) |
(self.fates == Fate.Survive)] = 1
# Check which cells have changed (dead to live or vice-versa)
unchanged = new_cells == self.cells
changed = np.logical_not(unchanged)
# Unchanged cells grow one generation older, changed cells have their
# ages reset to zero
self.ages[unchanged] += 1
self.ages[changed] = 0
# Memorize the new cells grid
self.cells = new_cells
class GameNumPyLight(BaseGameNumPy):
"""Light version of the NumPy/SciPy-based implementation of the Game of
Life.
"""
def _step(self):
"""Computes the next generation of cells based on the current one."""
# Compute the convolved matrix of neighbors
con = convolve(self.cells, self.WEIGHTS, mode='wrap')
# The trick is the same as in the full-featured version, but we don't
# need to track fates, so we can simply set the new live cells to be:
# - currently dead cells with exactly 3 neighbors, and
# - currently live cells with 2 or 3 neighbors
self.cells.fill(0)
self.cells[(con == 3) | (con == 12) | (con == 13)] = 1
def fate(self, row, col):
"""Returns the fate of the cell at the specified location."""
# The light implementation does not know the fates, so it cheats by
# returning "survive" for all currently live cells and "stay dead" for
# all currently dead cells.
line = self.cells.take(row, axis=0, mode='wrap')
cell = line.take(col, mode='wrap')
return Fate.Survive if cell == 1 else Fate.StayDead
def age(self, row, col):
"""Returns the age of a cell, i.e. how many generations it's been in
its current state (dead or alive).
"""
# The light implementation does not know the ages, so it cheats and
# returns a constant value.
return 1000
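# --- Illustrative sketch (not part of the original module) -------------------
# Standalone demonstration of the "central weight 10" trick used by
# GameNumPy._compute_fates() above: after the wrap-around convolution, a value
# of 3 can only come from a dead cell with exactly three live neighbours
# (birth), while 12 or 13 can only come from a live cell with two or three
# live neighbours (survival). Running it on a horizontal blinker yields the
# vertical blinker of the next generation.
def _convolution_trick_demo():
    import numpy as np
    from scipy.ndimage.filters import convolve
    weights = np.array([[1, 1, 1], [1, 10, 1], [1, 1, 1]])
    grid = np.zeros((5, 5), dtype=np.int8)
    grid[2, 1:4] = 1  # horizontal blinker
    con = convolve(grid, weights, mode='wrap')
    next_grid = np.zeros_like(grid)
    next_grid[(con == 3) | (con == 12) | (con == 13)] = 1
    return grid, next_grid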
|
mit
|
jimsimon/sky_engine
|
third_party/jinja2/__init__.py
|
238
|
2270
|
# -*- coding: utf-8 -*-
"""
jinja2
~~~~~~
Jinja2 is a template engine written in pure Python. It provides a
Django inspired non-XML syntax but supports inline expressions and
an optional sandboxed environment.
Nutshell
--------
    Here is a small example of a Jinja2 template::
{% extends 'base.html' %}
{% block title %}Memberlist{% endblock %}
{% block content %}
<ul>
{% for user in users %}
<li><a href="{{ user.url }}">{{ user.username }}</a></li>
{% endfor %}
</ul>
{% endblock %}
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
__docformat__ = 'restructuredtext en'
__version__ = '2.7.1'
# high level interface
from jinja2.environment import Environment, Template
# loaders
from jinja2.loaders import BaseLoader, FileSystemLoader, PackageLoader, \
DictLoader, FunctionLoader, PrefixLoader, ChoiceLoader, \
ModuleLoader
# bytecode caches
from jinja2.bccache import BytecodeCache, FileSystemBytecodeCache, \
MemcachedBytecodeCache
# undefined types
from jinja2.runtime import Undefined, DebugUndefined, StrictUndefined
# exceptions
from jinja2.exceptions import TemplateError, UndefinedError, \
TemplateNotFound, TemplatesNotFound, TemplateSyntaxError, \
TemplateAssertionError
# decorators and public utilities
from jinja2.filters import environmentfilter, contextfilter, \
evalcontextfilter
from jinja2.utils import Markup, escape, clear_caches, \
environmentfunction, evalcontextfunction, contextfunction, \
is_undefined
__all__ = [
'Environment', 'Template', 'BaseLoader', 'FileSystemLoader',
'PackageLoader', 'DictLoader', 'FunctionLoader', 'PrefixLoader',
'ChoiceLoader', 'BytecodeCache', 'FileSystemBytecodeCache',
'MemcachedBytecodeCache', 'Undefined', 'DebugUndefined',
'StrictUndefined', 'TemplateError', 'UndefinedError', 'TemplateNotFound',
'TemplatesNotFound', 'TemplateSyntaxError', 'TemplateAssertionError',
'ModuleLoader', 'environmentfilter', 'contextfilter', 'Markup', 'escape',
'environmentfunction', 'contextfunction', 'clear_caches', 'is_undefined',
'evalcontextfilter', 'evalcontextfunction'
]
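# --- Illustrative sketch (not part of the public Jinja2 API) -----------------
# Minimal usage of the names re-exported above, in the spirit of the
# "Nutshell" docstring: build an Environment around a DictLoader, look up a
# template and render it. The template text and variable names are invented
# for the example.
def _nutshell_demo():
    env = Environment(loader=DictLoader({
        'users.html': '<ul>{% for user in users %}<li>{{ user }}</li>'
                      '{% endfor %}</ul>',
    }))
    return env.get_template('users.html').render(users=['john', 'jane'])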
|
bsd-3-clause
|
grnet/snf-image-creator
|
image_creator/distro/linux.py
|
2
|
38531
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011-2018 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""This module hosts OS-specific code for Linux."""
import os
import os.path
import re
import tempfile
from collections import namedtuple
from functools import wraps
import yaml
import pkg_resources
from image_creator.util import FatalError
from image_creator.bootloader import vbr_bootinfo
from image_creator.distro.unix import Unix, sysprep, add_sysprep_param
X2GO_DESKTOPSESSIONS = {
'CINNAMON': 'cinnamon',
'KDE': 'startkde',
'GNOME': 'gnome-session',
'MATE': 'mate-session',
'XFCE': 'xfce4-session',
'LXDE': 'startlxde',
'TRINITY': 'starttrinity',
'UNITY': 'unity',
}
X2GO_EXECUTABLE = "x2goruncommand"
DISTRO_ORDER = {
"ubuntu": 80,
"linuxmint": 75,
"debian": 70,
"rhel": 60,
"fedora": 58,
"centos": 55,
"scientificlinux": 50,
"sles": 45,
"opensuse": 44,
"archlinux": 40,
"gentoo": 35,
"slackware": 30,
"oraclelinux": 28,
"mageia": 20,
"mandriva": 19,
"cirros": 15,
"pardus": 10
}
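# --- Illustrative note (not part of the upstream module) ---------------------
# _do_collect_metadata() below combines this table with the detected release
# as SORTORDER += 10000 * DISTRO_ORDER[distro] + 100 * major + minor (major
# and minor are clamped to 99), so newer releases of higher-ranked
# distributions get larger scores. A worked example with assumed values:
def _sortorder_demo(distro="ubuntu", major=16, minor=4):
    """Ubuntu 16.04 -> 10000 * 80 + 100 * 16 + 4 = 801604."""
    return 10000 * DISTRO_ORDER[distro] + 100 * major + minor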
CLOUDINIT_FILE_PRIORITY = 60
GRUB1_CONFIG = ['/boot/grub/menu.lst']
GRUB2_CONFIG = ['/boot/grub/grub.cfg',
'/boot/grub2/grub.cfg']
def cloudinit(method):
"""Decorator that adds a check to run only on cloud-init enabled images"""
@wraps(method)
def inner(self):
if not self.cloud_init:
self.out.warn("Not a cloud-init enabled image")
return
return method(self)
return inner
class Linux(Unix):
"""OS class for Linux"""
@add_sysprep_param(
'bootmenu_timeout', 'posint', 10, "Boot menu timeout in seconds")
@add_sysprep_param(
'powerbtn_action', 'string', '/sbin/shutdown -h now',
"The action that should be executed if the power button is pressed")
@add_sysprep_param(
'default_user', 'string', 'user', "Name of default cloud-init user")
def __init__(self, image, **kwargs):
super(Linux, self).__init__(image, **kwargs)
self._uuid = dict()
self._persistent = re.compile('/dev/[hsv]d[a-z][1-9]*')
self.cloud_init = False
# As of 4.00, *SYSLINUX* will search for extlinux.conf then
# syslinux.cfg in each directory before falling back to the next
# directory
#
# http://repo.or.cz/syslinux.git/blob/syslinux-6.01: \
# /txt/syslinux.cfg.txt#l32
#
dirs = ('/boot/extlinux/', '/boot/syslinux/', '/boot/',
'/extlinux/', '/syslinux/', '/')
files = ('extlinux.conf', 'syslinux.cfg')
paths = ["%s%s" % (d, c) for d in dirs for c in files]
self.syslinux = namedtuple(
'syslinux', ['search_dirs', 'search_paths'])(dirs, paths)
def get_cloud_init_config_files(self):
"""Returns the cloud-init configuration files of the image"""
files = []
if self.image.g.is_file('/etc/cloud/cloud.cfg'):
files.append('/etc/cloud/cloud.cfg')
if self.image.g.is_dir('/etc/cloud/cloud.cfg.d'):
for c in self.image.g.readdir('/etc/cloud/cloud.cfg.d'):
if not (c['ftyp'] == 'r' and c['name'].endswith('.cfg')):
continue
files.append('/etc/cloud/cloud.cfg.d/%s' % c['name'])
files.sort()
return files
def cloud_init_config(self):
"""Returns a dictionary with the cloud-init configuration"""
cfg = {}
for c in self.get_cloud_init_config_files():
cfg.update(yaml.load(self.image.g.cat(c)))
return cfg
    @sysprep('Removing user accounts with id greater than 1000', enabled=False)
def _remove_user_accounts(self):
"""Remove all user accounts with id greater than 1000"""
removed_users = {}
# Remove users from /etc/passwd
if self.image.g.is_file('/etc/passwd'):
passwd = []
metadata_users = self.meta['USERS'].split() \
if 'USERS' in self.meta else []
for line in self.image.g.cat('/etc/passwd').splitlines():
fields = line.split(':')
if int(fields[2]) > 1000:
removed_users[fields[0]] = fields
# remove it from the USERS metadata too
if fields[0] in metadata_users:
metadata_users.remove(fields[0])
else:
passwd.append(':'.join(fields))
self.meta['USERS'] = " ".join(metadata_users)
# Delete the USERS metadata if empty
if not self.meta['USERS']:
del self.meta['USERS']
self.image.g.write('/etc/passwd', '\n'.join(passwd) + '\n')
else:
self.out.warn("File: `/etc/passwd' is missing. "
"No users were deleted")
return
if self.image.g.is_file('/etc/shadow'):
# Remove the corresponding /etc/shadow entries
shadow = []
for line in self.image.g.cat('/etc/shadow').splitlines():
fields = line.split(':')
if fields[0] not in removed_users:
shadow.append(':'.join(fields))
self.image.g.write('/etc/shadow', "\n".join(shadow) + '\n')
else:
self.out.warn("File: `/etc/shadow' is missing.")
if self.image.g.is_file('/etc/group'):
# Remove the corresponding /etc/group entries
group = []
for line in self.image.g.cat('/etc/group').splitlines():
fields = line.split(':')
                # Remove groups that have the same name as the removed users
if fields[0] not in removed_users:
group.append(':'.join(fields))
self.image.g.write('/etc/group', '\n'.join(group) + '\n')
# Remove home directories
for home in [field[5] for field in removed_users.values()]:
if self.image.g.is_dir(home) and home.startswith('/home/'):
self.image.g.rm_rf(home)
@sysprep('Renaming default cloud-init user to "%(default_user)s"',
enabled=False)
@cloudinit
def _rename_default_cloud_init_user(self):
"""Rename the default cloud-init user"""
old_name = None
new_name = self.sysprep_params['default_user'].value
for f in self.get_cloud_init_config_files():
cfg = yaml.load(self.image.g.cat(f))
try:
old_name = cfg['system_info']['default_user']['name']
except KeyError:
continue
if old_name != new_name:
self.image.g.mv(f, f + ".bak")
cfg['system_info']['default_user']['name'] = new_name
self.image.g.write(f, yaml.dump(cfg, default_flow_style=False))
else:
self.out.warn(
'The default cloud-init user is already named: "%s"' %
new_name)
if old_name is None:
self.out.warn("No default cloud-init user was found!")
else:
self._collect_cloud_init_metadata()
@sysprep('Enable root account in cloud-init', enabled=False)
@cloudinit
def _enable_root_in_cloud_init(self):
"""Enable root in cloud-init"""
cfg = self.cloud_init_config()
new_cfg = {}
if 'disable_root' not in cfg or cfg['disable_root'] is True:
new_cfg['disable_root'] = False
enabled_users = cfg['users'] if 'users' in cfg else []
if 'root' not in enabled_users:
new_cfg['users'] = enabled_users + ['root']
if not new_cfg:
self.out.warn('Root is already enabled')
return
fname = "%d_snf-image-creator-EnableRoot.cfg" % CLOUDINIT_FILE_PRIORITY
if not self.image.g.is_dir('/etc/cloud/cloud.cfg.d'):
self.image.g.mkdir('/etc/cloud/cloud.cfg.d')
self.image.g.write('/etc/cloud/cloud.cfg.d/%s' % fname,
yaml.dump(new_cfg))
metadata_users = self.meta['USERS'].split() if 'USERS' in self.meta \
else []
metadata_users.insert(0, 'root')
self.meta['USERS'] = " ".join(metadata_users)
if not self.meta['USERS']:
del self.meta['USERS']
@sysprep('Cleaning up password & locking all user accounts')
def _cleanup_passwords(self):
"""Remove all passwords and lock all user accounts"""
shadow = []
for line in self.image.g.cat('/etc/shadow').splitlines():
fields = line.split(':')
if fields[1] not in ('*', '!'):
fields[1] = '!'
shadow.append(":".join(fields))
self.image.g.write('/etc/shadow', "\n".join(shadow) + '\n')
# Remove backup file for /etc/shadow
self.image.g.rm_rf('/etc/shadow-')
@sysprep('Fixing acpid powerdown action')
def _fix_acpid(self):
"""Replace acpid powerdown action scripts to immediately shutdown the
system without checking if a GUI is running.
"""
powerbtn_action = self.sysprep_params['powerbtn_action'].value
events_dir = '/etc/acpi/events'
if not self.image.g.is_dir(events_dir):
self.out.warn("No acpid event directory found")
return
event_exp = re.compile('event=(.+)', re.I)
action_exp = re.compile('action=(.+)', re.I)
for events_file in self.image.g.readdir(events_dir):
if events_file['ftyp'] != 'r':
continue
event = -1
action = -1
fullpath = "%s/%s" % (events_dir, events_file['name'])
content = self.image.g.cat(fullpath).splitlines()
for i in xrange(len(content)):
if event_exp.match(content[i]):
event = i
elif action_exp.match(content[i]):
action = i
if event == -1:
continue
if action == -1:
self.out.warn("Corrupted acpid event file: `%s'" % fullpath)
continue
entry = content[event].split('=')[1].strip()
if entry in ("button[ /]power", "button/power.*"):
content[action] = "action=%s" % powerbtn_action
self.image.g.write(fullpath, "\n".join(content) +
'\n\n### Edited by snf-image-creator ###\n')
return
elif entry == ".*":
self.out.warn("Found action `.*'. Don't know how to handle "
"this. Please edit `%s' image file manually to "
"make the system immediately shutdown when an "
"power button ACPI event occurs." %
content[action].split('=')[1].strip())
return
self.out.warn("No acpi power button event found!")
@sysprep('Removing persistent network interface names')
def _remove_persistent_net_rules(self):
"""Remove udev rules that will keep network interface names persistent
after hardware changes and reboots. Those rules will be created again
the next time the image runs.
"""
rule_file = '/etc/udev/rules.d/70-persistent-net.rules'
if self.image.g.is_file(rule_file):
self.image.g.rm(rule_file)
@sysprep('Removing swap entry from fstab')
def _remove_swap_entry(self):
"""Remove swap entry from /etc/fstab. If swap is the last partition
then the partition will be removed when shrinking is performed. If the
swap partition is not the last partition in the disk or if you are not
going to shrink the image you should probably disable this.
"""
if not self.image.g.is_file('/etc/fstab'):
self.out.warn("File: `/etc/fstab' is missing. No entry removed!")
return
new_fstab = ""
fstab = self.image.g.cat('/etc/fstab')
for line in fstab.splitlines():
entry = line.split('#')[0].strip().split()
if len(entry) == 6 and entry[2] == 'swap':
continue
new_fstab += "%s\n" % line
self.image.g.write('/etc/fstab', new_fstab)
@sysprep('Change boot menu timeout to %(bootmenu_timeout)s seconds')
def _change_bootmenu_timeout(self):
"""Change the boot menu timeout to the one specified by the namesake
system preparation parameter.
"""
timeout = self.sysprep_params['bootmenu_timeout'].value
if self.image.g.is_file('/etc/default/grub'):
self.image.g.aug_init('/', 0)
try:
self.image.g.aug_set('/files/etc/default/grub/GRUB_TIMEOUT',
str(timeout))
finally:
self.image.g.aug_save()
self.image.g.aug_close()
def replace_timeout(remote, regexp, timeout):
"""Replace the timeout value from a config file"""
tmpfd, tmp = tempfile.mkstemp()
try:
for line in self.image.g.cat(remote).splitlines():
if regexp.match(line):
line = re.sub(r'\d+', str(timeout), line)
os.write(tmpfd, line + '\n')
os.close(tmpfd)
tmpfd = None
self.image.g.upload(tmp, remote)
finally:
if tmpfd is not None:
os.close(tmpfd)
os.unlink(tmp)
grub1_regexp = re.compile(r'^\s*timeout\s+\d+\s*$')
grub2_regexp = re.compile(r'^\s*set\s+timeout=\d+\s*$')
for path in GRUB1_CONFIG:
if self.image.g.is_file(path):
replace_timeout(path, grub1_regexp, timeout)
for path in GRUB2_CONFIG:
if self.image.g.is_file(path):
replace_timeout(path, grub2_regexp, timeout)
regexp = re.compile(r'^\s*TIMEOUT\s+\d+\s*$', re.IGNORECASE)
for syslinux_config in self.syslinux.search_paths:
if self.image.g.is_file(syslinux_config):
# In syslinux the timeout unit is 0.1 seconds
replace_timeout(syslinux_config, regexp, timeout * 10)
@sysprep('Replacing fstab & grub non-persistent device references')
def _use_persistent_block_device_names(self):
"""Scan fstab & grub configuration files and replace all non-persistent
device references with UUIDs.
"""
        if not self.image.g.is_file('/etc/fstab'):
            self.out.warn("Omitted! File: `/etc/fstab' does not exist")
            return
# convert all devices in fstab to persistent
persistent_root = self._persistent_fstab()
# convert root device in grub1 to persistent
self._persistent_grub1(persistent_root)
# convert root device in syslinux to persistent
self._persistent_syslinux(persistent_root)
@sysprep('Disabling IPv6 privacy extensions',
             display='Disable IPv6 privacy extensions')
def _disable_ipv6_privacy_extensions(self):
"""Disable IPv6 privacy extensions."""
file_path = '/files/etc/sysctl.conf/net.ipv6.conf.%s.use_tempaddr'
dir_path = '/files/etc/sysctl.d/*/net.ipv6.conf.%s.use_tempaddr'
self.image.g.aug_init('/', 0)
try:
default = self.image.g.aug_match(file_path % 'default') + \
self.image.g.aug_match(dir_path % 'default')
all = self.image.g.aug_match(file_path % 'all') + \
self.image.g.aug_match(dir_path % 'all')
if not default:
self.image.g.aug_set(file_path % 'default', '0')
else:
for token in default:
self.image.g.aug_set(token, '0')
if not all:
self.image.g.aug_set(file_path % 'all', '0')
else:
for token in all:
self.image.g.aug_set(token, '0')
finally:
self.image.g.aug_save()
self.image.g.aug_close()
@sysprep('Disabling predictable network interface naming')
def _disable_predictable_network_interface_naming(self):
"""Disable predictable network interface naming"""
# Predictable Network Interface Names are explained here:
#
# https://www.freedesktop.org/wiki/Software/systemd/
# PredictableNetworkInterfaceNames/
# Creating a link to disable them:
# ln -s /dev/null /etc/systemd/network/99-default.link
# is not enough. We would also need to recreate the initramfs. Passing
# net.ifnames=0 on the kernel command line seems easier.
ifnames = re.compile(r'\s+net\.ifnames=\d+\b')
def repl(match):
"""Append net.ifnames=0"""
if ifnames.search(match.group(1)):
return ifnames.sub(' net.ifnames=0', match.group(1))
else:
return "%s %s" % (match.group(1), 'net.ifnames=0')
self._replace_kernel_params(repl)
if self.image.g.is_file('/etc/default/grub'):
self.image.g.aug_init('/', 0)
path = '/files/etc/default/grub/GRUB_CMDLINE_LINUX'
path_default = path + '_DEFAULT'
try:
cmdline = ""
cmdline_default = ""
if self.image.g.aug_match(path):
cmdline = self.image.g.aug_get(path)
if ifnames.search(cmdline):
self.image.g.aug_set(
path, ifnames.sub(' net.ifnames=0', cmdline))
if self.image.g.aug_match(path_default):
cmdline_default = self.image.g.aug_get(path_default)
if ifnames.search(cmdline_default):
self.image.g.aug_set(
path_default,
ifnames.sub(' net.ifnames=0', cmdline_default))
if not (ifnames.search(cmdline) or
ifnames.search(cmdline_default)):
                    # This looks a little bit weird but it's a good way to
                    # append text to a variable without messing up the
# quoting. The variable could have a value foo or 'foo' or
# "foo". Appending ' bar' will lead to a valid result.
self.image.g.aug_set(path, "%s' %s'" % (cmdline.strip(),
'net.ifnames=0'))
finally:
self.image.g.aug_save()
self.image.g.aug_close()
@sysprep('Disable serial console')
def _disable_serial_console(self):
"""Disable outputting to the serial console"""
regexp = re.compile(
r'(\s+(console=tty[SU]|earlyprintk=(serial|ttyS))[^\s"'"'"']*)+')
def repl(match):
"""Remove console options containing serial ports"""
return regexp.sub(" ", match.group(1))
self._replace_kernel_params(repl)
if self.image.g.is_file('/etc/default/grub'):
self.image.g.aug_init('/', 0)
try:
for var in ('', '_DEFAULT'):
path = '/files/etc/default/grub/GRUB_CMDLINE_LINUX' + var
if self.image.g.aug_match(path):
cmdline = self.image.g.aug_get(path)
self.image.g.aug_set(path, regexp.sub(" ", cmdline))
finally:
self.image.g.aug_save()
self.image.g.aug_close()
@sysprep('Clearing local machine ID configuration file',
display='Clear local machine ID configuration file')
def _clear_local_machine_id_configuration_file(self):
"""Clear the /etc/machine-id file if present. This file is used by
systemd to uniquely identify systems and will be automatically
populated on the next boot if empty."""
if self.image.g.is_file('/etc/machine-id'):
self.image.g.truncate('/etc/machine-id')
if self.image.g.is_file('/var/lib/dbus/machine-id'):
self.image.g.truncate('/var/lib/dbus/machine-id')
@sysprep('Removing NetworkManager system connections',
display='Remove NetworkManager system connections')
def _remove_networkmanager_system_connections(self):
"""Remove files under /etc/NetworkManager/system-connections. Those
files may cause NetworkManager to misbehave."""
connections = '/etc/NetworkManager/system-connections'
if not self.image.g.is_dir(connections):
return
cnt = [0]
def count(f):
cnt[0] += 1
self._foreach_file(connections, count, ftype='r', maxdepth=1)
self._foreach_file(connections, self.image.g.rm, ftype='r', maxdepth=1)
        if cnt[0]:
self.out.success("removed %d connections" % cnt[0])
@sysprep('Shrinking image (may take a while)', nomount=True)
def _shrink(self):
"""Shrink the last file system and update the partition table"""
device = self.image.shrink()
self.shrinked = True
if not device:
# Shrinking failed. No need to proceed.
return
        # Check the Volume Boot Record of the shrunk partition to determine
# if a bootloader is present on it.
vbr = self.image.g.pread_device(device, 512, 0)
bootloader = vbr_bootinfo(vbr)
if bootloader == 'syslinux':
# EXTLINUX needs to be reinstalled after shrinking
with self.mount(silent=True):
basedir = self._get_syslinux_base_dir()
self.out.info("Updating the EXTLINUX installation under %s ..."
% basedir, False)
self.image.g.command(['extlinux', '-U', basedir])
self.out.success("done")
def _get_syslinux_base_dir(self):
"""Find the installation directory we need to use to when updating
syslinux
"""
cfg = None
for path in self.syslinux.search_paths:
if self.image.g.is_file(path):
cfg = path
break
if not cfg:
# Maybe we should fail here
self.out.warn("Unable to find syslinux configuration file!")
return "/boot"
kernel_regexp = re.compile(r'\s*kernel\s+(.+)', re.IGNORECASE)
initrd_regexp = re.compile(r'\s*initrd\s+(.+)', re.IGNORECASE)
append_regexp = re.compile(
r'\s*[Aa][Pp][Pp][Ee][Nn][Dd]\s+.*\binitrd=([^\s]+)')
kernel = None
initrd = None
for line in self.image.g.cat(cfg).splitlines():
kernel_match = kernel_regexp.match(line)
if kernel_match:
kernel = kernel_match.group(1).strip()
continue
initrd_match = initrd_regexp.match(line)
if initrd_match:
initrd = initrd_match.group(1).strip()
continue
append_match = append_regexp.match(line)
if append_match:
initrd = append_match.group(1)
if kernel and kernel[0] != '/':
relative_path = kernel
elif initrd and initrd[0] != '/':
relative_path = initrd
else:
# The config does not contain relative paths. Use the directory of
# the config.
return os.path.dirname(cfg)
for d in self.syslinux.search_dirs:
if self.image.g.is_file(d+relative_path):
return d
raise FatalError("Unable to find the working directory of extlinux")
def _persistent_grub1(self, new_root):
"""Replaces non-persistent device name occurrences with persistent
ones in GRUB1 configuration files.
"""
if self.image.g.is_file('/boot/grub/menu.lst'):
grub1 = '/boot/grub/menu.lst'
elif self.image.g.is_file('/etc/grub.conf'):
grub1 = '/etc/grub.conf'
else:
return
self.image.g.aug_init('/', 0)
try:
roots = self.image.g.aug_match(
'/files%s/title[*]/kernel/root' % grub1)
for root in roots:
dev = self.image.g.aug_get(root)
if not self._is_persistent(dev):
# This is not always correct. Grub may contain root entries
# for other systems, but we only support 1 OS per hard
# disk, so this shouldn't harm.
self.image.g.aug_set(root, new_root)
finally:
self.image.g.aug_save()
self.image.g.aug_close()
def _persistent_syslinux(self, new_root):
"""Replace non-persistent root device name occurrences with persistent
ones in the syslinux configuration files.
"""
append_regexp = re.compile(
r'\s*APPEND\s+.*\broot=/dev/[hsv]d[a-z][1-9]*\b', re.IGNORECASE)
for config in self.syslinux.search_paths:
if not self.image.g.is_file(config):
continue
            # There is no augeas lens for syslinux :-(
tmpfd, tmp = tempfile.mkstemp()
try:
for line in self.image.g.cat(config).splitlines():
if append_regexp.match(line):
line = re.sub(r'\broot=/dev/[hsv]d[a-z][1-9]*\b',
'root=%s' % new_root, line)
os.write(tmpfd, line + '\n')
os.close(tmpfd)
tmpfd = None
self.image.g.upload(tmp, config)
finally:
if tmpfd is not None:
os.close(tmpfd)
os.unlink(tmp)
def _persistent_fstab(self):
"""Replaces non-persistent device name occurrences in /etc/fstab with
persistent ones.
"""
mpoints = self.image.g.mountpoints()
if not mpoints:
pass # TODO: error handling
device_dict = dict([[mpoint, dev] for dev, mpoint in mpoints])
root_dev = None
new_fstab = ""
fstab = self.image.g.cat('/etc/fstab')
for line in fstab.splitlines():
line, dev, mpoint = self._convert_fstab_line(line, device_dict)
new_fstab += "%s\n" % line
if mpoint == '/':
root_dev = dev
self.image.g.write('/etc/fstab', new_fstab)
if root_dev is None:
pass # TODO: error handling
return root_dev
def _convert_fstab_line(self, line, devices):
"""Replace non-persistent device names in an fstab line to their UUID
equivalent
"""
orig = line
line = line.split('#')[0].strip()
if not line:
return orig, "", ""
entry = line.split()
if len(entry) != 6:
self.out.warn("Detected abnormal entry in fstab")
return orig, "", ""
dev = entry[0]
mpoint = entry[1]
if not self._is_persistent(dev):
if mpoint in devices:
dev = "UUID=%s" % self._get_uuid(devices[mpoint])
entry[0] = dev
else:
# comment out the entry
entry[0] = "#%s" % dev
return " ".join(entry), dev, mpoint
return orig, dev, mpoint
def _do_inspect(self):
"""Run various diagnostics to check if media is supported"""
self.out.info(
'Checking if the media contains logical volumes (LVM)...', False)
has_lvm = True if self.image.g.lvs() else False
if has_lvm:
self.out.info()
self.image.set_unsupported('The media contains logical volumes')
else:
self.out.success('no')
def _collect_cloud_init_metadata(self):
"""Collect metadata regarding cloud-init"""
def warn(msg):
self.out.warn("Cloud-init: " + msg)
self.meta['CLOUD_INIT'] = 'yes'
cfg = self.cloud_init_config()
try:
default_user = cfg['system_info']['default_user']['name']
except KeyError:
default_user = None
warn("No default user defined")
users = []
if 'users' in cfg:
for u in cfg['users']:
if isinstance(u, (str, unicode)):
if u == 'default':
if default_user:
users.append(default_user)
else:
warn("Ignoring undefined default user")
else:
users.append(u)
elif isinstance(u, dict):
if 'snapuser' in u:
warn("Ignoring snapuser: %s" % u['snapuser'])
elif 'inactive' in u and u['inactive'] is True:
try:
warn("Ignoring inactive user: %s" % u['name'])
except KeyError:
pass
elif 'system' in u and u['system'] is True:
try:
warn("Ignoring system user: %s" % u['name'])
except KeyError:
pass
if users:
self.meta['USERS'] = " ".join(users)
if default_user:
self.meta['CLOUD_INIT_DEFAULT_USER'] = default_user
def _do_collect_metadata(self):
"""Collect metadata about the OS"""
super(Linux, self)._do_collect_metadata()
users = self._get_passworded_users()
self.meta["USERS"] = " ".join(users)
# Delete the USERS metadata if empty
if not self.meta['USERS']:
self.out.warn("No passworded users found!")
del self.meta['USERS']
kernels = []
for f in self.image.g.ls('/boot'):
if f.startswith('config-'):
kernels.append(f[7:])
if kernels:
kernels.sort(key=pkg_resources.parse_version)
self.meta['KERNEL'] = kernels[-1]
distro = self.image.g.inspect_get_distro(self.root)
major = self.image.g.inspect_get_major_version(self.root)
if major > 99:
major = 99
minor = self.image.g.inspect_get_minor_version(self.root)
if minor > 99:
minor = 99
try:
self.meta['SORTORDER'] += \
10000 * DISTRO_ORDER[distro] + 100 * major + minor
except KeyError:
pass
if self.is_enabled('sshd'):
ssh = []
opts = self.ssh_connection_options(users)
for user in opts['users']:
ssh.append("ssh:port=%d,user=%s" % (opts['port'], user))
if 'REMOTE_CONNECTION' not in self.meta:
self.meta['REMOTE_CONNECTION'] = ""
else:
self.meta['REMOTE_CONNECTION'] += " "
if ssh:
self.meta['REMOTE_CONNECTION'] += " ".join(ssh)
else:
self.meta['REMOTE_CONNECTION'] += "ssh:port=%d" % opts['port']
# Check if x2go is installed
x2go_installed = False
desktops = set()
for path in ('/bin', '/usr/bin', '/usr/local/bin'):
if self.image.g.is_file("%s/%s" % (path, X2GO_EXECUTABLE)):
x2go_installed = True
for name, exe in X2GO_DESKTOPSESSIONS.items():
if self.image.g.is_file("%s/%s" % (path, exe)):
desktops.add(name)
if x2go_installed:
self.meta['REMOTE_CONNECTION'] += " "
if not desktops:
self.meta['REMOTE_CONNECTION'] += "x2go"
else:
self.meta['REMOTE_CONNECTION'] += \
" ".join(["x2go:session=%s" % d for d in desktops])
else:
self.out.warn("OpenSSH Daemon is not configured to run on boot")
# Check if NetworkManager is enabled
if self.is_enabled('NetworkManager'):
self.meta['NM_NETWORKING'] = "yes"
if self.is_enabled('cloud-init'):
self.cloud_init = True
else:
# Many OSes use a systemd generator for cloud-init:
#
# When booting under systemd, a generator will run that determines
# if cloud-init.target should be included in the boot goals. By
# default, this generator will enable cloud-init. It will not
# enable cloud-init if either:
#
# * A file exists: /etc/cloud/cloud-init.disabled
# * The kernel command line as found in /proc/cmdline contains
# cloud-init=disabled. When running in a container, the kernel
# command line is not honored, but cloud-init will read an
# environment variable named KERNEL_CMDLINE in its place.
#
# http://cloudinit.readthedocs.io/en/latest/topics/boot.html
generator_found = False
for i in ("/run", "/etc", "/usr/local/lib", "/usr/lib"):
if self.image.g.is_file("%s/systemd/system-generators/"
"cloud-init-generator" % i):
generator_found = True
break
if generator_found:
self.cloud_init = \
not self.image.g.is_file("/etc/cloud/cloud-init.disabled")
if self.cloud_init:
self._collect_cloud_init_metadata()
def is_enabled(self, service):
"""Check if a service is enabled to run on boot"""
systemd_services = '/etc/systemd/system/multi-user.target.wants'
exec_start = re.compile(r'^\s*ExecStart=.+bin/%s\s?' % service)
if self.image.g.is_dir(systemd_services):
for entry in self.image.g.readdir(systemd_services):
if entry['ftyp'] not in ('l', 'f'):
continue
service_file = "%s/%s" % (systemd_services, entry['name'])
# Could be a broken link
if self.image.g.is_file(service_file, followsymlinks=True):
for line in self.image.g.cat(service_file).splitlines():
if exec_start.search(line):
return True
else:
self.out.warn("Unable to open file: %s" % service_file)
found = set()
def check_file(path):
regexp = re.compile(r"[/=\s'\"]%s('\")?\s" % service)
for line in self.image.g.cat(path).splitlines():
line = line.split('#', 1)[0].strip()
if not line:
continue
if regexp.search(line):
found.add(path)
return
# Check upstart config files under /etc/init
# Only examine *.conf files
if self.image.g.is_dir('/etc/init'):
self._foreach_file('/etc/init', check_file, maxdepth=1,
include=r'.+\.conf$')
if found:
return True
# Check scripts under /etc/rc[1-5].d/ and /etc/rc.d/rc[1-5].d/
for conf in ["/etc/%src%d.d" % (d, i) for i in xrange(1, 6)
for d in ('', 'rc.d/')]:
try:
for entry in self.image.g.readdir(conf):
if entry['ftyp'] not in ('l', 'f'):
continue
check_file("%s/%s" % (conf, entry['name']))
if found:
return True
except RuntimeError:
continue
return False
def _get_passworded_users(self):
"""Returns a list of non-locked user accounts"""
if not self.image.g.is_file('/etc/shadow'):
self.out.warn(
"Unable to collect user info. File: `/etc/shadow' is missing!")
return []
users = []
regexp = re.compile(r'(\S+):((?:!\S+)|(?:[^!*]\S+)|):(?:\S*:){6}')
for line in self.image.g.cat('/etc/shadow').splitlines():
match = regexp.match(line)
if not match:
continue
user, passwd = match.groups()
if passwd and passwd[0] == '!':
self.out.warn("Ignoring locked %s account." % user)
else:
users.append(user)
return users
def _is_persistent(self, dev):
"""Checks if a device name is persistent."""
return not self._persistent.match(dev)
def _get_uuid(self, dev):
"""Returns the UUID corresponding to a device"""
if dev in self._uuid:
return self._uuid[dev]
uuid = self.image.g.vfs_uuid(dev)
assert uuid
self._uuid[dev] = uuid
return uuid
def _replace_kernel_params(self, repl):
"""Change the kernel parameters passed by the boot loader"""
for path in GRUB2_CONFIG:
if self.image.g.is_file(path):
cfg = re.sub(r'^(\s*linux(?:16)?\s+.*)', repl,
self.image.g.cat(path),
flags=re.MULTILINE)
self.image.g.write(path, cfg)
for path in self.syslinux.search_paths:
if self.image.g.is_file(path):
cfg = re.sub(r'^(\s*append\s+.*)', repl,
self.image.g.cat(path), flags=re.MULTILINE)
self.image.g.write(path, cfg)
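# --- Illustrative sketch (not part of the upstream module) -------------------
# _disable_predictable_network_interface_naming() appends to
# GRUB_CMDLINE_LINUX with "%s' %s'": whatever quoting style the variable
# already uses (foo, 'foo' or "foo"), wrapping the appended text in its own
# single quotes keeps the shell assignment valid, because the shell
# concatenates adjacent quoted words. A standalone demonstration using shlex
# (plain Python, no augeas or guestfs involved):
def _append_kernel_param_demo(param='net.ifnames=0'):
    import shlex
    results = {}
    for current in ('quiet', "'quiet splash'", '"quiet splash"'):
        appended = "%s' %s'" % (current.strip(), param)
        # shlex parses the resulting value the same way the shell would
        results[current] = shlex.split(appended)
    return results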
# vim: set sta sts=4 shiftwidth=4 sw=4 et ai :
|
gpl-3.0
|
rocky/python-xdis
|
xdis/opcodes/opcode_27pypy.py
|
1
|
2029
|
# (C) Copyright 2017, 2020 by Rocky Bernstein
"""
PYPY 2.7 opcodes
This is like Python 2.7's opcode.py, with some classification
of stack usage.
"""
import xdis.opcodes.opcode_27 as opcode_27
from xdis.opcodes.base import (
def_op,
extended_format_ATTR,
extended_format_CALL_FUNCTION,
extended_format_MAKE_FUNCTION_older,
extended_format_RAISE_VARARGS_older,
extended_format_RETURN_VALUE,
finalize_opcodes,
format_CALL_FUNCTION_pos_name_encoded,
format_MAKE_FUNCTION_default_argc,
format_RAISE_VARARGS_older,
format_extended_arg,
init_opdata,
jrel_op,
name_op,
nargs_op,
update_pj3,
)
version = 2.7
python_implementation = "PyPy"
l = locals()
init_opdata(l, opcode_27, version, is_pypy=True)
# FIXME: DRY common PYPY opcode additions
# PyPy only
# ----------
name_op(l, "LOOKUP_METHOD", 201, 1, 2)
nargs_op(l, "CALL_METHOD", 202, -1, 1)
l["hasnargs"].append(202)
# Used only in single-mode compilation list-comprehension generators
def_op(l, "BUILD_LIST_FROM_ARG", 203)
# Used only in assert statements
jrel_op(l, "JUMP_IF_NOT_DEBUG", 204, conditional=True)
# PyPy 2.7.13 (and 3.6.1) start to introduce LOAD_REVDB_VAR
import sys
if sys.version_info[:3] >= (2, 7, 13) and sys.version_info[4] >= 42:
def_op(l, "LOAD_REVDB_VAR", 205)
# There are no opcodes to remove or change.
# If there were, they'd be listed below.
# FIXME remove (fix uncompyle6)
update_pj3(globals(), l)
opcode_arg_fmt = {
"MAKE_FUNCTION": format_MAKE_FUNCTION_default_argc,
"EXTENDED_ARG": format_extended_arg,
"CALL_FUNCTION": format_CALL_FUNCTION_pos_name_encoded,
"RAISE_VARARGS": format_RAISE_VARARGS_older,
}
finalize_opcodes(l)
opcode_extended_fmt = {
"CALL_FUNCTION": extended_format_CALL_FUNCTION,
"LOAD_ATTR": extended_format_ATTR,
"MAKE_FUNCTION": extended_format_MAKE_FUNCTION_older,
"RAISE_VARARGS": extended_format_RAISE_VARARGS_older,
"RETURN_VALUE": extended_format_RETURN_VALUE,
"STORE_ATTR": extended_format_ATTR,
}
|
gpl-2.0
|
alrusdi/lettuce
|
tests/integration/lib/Django-1.2.5/django/db/backends/oracle/base.py
|
44
|
30653
|
"""
Oracle database backend for Django.
Requires cx_Oracle: http://cx-oracle.sourceforge.net/
"""
import datetime
import sys
import time
from decimal import Decimal
def _setup_environment(environ):
import platform
# Cygwin requires some special voodoo to set the environment variables
# properly so that Oracle will see them.
if platform.system().upper().startswith('CYGWIN'):
try:
import ctypes
except ImportError, e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading ctypes: %s; "
"the Oracle backend requires ctypes to "
"operate correctly under Cygwin." % e)
kernel32 = ctypes.CDLL('kernel32')
for name, value in environ:
kernel32.SetEnvironmentVariableA(name, value)
else:
import os
os.environ.update(environ)
_setup_environment([
# Oracle takes client-side character set encoding from the environment.
('NLS_LANG', '.UTF8'),
# This prevents unicode from getting mangled by getting encoded into the
# potentially non-unicode database character set.
('ORA_NCHAR_LITERAL_REPLACE', 'TRUE'),
])
try:
import cx_Oracle as Database
except ImportError, e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading cx_Oracle module: %s" % e)
from django.db import utils
from django.db.backends import *
from django.db.backends.signals import connection_created
from django.db.backends.oracle.client import DatabaseClient
from django.db.backends.oracle.creation import DatabaseCreation
from django.db.backends.oracle.introspection import DatabaseIntrospection
from django.utils.encoding import smart_str, force_unicode
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
# Check whether cx_Oracle was compiled with the WITH_UNICODE option. This will
# also be True in Python 3.0.
if int(Database.version.split('.', 1)[0]) >= 5 and not hasattr(Database, 'UNICODE'):
convert_unicode = force_unicode
else:
convert_unicode = smart_str
class DatabaseFeatures(BaseDatabaseFeatures):
empty_fetchmany_value = ()
needs_datetime_string_cast = False
interprets_empty_strings_as_nulls = True
uses_savepoints = True
can_return_id_from_insert = True
allow_sliced_subqueries = False
class DatabaseOperations(BaseDatabaseOperations):
compiler_module = "django.db.backends.oracle.compiler"
def autoinc_sql(self, table, column):
# To simulate auto-incrementing primary keys in Oracle, we have to
# create a sequence and a trigger.
sq_name = get_sequence_name(table)
tr_name = get_trigger_name(table)
tbl_name = self.quote_name(table)
col_name = self.quote_name(column)
sequence_sql = """
DECLARE
i INTEGER;
BEGIN
SELECT COUNT(*) INTO i FROM USER_CATALOG
WHERE TABLE_NAME = '%(sq_name)s' AND TABLE_TYPE = 'SEQUENCE';
IF i = 0 THEN
EXECUTE IMMEDIATE 'CREATE SEQUENCE "%(sq_name)s"';
END IF;
END;
/""" % locals()
trigger_sql = """
CREATE OR REPLACE TRIGGER "%(tr_name)s"
BEFORE INSERT ON %(tbl_name)s
FOR EACH ROW
WHEN (new.%(col_name)s IS NULL)
BEGIN
SELECT "%(sq_name)s".nextval
INTO :new.%(col_name)s FROM dual;
END;
/""" % locals()
return sequence_sql, trigger_sql
def date_extract_sql(self, lookup_type, field_name):
# http://download-east.oracle.com/docs/cd/B10501_01/server.920/a96540/functions42a.htm#1017163
if lookup_type == 'week_day':
# TO_CHAR(field, 'D') returns an integer from 1-7, where 1=Sunday.
return "TO_CHAR(%s, 'D')" % field_name
else:
return "EXTRACT(%s FROM %s)" % (lookup_type, field_name)
def date_trunc_sql(self, lookup_type, field_name):
# Oracle uses TRUNC() for both dates and numbers.
# http://download-east.oracle.com/docs/cd/B10501_01/server.920/a96540/functions155a.htm#SQLRF06151
if lookup_type == 'day':
sql = 'TRUNC(%s)' % field_name
else:
sql = "TRUNC(%s, '%s')" % (field_name, lookup_type)
return sql
def convert_values(self, value, field):
if isinstance(value, Database.LOB):
value = value.read()
if field and field.get_internal_type() == 'TextField':
value = force_unicode(value)
# Oracle stores empty strings as null. We need to undo this in
# order to adhere to the Django convention of using the empty
# string instead of null, but only if the field accepts the
# empty string.
if value is None and field and field.empty_strings_allowed:
value = u''
# Convert 1 or 0 to True or False
elif value in (1, 0) and field and field.get_internal_type() in ('BooleanField', 'NullBooleanField'):
value = bool(value)
# Force floats to the correct type
elif value is not None and field and field.get_internal_type() == 'FloatField':
value = float(value)
# Convert floats to decimals
elif value is not None and field and field.get_internal_type() == 'DecimalField':
value = util.typecast_decimal(field.format_number(value))
# cx_Oracle always returns datetime.datetime objects for
# DATE and TIMESTAMP columns, but Django wants to see a
# python datetime.date, .time, or .datetime. We use the type
# of the Field to determine which to cast to, but it's not
# always available.
# As a workaround, we cast to date if all the time-related
# values are 0, or to time if the date is 1/1/1900.
# This could be cleaned a bit by adding a method to the Field
# classes to normalize values from the database (the to_python
# method is used for validation and isn't what we want here).
elif isinstance(value, Database.Timestamp):
# In Python 2.3, the cx_Oracle driver returns its own
# Timestamp object that we must convert to a datetime class.
if not isinstance(value, datetime.datetime):
value = datetime.datetime(value.year, value.month,
value.day, value.hour, value.minute, value.second,
value.fsecond)
if field and field.get_internal_type() == 'DateTimeField':
pass
elif field and field.get_internal_type() == 'DateField':
value = value.date()
elif field and field.get_internal_type() == 'TimeField' or (value.year == 1900 and value.month == value.day == 1):
value = value.time()
elif value.hour == value.minute == value.second == value.microsecond == 0:
value = value.date()
return value
def datetime_cast_sql(self):
return "TO_TIMESTAMP(%s, 'YYYY-MM-DD HH24:MI:SS.FF')"
def deferrable_sql(self):
return " DEFERRABLE INITIALLY DEFERRED"
def drop_sequence_sql(self, table):
return "DROP SEQUENCE %s;" % self.quote_name(get_sequence_name(table))
def fetch_returned_insert_id(self, cursor):
return long(cursor._insert_id_var.getvalue())
def field_cast_sql(self, db_type):
if db_type and db_type.endswith('LOB'):
return "DBMS_LOB.SUBSTR(%s)"
else:
return "%s"
def last_insert_id(self, cursor, table_name, pk_name):
sq_name = get_sequence_name(table_name)
cursor.execute('SELECT "%s".currval FROM dual' % sq_name)
return cursor.fetchone()[0]
def lookup_cast(self, lookup_type):
if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
return "UPPER(%s)"
return "%s"
def max_in_list_size(self):
return 1000
def max_name_length(self):
return 30
def prep_for_iexact_query(self, x):
return x
def process_clob(self, value):
if value is None:
return u''
return force_unicode(value.read())
def quote_name(self, name):
# SQL92 requires delimited (quoted) names to be case-sensitive. When
# not quoted, Oracle has case-insensitive behavior for identifiers, but
# always defaults to uppercase.
# We simplify things by making Oracle identifiers always uppercase.
if not name.startswith('"') and not name.endswith('"'):
name = '"%s"' % util.truncate_name(name.upper(),
self.max_name_length())
return name.upper()
def random_function_sql(self):
return "DBMS_RANDOM.RANDOM"
def regex_lookup_9(self, lookup_type):
raise NotImplementedError("Regexes are not supported in Oracle before version 10g.")
def regex_lookup_10(self, lookup_type):
if lookup_type == 'regex':
match_option = "'c'"
else:
match_option = "'i'"
return 'REGEXP_LIKE(%%s, %%s, %s)' % match_option
def regex_lookup(self, lookup_type):
# If regex_lookup is called before it's been initialized, then create
# a cursor to initialize it and recur.
from django.db import connection
connection.cursor()
return connection.ops.regex_lookup(lookup_type)
def return_insert_id(self):
return "RETURNING %s INTO %%s", (InsertIdVar(),)
def savepoint_create_sql(self, sid):
return convert_unicode("SAVEPOINT " + self.quote_name(sid))
def savepoint_rollback_sql(self, sid):
return convert_unicode("ROLLBACK TO SAVEPOINT " + self.quote_name(sid))
def sql_flush(self, style, tables, sequences):
# Return a list of 'TRUNCATE x;', 'TRUNCATE y;',
# 'TRUNCATE z;'... style SQL statements
if tables:
# Oracle does support TRUNCATE, but it seems to get us into
# FK referential trouble, whereas DELETE FROM table works.
sql = ['%s %s %s;' % \
(style.SQL_KEYWORD('DELETE'),
style.SQL_KEYWORD('FROM'),
style.SQL_FIELD(self.quote_name(table)))
for table in tables]
# Since we've just deleted all the rows, running our sequence
# ALTER code will reset the sequence to 0.
for sequence_info in sequences:
sequence_name = get_sequence_name(sequence_info['table'])
table_name = self.quote_name(sequence_info['table'])
column_name = self.quote_name(sequence_info['column'] or 'id')
query = _get_sequence_reset_sql() % {'sequence': sequence_name,
'table': table_name,
'column': column_name}
sql.append(query)
return sql
else:
return []
def sequence_reset_sql(self, style, model_list):
from django.db import models
output = []
query = _get_sequence_reset_sql()
for model in model_list:
for f in model._meta.local_fields:
if isinstance(f, models.AutoField):
table_name = self.quote_name(model._meta.db_table)
sequence_name = get_sequence_name(model._meta.db_table)
column_name = self.quote_name(f.column)
output.append(query % {'sequence': sequence_name,
'table': table_name,
'column': column_name})
# Only one AutoField is allowed per model, so don't
# continue to loop
break
for f in model._meta.many_to_many:
if not f.rel.through:
table_name = self.quote_name(f.m2m_db_table())
sequence_name = get_sequence_name(f.m2m_db_table())
column_name = self.quote_name('id')
output.append(query % {'sequence': sequence_name,
'table': table_name,
'column': column_name})
return output
def start_transaction_sql(self):
return ''
def tablespace_sql(self, tablespace, inline=False):
return "%sTABLESPACE %s" % ((inline and "USING INDEX " or ""),
self.quote_name(tablespace))
def value_to_db_time(self, value):
if value is None:
return None
if isinstance(value, basestring):
return datetime.datetime(*(time.strptime(value, '%H:%M:%S')[:6]))
return datetime.datetime(1900, 1, 1, value.hour, value.minute,
value.second, value.microsecond)
def year_lookup_bounds_for_date_field(self, value):
first = '%s-01-01'
second = '%s-12-31'
return [first % value, second % value]
def combine_expression(self, connector, sub_expressions):
"Oracle requires special cases for %% and & operators in query expressions"
if connector == '%%':
return 'MOD(%s)' % ','.join(sub_expressions)
elif connector == '&':
return 'BITAND(%s)' % ','.join(sub_expressions)
elif connector == '|':
raise NotImplementedError("Bit-wise or is not supported in Oracle.")
return super(DatabaseOperations, self).combine_expression(connector, sub_expressions)
class _UninitializedOperatorsDescriptor(object):
def __get__(self, instance, owner):
# If connection.operators is looked up before a connection has been
# created, transparently initialize connection.operators to avert an
# AttributeError.
if instance is None:
raise AttributeError("operators not available as class attribute")
# Creating a cursor will initialize the operators.
instance.cursor().close()
return instance.__dict__['operators']
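# --- Illustrative sketch (not Django code) -----------------------------------
# The descriptor above is an instance of a common lazy-initialization pattern:
# the first instance access triggers the expensive setup, which stores the
# real value in the instance __dict__; because this is a non-data descriptor
# (no __set__), the instance attribute then shadows it on later lookups.
# A minimal standalone rendition with made-up names:
class _LazyAttributeSketch(object):
    def __get__(self, instance, owner):
        if instance is None:
            raise AttributeError("only available on instances")
        # pretend this is the expensive initialization step
        instance.__dict__['value'] = 'initialized on first access'
        return instance.__dict__['value']
class _LazySketchOwner(object):
    value = _LazyAttributeSketch()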
class DatabaseWrapper(BaseDatabaseWrapper):
operators = _UninitializedOperatorsDescriptor()
_standard_operators = {
'exact': '= %s',
'iexact': '= UPPER(%s)',
'contains': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'icontains': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'endswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'istartswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'iendswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
}
_likec_operators = _standard_operators.copy()
_likec_operators.update({
'contains': "LIKEC %s ESCAPE '\\'",
'icontains': "LIKEC UPPER(%s) ESCAPE '\\'",
'startswith': "LIKEC %s ESCAPE '\\'",
'endswith': "LIKEC %s ESCAPE '\\'",
'istartswith': "LIKEC UPPER(%s) ESCAPE '\\'",
'iendswith': "LIKEC UPPER(%s) ESCAPE '\\'",
})
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.oracle_version = None
self.features = DatabaseFeatures()
self.ops = DatabaseOperations()
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = BaseDatabaseValidation(self)
def _valid_connection(self):
return self.connection is not None
def _connect_string(self):
settings_dict = self.settings_dict
if not settings_dict['HOST'].strip():
settings_dict['HOST'] = 'localhost'
if settings_dict['PORT'].strip():
dsn = Database.makedsn(settings_dict['HOST'],
int(settings_dict['PORT']),
settings_dict['NAME'])
else:
dsn = settings_dict['NAME']
return "%s/%s@%s" % (settings_dict['USER'],
settings_dict['PASSWORD'], dsn)
def _cursor(self):
cursor = None
if not self._valid_connection():
conn_string = convert_unicode(self._connect_string())
self.connection = Database.connect(conn_string, **self.settings_dict['OPTIONS'])
cursor = FormatStylePlaceholderCursor(self.connection)
# Set oracle date to ansi date format. This only needs to execute
# once when we create a new connection. We also set the Territory
# to 'AMERICA' which forces Sunday to evaluate to a '1' in TO_CHAR().
cursor.execute("ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS' "
"NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS.FF' "
"NLS_TERRITORY = 'AMERICA'")
if 'operators' not in self.__dict__:
# Ticket #14149: Check whether our LIKE implementation will
# work for this connection or we need to fall back on LIKEC.
# This check is performed only once per DatabaseWrapper
# instance per thread, since subsequent connections will use
# the same settings.
try:
cursor.execute("SELECT 1 FROM DUAL WHERE DUMMY %s"
% self._standard_operators['contains'],
['X'])
except utils.DatabaseError:
self.operators = self._likec_operators
else:
self.operators = self._standard_operators
try:
self.oracle_version = int(self.connection.version.split('.')[0])
# There's no way for the DatabaseOperations class to know the
# currently active Oracle version, so we do some setups here.
# TODO: Multi-db support will need a better solution (a way to
# communicate the current version).
if self.oracle_version <= 9:
self.ops.regex_lookup = self.ops.regex_lookup_9
else:
self.ops.regex_lookup = self.ops.regex_lookup_10
except ValueError:
pass
try:
self.connection.stmtcachesize = 20
except:
# Django docs specify cx_Oracle version 4.3.1 or higher, but
# stmtcachesize is available only in 4.3.2 and up.
pass
connection_created.send(sender=self.__class__, connection=self)
if not cursor:
cursor = FormatStylePlaceholderCursor(self.connection)
return cursor
# Oracle doesn't support savepoint commits. Ignore them.
def _savepoint_commit(self, sid):
pass
def _commit(self):
if self.connection is not None:
try:
return self.connection.commit()
except Database.IntegrityError, e:
# In case cx_Oracle implements (now or in a future version)
# raising this specific exception
raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
except Database.DatabaseError, e:
# cx_Oracle 5.0.4 raises a cx_Oracle.DatabaseError exception
# with the following attributes and values:
# code = 2091
# message = 'ORA-02091: transaction rolled back
# 'ORA-02291: integrity constraint (TEST_DJANGOTEST.SYS
# _C00102056) violated - parent key not found'
# We convert that particular case to our IntegrityError exception
x = e.args[0]
if hasattr(x, 'code') and hasattr(x, 'message') \
and x.code == 2091 and 'ORA-02291' in x.message:
raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
raise utils.DatabaseError, utils.DatabaseError(*tuple(e)), sys.exc_info()[2]
class OracleParam(object):
"""
Wrapper object for formatting parameters for Oracle. If the string
representation of the value is large enough (greater than 4000 characters)
the input size needs to be set as CLOB. Alternatively, if the parameter
has an `input_size` attribute, then the value of the `input_size` attribute
will be used instead. Otherwise, no input size will be set for the
parameter when executing the query.
"""
def __init__(self, param, cursor, strings_only=False):
if hasattr(param, 'bind_parameter'):
self.smart_str = param.bind_parameter(cursor)
else:
self.smart_str = convert_unicode(param, cursor.charset,
strings_only)
if hasattr(param, 'input_size'):
# If parameter has `input_size` attribute, use that.
self.input_size = param.input_size
elif isinstance(param, basestring) and len(param) > 4000:
# Mark any string param greater than 4000 characters as a CLOB.
self.input_size = Database.CLOB
else:
self.input_size = None
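# Illustrative sketch (not part of the original backend) of how OracleParam
# sizes its parameters; the cursor and the values below are hypothetical:
#
#     OracleParam(u'short text', cursor).input_size   -> None
#     OracleParam(u'x' * 5000, cursor).input_size     -> Database.CLOB
#
# Objects exposing a `bind_parameter` method (such as the cursor-variable
# wrappers defined below) are passed through to cx_Oracle unchanged.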
class VariableWrapper(object):
"""
An adapter class for cursor variables that prevents the wrapped object
    from being converted into a string when used to instantiate an OracleParam.
This can be used generally for any other object that should be passed into
Cursor.execute as-is.
"""
def __init__(self, var):
self.var = var
def bind_parameter(self, cursor):
return self.var
def __getattr__(self, key):
return getattr(self.var, key)
def __setattr__(self, key, value):
if key == 'var':
self.__dict__[key] = value
else:
setattr(self.var, key, value)
class InsertIdVar(object):
"""
A late-binding cursor variable that can be passed to Cursor.execute
as a parameter, in order to receive the id of the row created by an
insert statement.
"""
def bind_parameter(self, cursor):
param = cursor.cursor.var(Database.NUMBER)
cursor._insert_id_var = param
return param
class FormatStylePlaceholderCursor(object):
"""
Django uses "format" (e.g. '%s') style placeholders, but Oracle uses ":var"
style. This fixes it -- but note that if you want to use a literal "%s" in
a query, you'll need to use "%%s".
We also do automatic conversion between Unicode on the Python side and
UTF-8 -- for talking to Oracle -- in here.
"""
charset = 'utf-8'
def __init__(self, connection):
self.cursor = connection.cursor()
# Necessary to retrieve decimal values without rounding error.
self.cursor.numbersAsStrings = True
# Default arraysize of 1 is highly sub-optimal.
self.cursor.arraysize = 100
def _format_params(self, params):
return tuple([OracleParam(p, self, True) for p in params])
def _guess_input_sizes(self, params_list):
sizes = [None] * len(params_list[0])
for params in params_list:
for i, value in enumerate(params):
if value.input_size:
sizes[i] = value.input_size
self.setinputsizes(*sizes)
def _param_generator(self, params):
return [p.smart_str for p in params]
def execute(self, query, params=None):
if params is None:
params = []
else:
params = self._format_params(params)
args = [(':arg%d' % i) for i in range(len(params))]
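        # Illustrative example of the conversion performed below (the query and
        # the parameter are hypothetical): a query such as
        #     SELECT * FROM foo WHERE bar = %s
        # is rewritten, together with params ['baz'], into
        #     SELECT * FROM foo WHERE bar = :arg0
        # before being handed to cx_Oracle.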
        # cx_Oracle wants no trailing ';' for SQL statements. For PL/SQL, it
        # does want a trailing ';' but not a trailing '/'. However, these
# characters must be included in the original query in case the query
# is being passed to SQL*Plus.
if query.endswith(';') or query.endswith('/'):
query = query[:-1]
query = convert_unicode(query % tuple(args), self.charset)
self._guess_input_sizes([params])
try:
return self.cursor.execute(query, self._param_generator(params))
except Database.IntegrityError, e:
raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
except Database.DatabaseError, e:
# cx_Oracle <= 4.4.0 wrongly raises a DatabaseError for ORA-01400.
if hasattr(e.args[0], 'code') and e.args[0].code == 1400 and not isinstance(e, IntegrityError):
raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
raise utils.DatabaseError, utils.DatabaseError(*tuple(e)), sys.exc_info()[2]
def executemany(self, query, params=None):
try:
args = [(':arg%d' % i) for i in range(len(params[0]))]
except (IndexError, TypeError):
# No params given, nothing to do
return None
        # cx_Oracle wants no trailing ';' for SQL statements. For PL/SQL, it
        # does want a trailing ';' but not a trailing '/'. However, these
# characters must be included in the original query in case the query
# is being passed to SQL*Plus.
if query.endswith(';') or query.endswith('/'):
query = query[:-1]
query = convert_unicode(query % tuple(args), self.charset)
formatted = [self._format_params(i) for i in params]
self._guess_input_sizes(formatted)
try:
return self.cursor.executemany(query,
[self._param_generator(p) for p in formatted])
except Database.IntegrityError, e:
raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
except Database.DatabaseError, e:
# cx_Oracle <= 4.4.0 wrongly raises a DatabaseError for ORA-01400.
if hasattr(e.args[0], 'code') and e.args[0].code == 1400 and not isinstance(e, IntegrityError):
raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
raise utils.DatabaseError, utils.DatabaseError(*tuple(e)), sys.exc_info()[2]
def fetchone(self):
row = self.cursor.fetchone()
if row is None:
return row
return _rowfactory(row, self.cursor)
def fetchmany(self, size=None):
if size is None:
size = self.arraysize
return tuple([_rowfactory(r, self.cursor)
for r in self.cursor.fetchmany(size)])
def fetchall(self):
return tuple([_rowfactory(r, self.cursor)
for r in self.cursor.fetchall()])
def var(self, *args):
return VariableWrapper(self.cursor.var(*args))
def arrayvar(self, *args):
return VariableWrapper(self.cursor.arrayvar(*args))
def __getattr__(self, attr):
if attr in self.__dict__:
return self.__dict__[attr]
else:
return getattr(self.cursor, attr)
def __iter__(self):
return CursorIterator(self.cursor)
class CursorIterator(object):
"""Cursor iterator wrapper that invokes our custom row factory."""
def __init__(self, cursor):
self.cursor = cursor
self.iter = iter(cursor)
def __iter__(self):
return self
def next(self):
return _rowfactory(self.iter.next(), self.cursor)
def _rowfactory(row, cursor):
# Cast numeric values as the appropriate Python type based upon the
# cursor description, and convert strings to unicode.
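    # Illustrative mapping implied by the branches below (the column types are
    # examples only):
    #     NUMBER(11)    -> int          (precision > 0, scale == 0)
    #     NUMBER(10, 2) -> Decimal      (precision > 0, scale > 0)
    #     FLOAT         -> float        (scale == -127, precision > 0)
    #     plain NUMBER  -> int/Decimal  (scale == -127, precision == 0)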
casted = []
for value, desc in zip(row, cursor.description):
if value is not None and desc[1] is Database.NUMBER:
precision, scale = desc[4:6]
if scale == -127:
if precision == 0:
# NUMBER column: decimal-precision floating point
# This will normally be an integer from a sequence,
# but it could be a decimal value.
if '.' in value:
value = Decimal(value)
else:
value = int(value)
else:
# FLOAT column: binary-precision floating point.
# This comes from FloatField columns.
value = float(value)
elif precision > 0:
# NUMBER(p,s) column: decimal-precision fixed point.
# This comes from IntField and DecimalField columns.
if scale == 0:
value = int(value)
else:
value = Decimal(value)
elif '.' in value:
# No type information. This normally comes from a
# mathematical expression in the SELECT list. Guess int
# or Decimal based on whether it has a decimal point.
value = Decimal(value)
else:
value = int(value)
elif desc[1] in (Database.STRING, Database.FIXED_CHAR,
Database.LONG_STRING):
value = to_unicode(value)
casted.append(value)
return tuple(casted)
def to_unicode(s):
"""
Convert strings to Unicode objects (and return all other data types
unchanged).
"""
if isinstance(s, basestring):
return force_unicode(s)
return s
def _get_sequence_reset_sql():
# TODO: colorize this SQL code with style.SQL_KEYWORD(), etc.
return """
DECLARE
table_value integer;
seq_value integer;
BEGIN
SELECT NVL(MAX(%(column)s), 0) INTO table_value FROM %(table)s;
SELECT NVL(last_number - cache_size, 0) INTO seq_value FROM user_sequences
WHERE sequence_name = '%(sequence)s';
WHILE table_value > seq_value LOOP
SELECT "%(sequence)s".nextval INTO seq_value FROM dual;
END LOOP;
END;
/"""
def get_sequence_name(table):
name_length = DatabaseOperations().max_name_length() - 3
return '%s_SQ' % util.truncate_name(table, name_length).upper()
def get_trigger_name(table):
name_length = DatabaseOperations().max_name_length() - 3
return '%s_TR' % util.truncate_name(table, name_length).upper()
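# Illustrative examples (assuming Oracle's default 30-character name limit,
# so table names are truncated to 27 characters before the suffix):
#     get_sequence_name('blog_post')  -> 'BLOG_POST_SQ'
#     get_trigger_name('blog_post')   -> 'BLOG_POST_TR'
# Longer table names are shortened by util.truncate_name (typically with a
# short hash suffix) before '_SQ'/'_TR' is appended.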
|
gpl-3.0
|
fasaxc/felix
|
calico/felix/test/stub_ipsets.py
|
1
|
4497
|
# -*- coding: utf-8 -*-
# Copyright 2014 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
felix.test.stub_ipsets
~~~~~~~~~~~~~~~~~~~~~~
Stub version of the ipsets module.
"""
import logging
import difflib
# Logger
log = logging.getLogger(__name__)
def reset():
ipset_state.reset()
class IpsetState(object):
def __init__(self):
self.ipsets = {}
def reset(self):
self.ipsets.clear()
def swap(self, name1, name2):
ipset1 = self.ipsets[name1]
ipset2 = self.ipsets[name2]
if ipset1.typename != ipset2.typename:
raise StubIpsetError(
"Cannot swap ipset %s of type %s with ipset %s of type %s" %
(name1, ipset1.typename, name2, ipset2.typename))
if ipset1.family != ipset2.family:
raise StubIpsetError(
"Cannot swap ipset %s of family %s with ipset %s of family %s" %
(name1, ipset1.family, name2, ipset2.family))
tmp = ipset1.entries
ipset1.entries = ipset2.entries
ipset2.entries = tmp
def flush(self, name):
self.ipsets[name].entries.clear()
def create(self, name, typename, family):
if name not in self.ipsets:
self.ipsets[name] = StubIpset(name, typename, family)
def destroy(self, name):
if name in self.ipsets:
del self.ipsets[name]
def add(self, name, value):
self.ipsets[name].entries.add(value)
def list_names(self):
return self.ipsets.keys()
class StubIpset(object):
def __init__(self, name, typename, family):
self.name = name
self.typename = typename
self.family = family
self.entries = set()
def __str__(self):
return("Name: %s\nType: %s (%s)\nMembers:\n%s\n" %
(self.name, self.typename, self.family,
"\n".join(sorted(self.entries))))
class StubIpsetError(Exception):
pass
class UnexpectedStateException(Exception):
def __init__(self, actual, expected):
super(UnexpectedStateException, self).__init__(
"ipsets state does not match")
self.diff = "\n".join(difflib.unified_diff(
expected.split("\n"),
actual.split("\n")))
self.actual = actual
self.expected = expected
def __str__(self):
return ("%s\nDIFF:\n%s\n\nACTUAL:\n%s\nEXPECTED\n%s" %
(self.message, self.diff, self.actual, self.expected))
def check_state(expected_ipsets):
"""
Checks that the current state matches the expected state. Throws an
exception if it does not. Note that we do not check the "tmp" ipsets.
That is because whether or not they are present is quite complicated,
and writing test code to duplicate the logic would be pointless, especially
since we only really care that the right used ipsets exist.
"""
actual = "\n".join([str(ipset_state.ipsets[name])
for name in sorted(ipset_state.ipsets.keys())
if "tmp" not in name])
expected = "\n".join([str(expected_ipsets.ipsets[name])
for name in sorted(expected_ipsets.ipsets.keys())
if "tmp" not in name])
if actual != expected:
raise UnexpectedStateException(actual, expected)
#*****************************************************************************#
#* Methods that match the real interface. *#
#*****************************************************************************#
def swap(name1, name2):
ipset_state.swap(name1, name2)
def flush(name):
ipset_state.flush(name)
def create(name, typename, family):
ipset_state.create(name, typename, family)
def destroy(name):
ipset_state.destroy(name)
def add(name, value):
ipset_state.add(name, value)
def list_names():
return ipset_state.list_names()
# One global variable - the existing state.
ipset_state = IpsetState()
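if __name__ == '__main__':
    # Minimal illustrative self-check (not part of the original stub): build up
    # some state through the module-level helpers and compare it against an
    # expected IpsetState using check_state(). The ipset name and address are
    # made up for the example.
    create("felix-test", "hash:ip", "inet")
    add("felix-test", "10.0.0.1")
    expected = IpsetState()
    expected.create("felix-test", "hash:ip", "inet")
    expected.add("felix-test", "10.0.0.1")
    check_state(expected)
    print("stub ipsets state matches expected state")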
|
apache-2.0
|
chipster/chipster
|
ext/applications/apache-activemq-5.10.0/examples/stomp/python/stompest/sync/publisher.py
|
9
|
1679
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
import time
from stompest.config import StompConfig
from stompest.sync import Stomp
user = os.getenv('ACTIVEMQ_USER') or 'admin'
password = os.getenv('ACTIVEMQ_PASSWORD') or 'password'
host = os.getenv('ACTIVEMQ_HOST') or 'localhost'
port = int(os.getenv('ACTIVEMQ_PORT') or 61613)
destination = sys.argv[1:2] or ['/topic/event']
destination = destination[0]
messages = 10000
data = 'Hello World from Python'
config = StompConfig('tcp://%s:%d' % (host, port), login=user, passcode=password, version='1.1')
client = Stomp(config)
client.connect(host='mybroker')
count = 0
start = time.time()
for _ in xrange(messages):
client.send(destination=destination, body=data, headers={'persistent': 'false'})
count += 1
diff = time.time() - start
print 'Sent %s frames in %f seconds' % (count, diff)
client.disconnect(receipt='bye')
client.receiveFrame()
client.close()
|
mit
|
repotvsupertuga/tvsupertuga.repository
|
script.module.tulip/lib/tulip/url_dispatcher.py
|
1
|
4656
|
# -*- coding: utf-8 -*-
'''
Tulip routine libraries, based on lambda's lamlib
Url dispatcher module thanks to tknorris
Author Twilight0
License summary below, for more details please read license.txt file
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
class URL_Dispatcher:
def __init__(self):
        self.func_registry = {}
        self.args_registry = {}
        self.kwargs_registry = {}
def register(self, action, args=None, kwargs=None):
"""
Decorator function to register a function as a plugin:// url endpoint
mode: the mode value passed in the plugin:// url
args: a list of strings that are the positional arguments to expect
kwargs: a list of strings that are the keyword arguments to expect
* Positional argument must be in the order the function expect
* kwargs can be in any order
* kwargs without positional arguments are supported by passing in a kwargs but no args
* If there are no arguments at all, just "action" can be specified
"""
if args is None:
args = []
if kwargs is None:
kwargs = []
def decorator(f):
if action in self.func_registry:
message = 'Error: {0} already registered as {1}'.format(str(f), action)
raise Exception(message)
self.func_registry[action.strip()] = f
self.args_registry[action] = args
self.kwargs_registry[action] = kwargs
return f
return decorator
def dispatch(self, action, queries):
"""
Dispatch function to execute function registered for the provided mode
mode: the string that the function was associated with
queries: a dictionary of the parameters to be passed to the called function
"""
if action not in self.func_registry:
message = 'Error: Attempt to invoke unregistered mode |{0}|'.format(action)
raise Exception(message)
args = []
kwargs = {}
unused_args = queries.copy()
if self.args_registry[action]:
# positional arguments are all required
for arg in self.args_registry[action]:
arg = arg.strip()
if arg in queries:
args.append(self.__coerce(queries[arg]))
del unused_args[arg]
else:
message = 'Error: mode |{0}| requested argument |{1}| but it was not provided.'.format(action, arg)
raise Exception(message)
if self.kwargs_registry[action]:
# kwargs are optional
for arg in self.kwargs_registry[action]:
arg = arg.strip()
if arg in queries:
kwargs[arg] = self.__coerce(queries[arg])
del unused_args[arg]
if 'action' in unused_args:
del unused_args['action'] # delete action last in case it's used by the target function
if unused_args:
pass
self.func_registry[action](*args, **kwargs)
def showmodes(self):
from kodi_six import xbmc
for action in sorted(self.func_registry, key=lambda x: int(x)):
value = self.func_registry[action]
args = self.args_registry[action]
kwargs = self.kwargs_registry[action]
line = 'Action {0} Registered - {1} args: {2} kwargs: {3}'.format(str(action), str(value), str(args), str(kwargs))
xbmc.log(line, xbmc.LOGNOTICE)
# since all params are passed as strings, do any conversions necessary to get good types (e.g. boolean)
def __coerce(self, arg):
try:
temp = arg.lower()
if temp == 'true':
return True
elif temp == 'false':
return False
elif temp == 'none':
return None
return arg
except:
return arg
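if __name__ == '__main__':
    # Illustrative usage sketch (not part of the original module); the action
    # name and parameters below are made up.
    dispatcher = URL_Dispatcher()

    @dispatcher.register('0', kwargs=['name'])
    def root(name=None):
        print('root mode called with name={0}'.format(name))

    # Query parameters arrive as strings; __coerce turns 'true' into True.
    dispatcher.dispatch('0', {'action': '0', 'name': 'true'})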
|
gpl-2.0
|
m4rx9/rna-pdb-tools
|
rna_tools/tools/rna_refinement/rna_refinement.py
|
2
|
6344
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""rna_refinement - RNA refinement with QRNAS.
Models of RNA 3D structures obtained by modeling methods often suffer from local inaccuracies such as clashes or physically improbable bond lengths, backbone conformations, or sugar puckers. To ensure high quality of models, a procedure of refinement should be applied as a final step in the modeling pipeline. The software tool QRNAS was developed in our laboratory to perform local refinement of nucleic acid structures based on an extended version of the AMBER force field. The extensions consist of energy terms associated with introduction of explicit hydrogen bonds, idealization of base pair planarity and regularization of backbone conformation.
Read more: Piatkowski, P., Kasprzak, J. M., Kumar, D., Magnus, M., Chojnowski, G., & Bujnicki, J. M. (2016). RNA 3D Structure Modeling by Combination of Template-Based Method ModeRNA, Template-Free Folding with SimRNA, and Refinement with QRNAS. Methods in Molecular Biology (Clifton, N.J.), 1490(Suppl), 217-235. http://doi.org/10.1007/978-1-4939-6433-8_14
Right now, there are 20k steps of refinement.
.. image:: ../pngs/qrnas_0k.png
The initial structure, ``179c48aa-c0d3-4bd6-8e06-12081da22998_ALL_thrs6.20A_clust01-000001_AA.pdb``.
.. image:: ../pngs/qrnas_3k.png
after 3k, ~10min
.. image:: ../pngs/qrnas_10k.png
after 10k steps, around 30min
.. image:: ../pngs/qrnas_20k.png
after 20k steps, around 1h.
**Installation of QRNAS**
Download the QRNAS package from http://genesilico.pl/qrnas/,
unzip the archive, and compile it with the following command::
./qrnamake sequential
This should create an executable version of QRNAS.
.. warning:: Please, change the name of the binary file from QRNA to QRNAS!
By default the script searches for QRNAS in <rna-pdb-tools>/opt/qrnas/.
Usage of QRNA::
QRNA - Quick Refinement of Nucleic Acids (0.2 alpha)
by Juliusz Stasiewicz ([email protected])
To use type:
QRNA -i <input PDBfile> [-o <output PDBfile>] [-c <configfile>] [-p] [-m <restraintsfile>]
OR specify <input PDBfile>, <output PDBfile> and <restraintsfile> in <configfile> and type just:
QRNA -c <configfile>
**Installation of this util**
Set up in your bashrc::
export QRNAS_PATH=<your path to qrnas> # e.g. /home/magnus/src/rna-pdb-tools/opt/qrnas
but by default rna-pdb-tools searches for qrnas in <rna-pdb-tools>/opt/qrnas.
**QRNAS at Peyote2**
There is no problem running QRNAS at our Genesilico cluster, `peyote2`. Tested by mmagnus --170822. Copy the QRNAS files to peyote and run ``./qrnamake sequential``.
To run it at a cluster with the Sun Grid Engine queuing system (the one with qsub ;-))::
for p in *.pdb; do echo "rna_refinement.py $p >& ${p}.log" | qsub -cwd -V -pe mpi 1 -N "r_$p" ; done
DONE:
- [x] clean up the output structure
- [x] configuration should not be hardcoded
"""
from __future__ import print_function
import argparse
import re
import os
import subprocess
import random
import string
from shutil import copyfile
try:
PATH = os.environ['RNA_PDB_TOOLS']
except:
print ('Set up RNA_PDB_TOOLS, see Installation note')
pass
else:
QRNAS_PATH = os.getenv('QRNAS_PATH', PATH + '/opt/qrnas/')
class QRNAS:
"""QRNAS"""
def run(self, inputfile, outputfile, run_in_tmp=False, job_id_random=False, steps=10):
"""Run QRNAS.
Args:
            inputfile (str) : path to an input file, use the .pdb extension
            outputfile (str) : path to an output file
            run_in_tmp (bool): if yes, run in /tmp, otherwise run in current-directory/tmp/THEjHxilN3nLx2Aj8REg/input
            job_id_random (bool): if yes, then the job id will be random, e.g. tmp/gOIFfSdo9tnelFtvs3A7/output.pdb; if not,
                                  tmp/output/output.pdb; output, not input, because then you can run the same input
                                  a few times and get different outputs (and the names of the outputs will give the
                                  folder names)
steps (int) : # of steps
Returns:
none: works on input/output files
"""
cwd = os.getcwd()
# get config
conftxt = open(QRNAS_PATH + os.sep + 'configfile.txt').read()
        conftxt_tmp = re.sub(r'\#?\s?NSTEPS.+\d+', 'NSTEPS ' + str(steps), conftxt)
if job_id_random:
JOB_ID = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(20))
else:
JOB_ID = os.path.basename(outputfile.replace('.pdb', ''))
if run_in_tmp:
JOB_PATH = '/tmp/' + os.sep + JOB_ID + os.sep
else:
JOB_PATH = cwd + os.sep + 'tmp' + os.sep + JOB_ID + os.sep # run it in place?
os.makedirs(JOB_PATH)
# get temp config
with open(JOB_PATH + os.sep + 'configfile.txt','w') as f:
f.write(conftxt_tmp)
# copy input to qrnas folder
qrnas_inputfile = JOB_PATH + os.path.basename(inputfile)
qrnas_outputfile = JOB_PATH + os.path.basename(inputfile).replace('.pdb', '.refx.pdb')
copyfile(inputfile, qrnas_inputfile)
os.chdir(QRNAS_PATH)
cmd = './QRNAS -i ' + qrnas_inputfile + \
' -c ' + JOB_PATH + 'configfile.txt ' + \
' -o ' + qrnas_outputfile
print(cmd)
stdout = open(JOB_PATH + 'stdout.txt', 'w')
stderr = open(JOB_PATH + 'stderr.txt', 'w')
subprocess.call(cmd, shell=True, stdout=stdout, stderr=stderr)
os.chdir(cwd)
print ("Save to %s" % outputfile)
copyfile(qrnas_outputfile, outputfile)
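# Illustrative programmatic use of the class above (the paths are hypothetical
# and QRNAS must be installed under QRNAS_PATH):
#
#     q = QRNAS()
#     q.run('model.pdb', 'model_refx.pdb', steps=20000)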
def get_parser():
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-s', '--steps', help="# of steps, default: 20k ", default=20000)
parser.add_argument('fn', help="input pdb file")
parser.add_argument('-o', '--output_file', help="output pdb file")
return parser
# main
if __name__ == '__main__':
parser = get_parser()
args = parser.parse_args()
q = QRNAS()
if not args.output_file:
output_file = args.fn.replace('.pdb', '_refx.pdb')
else:
output_file = args.output_file
q.run(args.fn, output_file, steps=args.steps)
|
mit
|
mailosaurapp/mailosaur-python
|
tests/emails_test.py
|
1
|
7625
|
import os
from datetime import datetime
from unittest import TestCase
from .mailer import Mailer
from mailosaur import MailosaurClient
from mailosaur.models import SearchCriteria, MailosaurException
class EmailsTest(TestCase):
@classmethod
def setUpClass(cls):
api_key = os.getenv('MAILOSAUR_API_KEY')
base_url = os.getenv('MAILOSAUR_BASE_URL')
cls.server = os.getenv('MAILOSAUR_SERVER')
        if api_key is None or cls.server is None:
raise Exception("Missing necessary environment variables - refer to README.md")
cls.client = MailosaurClient(api_key, base_url)
cls.client.messages.delete_all(cls.server)
Mailer.send_emails(cls.client, cls.server, 5)
cls.emails = cls.client.messages.list(cls.server).items
def test_list(self):
self.assertEqual(5, len(self.emails))
for email in self.emails:
self.validate_email_summary(email)
def test_get(self):
email_to_retrieve = self.emails[0]
email = self.client.messages.get(email_to_retrieve.id)
self.validate_email(email)
self.validate_headers(email)
def test_get_not_found(self):
with self.assertRaises(MailosaurException):
self.client.messages.get("efe907e9-74ed-4113-a3e0-a3d41d914765")
def test_wait_for(self):
host = os.getenv('MAILOSAUR_SMTP_HOST', 'mailosaur.io')
test_email_address = "wait_for_test.%s@%s" % (self.server, host)
Mailer.send_email(self.client, self.server, test_email_address)
criteria = SearchCriteria()
criteria.sent_to = test_email_address
email = self.client.messages.wait_for(self.server, criteria)
self.validate_email(email)
def test_search_no_criteria_error(self):
with self.assertRaises(MailosaurException):
self.client.messages.search(self.server, SearchCriteria())
def test_search_by_sent_to(self):
target_email = self.emails[1]
criteria = SearchCriteria()
criteria.sent_to = target_email.to[0].email
results = self.client.messages.search(self.server, criteria).items
self.assertEqual(1, len(results))
self.assertEqual(target_email.to[0].email, results[0].to[0].email)
self.assertEqual(target_email.subject, results[0].subject)
def test_search_by_sent_to_invalid_email(self):
criteria = SearchCriteria()
criteria.sent_to = ".not_an_email_address"
with self.assertRaises(MailosaurException):
self.client.messages.search(self.server, criteria)
def test_search_by_body(self):
target_email = self.emails[1]
unique_string = target_email.subject[0:10]
criteria = SearchCriteria()
criteria.body = "%s html" % (unique_string)
results = self.client.messages.search(self.server, criteria).items
self.assertEqual(1, len(results))
self.assertEqual(target_email.to[0].email, results[0].to[0].email)
self.assertEqual(target_email.subject, results[0].subject)
def test_search_by_subject(self):
target_email = self.emails[1]
unique_string = target_email.subject[0:10]
criteria = SearchCriteria()
criteria.subject = unique_string
results = self.client.messages.search(self.server, criteria).items
self.assertEqual(1, len(results))
self.assertEqual(target_email.to[0].email, results[0].to[0].email)
self.assertEqual(target_email.subject, results[0].subject)
def test_spam_analysis(self):
target_id = self.emails[0].id
result = self.client.analysis.spam(target_id)
for rule in result.spam_filter_results.spam_assassin:
self.assertIsNotNone(rule.rule)
self.assertIsNotNone(rule.description)
def test_delete(self):
target_email_id = self.emails[4].id
self.client.messages.delete(target_email_id)
# Attempting to delete again should fail
with self.assertRaises(MailosaurException):
self.client.messages.delete(target_email_id)
def validate_email(self, email):
self.validate_metadata(email)
self.validate_attachments(email)
self.validate_html(email)
self.validate_text(email)
def validate_email_summary(self, email):
self.validate_metadata(email)
self.assertEqual(2, email.attachments)
def validate_html(self, email):
# Html.Body
self.assertTrue(email.html.body.startswith("<div dir=\"ltr\">"))
# Html.Links
self.assertEqual(3, len(email.html.links))
self.assertEqual("https://mailosaur.com/", email.html.links[0].href)
self.assertEqual("mailosaur", email.html.links[0].text)
self.assertEqual("https://mailosaur.com/", email.html.links[1].href)
self.assertIsNone(email.html.links[1].text)
self.assertEqual("http://invalid/", email.html.links[2].href)
self.assertEqual("invalid", email.html.links[2].text)
# Html.Images
self.assertTrue(email.html.images[1].src.startswith('cid:'))
self.assertEqual("Inline image 1", email.html.images[1].alt)
def validate_text(self, email):
# Text.Body
self.assertTrue(email.text.body.startswith("this is a test"))
# Text.Links
self.assertEqual(2, len(email.text.links))
self.assertEqual("https://mailosaur.com/", email.text.links[0].href)
self.assertEqual(email.text.links[0].href, email.text.links[0].text)
self.assertEqual("https://mailosaur.com/", email.text.links[1].href)
self.assertEqual(email.text.links[1].href, email.text.links[1].text)
def validate_headers(self, email):
expected_from_header = "%s <%s>" % (email.sender[0].name, email.sender[0].email)
expected_to_header = "%s <%s>" % (email.to[0].name, email.to[0].email)
headers = email.metadata.headers
# Invalid python3 syntax
# print [h for h in headers if h.field.lower() == "from"]
# self.assertEqual(expected_from_header, [h for h in headers if h.field.lower() == "from"][0].value)
# self.assertEqual(expected_from_header, [h for h in headers if h.field.lower() == "to"][0].value)
# self.assertEqual(email.subject, [h for h in headers if h.field.lower() == "subject"][0].value)
def validate_metadata(self, email):
self.assertEqual(1, len(email.sender))
self.assertEqual(1, len(email.to))
self.assertIsNotNone(email.sender[0].email)
self.assertIsNotNone(email.sender[0].name)
self.assertIsNotNone(email.to[0].email)
self.assertIsNotNone(email.to[0].name)
self.assertIsNotNone(email.subject)
self.assertIsNotNone(email.server)
self.assertEqual(datetime.strftime(datetime.now(), '%Y-%m-%d'), datetime.strftime(email.received, '%Y-%m-%d'))
def validate_attachments(self, email):
self.assertEqual(2, len(email.attachments))
file1 = email.attachments[0]
self.assertIsNotNone(file1.id)
self.assertIsNotNone(file1.url)
self.assertEqual(82138, file1.length)
self.assertEqual("cat.png", file1.file_name)
self.assertEqual("image/png", file1.content_type)
file2 = email.attachments[1]
self.assertIsNotNone(file2.id)
self.assertIsNotNone(file2.url)
self.assertEqual(212080, file2.length)
self.assertEqual("dog.png", file2.file_name)
self.assertEqual("image/png", file2.content_type)
if __name__ == '__main__':
    import unittest
    unittest.main()
|
mit
|
DylanMcCall/rhythmbox-songinfo-context-menu
|
plugins/lyrics/LyricWikiParser.py
|
3
|
2039
|
# -*- Mode: python; coding: utf-8; tab-width: 8; indent-tabs-mode: t; -*-
#
# Copyright (C) 2007 Jonathan Matthew
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# The Rhythmbox authors hereby grant permission for non-GPL compatible
# GStreamer plugins to be used and distributed together with GStreamer
# and Rhythmbox. This permission is above and beyond the permissions granted
# by the GPL license by which Rhythmbox is covered. If you modify this code
# you may extend this exception to your version of the code, but you are not
# obligated to do so. If you do not wish to do so, delete this exception
# statement from your version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
import urllib.parse
import rb
from xml.dom import minidom
class LyricWikiParser(object):
def __init__(self, artist, title):
self.artist = artist
self.title = title
def search(self, callback, *data):
artist = urllib.parse.quote(self.artist.replace(' ', '_'))
title = urllib.parse.quote(self.title.replace(' ', '_'))
htstring = 'http://lyricwiki.org/api.php?artist=%s&song=%s&fmt=text' % (artist, title)
loader = rb.Loader()
loader.get_url (htstring, self.got_lyrics, callback, *data)
def got_lyrics(self, result, callback, *data):
if result is None or result == "Not found":
callback (None, *data)
return
result = result.decode('iso-8859-1')
result += "\n\nLyrics provided by lyricwiki.org"
callback (result, *data)
|
gpl-2.0
|
maximinus/wargame
|
wargame-examples/04_tweens/chained-tweens.py
|
1
|
1109
|
#!/usr/bin/env python
import os
import pygame
import wargame.engine
from wargame.scene import Scene
from wargame.nodes import ImageNode
from wargame.tweens import ChainedTween, MoveTween
# example of moving Node2d
def game():
resources = os.path.join(os.getcwd(), '../')
controller = wargame.engine.init(resources)
# add a sprite from an image
sprite = ImageNode.from_image(100, 100, 'sprites.soldier')
# add a chained tween with >1 tween
sprite.tween = ChainedTween([MoveTween(500, sprite.rect, pygame.Rect(100, 0, 0, 0)),
MoveTween(500, sprite.rect, pygame.Rect(0, 100, 0, 0)),
MoveTween(500, sprite.rect, pygame.Rect(-100, 0, 0, 0)),
MoveTween(500, sprite.rect, pygame.Rect(0, -100, 0, 0))], loop=True)
# I add the node to a SCENE
scene = Scene([sprite])
# I add the scene to the ENGINE
controller.add_scene('start', scene)
# I tell the engine what scene to start and run the controller
controller.run('start')
if __name__ == '__main__':
game()
|
gpl-3.0
|
chirilo/kuma
|
vendor/packages/logilab/common/visitor.py
|
93
|
3444
|
# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:[email protected]
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""A generic visitor abstract implementation.
"""
__docformat__ = "restructuredtext en"
def no_filter(_):
return 1
# Iterators ###################################################################
class FilteredIterator(object):
def __init__(self, node, list_func, filter_func=None):
self._next = [(node, 0)]
if filter_func is None:
filter_func = no_filter
self._list = list_func(node, filter_func)
def __next__(self):
try:
return self._list.pop(0)
except :
return None
next = __next__
# Base Visitor ################################################################
class Visitor(object):
def __init__(self, iterator_class, filter_func=None):
self._iter_class = iterator_class
self.filter = filter_func
def visit(self, node, *args, **kargs):
"""
launch the visit on a given node
call 'open_visit' before the beginning of the visit, with extra args
given
when all nodes have been visited, call the 'close_visit' method
"""
self.open_visit(node, *args, **kargs)
return self.close_visit(self._visit(node))
def _visit(self, node):
iterator = self._get_iterator(node)
n = next(iterator)
while n:
result = n.accept(self)
n = next(iterator)
return result
def _get_iterator(self, node):
return self._iter_class(node, self.filter)
def open_visit(self, *args, **kargs):
"""
method called at the beginning of the visit
"""
pass
def close_visit(self, result):
"""
method called at the end of the visit
"""
return result
# standard visited mixin ######################################################
class VisitedMixIn(object):
"""
Visited interface allow node visitors to use the node
"""
def get_visit_name(self):
"""
return the visit name for the mixed class. When calling 'accept', the
method <'visit_' + name returned by this method> will be called on the
visitor
"""
try:
return self.TYPE.replace('-', '_')
except:
return self.__class__.__name__.lower()
def accept(self, visitor, *args, **kwargs):
func = getattr(visitor, 'visit_%s' % self.get_visit_name())
return func(self, *args, **kwargs)
def leave(self, visitor, *args, **kwargs):
func = getattr(visitor, 'leave_%s' % self.get_visit_name())
return func(self, *args, **kwargs)
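if __name__ == '__main__':
    # Minimal illustrative sketch (not part of logilab-common): a tiny tree of
    # nodes visited with the classes above. The node type, list function and
    # visitor are made up for the example.
    class Node(VisitedMixIn):
        def __init__(self, name, children=()):
            self.name = name
            self.children = list(children)

    def list_nodes(node, filter_func):
        # breadth-first flattening of the tree, honouring the filter
        result, queue = [], [node]
        while queue:
            current = queue.pop(0)
            if filter_func(current):
                result.append(current)
            queue.extend(current.children)
        return result

    class NodeIterator(FilteredIterator):
        def __init__(self, node, filter_func=None):
            FilteredIterator.__init__(self, node, list_nodes, filter_func)

    class NamePrinter(Visitor):
        def __init__(self):
            Visitor.__init__(self, NodeIterator)

        def visit_node(self, node):
            print(node.name)
            return node.name

    tree = Node('root', [Node('left'), Node('right')])
    NamePrinter().visit(tree)  # prints root, left, right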
|
mpl-2.0
|
annapowellsmith/openpresc
|
openprescribing/pipeline/tests/test_pipeline.py
|
1
|
16563
|
import mock
import os
import json
from django.conf import settings
from django.test import TestCase, override_settings
from pipeline.models import TaskLog
from pipeline.runner import load_tasks, run_task, in_progress
class PipelineTests(TestCase):
def setUp(self):
# Load tasks
self.tasks = load_tasks()
# Set up dummy files on filesystem
for source_id, year_and_month, filename in [
['source_a', '2017_01', 'source_a.csv'],
['source_a', '2017_02', 'source_a.csv'],
['source_a', '2017_03', 'source_a.csv'],
['source_b', '2017_01', 'source_b_1701.csv'],
['source_b', '2017_02', 'source_b_1702.csv'],
['source_b', '2017_03', 'source_b_1703.csv'],
['source_c', '2017_01', 'source_c1.csv'],
['source_c', '2017_01', 'source_c2.csv'],
['source_c', '2017_02', 'source_c1.csv'],
['source_c', '2017_02', 'source_c2.csv'],
]:
path = build_path(source_id, year_and_month, filename)
dir_path = os.path.dirname(path)
try:
os.makedirs(dir_path)
except OSError as e:
import errno
if e.errno != errno.EEXIST or not os.path.isdir(dir_path):
raise
with open(path, 'w') as f:
f.write('1,2,3\n')
# Set up dummy log data
log_data = {
'source_a': [
{
'imported_file': build_path(
'source_a',
'2017_01',
'source_a.csv'
),
'imported_at': '2017-01-01T12:00:00'
},
{
'imported_file': build_path(
'source_a',
'2017_02',
'source_a.csv'
),
'imported_at': '2017-02-01T12:00:00'
}
],
'source_b': [
{
'imported_file': build_path(
'source_b',
'2017_01',
'source_b_1701.csv'
),
'imported_at': '2017-01-01T12:00:00'
},
{
'imported_file': build_path(
'source_b',
'2017_02',
'source_b_1702.csv'
),
'imported_at': '2017-02-01T12:00:00'
}
],
'source_c': [
{
'imported_file': build_path(
'source_c',
'2017_01',
'source_c2.csv'
),
'imported_at': '2017-01-01T12:00:00'
},
{
'imported_file': build_path(
'source_c',
'2017_02',
'source_c2.csv'
),
'imported_at': '2017-02-01T12:00:00'
}
]
}
with open(settings.PIPELINE_IMPORT_LOG_PATH, 'w') as f:
json.dump(log_data, f)
def test_task_initialisation(self):
task = self.tasks['fetch_source_a']
self.assertEqual(task.name, 'fetch_source_a')
self.assertEqual(task.task_type, 'manual_fetch')
self.assertEqual(task.source_id, 'source_a')
self.assertEqual(task.dependencies, [])
task = self.tasks['convert_source_a']
self.assertEqual(task.dependency_names, ['fetch_source_a'])
def test_load_real_tasks(self):
# We're just checking that no exceptions get raised here
path = os.path.join(settings.APPS_ROOT, 'pipeline', 'metadata')
with override_settings(PIPELINE_METADATA_DIR=path):
load_tasks()
def test_run_real_tasks(self):
# We're not actually going to run the management commands, but we're
# going to check that the management commands exist and can be run with
# the given input
path = os.path.join(settings.APPS_ROOT, 'pipeline', 'metadata')
with override_settings(PIPELINE_METADATA_DIR=path):
tasks = load_tasks()
with mock.patch('django.core.management.base.BaseCommand.execute'):
for task in tasks.by_type('auto_fetch'):
task.run(2017, 7)
with mock.patch('pipeline.runner.Task.unimported_paths',
return_value=['/some/path']):
for task in tasks.by_type('convert'):
task.run(2017, 7)
with mock.patch('pipeline.runner.Task.unimported_paths',
return_value=['/some/path']):
for task in tasks.by_type('import'):
task.run(2017, 7)
for task in tasks.by_type('post_process'):
task.run(2017, 7, last_imported='2017_01')
def test_tasks_by_type(self):
tasks = self.tasks.by_type('manual_fetch')
self.assertIn('fetch_source_a', [task.name for task in tasks])
tasks = self.tasks.by_type('auto_fetch')
self.assertIn('fetch_source_b', [task.name for task in tasks])
def test_tasks_ordered(self):
task_names = [task.name for task in self.tasks.ordered()]
for name1, name2 in [
['fetch_source_a', 'convert_source_a'],
['convert_source_a', 'import_source_a'],
['fetch_source_b', 'import_source_b'],
['import_source_a', 'import_source_b'],
['fetch_source_c', 'import_source_c1'],
['import_source_a', 'import_source_c1'],
['import_source_b', 'import_source_c1'],
['fetch_source_c', 'import_source_c2'],
['import_source_c1', 'import_source_c2'],
['import_source_a', 'post_process'],
['import_source_b', 'post_process'],
['import_source_c1', 'post_process'],
]:
self.assertTrue(task_names.index(name1) < task_names.index(name2))
def test_tasks_by_type_ordered(self):
tasks = self.tasks.by_type('import').ordered()
task_names = [task.name for task in tasks]
expected_output = [
'import_source_a',
'import_source_b',
'import_source_c1',
'import_source_c2',
]
self.assertEqual(task_names, expected_output)
def test_tasks_ordered_by_type(self):
tasks = self.tasks.ordered().by_type('import')
task_names = [task.name for task in tasks]
expected_output = [
'import_source_a',
'import_source_b',
'import_source_c1',
'import_source_c2',
]
self.assertEqual(task_names, expected_output)
def test_source_initialisation(self):
source = self.tasks['import_source_a'].source
self.assertEqual(source.name, 'source_a')
self.assertEqual(source.title, 'Source A')
def test_tasks_that_use_raw_source_data(self):
source_a = self.tasks['fetch_source_a'].source
self.assertEqual(
[task.name for task in source_a.tasks_that_use_raw_source_data()],
['convert_source_a']
)
source_c = self.tasks['fetch_source_c'].source
self.assertEqual(
[task.name for task in source_c.tasks_that_use_raw_source_data()],
['import_source_c1', 'import_source_c2']
)
def test_filename_pattern(self):
task = self.tasks['convert_source_a']
self.assertEqual(task.filename_pattern(), 'source_a.csv')
def test_imported_paths(self):
task = self.tasks['convert_source_a']
expected_output = [
build_path('source_a', '2017_01', 'source_a.csv'),
build_path('source_a', '2017_02', 'source_a.csv'),
]
self.assertEqual(task.imported_paths(), expected_output)
task = self.tasks['import_source_b']
expected_output = [
build_path('source_b', '2017_01', 'source_b_1701.csv'),
build_path('source_b', '2017_02', 'source_b_1702.csv'),
]
self.assertEqual(task.imported_paths(), expected_output)
task = self.tasks['import_source_c1']
self.assertEqual(task.imported_paths(), [])
def test_set_last_imported_path(self):
task = self.tasks['import_source_b']
path = build_path('source_b', '2017_03', 'source_b_1703.csv')
task.set_last_imported_path(path)
expected_output = [
build_path('source_b', '2017_01', 'source_b_1701.csv'),
build_path('source_b', '2017_02', 'source_b_1702.csv'),
build_path('source_b', '2017_03', 'source_b_1703.csv'),
]
self.assertEqual(task.imported_paths(), expected_output)
# According to the log data in setUp(), no data has been imported for
# source_c yet
task1 = self.tasks['import_source_c1']
path = build_path('source_c', '2017_03', 'source_c1.csv')
task1.set_last_imported_path(path)
expected_output = [
build_path('source_c', '2017_03', 'source_c1.csv'),
]
self.assertEqual(task1.imported_paths(), expected_output)
expected_output = [
build_path('source_b', '2017_01', 'source_b_1701.csv'),
build_path('source_b', '2017_02', 'source_b_1702.csv'),
build_path('source_b', '2017_03', 'source_b_1703.csv'),
]
self.assertEqual(task.imported_paths(), expected_output)
def test_input_paths(self):
task = self.tasks['import_source_b']
expected_output = [
build_path(
'source_b',
'2017_{}'.format(month),
'source_b_17{}.csv'.format(month)
)
for month in ['01', '02', '03']
]
self.assertEqual(task.input_paths(), expected_output)
def test_unimported_paths(self):
task = self.tasks['import_source_b']
expected_output = [
build_path('source_b', '2017_03', 'source_b_1703.csv'),
]
self.assertEqual(task.unimported_paths(), expected_output)
def test_manual_fetch_instructions(self):
task = self.tasks['fetch_source_a']
expected_output = '''
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
You should now locate the latest data for source_a, if available
You should save it at:
{data_basedir}/source_a/YYYY_MM
The last imported data can be found at:
{data_basedir}/source_a/2017_02/source_a.csv
'''.strip().format(
data_basedir=settings.PIPELINE_DATA_BASEDIR,
)
output = task.manual_fetch_instructions()
self.assertEqual(output, expected_output)
task = self.tasks['fetch_source_c']
expected_output = '''
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
You should now locate the latest data for source_c, if available
You should save it at:
{data_basedir}/source_c/YYYY_MM
The last imported data can be found at:
<never imported>
{data_basedir}/source_c/2017_02/source_c2.csv
'''.strip().format(
data_basedir=settings.PIPELINE_DATA_BASEDIR,
)
output = task.manual_fetch_instructions()
self.assertEqual(output, expected_output)
def test_manual_fetch_instructions_with_real_data(self):
path = os.path.join(settings.APPS_ROOT, 'pipeline', 'metadata')
with override_settings(PIPELINE_METADATA_DIR=path):
tasks = load_tasks()
# We're just checking that no exceptions get raised here
for task in tasks.by_type('manual_fetch'):
task.manual_fetch_instructions()
def test_run_auto_fetch(self):
task = self.tasks['fetch_source_b']
with mock.patch('pipeline.runner.call_command') as cc:
task.run(2017, 7)
cc.assert_called_with('fetch_source_b', '2017', '7', '--yes')
def test_run_convert(self):
task = self.tasks['convert_source_a']
path = build_path('source_a', '2017_03', 'source_a.csv')
with mock.patch('pipeline.runner.call_command') as cc:
task.run(2017, 7)
cc.assert_called_with('convert_source_a', '--filename', path)
def test_run_import(self):
task = self.tasks['import_source_c1']
expected_calls = []
for year_and_month in ['2017_01', '2017_02']:
path = build_path('source_c', year_and_month, 'source_c1.csv')
call = mock.call('import_source_c', '--filename', path)
expected_calls.append(call)
with mock.patch('pipeline.runner.call_command') as cc:
task.run(2017, 7)
cc.assert_has_calls(expected_calls)
def test_run_post_process(self):
task = self.tasks['post_process']
with mock.patch('pipeline.runner.call_command') as cc:
task.run(2017, 7, '2017_01')
cc.assert_called_with('post_process', '2017_01')
def test_run_task(self):
task = self.tasks['fetch_source_b']
with mock.patch('pipeline.runner.call_command'):
run_task(task, 2017, 7)
log = TaskLog.objects.get(
year=2017,
month=7,
task_name='fetch_source_b',
)
self.assertEqual(log.status, 'successful')
self.assertIsNotNone(log.ended_at)
def test_run_task_that_fails(self):
task = self.tasks['fetch_source_b']
with self.assertRaises(KeyboardInterrupt):
with mock.patch('pipeline.runner.call_command') as cc:
cc.side_effect = KeyboardInterrupt
run_task(task, 2017, 7)
log = TaskLog.objects.get(
year=2017,
month=7,
task_name='fetch_source_b',
)
self.assertEqual(log.status, 'failed')
self.assertIsNotNone(log.ended_at)
self.assertIn('KeyboardInterrupt', log.formatted_tb)
def test_run_task_after_success(self):
task = self.tasks['fetch_source_b']
with mock.patch('pipeline.runner.call_command') as cc:
run_task(task, 2017, 7)
with mock.patch('pipeline.runner.call_command') as cc:
run_task(task, 2017, 7)
cc.assert_not_called()
logs = TaskLog.objects.filter(
year=2017, month=7,
task_name='fetch_source_b'
)
self.assertEqual(1, logs.count())
def test_run_task_after_failure(self):
task = self.tasks['fetch_source_b']
with self.assertRaises(KeyboardInterrupt):
with mock.patch('pipeline.runner.call_command') as cc:
cc.side_effect = KeyboardInterrupt
run_task(task, 2017, 7)
with mock.patch('pipeline.runner.call_command') as cc:
run_task(task, 2017, 7)
logs = TaskLog.objects.filter(
year=2017, month=7,
task_name='fetch_source_b'
)
self.assertEqual(2, logs.count())
def test_in_progress_when_not_in_progress(self):
TaskLog.objects.create(year=2017, month=7, task_name='task1')
TaskLog.objects.create(year=2017, month=7, task_name='task2')
TaskLog.objects.create(year=2017, month=7, task_name='fetch_and_import')
TaskLog.objects.create(year=2017, month=8, task_name='task1')
TaskLog.objects.create(year=2017, month=8, task_name='task2')
TaskLog.objects.create(year=2017, month=8, task_name='fetch_and_import')
self.assertFalse(in_progress())
def test_in_progress_when_in_progress(self):
TaskLog.objects.create(year=2017, month=7, task_name='task1')
TaskLog.objects.create(year=2017, month=7, task_name='task2')
TaskLog.objects.create(year=2017, month=7, task_name='fetch_and_import')
TaskLog.objects.create(year=2017, month=8, task_name='task1')
TaskLog.objects.create(year=2017, month=8, task_name='task2')
TaskLog.objects.create(year=2017, month=8, task_name='fetch_and_import')
TaskLog.objects.create(year=2017, month=9, task_name='task1')
self.assertTrue(in_progress())
def build_path(source_id, year_and_month, filename):
return os.path.join(
settings.PIPELINE_DATA_BASEDIR,
source_id,
year_and_month,
filename
)
|
mit
|
rajsadho/django
|
tests/multiple_database/tests.py
|
44
|
94129
|
from __future__ import unicode_literals
import datetime
import pickle
from operator import attrgetter
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.core import management
from django.db import DEFAULT_DB_ALIAS, connections, router, transaction
from django.db.models import signals
from django.db.utils import ConnectionRouter
from django.test import SimpleTestCase, TestCase, override_settings
from django.utils.six import StringIO
from .models import Book, Person, Pet, Review, UserProfile
from .routers import AuthRouter, TestRouter, WriteRouter
class QueryTestCase(TestCase):
multi_db = True
def test_db_selection(self):
"Check that querysets will use the default database by default"
self.assertEqual(Book.objects.db, DEFAULT_DB_ALIAS)
self.assertEqual(Book.objects.all().db, DEFAULT_DB_ALIAS)
self.assertEqual(Book.objects.using('other').db, 'other')
self.assertEqual(Book.objects.db_manager('other').db, 'other')
self.assertEqual(Book.objects.db_manager('other').all().db, 'other')
def test_default_creation(self):
"Objects created on the default database don't leak onto other databases"
# Create a book on the default database using create()
Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
# Create a book on the default database using a save
dive = Book()
dive.title = "Dive into Python"
dive.published = datetime.date(2009, 5, 4)
dive.save()
# Check that book exists on the default database, but not on other database
try:
Book.objects.get(title="Pro Django")
Book.objects.using('default').get(title="Pro Django")
except Book.DoesNotExist:
self.fail('"Pro Django" should exist on default database')
self.assertRaises(
Book.DoesNotExist,
Book.objects.using('other').get,
title="Pro Django"
)
try:
Book.objects.get(title="Dive into Python")
Book.objects.using('default').get(title="Dive into Python")
except Book.DoesNotExist:
self.fail('"Dive into Python" should exist on default database')
self.assertRaises(
Book.DoesNotExist,
Book.objects.using('other').get,
title="Dive into Python"
)
def test_other_creation(self):
"Objects created on another database don't leak onto the default database"
# Create a book on the second database
Book.objects.using('other').create(title="Pro Django",
published=datetime.date(2008, 12, 16))
# Create a book on the default database using a save
dive = Book()
dive.title = "Dive into Python"
dive.published = datetime.date(2009, 5, 4)
dive.save(using='other')
# Check that book exists on the default database, but not on other database
try:
Book.objects.using('other').get(title="Pro Django")
except Book.DoesNotExist:
self.fail('"Pro Django" should exist on other database')
self.assertRaises(
Book.DoesNotExist,
Book.objects.get,
title="Pro Django"
)
self.assertRaises(
Book.DoesNotExist,
Book.objects.using('default').get,
title="Pro Django"
)
try:
Book.objects.using('other').get(title="Dive into Python")
except Book.DoesNotExist:
self.fail('"Dive into Python" should exist on other database')
self.assertRaises(
Book.DoesNotExist,
Book.objects.get,
title="Dive into Python"
)
self.assertRaises(
Book.DoesNotExist,
Book.objects.using('default').get,
title="Dive into Python"
)
def test_refresh(self):
dive = Book()
dive.title = "Dive into Python"
dive.published = datetime.date(2009, 5, 4)
dive.save(using='other')
dive2 = Book.objects.using('other').get()
dive2.title = "Dive into Python (on default)"
dive2.save(using='default')
dive.refresh_from_db()
self.assertEqual(dive.title, "Dive into Python")
dive.refresh_from_db(using='default')
self.assertEqual(dive.title, "Dive into Python (on default)")
self.assertEqual(dive._state.db, "default")
def test_basic_queries(self):
"Queries are constrained to a single database"
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
dive = Book.objects.using('other').get(published=datetime.date(2009, 5, 4))
self.assertEqual(dive.title, "Dive into Python")
self.assertRaises(Book.DoesNotExist, Book.objects.using('default').get, published=datetime.date(2009, 5, 4))
dive = Book.objects.using('other').get(title__icontains="dive")
self.assertEqual(dive.title, "Dive into Python")
self.assertRaises(Book.DoesNotExist, Book.objects.using('default').get, title__icontains="dive")
dive = Book.objects.using('other').get(title__iexact="dive INTO python")
self.assertEqual(dive.title, "Dive into Python")
self.assertRaises(Book.DoesNotExist, Book.objects.using('default').get, title__iexact="dive INTO python")
dive = Book.objects.using('other').get(published__year=2009)
self.assertEqual(dive.title, "Dive into Python")
self.assertEqual(dive.published, datetime.date(2009, 5, 4))
self.assertRaises(Book.DoesNotExist, Book.objects.using('default').get, published__year=2009)
years = Book.objects.using('other').dates('published', 'year')
self.assertEqual([o.year for o in years], [2009])
years = Book.objects.using('default').dates('published', 'year')
self.assertEqual([o.year for o in years], [])
months = Book.objects.using('other').dates('published', 'month')
self.assertEqual([o.month for o in months], [5])
months = Book.objects.using('default').dates('published', 'month')
self.assertEqual([o.month for o in months], [])
def test_m2m_separation(self):
"M2M fields are constrained to a single database"
# Create a book and author on the default database
pro = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
marty = Person.objects.create(name="Marty Alchin")
# Create a book and author on the other database
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
mark = Person.objects.using('other').create(name="Mark Pilgrim")
# Save the author relations
pro.authors = [marty]
dive.authors = [mark]
# Inspect the m2m tables directly.
# There should be 1 entry in each database
self.assertEqual(Book.authors.through.objects.using('default').count(), 1)
self.assertEqual(Book.authors.through.objects.using('other').count(), 1)
# Check that queries work across m2m joins
self.assertEqual(
list(Book.objects.using('default').filter(authors__name='Marty Alchin').values_list('title', flat=True)),
['Pro Django']
)
self.assertEqual(
list(Book.objects.using('other').filter(authors__name='Marty Alchin').values_list('title', flat=True)),
[]
)
self.assertEqual(
list(Book.objects.using('default').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)),
[]
)
self.assertEqual(
list(Book.objects.using('other').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)),
['Dive into Python']
)
# Reget the objects to clear caches
dive = Book.objects.using('other').get(title="Dive into Python")
mark = Person.objects.using('other').get(name="Mark Pilgrim")
# Retrieve related object by descriptor. Related objects should be database-bound
self.assertEqual(list(dive.authors.all().values_list('name', flat=True)),
['Mark Pilgrim'])
self.assertEqual(list(mark.book_set.all().values_list('title', flat=True)),
['Dive into Python'])
def test_m2m_forward_operations(self):
"M2M forward manipulations are all constrained to a single DB"
# Create a book and author on the other database
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
mark = Person.objects.using('other').create(name="Mark Pilgrim")
# Save the author relations
dive.authors = [mark]
# Add a second author
john = Person.objects.using('other').create(name="John Smith")
self.assertEqual(
list(Book.objects.using('other').filter(authors__name='John Smith').values_list('title', flat=True)),
[]
)
dive.authors.add(john)
self.assertEqual(
list(Book.objects.using('other').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)),
['Dive into Python']
)
self.assertEqual(
list(Book.objects.using('other').filter(authors__name='John Smith').values_list('title', flat=True)),
['Dive into Python']
)
# Remove the second author
dive.authors.remove(john)
self.assertEqual(
list(Book.objects.using('other').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)),
['Dive into Python']
)
self.assertEqual(
list(Book.objects.using('other').filter(authors__name='John Smith').values_list('title', flat=True)),
[]
)
# Clear all authors
dive.authors.clear()
self.assertEqual(
list(Book.objects.using('other').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)),
[]
)
self.assertEqual(
list(Book.objects.using('other').filter(authors__name='John Smith').values_list('title', flat=True)),
[]
)
# Create an author through the m2m interface
dive.authors.create(name='Jane Brown')
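# The new Person is created on 'other', the same database as the related Book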
self.assertEqual(
list(Book.objects.using('other').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)),
[]
)
self.assertEqual(
list(Book.objects.using('other').filter(authors__name='Jane Brown').values_list('title', flat=True)),
['Dive into Python']
)
def test_m2m_reverse_operations(self):
"M2M reverse manipulations are all constrained to a single DB"
# Create a book and author on the other database
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
mark = Person.objects.using('other').create(name="Mark Pilgrim")
# Save the author relations
dive.authors = [mark]
# Create a second book on the other database
grease = Book.objects.using('other').create(title="Greasemonkey Hacks",
published=datetime.date(2005, 11, 1))
# Add a book to the m2m
mark.book_set.add(grease)
self.assertEqual(
list(Person.objects.using('other').filter(book__title='Dive into Python').values_list('name', flat=True)),
['Mark Pilgrim']
)
self.assertEqual(
list(
Person.objects.using('other').filter(book__title='Greasemonkey Hacks').values_list('name', flat=True)
),
['Mark Pilgrim']
)
# Remove a book from the m2m
mark.book_set.remove(grease)
self.assertEqual(
list(Person.objects.using('other').filter(book__title='Dive into Python').values_list('name', flat=True)),
['Mark Pilgrim']
)
self.assertEqual(
list(
Person.objects.using('other').filter(book__title='Greasemonkey Hacks').values_list('name', flat=True)
),
[]
)
# Clear the books associated with mark
mark.book_set.clear()
self.assertEqual(
list(Person.objects.using('other').filter(book__title='Dive into Python').values_list('name', flat=True)),
[]
)
self.assertEqual(
list(
Person.objects.using('other').filter(book__title='Greasemonkey Hacks').values_list('name', flat=True)
),
[]
)
# Create a book through the m2m interface
mark.book_set.create(title="Dive into HTML5", published=datetime.date(2020, 1, 1))
self.assertEqual(
list(Person.objects.using('other').filter(book__title='Dive into Python').values_list('name', flat=True)),
[]
)
self.assertEqual(
list(Person.objects.using('other').filter(book__title='Dive into HTML5').values_list('name', flat=True)),
['Mark Pilgrim']
)
def test_m2m_cross_database_protection(self):
"Operations that involve sharing M2M objects across databases raise an error"
# Create a book and author on the default database
pro = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
marty = Person.objects.create(name="Marty Alchin")
# Create a book and author on the other database
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
mark = Person.objects.using('other').create(name="Mark Pilgrim")
# Set a foreign key set with an object from a different database
with self.assertRaises(ValueError):
with transaction.atomic(using='default'):
marty.edited = [pro, dive]
# Add to an m2m with an object from a different database
with self.assertRaises(ValueError):
with transaction.atomic(using='default'):
marty.book_set.add(dive)
# Set a m2m with an object from a different database
with self.assertRaises(ValueError):
with transaction.atomic(using='default'):
marty.book_set = [pro, dive]
# Add to a reverse m2m with an object from a different database
with self.assertRaises(ValueError):
with transaction.atomic(using='other'):
dive.authors.add(marty)
# Set a reverse m2m with an object from a different database
with self.assertRaises(ValueError):
with transaction.atomic(using='other'):
dive.authors = [mark, marty]
def test_m2m_deletion(self):
"Cascaded deletions of m2m relations issue queries on the right database"
# Create a book and author on the other database
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
mark = Person.objects.using('other').create(name="Mark Pilgrim")
dive.authors = [mark]
# Check the initial state
self.assertEqual(Person.objects.using('default').count(), 0)
self.assertEqual(Book.objects.using('default').count(), 0)
self.assertEqual(Book.authors.through.objects.using('default').count(), 0)
self.assertEqual(Person.objects.using('other').count(), 1)
self.assertEqual(Book.objects.using('other').count(), 1)
self.assertEqual(Book.authors.through.objects.using('other').count(), 1)
# Delete the object on the other database
dive.delete(using='other')
self.assertEqual(Person.objects.using('default').count(), 0)
self.assertEqual(Book.objects.using('default').count(), 0)
self.assertEqual(Book.authors.through.objects.using('default').count(), 0)
# The person still exists ...
self.assertEqual(Person.objects.using('other').count(), 1)
# ... but the book has been deleted
self.assertEqual(Book.objects.using('other').count(), 0)
# ... and the relationship object has also been deleted.
self.assertEqual(Book.authors.through.objects.using('other').count(), 0)
# Now try deletion in the reverse direction. Set up the relation again
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
dive.authors = [mark]
# Check the initial state
self.assertEqual(Person.objects.using('default').count(), 0)
self.assertEqual(Book.objects.using('default').count(), 0)
self.assertEqual(Book.authors.through.objects.using('default').count(), 0)
self.assertEqual(Person.objects.using('other').count(), 1)
self.assertEqual(Book.objects.using('other').count(), 1)
self.assertEqual(Book.authors.through.objects.using('other').count(), 1)
# Delete the object on the other database
mark.delete(using='other')
self.assertEqual(Person.objects.using('default').count(), 0)
self.assertEqual(Book.objects.using('default').count(), 0)
self.assertEqual(Book.authors.through.objects.using('default').count(), 0)
# The person has been deleted ...
self.assertEqual(Person.objects.using('other').count(), 0)
# ... but the book still exists
self.assertEqual(Book.objects.using('other').count(), 1)
# ... and the relationship object has been deleted.
self.assertEqual(Book.authors.through.objects.using('other').count(), 0)
def test_foreign_key_separation(self):
"FK fields are constrained to a single database"
# Create a book and author on the default database
pro = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
george = Person.objects.create(name="George Vilches")
# Create a book and author on the other database
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
chris = Person.objects.using('other').create(name="Chris Mills")
# Save the author's favorite books
pro.editor = george
pro.save()
dive.editor = chris
dive.save()
pro = Book.objects.using('default').get(title="Pro Django")
self.assertEqual(pro.editor.name, "George Vilches")
dive = Book.objects.using('other').get(title="Dive into Python")
self.assertEqual(dive.editor.name, "Chris Mills")
# Check that queries work across foreign key joins
self.assertEqual(
list(Person.objects.using('default').filter(edited__title='Pro Django').values_list('name', flat=True)),
['George Vilches']
)
self.assertEqual(
list(Person.objects.using('other').filter(edited__title='Pro Django').values_list('name', flat=True)),
[]
)
self.assertEqual(
list(
Person.objects.using('default').filter(edited__title='Dive into Python').values_list('name', flat=True)
),
[]
)
self.assertEqual(
list(
Person.objects.using('other').filter(edited__title='Dive into Python').values_list('name', flat=True)
),
['Chris Mills']
)
# Reget the objects to clear caches
chris = Person.objects.using('other').get(name="Chris Mills")
dive = Book.objects.using('other').get(title="Dive into Python")
# Retrieve related object by descriptor. Related objects should be database-bound
self.assertEqual(list(chris.edited.values_list('title', flat=True)),
['Dive into Python'])
def test_foreign_key_reverse_operations(self):
"FK reverse manipulations are all constrained to a single DB"
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
chris = Person.objects.using('other').create(name="Chris Mills")
# Save the author relations
dive.editor = chris
dive.save()
# Add a second book edited by chris
html5 = Book.objects.using('other').create(title="Dive into HTML5", published=datetime.date(2010, 3, 15))
self.assertEqual(
list(Person.objects.using('other').filter(edited__title='Dive into HTML5').values_list('name', flat=True)),
[]
)
chris.edited.add(html5)
self.assertEqual(
list(Person.objects.using('other').filter(edited__title='Dive into HTML5').values_list('name', flat=True)),
['Chris Mills']
)
self.assertEqual(
list(
Person.objects.using('other').filter(edited__title='Dive into Python').values_list('name', flat=True)
),
['Chris Mills']
)
# Remove the second edited book
chris.edited.remove(html5)
self.assertEqual(
list(Person.objects.using('other').filter(edited__title='Dive into HTML5').values_list('name', flat=True)),
[]
)
self.assertEqual(
list(
Person.objects.using('other').filter(edited__title='Dive into Python').values_list('name', flat=True)
),
['Chris Mills']
)
# Clear all edited books
chris.edited.clear()
self.assertEqual(
list(Person.objects.using('other').filter(edited__title='Dive into HTML5').values_list('name', flat=True)),
[]
)
self.assertEqual(
list(
Person.objects.using('other').filter(edited__title='Dive into Python').values_list('name', flat=True)
),
[]
)
# Create a book through the reverse FK interface
chris.edited.create(title='Dive into Water', published=datetime.date(2010, 3, 15))
self.assertEqual(
list(Person.objects.using('other').filter(edited__title='Dive into HTML5').values_list('name', flat=True)),
[]
)
self.assertEqual(
list(Person.objects.using('other').filter(edited__title='Dive into Water').values_list('name', flat=True)),
['Chris Mills']
)
self.assertEqual(
list(
Person.objects.using('other').filter(edited__title='Dive into Python').values_list('name', flat=True)
),
[]
)
def test_foreign_key_cross_database_protection(self):
"Operations that involve sharing FK objects across databases raise an error"
# Create a book and author on the default database
pro = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
marty = Person.objects.create(name="Marty Alchin")
# Create a book and author on the other database
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
# Set a foreign key with an object from a different database
with self.assertRaises(ValueError):
dive.editor = marty
# Set a foreign key set with an object from a different database
with self.assertRaises(ValueError):
with transaction.atomic(using='default'):
marty.edited = [pro, dive]
# Add to a foreign key set with an object from a different database
with self.assertRaises(ValueError):
with transaction.atomic(using='default'):
marty.edited.add(dive)
def test_foreign_key_deletion(self):
"Cascaded deletions of Foreign Key relations issue queries on the right database"
mark = Person.objects.using('other').create(name="Mark Pilgrim")
Pet.objects.using('other').create(name="Fido", owner=mark)
# Check the initial state
self.assertEqual(Person.objects.using('default').count(), 0)
self.assertEqual(Pet.objects.using('default').count(), 0)
self.assertEqual(Person.objects.using('other').count(), 1)
self.assertEqual(Pet.objects.using('other').count(), 1)
# Delete the person object, which will cascade onto the pet
mark.delete(using='other')
self.assertEqual(Person.objects.using('default').count(), 0)
self.assertEqual(Pet.objects.using('default').count(), 0)
# Both the pet and the person have been deleted from the right database
self.assertEqual(Person.objects.using('other').count(), 0)
self.assertEqual(Pet.objects.using('other').count(), 0)
def test_foreign_key_validation(self):
"ForeignKey.validate() uses the correct database"
mickey = Person.objects.using('other').create(name="Mickey")
pluto = Pet.objects.using('other').create(name="Pluto", owner=mickey)
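# full_clean() must validate the owner against 'other', the database pluto was created on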
self.assertIsNone(pluto.full_clean())
def test_o2o_separation(self):
"OneToOne fields are constrained to a single database"
# Create a user and profile on the default database
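# create_user() is a manager method, so db_manager() is needed to pick the database;
# using() would return a QuerySet, which has no create_user()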
alice = User.objects.db_manager('default').create_user('alice', '[email protected]')
alice_profile = UserProfile.objects.using('default').create(user=alice, flavor='chocolate')
# Create a user and profile on the other database
bob = User.objects.db_manager('other').create_user('bob', '[email protected]')
bob_profile = UserProfile.objects.using('other').create(user=bob, flavor='crunchy frog')
# Retrieve related objects; queries should be database constrained
alice = User.objects.using('default').get(username="alice")
self.assertEqual(alice.userprofile.flavor, "chocolate")
bob = User.objects.using('other').get(username="bob")
self.assertEqual(bob.userprofile.flavor, "crunchy frog")
# Check that queries work across joins
self.assertEqual(
list(
User.objects.using('default')
.filter(userprofile__flavor='chocolate').values_list('username', flat=True)
),
['alice']
)
self.assertEqual(
list(
User.objects.using('other')
.filter(userprofile__flavor='chocolate').values_list('username', flat=True)
),
[]
)
self.assertEqual(
list(
User.objects.using('default')
.filter(userprofile__flavor='crunchy frog').values_list('username', flat=True)
),
[]
)
self.assertEqual(
list(
User.objects.using('other')
.filter(userprofile__flavor='crunchy frog').values_list('username', flat=True)
),
['bob']
)
# Reget the objects to clear caches
alice_profile = UserProfile.objects.using('default').get(flavor='chocolate')
bob_profile = UserProfile.objects.using('other').get(flavor='crunchy frog')
# Retrieve related object by descriptor. Related objects should be database-bound
self.assertEqual(alice_profile.user.username, 'alice')
self.assertEqual(bob_profile.user.username, 'bob')
def test_o2o_cross_database_protection(self):
"Operations that involve sharing FK objects across databases raise an error"
# Create a user and profile on the default database
alice = User.objects.db_manager('default').create_user('alice', '[email protected]')
# Create a user and profile on the other database
bob = User.objects.db_manager('other').create_user('bob', '[email protected]')
# Set a one-to-one relation with an object from a different database
alice_profile = UserProfile.objects.using('default').create(user=alice, flavor='chocolate')
with self.assertRaises(ValueError):
bob.userprofile = alice_profile
# BUT! if you assign a FK object when the base object hasn't
# been saved yet, you implicitly assign the database for the
# base object.
bob_profile = UserProfile.objects.using('other').create(user=bob, flavor='crunchy frog')
new_bob_profile = UserProfile(flavor="spring surprise")
# assigning a profile requires an explicit pk as the object isn't saved
charlie = User(pk=51, username='charlie', email='[email protected]')
charlie.set_unusable_password()
# initially, no db assigned
self.assertEqual(new_bob_profile._state.db, None)
self.assertEqual(charlie._state.db, None)
# old object comes from 'other', so the new object is set to use 'other'...
new_bob_profile.user = bob
charlie.userprofile = bob_profile
self.assertEqual(new_bob_profile._state.db, 'other')
self.assertEqual(charlie._state.db, 'other')
# ... but it isn't saved yet
self.assertEqual(list(User.objects.using('other').values_list('username', flat=True)),
['bob'])
self.assertEqual(list(UserProfile.objects.using('other').values_list('flavor', flat=True)),
['crunchy frog'])
# When saved (no using required), new objects go to 'other'
charlie.save()
bob_profile.save()
new_bob_profile.save()
self.assertEqual(list(User.objects.using('default').values_list('username', flat=True)),
['alice'])
self.assertEqual(list(User.objects.using('other').values_list('username', flat=True)),
['bob', 'charlie'])
self.assertEqual(list(UserProfile.objects.using('default').values_list('flavor', flat=True)),
['chocolate'])
self.assertEqual(list(UserProfile.objects.using('other').values_list('flavor', flat=True)),
['crunchy frog', 'spring surprise'])
# This also works if you assign the O2O relation in the constructor
denise = User.objects.db_manager('other').create_user('denise', '[email protected]')
denise_profile = UserProfile(flavor="tofu", user=denise)
self.assertEqual(denise_profile._state.db, 'other')
# ... but it isn't saved yet
self.assertEqual(list(UserProfile.objects.using('default').values_list('flavor', flat=True)),
['chocolate'])
self.assertEqual(list(UserProfile.objects.using('other').values_list('flavor', flat=True)),
['crunchy frog', 'spring surprise'])
# When saved, the new profile goes to 'other'
denise_profile.save()
self.assertEqual(list(UserProfile.objects.using('default').values_list('flavor', flat=True)),
['chocolate'])
self.assertEqual(list(UserProfile.objects.using('other').values_list('flavor', flat=True)),
['crunchy frog', 'spring surprise', 'tofu'])
def test_generic_key_separation(self):
"Generic fields are constrained to a single database"
# Create a book and author on the default database
pro = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
review1 = Review.objects.create(source="Python Monthly", content_object=pro)
# Create a book and author on the other database
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
review2 = Review.objects.using('other').create(source="Python Weekly", content_object=dive)
review1 = Review.objects.using('default').get(source="Python Monthly")
self.assertEqual(review1.content_object.title, "Pro Django")
review2 = Review.objects.using('other').get(source="Python Weekly")
self.assertEqual(review2.content_object.title, "Dive into Python")
# Reget the objects to clear caches
dive = Book.objects.using('other').get(title="Dive into Python")
# Retrieve related object by descriptor. Related objects should be database-bound
self.assertEqual(list(dive.reviews.all().values_list('source', flat=True)),
['Python Weekly'])
def test_generic_key_reverse_operations(self):
"Generic reverse manipulations are all constrained to a single DB"
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
temp = Book.objects.using('other').create(title="Temp",
published=datetime.date(2009, 5, 4))
review1 = Review.objects.using('other').create(source="Python Weekly", content_object=dive)
review2 = Review.objects.using('other').create(source="Python Monthly", content_object=temp)
self.assertEqual(
list(Review.objects.using('default').filter(object_id=dive.pk).values_list('source', flat=True)),
[]
)
self.assertEqual(
list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
['Python Weekly']
)
# Add a second review
dive.reviews.add(review2)
self.assertEqual(
list(Review.objects.using('default').filter(object_id=dive.pk).values_list('source', flat=True)),
[]
)
self.assertEqual(
list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
['Python Monthly', 'Python Weekly']
)
# Remove the original review
dive.reviews.remove(review1)
self.assertEqual(
list(Review.objects.using('default').filter(object_id=dive.pk).values_list('source', flat=True)),
[]
)
self.assertEqual(
list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
['Python Monthly']
)
# Clear all reviews
dive.reviews.clear()
self.assertEqual(
list(Review.objects.using('default').filter(object_id=dive.pk).values_list('source', flat=True)),
[]
)
self.assertEqual(
list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
[]
)
# Create a review through the generic interface
dive.reviews.create(source='Python Daily')
self.assertEqual(
list(Review.objects.using('default').filter(object_id=dive.pk).values_list('source', flat=True)),
[]
)
self.assertEqual(
list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
['Python Daily']
)
def test_generic_key_cross_database_protection(self):
"Operations that involve sharing generic key objects across databases raise an error"
# Create a book and author on the default database
pro = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
review1 = Review.objects.create(source="Python Monthly", content_object=pro)
# Create a book and author on the other database
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
Review.objects.using('other').create(source="Python Weekly", content_object=dive)
# Set a foreign key with an object from a different database
with self.assertRaises(ValueError):
review1.content_object = dive
# Add to a foreign key set with an object from a different database
with self.assertRaises(ValueError):
with transaction.atomic(using='other'):
dive.reviews.add(review1)
# BUT! if you assign a FK object when the base object hasn't
# been saved yet, you implicitly assign the database for the
# base object.
review3 = Review(source="Python Daily")
# initially, no db assigned
self.assertEqual(review3._state.db, None)
# Dive comes from 'other', so review3 is set to use 'other'...
review3.content_object = dive
self.assertEqual(review3._state.db, 'other')
# ... but it isn't saved yet
self.assertEqual(
list(Review.objects.using('default').filter(object_id=pro.pk).values_list('source', flat=True)),
['Python Monthly']
)
self.assertEqual(
list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
['Python Weekly']
)
# When saved, review3 goes to 'other'
review3.save()
self.assertEqual(
list(Review.objects.using('default').filter(object_id=pro.pk).values_list('source', flat=True)),
['Python Monthly']
)
self.assertEqual(
list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
['Python Daily', 'Python Weekly']
)
def test_generic_key_deletion(self):
"Cascaded deletions of Generic Key relations issue queries on the right database"
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
Review.objects.using('other').create(source="Python Weekly", content_object=dive)
# Check the initial state
self.assertEqual(Book.objects.using('default').count(), 0)
self.assertEqual(Review.objects.using('default').count(), 0)
self.assertEqual(Book.objects.using('other').count(), 1)
self.assertEqual(Review.objects.using('other').count(), 1)
# Delete the Book object, which will cascade onto the review
dive.delete(using='other')
self.assertEqual(Book.objects.using('default').count(), 0)
self.assertEqual(Review.objects.using('default').count(), 0)
# Both the book and the review have been deleted from the right database
self.assertEqual(Book.objects.using('other').count(), 0)
self.assertEqual(Review.objects.using('other').count(), 0)
def test_ordering(self):
"get_next_by_XXX commands stick to a single database"
Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
learn = Book.objects.using('other').create(title="Learning Python",
published=datetime.date(2008, 7, 16))
self.assertEqual(learn.get_next_by_published().title, "Dive into Python")
self.assertEqual(dive.get_previous_by_published().title, "Learning Python")
def test_raw(self):
"test the raw() method across databases"
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
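# raw() queries run on whichever database the manager or queryset is bound to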
val = Book.objects.db_manager("other").raw('SELECT id FROM multiple_database_book')
self.assertQuerysetEqual(val, [dive.pk], attrgetter("pk"))
val = Book.objects.raw('SELECT id FROM multiple_database_book').using('other')
self.assertQuerysetEqual(val, [dive.pk], attrgetter("pk"))
def test_select_related(self):
"Database assignment is retained if an object is retrieved with select_related()"
# Create a book and author on the other database
mark = Person.objects.using('other').create(name="Mark Pilgrim")
Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4),
editor=mark)
# Retrieve the Person using select_related()
book = Book.objects.using('other').select_related('editor').get(title="Dive into Python")
# The editor instance should have a db state
self.assertEqual(book.editor._state.db, 'other')
def test_subquery(self):
"""Make sure as_sql works with subqueries and primary/replica."""
sub = Person.objects.using('other').filter(name='fff')
qs = Book.objects.filter(editor__in=sub)
# When you call __str__ on the query object, it doesn't know about using
# so it falls back to the default. If the subquery explicitly uses a
# different database, an error should be raised.
self.assertRaises(ValueError, str, qs.query)
# Evaluating the query shouldn't work, either
with self.assertRaises(ValueError):
for obj in qs:
pass
def test_related_manager(self):
"Related managers return managers, not querysets"
mark = Person.objects.using('other').create(name="Mark Pilgrim")
# extra_arg is removed by the BookManager's implementation of
# create(); but the BookManager's implementation won't get called
# unless edited returns a Manager, not a queryset
mark.book_set.create(title="Dive into Python",
published=datetime.date(2009, 5, 4),
extra_arg=True)
mark.book_set.get_or_create(title="Dive into Python",
published=datetime.date(2009, 5, 4),
extra_arg=True)
mark.edited.create(title="Dive into Water",
published=datetime.date(2009, 5, 4),
extra_arg=True)
mark.edited.get_or_create(title="Dive into Water",
published=datetime.date(2009, 5, 4),
extra_arg=True)
class ConnectionRouterTestCase(SimpleTestCase):
@override_settings(DATABASE_ROUTERS=[
'multiple_database.tests.TestRouter',
'multiple_database.tests.WriteRouter'])
def test_router_init_default(self):
connection_router = ConnectionRouter()
self.assertListEqual([r.__class__.__name__ for r in connection_router.routers],
['TestRouter', 'WriteRouter'])
def test_router_init_arg(self):
connection_router = ConnectionRouter([
'multiple_database.tests.TestRouter',
'multiple_database.tests.WriteRouter'
])
self.assertListEqual([r.__class__.__name__ for r in connection_router.routers],
['TestRouter', 'WriteRouter'])
# Init with instances instead of strings
connection_router = ConnectionRouter([TestRouter(), WriteRouter()])
self.assertListEqual([r.__class__.__name__ for r in connection_router.routers],
['TestRouter', 'WriteRouter'])
# Make the 'other' database appear to be a replica of the 'default'
@override_settings(DATABASE_ROUTERS=[TestRouter()])
class RouterTestCase(TestCase):
multi_db = True
def test_db_selection(self):
"Check that querysets obey the router for db suggestions"
self.assertEqual(Book.objects.db, 'other')
self.assertEqual(Book.objects.all().db, 'other')
self.assertEqual(Book.objects.using('default').db, 'default')
self.assertEqual(Book.objects.db_manager('default').db, 'default')
self.assertEqual(Book.objects.db_manager('default').all().db, 'default')
def test_migrate_selection(self):
"Synchronization behavior is predictable"
self.assertTrue(router.allow_migrate_model('default', User))
self.assertTrue(router.allow_migrate_model('default', Book))
self.assertTrue(router.allow_migrate_model('other', User))
self.assertTrue(router.allow_migrate_model('other', Book))
with override_settings(DATABASE_ROUTERS=[TestRouter(), AuthRouter()]):
# Add the auth router to the chain. TestRouter is a universal
# synchronizer, so it should have no effect.
self.assertTrue(router.allow_migrate_model('default', User))
self.assertTrue(router.allow_migrate_model('default', Book))
self.assertTrue(router.allow_migrate_model('other', User))
self.assertTrue(router.allow_migrate_model('other', Book))
with override_settings(DATABASE_ROUTERS=[AuthRouter(), TestRouter()]):
# Now check what happens if the router order is reversed.
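# With AuthRouter consulted first, auth models are only allowed to migrate to 'other'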
self.assertFalse(router.allow_migrate_model('default', User))
self.assertTrue(router.allow_migrate_model('default', Book))
self.assertTrue(router.allow_migrate_model('other', User))
self.assertTrue(router.allow_migrate_model('other', Book))
def test_partial_router(self):
"A router can choose to implement a subset of methods"
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
# First check the baseline behavior.
self.assertEqual(router.db_for_read(User), 'other')
self.assertEqual(router.db_for_read(Book), 'other')
self.assertEqual(router.db_for_write(User), 'default')
self.assertEqual(router.db_for_write(Book), 'default')
self.assertTrue(router.allow_relation(dive, dive))
self.assertTrue(router.allow_migrate_model('default', User))
self.assertTrue(router.allow_migrate_model('default', Book))
with override_settings(DATABASE_ROUTERS=[WriteRouter(), AuthRouter(), TestRouter()]):
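# WriteRouter answers the write questions, AuthRouter claims auth reads,
# and TestRouter handles the remaining reads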
self.assertEqual(router.db_for_read(User), 'default')
self.assertEqual(router.db_for_read(Book), 'other')
self.assertEqual(router.db_for_write(User), 'writer')
self.assertEqual(router.db_for_write(Book), 'writer')
self.assertTrue(router.allow_relation(dive, dive))
self.assertFalse(router.allow_migrate_model('default', User))
self.assertTrue(router.allow_migrate_model('default', Book))
def test_database_routing(self):
marty = Person.objects.using('default').create(name="Marty Alchin")
pro = Book.objects.using('default').create(title="Pro Django",
published=datetime.date(2008, 12, 16),
editor=marty)
pro.authors = [marty]
# Create a book and author on the other database
Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
# An update query will be routed to the default database
Book.objects.filter(title='Pro Django').update(pages=200)
with self.assertRaises(Book.DoesNotExist):
# By default, the get query will be directed to 'other'
Book.objects.get(title='Pro Django')
# But the same query issued explicitly at a database will work.
pro = Book.objects.using('default').get(title='Pro Django')
# Check that the update worked.
self.assertEqual(pro.pages, 200)
# An update query with an explicit using clause will be routed
# to the requested database.
Book.objects.using('other').filter(title='Dive into Python').update(pages=300)
self.assertEqual(Book.objects.get(title='Dive into Python').pages, 300)
# Related object queries stick to the same database
# as the original object, regardless of the router
self.assertEqual(list(pro.authors.values_list('name', flat=True)), ['Marty Alchin'])
self.assertEqual(pro.editor.name, 'Marty Alchin')
# get_or_create is a special case. The get needs to be targeted at
# the write database in order to avoid potential transaction
# consistency problems
book, created = Book.objects.get_or_create(title="Pro Django")
self.assertFalse(created)
book, created = Book.objects.get_or_create(title="Dive Into Python",
defaults={'published': datetime.date(2009, 5, 4)})
self.assertTrue(created)
# Check the head count of objects
self.assertEqual(Book.objects.using('default').count(), 2)
self.assertEqual(Book.objects.using('other').count(), 1)
# If a database isn't specified, the read database is used
self.assertEqual(Book.objects.count(), 1)
# A delete query will also be routed to the default database
Book.objects.filter(pages__gt=150).delete()
# The default database has lost the book.
self.assertEqual(Book.objects.using('default').count(), 1)
self.assertEqual(Book.objects.using('other').count(), 1)
def test_invalid_set_foreign_key_assignment(self):
marty = Person.objects.using('default').create(name="Marty Alchin")
dive = Book.objects.using('other').create(
title="Dive into Python",
published=datetime.date(2009, 5, 4),
)
# Set a foreign key set with an object from a different database
msg = "<Book: Dive into Python> instance isn't saved. Use bulk=False or save the object first."
with self.assertRaisesMessage(ValueError, msg):
marty.edited.set([dive])
def test_foreign_key_cross_database_protection(self):
"Foreign keys can cross databases if they two databases have a common source"
# Create a book and author on the default database
pro = Book.objects.using('default').create(title="Pro Django",
published=datetime.date(2008, 12, 16))
marty = Person.objects.using('default').create(name="Marty Alchin")
# Create a book and author on the other database
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
mark = Person.objects.using('other').create(name="Mark Pilgrim")
# Set a foreign key with an object from a different database
try:
dive.editor = marty
except ValueError:
self.fail("Assignment across primary/replica databases with a common source should be ok")
# Database assignments of original objects haven't changed...
self.assertEqual(marty._state.db, 'default')
self.assertEqual(pro._state.db, 'default')
self.assertEqual(dive._state.db, 'other')
self.assertEqual(mark._state.db, 'other')
# ... but they will when the affected object is saved.
dive.save()
self.assertEqual(dive._state.db, 'default')
# ...and the source database now has a copy of any object saved
try:
Book.objects.using('default').get(title='Dive into Python').delete()
except Book.DoesNotExist:
self.fail('Source database should have a copy of saved object')
# This isn't a real primary/replica database, so restore the original from other
dive = Book.objects.using('other').get(title='Dive into Python')
self.assertEqual(dive._state.db, 'other')
# Set a foreign key set with an object from a different database
try:
marty.edited.set([pro, dive], bulk=False)
except ValueError:
self.fail("Assignment across primary/replica databases with a common source should be ok")
# Assignment implies a save, so database assignments of original objects have changed...
self.assertEqual(marty._state.db, 'default')
self.assertEqual(pro._state.db, 'default')
self.assertEqual(dive._state.db, 'default')
self.assertEqual(mark._state.db, 'other')
# ...and the source database now has a copy of any object saved
try:
Book.objects.using('default').get(title='Dive into Python').delete()
except Book.DoesNotExist:
self.fail('Source database should have a copy of saved object')
# This isn't a real primary/replica database, so restore the original from other
dive = Book.objects.using('other').get(title='Dive into Python')
self.assertEqual(dive._state.db, 'other')
# Add to a foreign key set with an object from a different database
try:
marty.edited.add(dive, bulk=False)
except ValueError:
self.fail("Assignment across primary/replica databases with a common source should be ok")
# Add implies a save, so database assignments of original objects have changed...
self.assertEqual(marty._state.db, 'default')
self.assertEqual(pro._state.db, 'default')
self.assertEqual(dive._state.db, 'default')
self.assertEqual(mark._state.db, 'other')
# ...and the source database now has a copy of any object saved
try:
Book.objects.using('default').get(title='Dive into Python').delete()
except Book.DoesNotExist:
self.fail('Source database should have a copy of saved object')
# This isn't a real primary/replica database, so restore the original from other
dive = Book.objects.using('other').get(title='Dive into Python')
# If you assign a FK object when the base object hasn't
# been saved yet, you implicitly assign the database for the
# base object.
chris = Person(name="Chris Mills")
html5 = Book(title="Dive into HTML5", published=datetime.date(2010, 3, 15))
# initially, no db assigned
self.assertEqual(chris._state.db, None)
self.assertEqual(html5._state.db, None)
# old object comes from 'other', so the new object is set to use the
# source of 'other'...
self.assertEqual(dive._state.db, 'other')
chris.save()
dive.editor = chris
html5.editor = mark
self.assertEqual(dive._state.db, 'other')
self.assertEqual(mark._state.db, 'other')
self.assertEqual(chris._state.db, 'default')
self.assertEqual(html5._state.db, 'default')
# This also works if you assign the FK in the constructor
water = Book(title="Dive into Water", published=datetime.date(2001, 1, 1), editor=mark)
self.assertEqual(water._state.db, 'default')
# For the remainder of this test, create a copy of 'mark' in the
# 'default' database to prevent integrity errors on backends that
# don't defer constraint checks until the end of the transaction
mark.save(using='default')
# That save switched 'mark' to the 'default' database; move it back to 'other'
mark.save(using='other')
self.assertEqual(mark._state.db, 'other')
# If you create an object through a FK relation, it will be
# written to the write database, even if the original object
# was on the read database
cheesecake = mark.edited.create(title='Dive into Cheesecake', published=datetime.date(2010, 3, 15))
self.assertEqual(cheesecake._state.db, 'default')
# Same goes for get_or_create, regardless of whether getting or creating
cheesecake, created = mark.edited.get_or_create(
title='Dive into Cheesecake',
published=datetime.date(2010, 3, 15),
)
self.assertEqual(cheesecake._state.db, 'default')
puddles, created = mark.edited.get_or_create(title='Dive into Puddles', published=datetime.date(2010, 3, 15))
self.assertEqual(puddles._state.db, 'default')
def test_m2m_cross_database_protection(self):
"M2M relations can cross databases if the database share a source"
# Create books and authors on the inverse to the usual database
pro = Book.objects.using('other').create(pk=1, title="Pro Django",
published=datetime.date(2008, 12, 16))
marty = Person.objects.using('other').create(pk=1, name="Marty Alchin")
dive = Book.objects.using('default').create(pk=2, title="Dive into Python",
published=datetime.date(2009, 5, 4))
mark = Person.objects.using('default').create(pk=2, name="Mark Pilgrim")
# Now save back onto the usual database.
# This simulates primary/replica - the objects exist on both database,
# but the _state.db is as it is for all other tests.
pro.save(using='default')
marty.save(using='default')
dive.save(using='other')
mark.save(using='other')
# Check that we have 2 of both types of object on both databases
self.assertEqual(Book.objects.using('default').count(), 2)
self.assertEqual(Book.objects.using('other').count(), 2)
self.assertEqual(Person.objects.using('default').count(), 2)
self.assertEqual(Person.objects.using('other').count(), 2)
# Set a m2m set with an object from a different database
try:
marty.book_set = [pro, dive]
except ValueError:
self.fail("Assignment across primary/replica databases with a common source should be ok")
# Database assignments don't change
self.assertEqual(marty._state.db, 'default')
self.assertEqual(pro._state.db, 'default')
self.assertEqual(dive._state.db, 'other')
self.assertEqual(mark._state.db, 'other')
# All m2m relations should be saved on the default database
self.assertEqual(Book.authors.through.objects.using('default').count(), 2)
self.assertEqual(Book.authors.through.objects.using('other').count(), 0)
# Reset relations
Book.authors.through.objects.using('default').delete()
# Add to an m2m with an object from a different database
try:
marty.book_set.add(dive)
except ValueError:
self.fail("Assignment across primary/replica databases with a common source should be ok")
# Database assignments don't change
self.assertEqual(marty._state.db, 'default')
self.assertEqual(pro._state.db, 'default')
self.assertEqual(dive._state.db, 'other')
self.assertEqual(mark._state.db, 'other')
# All m2m relations should be saved on the default database
self.assertEqual(Book.authors.through.objects.using('default').count(), 1)
self.assertEqual(Book.authors.through.objects.using('other').count(), 0)
# Reset relations
Book.authors.through.objects.using('default').delete()
# Set a reverse m2m with an object from a different database
try:
dive.authors = [mark, marty]
except ValueError:
self.fail("Assignment across primary/replica databases with a common source should be ok")
# Database assignments don't change
self.assertEqual(marty._state.db, 'default')
self.assertEqual(pro._state.db, 'default')
self.assertEqual(dive._state.db, 'other')
self.assertEqual(mark._state.db, 'other')
# All m2m relations should be saved on the default database
self.assertEqual(Book.authors.through.objects.using('default').count(), 2)
self.assertEqual(Book.authors.through.objects.using('other').count(), 0)
# Reset relations
Book.authors.through.objects.using('default').delete()
self.assertEqual(Book.authors.through.objects.using('default').count(), 0)
self.assertEqual(Book.authors.through.objects.using('other').count(), 0)
# Add to a reverse m2m with an object from a different database
try:
dive.authors.add(marty)
except ValueError:
self.fail("Assignment across primary/replica databases with a common source should be ok")
# Database assignments don't change
self.assertEqual(marty._state.db, 'default')
self.assertEqual(pro._state.db, 'default')
self.assertEqual(dive._state.db, 'other')
self.assertEqual(mark._state.db, 'other')
# All m2m relations should be saved on the default database
self.assertEqual(Book.authors.through.objects.using('default').count(), 1)
self.assertEqual(Book.authors.through.objects.using('other').count(), 0)
# If you create an object through a M2M relation, it will be
# written to the write database, even if the original object
# was on the read database
alice = dive.authors.create(name='Alice')
self.assertEqual(alice._state.db, 'default')
# Same goes for get_or_create, regardless of whether getting or creating
alice, created = dive.authors.get_or_create(name='Alice')
self.assertEqual(alice._state.db, 'default')
bob, created = dive.authors.get_or_create(name='Bob')
self.assertEqual(bob._state.db, 'default')
def test_o2o_cross_database_protection(self):
"Operations that involve sharing FK objects across databases raise an error"
# Create a user and profile on the default database
alice = User.objects.db_manager('default').create_user('alice', '[email protected]')
# Create a user and profile on the other database
bob = User.objects.db_manager('other').create_user('bob', '[email protected]')
# Set a one-to-one relation with an object from a different database
alice_profile = UserProfile.objects.create(user=alice, flavor='chocolate')
try:
bob.userprofile = alice_profile
except ValueError:
self.fail("Assignment across primary/replica databases with a common source should be ok")
# Database assignments of original objects haven't changed...
self.assertEqual(alice._state.db, 'default')
self.assertEqual(alice_profile._state.db, 'default')
self.assertEqual(bob._state.db, 'other')
# ... but they will when the affected object is saved.
bob.save()
self.assertEqual(bob._state.db, 'default')
def test_generic_key_cross_database_protection(self):
"Generic Key operations can span databases if they share a source"
# Create a book and author on the default database
pro = Book.objects.using('default').create(title="Pro Django",
published=datetime.date(2008, 12, 16))
review1 = Review.objects.using('default').create(source="Python Monthly", content_object=pro)
# Create a book and author on the other database
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
review2 = Review.objects.using('other').create(source="Python Weekly", content_object=dive)
# Set a generic foreign key with an object from a different database
try:
review1.content_object = dive
except ValueError:
self.fail("Assignment across primary/replica databases with a common source should be ok")
# Database assignments of original objects haven't changed...
self.assertEqual(pro._state.db, 'default')
self.assertEqual(review1._state.db, 'default')
self.assertEqual(dive._state.db, 'other')
self.assertEqual(review2._state.db, 'other')
# ... but they will when the affected object is saved.
dive.save()
self.assertEqual(review1._state.db, 'default')
self.assertEqual(dive._state.db, 'default')
# ...and the source database now has a copy of any object saved
try:
Book.objects.using('default').get(title='Dive into Python').delete()
except Book.DoesNotExist:
self.fail('Source database should have a copy of saved object')
# This isn't a real primary/replica database, so restore the original from other
dive = Book.objects.using('other').get(title='Dive into Python')
self.assertEqual(dive._state.db, 'other')
# Add to a generic foreign key set with an object from a different database
try:
dive.reviews.add(review1)
except ValueError:
self.fail("Assignment across primary/replica databases with a common source should be ok")
# Database assignments of original objects haven't changed...
self.assertEqual(pro._state.db, 'default')
self.assertEqual(review1._state.db, 'default')
self.assertEqual(dive._state.db, 'other')
self.assertEqual(review2._state.db, 'other')
# ... but they will when the affected object is saved.
dive.save()
self.assertEqual(dive._state.db, 'default')
# ...and the source database now has a copy of any object saved
try:
Book.objects.using('default').get(title='Dive into Python').delete()
except Book.DoesNotExist:
self.fail('Source database should have a copy of saved object')
# BUT! if you assign a FK object when the base object hasn't
# been saved yet, you implicitly assign the database for the
# base object.
review3 = Review(source="Python Daily")
# initially, no db assigned
self.assertEqual(review3._state.db, None)
# Dive comes from 'other', so review3 is set to use the source of 'other'...
review3.content_object = dive
self.assertEqual(review3._state.db, 'default')
# If you create an object through a generic key relation, it will be
# written to the write database, even if the original object
# was on the read database
dive = Book.objects.using('other').get(title='Dive into Python')
nyt = dive.reviews.create(source="New York Times", content_object=dive)
self.assertEqual(nyt._state.db, 'default')
def test_m2m_managers(self):
"M2M relations are represented by managers, and can be controlled like managers"
pro = Book.objects.using('other').create(pk=1, title="Pro Django",
published=datetime.date(2008, 12, 16))
marty = Person.objects.using('other').create(pk=1, name="Marty Alchin")
self.assertEqual(pro.authors.db, 'other')
self.assertEqual(pro.authors.db_manager('default').db, 'default')
self.assertEqual(pro.authors.db_manager('default').all().db, 'default')
self.assertEqual(marty.book_set.db, 'other')
self.assertEqual(marty.book_set.db_manager('default').db, 'default')
self.assertEqual(marty.book_set.db_manager('default').all().db, 'default')
def test_foreign_key_managers(self):
"FK reverse relations are represented by managers, and can be controlled like managers"
marty = Person.objects.using('other').create(pk=1, name="Marty Alchin")
Book.objects.using('other').create(pk=1, title="Pro Django",
published=datetime.date(2008, 12, 16),
editor=marty)
self.assertEqual(marty.edited.db, 'other')
self.assertEqual(marty.edited.db_manager('default').db, 'default')
self.assertEqual(marty.edited.db_manager('default').all().db, 'default')
def test_generic_key_managers(self):
"Generic key relations are represented by managers, and can be controlled like managers"
pro = Book.objects.using('other').create(title="Pro Django",
published=datetime.date(2008, 12, 16))
Review.objects.using('other').create(source="Python Monthly",
content_object=pro)
self.assertEqual(pro.reviews.db, 'other')
self.assertEqual(pro.reviews.db_manager('default').db, 'default')
self.assertEqual(pro.reviews.db_manager('default').all().db, 'default')
def test_subquery(self):
"""Make sure as_sql works with subqueries and primary/replica."""
# Create a book and author on the other database
mark = Person.objects.using('other').create(name="Mark Pilgrim")
Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4),
editor=mark)
sub = Person.objects.filter(name='Mark Pilgrim')
qs = Book.objects.filter(editor__in=sub)
# When you call __str__ on the query object, it doesn't know about using
# so it falls back to the default. Don't let routing instructions
# force the subquery to an incompatible database.
str(qs.query)
# If you evaluate the query, it should work, running on 'other'
self.assertEqual(list(qs.values_list('title', flat=True)), ['Dive into Python'])
def test_deferred_models(self):
mark_def = Person.objects.using('default').create(name="Mark Pilgrim")
mark_other = Person.objects.using('other').create(name="Mark Pilgrim")
orig_b = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4),
editor=mark_other)
b = Book.objects.using('other').only('title').get(pk=orig_b.pk)
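# Accessing the deferred 'published' field triggers a fetch on 'other',
# the database the instance came from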
self.assertEqual(b.published, datetime.date(2009, 5, 4))
b = Book.objects.using('other').only('title').get(pk=orig_b.pk)
b.editor = mark_def
b.save(using='default')
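# The save should write the full row, including the deferred field, to 'default'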
self.assertEqual(Book.objects.using('default').get(pk=b.pk).published,
datetime.date(2009, 5, 4))
@override_settings(DATABASE_ROUTERS=[AuthRouter()])
class AuthTestCase(TestCase):
multi_db = True
def test_auth_manager(self):
"The methods on the auth manager obey database hints"
# Create one user using default allocation policy
User.objects.create_user('alice', '[email protected]')
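# AuthRouter sends auth writes to 'other', so alice ends up there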
# Create another user, explicitly specifying the database
User.objects.db_manager('default').create_user('bob', '[email protected]')
# The first user only exists on the other database
alice = User.objects.using('other').get(username='alice')
self.assertEqual(alice.username, 'alice')
self.assertEqual(alice._state.db, 'other')
self.assertRaises(User.DoesNotExist, User.objects.using('default').get, username='alice')
# The second user only exists on the default database
bob = User.objects.using('default').get(username='bob')
self.assertEqual(bob.username, 'bob')
self.assertEqual(bob._state.db, 'default')
self.assertRaises(User.DoesNotExist, User.objects.using('other').get, username='bob')
# That is... there is one user on each database
self.assertEqual(User.objects.using('default').count(), 1)
self.assertEqual(User.objects.using('other').count(), 1)
def test_dumpdata(self):
"Check that dumpdata honors allow_migrate restrictions on the router"
User.objects.create_user('alice', '[email protected]')
User.objects.db_manager('default').create_user('bob', '[email protected]')
# Check that dumping the default database doesn't try to include auth
# because allow_migrate prohibits auth on default
new_io = StringIO()
management.call_command('dumpdata', 'auth', format='json', database='default', stdout=new_io)
command_output = new_io.getvalue().strip()
self.assertEqual(command_output, '[]')
# Check that dumping the other database does include auth
new_io = StringIO()
management.call_command('dumpdata', 'auth', format='json', database='other', stdout=new_io)
command_output = new_io.getvalue().strip()
self.assertIn('"email": "[email protected]"', command_output)
class AntiPetRouter(object):
# A router that only expresses an opinion on migrate,
# passing pets to the 'other' database
def allow_migrate(self, db, app_label, model_name=None, **hints):
if db == 'other':
return model_name == 'pet'
else:
return model_name != 'pet'
class FixtureTestCase(TestCase):
multi_db = True
fixtures = ['multidb-common', 'multidb']
@override_settings(DATABASE_ROUTERS=[AntiPetRouter()])
def test_fixture_loading(self):
"Multi-db fixtures are loaded correctly"
# Check that "Pro Django" exists on the default database, but not on the other database
try:
Book.objects.get(title="Pro Django")
Book.objects.using('default').get(title="Pro Django")
except Book.DoesNotExist:
self.fail('"Pro Django" should exist on default database')
self.assertRaises(
Book.DoesNotExist,
Book.objects.using('other').get,
title="Pro Django"
)
# Check that "Dive into Python" exists on the other database, but not on the default database
try:
Book.objects.using('other').get(title="Dive into Python")
except Book.DoesNotExist:
self.fail('"Dive into Python" should exist on other database')
self.assertRaises(
Book.DoesNotExist,
Book.objects.get,
title="Dive into Python"
)
self.assertRaises(
Book.DoesNotExist,
Book.objects.using('default').get,
title="Dive into Python"
)
# Check that "Definitive Guide" exists on both databases
try:
Book.objects.get(title="The Definitive Guide to Django")
Book.objects.using('default').get(title="The Definitive Guide to Django")
Book.objects.using('other').get(title="The Definitive Guide to Django")
except Book.DoesNotExist:
self.fail('"The Definitive Guide to Django" should exist on both databases')
@override_settings(DATABASE_ROUTERS=[AntiPetRouter()])
def test_pseudo_empty_fixtures(self):
"""
A fixture can contain entries, but lead to nothing in the database;
this shouldn't raise an error (#14068).
"""
new_io = StringIO()
management.call_command('loaddata', 'pets', stdout=new_io, stderr=new_io)
command_output = new_io.getvalue().strip()
# No objects will actually be loaded
self.assertEqual(command_output, "Installed 0 object(s) (of 2) from 1 fixture(s)")
class PickleQuerySetTestCase(TestCase):
multi_db = True
def test_pickling(self):
for db in connections:
Book.objects.using(db).create(title='Dive into Python', published=datetime.date(2009, 5, 4))
qs = Book.objects.all()
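# Pickling and unpickling a queryset should preserve its database alias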
self.assertEqual(qs.db, pickle.loads(pickle.dumps(qs)).db)
class DatabaseReceiver(object):
"""
Used in the tests for the database argument in signals (#13552)
"""
def __call__(self, signal, sender, **kwargs):
self._database = kwargs['using']
class WriteToOtherRouter(object):
"""
A router that sends all writes to the other database.
"""
def db_for_write(self, model, **hints):
return "other"
class SignalTests(TestCase):
multi_db = True
def override_router(self):
return override_settings(DATABASE_ROUTERS=[WriteToOtherRouter()])
def test_database_arg_save_and_delete(self):
"""
Tests that the pre/post_save signal contains the correct database.
(#13552)
"""
# Make some signal receivers
pre_save_receiver = DatabaseReceiver()
post_save_receiver = DatabaseReceiver()
pre_delete_receiver = DatabaseReceiver()
post_delete_receiver = DatabaseReceiver()
# Make model and connect receivers
signals.pre_save.connect(sender=Person, receiver=pre_save_receiver)
signals.post_save.connect(sender=Person, receiver=post_save_receiver)
signals.pre_delete.connect(sender=Person, receiver=pre_delete_receiver)
signals.post_delete.connect(sender=Person, receiver=post_delete_receiver)
p = Person.objects.create(name='Darth Vader')
# Save and test receivers got calls
p.save()
self.assertEqual(pre_save_receiver._database, DEFAULT_DB_ALIAS)
self.assertEqual(post_save_receiver._database, DEFAULT_DB_ALIAS)
# Delete, and test
p.delete()
self.assertEqual(pre_delete_receiver._database, DEFAULT_DB_ALIAS)
self.assertEqual(post_delete_receiver._database, DEFAULT_DB_ALIAS)
# Save again to a different database
p.save(using="other")
self.assertEqual(pre_save_receiver._database, "other")
self.assertEqual(post_save_receiver._database, "other")
# Delete, and test
p.delete(using="other")
self.assertEqual(pre_delete_receiver._database, "other")
self.assertEqual(post_delete_receiver._database, "other")
signals.pre_save.disconnect(sender=Person, receiver=pre_save_receiver)
signals.post_save.disconnect(sender=Person, receiver=post_save_receiver)
signals.pre_delete.disconnect(sender=Person, receiver=pre_delete_receiver)
signals.post_delete.disconnect(sender=Person, receiver=post_delete_receiver)
def test_database_arg_m2m(self):
"""
Test that the m2m_changed signal has a correct database arg (#13552)
"""
# Make a receiver
receiver = DatabaseReceiver()
# Connect it
signals.m2m_changed.connect(receiver=receiver)
# Create the models that will be used for the tests
b = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
p = Person.objects.create(name="Marty Alchin")
# Create a copy of the models on the 'other' database to prevent
# integrity errors on backends that don't defer constraint checks
Book.objects.using('other').create(pk=b.pk, title=b.title,
published=b.published)
Person.objects.using('other').create(pk=p.pk, name=p.name)
# Test addition
b.authors.add(p)
self.assertEqual(receiver._database, DEFAULT_DB_ALIAS)
with self.override_router():
b.authors.add(p)
self.assertEqual(receiver._database, "other")
# Test removal
b.authors.remove(p)
self.assertEqual(receiver._database, DEFAULT_DB_ALIAS)
with self.override_router():
b.authors.remove(p)
self.assertEqual(receiver._database, "other")
# Test addition in reverse
p.book_set.add(b)
self.assertEqual(receiver._database, DEFAULT_DB_ALIAS)
with self.override_router():
p.book_set.add(b)
self.assertEqual(receiver._database, "other")
# Test clearing
b.authors.clear()
self.assertEqual(receiver._database, DEFAULT_DB_ALIAS)
with self.override_router():
b.authors.clear()
self.assertEqual(receiver._database, "other")
class AttributeErrorRouter(object):
"A router to test the exception handling of ConnectionRouter"
def db_for_read(self, model, **hints):
raise AttributeError
def db_for_write(self, model, **hints):
raise AttributeError
class RouterAttributeErrorTestCase(TestCase):
multi_db = True
def override_router(self):
return override_settings(DATABASE_ROUTERS=[AttributeErrorRouter()])
def test_attribute_error_read(self):
"Check that the AttributeError from AttributeErrorRouter bubbles up"
b = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
with self.override_router():
self.assertRaises(AttributeError, Book.objects.get, pk=b.pk)
def test_attribute_error_save(self):
"Check that the AttributeError from AttributeErrorRouter bubbles up"
dive = Book()
dive.title = "Dive into Python"
dive.published = datetime.date(2009, 5, 4)
with self.override_router():
self.assertRaises(AttributeError, dive.save)
def test_attribute_error_delete(self):
"Check that the AttributeError from AttributeErrorRouter bubbles up"
b = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
p = Person.objects.create(name="Marty Alchin")
b.authors = [p]
b.editor = p
with self.override_router():
self.assertRaises(AttributeError, b.delete)
def test_attribute_error_m2m(self):
"Check that the AttributeError from AttributeErrorRouter bubbles up"
b = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
p = Person.objects.create(name="Marty Alchin")
with self.override_router():
self.assertRaises(AttributeError, setattr, b, 'authors', [p])
class ModelMetaRouter(object):
"A router to ensure model arguments are real model classes"
def db_for_write(self, model, **hints):
if not hasattr(model, '_meta'):
raise ValueError
@override_settings(DATABASE_ROUTERS=[ModelMetaRouter()])
class RouterModelArgumentTestCase(TestCase):
multi_db = True
def test_m2m_collection(self):
b = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
p = Person.objects.create(name="Marty Alchin")
# test add
b.authors.add(p)
# test remove
b.authors.remove(p)
# test clear
b.authors.clear()
# test setattr
b.authors = [p]
# test M2M collection
b.delete()
def test_foreignkey_collection(self):
person = Person.objects.create(name='Bob')
Pet.objects.create(owner=person, name='Wart')
# test related FK collection
person.delete()
class SyncOnlyDefaultDatabaseRouter(object):
def allow_migrate(self, db, app_label, **hints):
return db == DEFAULT_DB_ALIAS
class MigrateTestCase(TestCase):
available_apps = [
'multiple_database',
'django.contrib.auth',
'django.contrib.contenttypes'
]
multi_db = True
def test_migrate_to_other_database(self):
"""Regression test for #16039: migrate with --database option."""
cts = ContentType.objects.using('other').filter(app_label='multiple_database')
count = cts.count()
self.assertGreater(count, 0)
cts.delete()
management.call_command('migrate', verbosity=0, interactive=False, database='other')
self.assertEqual(cts.count(), count)
def test_migrate_to_other_database_with_router(self):
"""Regression test for #16039: migrate with --database option."""
cts = ContentType.objects.using('other').filter(app_label='multiple_database')
cts.delete()
with override_settings(DATABASE_ROUTERS=[SyncOnlyDefaultDatabaseRouter()]):
management.call_command('migrate', verbosity=0, interactive=False, database='other')
self.assertEqual(cts.count(), 0)
class RouterUsed(Exception):
WRITE = 'write'
def __init__(self, mode, model, hints):
self.mode = mode
self.model = model
self.hints = hints
class RouteForWriteTestCase(TestCase):
multi_db = True
class WriteCheckRouter(object):
def db_for_write(self, model, **hints):
raise RouterUsed(mode=RouterUsed.WRITE, model=model, hints=hints)
def override_router(self):
return override_settings(DATABASE_ROUTERS=[RouteForWriteTestCase.WriteCheckRouter()])
def test_fk_delete(self):
owner = Person.objects.create(name='Someone')
pet = Pet.objects.create(name='fido', owner=owner)
try:
with self.override_router():
pet.owner.delete()
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Person)
self.assertEqual(e.hints, {'instance': owner})
def test_reverse_fk_delete(self):
owner = Person.objects.create(name='Someone')
to_del_qs = owner.pet_set.all()
try:
with self.override_router():
to_del_qs.delete()
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Pet)
self.assertEqual(e.hints, {'instance': owner})
def test_reverse_fk_get_or_create(self):
owner = Person.objects.create(name='Someone')
try:
with self.override_router():
owner.pet_set.get_or_create(name='fido')
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Pet)
self.assertEqual(e.hints, {'instance': owner})
def test_reverse_fk_update(self):
owner = Person.objects.create(name='Someone')
Pet.objects.create(name='fido', owner=owner)
try:
with self.override_router():
owner.pet_set.update(name='max')
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Pet)
self.assertEqual(e.hints, {'instance': owner})
def test_m2m_add(self):
auth = Person.objects.create(name='Someone')
book = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
try:
with self.override_router():
book.authors.add(auth)
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Book.authors.through)
self.assertEqual(e.hints, {'instance': book})
def test_m2m_clear(self):
auth = Person.objects.create(name='Someone')
book = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
book.authors.add(auth)
try:
with self.override_router():
book.authors.clear()
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Book.authors.through)
self.assertEqual(e.hints, {'instance': book})
def test_m2m_delete(self):
auth = Person.objects.create(name='Someone')
book = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
book.authors.add(auth)
try:
with self.override_router():
book.authors.all().delete()
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Person)
self.assertEqual(e.hints, {'instance': book})
def test_m2m_get_or_create(self):
Person.objects.create(name='Someone')
book = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
try:
with self.override_router():
book.authors.get_or_create(name='Someone else')
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Book)
self.assertEqual(e.hints, {'instance': book})
def test_m2m_remove(self):
auth = Person.objects.create(name='Someone')
book = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
book.authors.add(auth)
try:
with self.override_router():
book.authors.remove(auth)
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Book.authors.through)
self.assertEqual(e.hints, {'instance': book})
def test_m2m_update(self):
auth = Person.objects.create(name='Someone')
book = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
book.authors.add(auth)
try:
with self.override_router():
book.authors.all().update(name='Different')
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Person)
self.assertEqual(e.hints, {'instance': book})
def test_reverse_m2m_add(self):
auth = Person.objects.create(name='Someone')
book = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
try:
with self.override_router():
auth.book_set.add(book)
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Book.authors.through)
self.assertEqual(e.hints, {'instance': auth})
def test_reverse_m2m_clear(self):
auth = Person.objects.create(name='Someone')
book = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
book.authors.add(auth)
try:
with self.override_router():
auth.book_set.clear()
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Book.authors.through)
self.assertEqual(e.hints, {'instance': auth})
def test_reverse_m2m_delete(self):
auth = Person.objects.create(name='Someone')
book = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
book.authors.add(auth)
try:
with self.override_router():
auth.book_set.all().delete()
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Book)
self.assertEqual(e.hints, {'instance': auth})
def test_reverse_m2m_get_or_create(self):
auth = Person.objects.create(name='Someone')
Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
try:
with self.override_router():
auth.book_set.get_or_create(title="New Book", published=datetime.datetime.now())
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Person)
self.assertEqual(e.hints, {'instance': auth})
def test_reverse_m2m_remove(self):
auth = Person.objects.create(name='Someone')
book = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
book.authors.add(auth)
try:
with self.override_router():
auth.book_set.remove(book)
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Book.authors.through)
self.assertEqual(e.hints, {'instance': auth})
def test_reverse_m2m_update(self):
auth = Person.objects.create(name='Someone')
book = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
book.authors.add(auth)
try:
with self.override_router():
auth.book_set.all().update(title='Different')
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Book)
self.assertEqual(e.hints, {'instance': auth})
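# Illustrative note (not part of the test suite): outside of tests, a router such as
# WriteToOtherRouter above would normally be wired in through settings rather than
# override_settings, e.g.
#
#   DATABASE_ROUTERS = ['path.to.WriteToOtherRouter']
#
# where 'path.to' is a placeholder module path, not one defined in this file.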
|
bsd-3-clause
|
mayuroks/schedulord
|
server.py
|
1
|
1861
|
from flask import Flask, request, jsonify
from werkzeug.utils import secure_filename
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.restless import APIManager
import os
app = Flask(__name__)
app.config['DEBUG'] = True
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/schedulord.db'
app.config['UPLOAD_FOLDER'] = os.path.join(os.path.abspath('.'), 'uploads')
db = SQLAlchemy(app)
class Job(db.Model):
__tablename__ = "jobs"
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
name = db.Column(db.Text)
interval = db.Column(db.Integer)
script_name = db.Column(db.Text)
next_runtime = db.Column(db.DateTime)
# App routes
@app.route('/jobs')
def index():
all_jobs = Job.query.all()
all_jobs = { x.name: {'id': x.id, 'name': x.name, 'interval': x.interval} for x in all_jobs}
return jsonify(jobs=all_jobs)
@app.route('/jobs/create', methods=['POST'])
def create():
file = request.files['file']
json_data = request.form
filename = secure_filename(file.filename)
script_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
file.save(script_path)
# splitext drops the extension; rstrip('.py') would strip trailing '.', 'p' and 'y' characters
script_name = os.path.splitext(filename)[0]
# creating a job
job = Job(name=json_data['name'],
interval=json_data['interval'],
next_runtime=None,
script_name=script_name
)
db.session.add(job)
db.session.commit()
return "{0} has been uploaded".format(filename)
@app.route('/jobs/<id>', methods=['GET'])
def read(id):
return "read method"
@app.route('/jobs/<id>', methods=['DELETE'])
def delete(id):
job = Job.query.get(id)
job_name = job.name
db.session.delete(job)  # delete only this job; Job.query.delete() would wipe the whole table
db.session.commit()
return "job {} has been deleted".format(job_name)
if __name__ == "__main__":
db.drop_all()
db.create_all()
app.run(debug=True)
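# Illustrative client sketch (assumed host/port and script name, not part of the server):
# /jobs/create expects a multipart file upload plus 'name' and 'interval' form fields.
#
#   import requests
#   with open('backup.py', 'rb') as script:
#       requests.post('http://localhost:5000/jobs/create',
#                     data={'name': 'backup', 'interval': 60},
#                     files={'file': script})
#   requests.get('http://localhost:5000/jobs').json()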
|
mit
|
MapLarge/flat-file-feeds
|
directory_feed_constructor.py
|
1
|
4075
|
"""
Classname: DirectoryFeedConstructor
Description: Creates a feed from an internal directory source
Authored by: MapLarge, Inc. (Scott Rowles)
Change Log:
"""
"""
Define all the imports for the DirectoryFeedConstructor class
"""
import os
import re
import sys
import datetime as dt
import codecs
import uuid
import shutil
from feed_constructor import FeedConstructor
class DirectoryFeedConstructor(FeedConstructor):
"""
Define the class properties
"""
depth_limit = None
storage_root = None
storage_subdirectory = None
storage_directory = None
web_root = None
web_subdirectory = None
web_directory = None
start_directory = None
search_filter = None
def __init__(self, **config_json):
"""
Initialize the super class and load the class properties from the config json
"""
super(DirectoryFeedConstructor, self).__init__(**config_json)
self.storage_root = self.config_json['storage_root']
self.storage_subdirectory = self.config_json['storage_subdirectory']
self.storage_directory = os.path.join(self.storage_root, self.storage_subdirectory)
self.start_directory = self.config_json['start_directory']
self.web_root = self.config_json['web_root']
self.web_subdirectory = self.config_json['web_subdirectory']
self.web_directory = self.web_root + "/" + self.web_subdirectory
if self.config_json['feed_type'] == 'directory':
self.depth_limit = self.config_json['depth_limit']
self.search_filter = self.config_json['search_filter']
else:
self.depth_limit = 1
self.search_filter = self.config_json['file']
def walk_dir(self):
"""
Walk the directory with a given depth_limit
"""
path = os.path.normpath(self.start_directory)
# If the local subdirectory for storage does not exist,
# create it
try:
os.makedirs(self.storage_directory)
except OSError:
if os.path.exists(self.storage_directory):
pass
else:
raise
fpattern = re.compile(self.search_filter) if self.search_filter else None
adate = dt.datetime.utcnow().isoformat()
with codecs.open(self.storage_file, 'w', "utf-8") as ff:
for root,dirs,files in os.walk(self.start_directory, topdown=True):
depth = root[len(path) + len(os.path.sep):].count(os.path.sep)
if self.depth_limit and depth == self.depth_limit:
dirs[:] = [] # Don't recurse any deeper
else:
for afile in files:
if fpattern and fpattern.match(afile):
# Want to add each entry to the file,
fpath_orig = os.path.join(root, afile)
fpath_stage = os.path.join(self.storage_directory, afile)
try:
shutil.copy(fpath_orig, fpath_stage)
url = self.web_directory + "/" + afile
ff.write('{0}, {1}, {2}\n'.format(fpath_orig,
url,
adate.encode('utf-8')))
except IOError:
continue
except Exception as e:
print e.message
def construct_feed(self):
"""
Execute the walk_dir method
"""
self.walk_dir()
def provide_params(self):
"""
Provide parameters needed to save the feed to the database
"""
params = dict()
params['title'] = self.title
params['root_dir'] = self.start_directory
params['items_url'] = self.storage_file
params['feed_uuid'] = uuid.uuid4()
params['pub_time'] = dt.datetime.now()
params['mod_time'] = dt.datetime.now()
return params
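# Illustrative sketch (assumed keys and paths, not part of the module): the shape of
# config_json this constructor reads for a 'directory' feed; 'title' and 'storage_file'
# are assumed to come from the FeedConstructor base class.
#
#   config = {
#       'feed_type': 'directory',
#       'storage_root': '/var/feeds',
#       'storage_subdirectory': 'site_a',
#       'start_directory': '/data/incoming',
#       'web_root': 'http://example.com',
#       'web_subdirectory': 'feeds/site_a',
#       'depth_limit': 2,
#       'search_filter': r'.*\.csv$',
#   }
#   feed = DirectoryFeedConstructor(**config)
#   feed.construct_feed()
#   params = feed.provide_params()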
|
mit
|
samsu/neutron
|
db/migration/alembic_migrations/cisco_init_ops.py
|
17
|
8038
|
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Initial schema operations for cisco plugin
from alembic import op
import sqlalchemy as sa
segment_type = sa.Enum('vlan', 'overlay', 'trunk', 'multi-segment',
name='segment_type')
profile_type = sa.Enum('network', 'policy', name='profile_type')
def upgrade():
op.create_table(
'cisco_policy_profiles',
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('name', sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint('id'))
op.create_table(
'cisco_n1kv_vlan_allocations',
sa.Column('physical_network', sa.String(length=64), nullable=False),
sa.Column('vlan_id', sa.Integer(), autoincrement=False,
nullable=False),
sa.Column('allocated', sa.Boolean(), autoincrement=False,
nullable=False),
sa.PrimaryKeyConstraint('physical_network', 'vlan_id'))
op.create_table(
'cisco_network_profiles',
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('name', sa.String(length=255), nullable=True),
sa.Column('segment_type', segment_type, nullable=False),
sa.Column('sub_type', sa.String(length=255), nullable=True),
sa.Column('segment_range', sa.String(length=255), nullable=True),
sa.Column('multicast_ip_index', sa.Integer(), nullable=True),
sa.Column('multicast_ip_range', sa.String(length=255), nullable=True),
sa.Column('physical_network', sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint('id'))
op.create_table(
'cisco_n1kv_vxlan_allocations',
sa.Column('vxlan_id', sa.Integer(), autoincrement=False,
nullable=False),
sa.Column('allocated', sa.Boolean(), nullable=False),
sa.PrimaryKeyConstraint('vxlan_id'))
op.create_table(
'cisco_credentials',
sa.Column('credential_id', sa.String(length=255), nullable=True),
sa.Column('credential_name', sa.String(length=255), nullable=False),
sa.Column('user_name', sa.String(length=255), nullable=True),
sa.Column('password', sa.String(length=255), nullable=True),
sa.Column('type', sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint('credential_name'))
op.create_table(
'cisco_qos_policies',
sa.Column('qos_id', sa.String(length=255), nullable=True),
sa.Column('tenant_id', sa.String(length=255), nullable=False),
sa.Column('qos_name', sa.String(length=255), nullable=False),
sa.Column('qos_desc', sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint('tenant_id', 'qos_name'))
op.create_table(
'cisco_nexusport_bindings',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('port_id', sa.String(length=255), nullable=True),
sa.Column('vlan_id', sa.Integer(), nullable=False),
sa.Column('switch_ip', sa.String(length=255), nullable=False),
sa.Column('instance_id', sa.String(length=255), nullable=False),
sa.PrimaryKeyConstraint('id'))
op.create_table(
'cisco_n1kv_profile_bindings',
sa.Column('profile_type', profile_type, nullable=True),
sa.Column('tenant_id', sa.String(length=36), nullable=False),
sa.Column('profile_id', sa.String(length=36), nullable=False),
sa.PrimaryKeyConstraint('tenant_id', 'profile_id'))
op.create_table(
'cisco_n1kv_vmnetworks',
sa.Column('name', sa.String(length=80), nullable=False),
sa.Column('profile_id', sa.String(length=36), nullable=True),
sa.Column('network_id', sa.String(length=36), nullable=True),
sa.Column('port_count', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['profile_id'],
['cisco_policy_profiles.id'], ),
sa.PrimaryKeyConstraint('name'))
op.create_table(
'cisco_n1kv_trunk_segments',
sa.Column('trunk_segment_id', sa.String(length=36), nullable=False),
sa.Column('segment_id', sa.String(length=36), nullable=False),
sa.Column('dot1qtag', sa.String(length=36), nullable=False),
sa.ForeignKeyConstraint(['trunk_segment_id'], ['networks.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('trunk_segment_id', 'segment_id', 'dot1qtag'))
op.create_table(
'cisco_provider_networks',
sa.Column('network_id', sa.String(length=36), nullable=False),
sa.Column('network_type', sa.String(length=255), nullable=False),
sa.Column('segmentation_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('network_id'))
op.create_table(
'cisco_n1kv_multi_segments',
sa.Column('multi_segment_id', sa.String(length=36), nullable=False),
sa.Column('segment1_id', sa.String(length=36), nullable=False),
sa.Column('segment2_id', sa.String(length=36), nullable=False),
sa.Column('encap_profile_name', sa.String(length=36), nullable=True),
sa.ForeignKeyConstraint(['multi_segment_id'], ['networks.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('multi_segment_id', 'segment1_id',
'segment2_id'))
op.create_table(
'cisco_n1kv_network_bindings',
sa.Column('network_id', sa.String(length=36), nullable=False),
sa.Column('network_type', sa.String(length=32), nullable=False),
sa.Column('physical_network', sa.String(length=64), nullable=True),
sa.Column('segmentation_id', sa.Integer(), nullable=True),
sa.Column('multicast_ip', sa.String(length=32), nullable=True),
sa.Column('profile_id', sa.String(length=36), nullable=True),
sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
ondelete='CASCADE'),
sa.ForeignKeyConstraint(['profile_id'],
['cisco_network_profiles.id']),
sa.PrimaryKeyConstraint('network_id'))
op.create_table(
'cisco_n1kv_port_bindings',
sa.Column('port_id', sa.String(length=36), nullable=False),
sa.Column('profile_id', sa.String(length=36), nullable=True),
sa.ForeignKeyConstraint(['port_id'], ['ports.id'],
ondelete='CASCADE'),
sa.ForeignKeyConstraint(['profile_id'], ['cisco_policy_profiles.id']),
sa.PrimaryKeyConstraint('port_id'))
def downgrade():
op.drop_table('cisco_n1kv_port_bindings')
op.drop_table('cisco_n1kv_network_bindings')
op.drop_table('cisco_n1kv_multi_segments')
op.drop_table('cisco_provider_networks')
op.drop_table('cisco_n1kv_trunk_segments')
op.drop_table('cisco_n1kv_vmnetworks')
op.drop_table('cisco_n1kv_profile_bindings')
op.drop_table('cisco_nexusport_bindings')
op.drop_table('cisco_qos_policies')
op.drop_table('cisco_credentials')
op.drop_table('cisco_n1kv_vxlan_allocations')
op.drop_table('cisco_network_profiles')
op.drop_table('cisco_n1kv_vlan_allocations')
op.drop_table('cisco_policy_profiles')
# generate DDL for dropping enums
segment_type.drop(op.get_bind(), checkfirst=False)
profile_type.drop(op.get_bind(), checkfirst=False)
|
apache-2.0
|
AustinWise/mongrel2
|
examples/python/mongrel2/config/model.py
|
92
|
7490
|
from storm.locals import *
database = None
store = None
TABLES = ["server", "host", "route", "proxy", "directory", "handler",
"setting"]
def load_db(spec):
global database
global store
if not store:
database = create_database(spec)
store = Store(database)
return store
def clear_db():
for table in TABLES:
store.execute("DELETE FROM %s" % table)
def begin(config_db, clear=False):
store = load_db("sqlite:" + config_db)
store.mongrel2_clear=clear
if clear:
clear_db()
return store
def commit(servers, settings=None):
for server in servers:
store.add(server)
for host in server.hosts:
host.server = server
store.add(host)
for route in host.routes:
route.host = host
store.add(route)
if store.mongrel2_clear:
store.commit()
else:
print "Results won't be committed unless you begin(clear=True)."
if settings:
for k,v in settings.items():
store.add(Setting(unicode(k), unicode(v)))
store.commit()
class Server(object):
__storm_table__ = "server"
id = Int(primary = True)
uuid = Unicode()
access_log = Unicode()
error_log = Unicode()
chroot = Unicode()
default_host = Unicode()
name = Unicode()
pid_file = Unicode()
port = Int()
bind_addr = Unicode(default=unicode('0.0.0.0'))
use_ssl = Bool(default = 0)
def __init__(self, uuid=None, access_log=None, error_log=None,
chroot=None, default_host=None, name=None, pid_file=None,
port=None, hosts=None, bind_addr='0.0.0.0', use_ssl=False):
super(Server, self).__init__()
self.uuid = unicode(uuid)
self.access_log = unicode(access_log)
self.error_log = unicode(error_log)
self.chroot = unicode(chroot)
self.default_host = unicode(default_host)
self.name = unicode(name) if name else self.default_host
self.pid_file = unicode(pid_file)
self.port = port
self.bind_addr = unicode(bind_addr)
self.use_ssl = use_ssl
for h in hosts or []:
self.hosts.add(h)
def __repr__(self):
return "Server(uuid=%r, access_log=%r, error_log=%r, chroot=%r, default_host=%r, port=%d)" % (
self.uuid, self.access_log, self.error_log,
self.chroot, self.default_host, self.port)
class Host(object):
__storm_table__ = "host"
id = Int(primary = True)
server_id = Int()
server = Reference(server_id, Server.id)
maintenance = Bool(default = 0)
name = Unicode()
matching = Unicode()
def __init__(self, server=None, name=None, matching=None,
maintenance=False, routes=None):
super(Host, self).__init__()
self.server = server
self.name = unicode(name)
self.matching = matching or self.name
self.maintenance = maintenance
if routes:
for p,t in routes.items():
self.routes.add(Route(path=p, target=t))
def __repr__(self):
return "Host(maintenance=%d, name=%r, matching=%r)" % (
self.maintenance, self.name, self.matching)
Server.hosts = ReferenceSet(Server.id, Host.server_id)
class Handler(object):
__storm_table__ = "handler"
id = Int(primary = True)
send_spec = Unicode()
send_ident = Unicode()
recv_spec = Unicode()
recv_ident = Unicode()
raw_payload = Bool(default = 0)
protocol = Unicode(default = unicode('json'))
def __init__(self, send_spec, send_ident, recv_spec, recv_ident,
raw_payload=False, protocol='json'):
super(Handler, self).__init__()
self.send_spec = unicode(send_spec)
self.send_ident = unicode(send_ident)
self.recv_spec = unicode(recv_spec)
self.recv_ident = unicode(recv_ident)
self.raw_payload = raw_payload
self.protocol = unicode(protocol)
def __repr__(self):
return "Handler(send_spec=%r, send_ident=%r, recv_spec=%r, recv_ident=%r)" % (
self.send_spec, self.send_ident, self.recv_spec,
self.recv_ident)
class Proxy(object):
__storm_table__ = "proxy"
id = Int(primary = True)
addr = Unicode()
port = Int()
def __init__(self, addr, port):
super(Proxy, self).__init__()
self.addr = unicode(addr)
self.port = port
def __repr__(self):
return "Proxy(addr=%r, port=%d)" % (
self.addr, self.port)
class Dir(object):
__storm_table__ = "directory"
id = Int(primary = True)
base = Unicode()
index_file = Unicode()
default_ctype = Unicode()
cache_ttl = Int(default=0)
def __init__(self, base, index_file, default_ctype="text/plain", cache_ttl=0):
super(Dir, self).__init__()
self.base = unicode(base)
self.index_file = unicode(index_file)
self.default_ctype = unicode(default_ctype)
self.cache_ttl = cache_ttl
def __repr__(self):
return "Dir(base=%r, index_file=%r, default_ctype=%r)" % (
self.base, self.index_file, self.default_ctype)
class Route(object):
__storm_table__ = "route"
id = Int(primary = True)
path = Unicode()
reversed = Bool(default = 0)
host_id = Int()
host = Reference(host_id, Host.id)
target_id = Int()
target_type = Unicode()
_targets = {'dir': Dir,
'handler': Handler,
'proxy': Proxy}
def __init__(self, path=None, reversed=False, host=None, target=None):
super(Route, self).__init__()
self.path = unicode(path)
self.reversed = reversed
self.host = host
if target:
store.add(target)
store.commit()
self.target_id = target.id
self.target_type = unicode(target.__class__.__name__.lower())
def target_class(self):
return self._targets[self.target_type]
@property
def target(self):
kls = self.target_class()
targets = store.find(kls, kls.id == self.target_id)
assert targets.count() <= 1, "Routes should only map to one target."
return targets[0] if targets.count() else None
def __repr__(self):
return "Route(path=%r, reversed=%r, target=%r)" % (
self.path, self.reversed, self.target)
Host.routes = ReferenceSet(Host.id, Route.host_id)
class Log(object):
__storm_table__ = "log"
id = Int(primary = True)
who = Unicode()
what = Unicode()
happened_at = DateTime()
location = Unicode()
how = Unicode()
why = Unicode()
def __repr__(self):
return "[%s, %s@%s, %s] %s" % (
self.happened_at.isoformat(), self.who, self.location, self.what,
self.why)
class MIMEType(object):
__storm_table__ = "mimetype"
id = Int(primary = True)
mimetype = Unicode()
extension = Unicode()
def __repr__(self):
return "MIMEType(mimetype=%r, extension=%r)" % (
self.mimetype, self.extension)
class Setting(object):
__storm_table__ = "setting"
id = Int(primary = True)
key = Unicode()
value = Unicode()
def __init__(self, key, value):
super(Setting, self).__init__()
self.key = key
self.value = value
def __repr__(self):
return "Setting(key=%r, value=%r)" % (self.key, self.value)
|
bsd-3-clause
|
taotie12010/bigfour
|
lms/djangoapps/commerce/utils.py
|
107
|
1296
|
"""Utilities to assist with commerce tasks."""
import logging
log = logging.getLogger(__name__)
def audit_log(name, **kwargs):
"""DRY helper used to emit an INFO-level log message.
Messages logged with this function are used to construct an audit trail. Log messages
should be emitted immediately after the event they correspond to has occurred and, if
applicable, after the database has been updated. These log messages use a verbose
key-value pair syntax to make it easier to extract fields when parsing the application's
logs.
This function is variadic, accepting a variable number of keyword arguments.
Arguments:
name (str): The name of the message to log. For example, 'payment_received'.
Keyword Arguments:
Indefinite. Keyword arguments are strung together as comma-separated key-value
pairs ordered alphabetically by key in the resulting log message.
Returns:
None
"""
# Joins sorted keyword argument keys and values with an "=", wraps each value
# in quotes, and separates each pair with a comma and a space.
payload = u', '.join(['{k}="{v}"'.format(k=k, v=v) for k, v in sorted(kwargs.items())])
message = u'{name}: {payload}'.format(name=name, payload=payload)
log.info(message)
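# Illustrative example (hypothetical event name and fields, not part of the module):
#
#   audit_log('payment_received', amount='9.99', user_id=42)
#   # logs: payment_received: amount="9.99", user_id="42"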
|
agpl-3.0
|
jamesbeebop/CouchPotatoServer
|
couchpotato/core/media/_base/providers/torrent/torrentleech.py
|
33
|
5128
|
import traceback
from bs4 import BeautifulSoup
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
import six
log = CPLog(__name__)
class Base(TorrentProvider):
urls = {
'test': 'https://www.torrentleech.org/',
'login': 'https://www.torrentleech.org/user/account/login/',
'login_check': 'https://torrentleech.org/user/messages',
'detail': 'https://www.torrentleech.org/torrent/%s',
'search': 'https://www.torrentleech.org/torrents/browse/index/query/%s/categories/%s',
'download': 'https://www.torrentleech.org%s',
}
http_time_between_calls = 1 # Seconds
cat_backup_id = None
def _searchOnTitle(self, title, media, quality, results):
url = self.urls['search'] % self.buildUrl(title, media, quality)
data = self.getHTMLData(url)
if data:
html = BeautifulSoup(data)
try:
result_table = html.find('table', attrs = {'id': 'torrenttable'})
if not result_table:
return
entries = result_table.find_all('tr')
for result in entries[1:]:
link = result.find('td', attrs = {'class': 'name'}).find('a')
url = result.find('td', attrs = {'class': 'quickdownload'}).find('a')
details = result.find('td', attrs = {'class': 'name'}).find('a')
results.append({
'id': link['href'].replace('/torrent/', ''),
'name': six.text_type(link.string),
'url': self.urls['download'] % url['href'],
'detail_url': self.urls['download'] % details['href'],
'size': self.parseSize(result.find_all('td')[4].string),
'seeders': tryInt(result.find('td', attrs = {'class': 'seeders'}).string),
'leechers': tryInt(result.find('td', attrs = {'class': 'leechers'}).string),
})
except:
log.error('Failed parsing %s: %s', (self.getName(), traceback.format_exc()))
def getLoginParams(self):
return {
'username': self.conf('username'),
'password': self.conf('password'),
'remember_me': 'on',
'login': 'submit',
}
def loginSuccess(self, output):
return '/user/account/logout' in output.lower() or 'welcome back' in output.lower()
loginCheckSuccess = loginSuccess
config = [{
'name': 'torrentleech',
'groups': [
{
'tab': 'searcher',
'list': 'torrent_providers',
'name': 'TorrentLeech',
'description': '<a href="http://torrentleech.org">TorrentLeech</a>',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAACHUlEQVR4AZVSO48SYRSdGTCBEMKzILLAWiybkKAGMZRUUJEoDZX7B9zsbuQPYEEjNLTQkYgJDwsoSaxspEBsCITXjjNAIKi8AkzceXgmbHQ1NJ5iMufmO9/9zrmXlCSJ+B8o75J8Pp/NZj0eTzweBy0Wi4PBYD6f12o1r9ebTCZx+22HcrnMsuxms7m6urTZ7LPZDMVYLBZ8ZV3yo8aq9Pq0wzCMTqe77dDv9y8uLyAWBH6xWOyL0K/56fcb+rrPgPZ6PZfLRe1fsl6vCUmGKIqoqNXqdDr9Dbjps9znUV0uTqdTjuPkDoVCIfcuJ4gizjMMm8u9vW+1nr04czqdK56c37CbKY9j2+1WEARZ0Gq1RFHAz2q1qlQqXxoN69HRcDjUarW8ZD6QUigUOnY8uKYH8N1sNkul9yiGw+F6vS4Rxn8EsodEIqHRaOSnq9T7ajQazWQycEIR1AEBYDabSZJyHDucJyegwWBQr9ebTCaKvHd4cCQANUU9evwQ1Ofz4YvUKUI43GE8HouSiFiNRhOowWBIpVLyHITJkuW3PwgAEf3pgIwxF5r+OplMEsk3CPT5szCMnY7EwUdhwUh/CXiej0Qi3idPz89fdrpdbsfBzH7S3Q9K5pP4c0sAKpVKoVAQGO1ut+t0OoFAQHkH2Da/3/+but3uarWK0ZMQoNdyucRutdttmqZxMTzY7XaYxsrgtUjEZrNhkSwWyy/0NCatZumrNQAAAABJRU5ErkJggg==',
'options': [
{
'name': 'enabled',
'type': 'enabler',
'default': False,
},
{
'name': 'username',
'default': '',
},
{
'name': 'password',
'default': '',
'type': 'password',
},
{
'name': 'seed_ratio',
'label': 'Seed ratio',
'type': 'float',
'default': 1,
'description': 'Will not be (re)moved until this seed ratio is met.',
},
{
'name': 'seed_time',
'label': 'Seed time',
'type': 'int',
'default': 40,
'description': 'Will not be (re)moved until this seed time (in hours) is met.',
},
{
'name': 'extra_score',
'advanced': True,
'label': 'Extra Score',
'type': 'int',
'default': 20,
'description': 'Starting score for each release found via this provider.',
}
],
},
],
}]
|
gpl-3.0
|
JamesLinEngineer/RKMC
|
addons/script.module.liveresolver/lib/liveresolver/resolvers/castamp.py
|
2
|
1440
|
# -*- coding: utf-8 -*-
import re,urllib,urlparse,base64
from liveresolver.modules import client,constants
from liveresolver.modules.log_utils import log
def resolve(url):
try:
id = urlparse.parse_qs(urlparse.urlparse(url).query)['c'][0]
try:
referer = urlparse.parse_qs(urlparse.urlparse(url).query)['referer'][0]
except:
referer= url
url = 'http://castamp.com/embed.php?c=%s&vwidth=640&vheight=380'%id
pageUrl=url
result = client.request(url, referer=referer,headers = {'Host':'www.castamp.com'})
result = urllib.unquote(result).replace('unescape(','').replace("'+'",'')
rplcs = re.findall('=(.+?).replace\([\"\'](.+?)[\"\']\s*,\s*[\"\']([^\"\']*)[\"\']',result)
result = re.sub('\/\*[^*]+\*\/','',result)
var = re.compile('var\s(.+?)\s*=\s*[\'\"](.+?)[\'\"]').findall(result)
var_dict = dict(var)
file = re.compile('\'file\'\s*:\s*(.+?),').findall(result)[-1]
file = var_dict[file]
rtmp = re.compile('(rtmp://[^\"\']+)').findall(result)[0]
for r in rplcs:
file = file.replace(r[1],r[2])
url = rtmp + ' playpath=' + file + ' swfUrl=http://p.castamp.com/cplayer.swf' + ' flashver=' + constants.flash_ver() + ' live=true timeout=15 swfVfy=1 pageUrl=' + pageUrl
return url
except:
return
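# Illustrative usage sketch (assumed channel id, not part of the module):
#
#   stream = resolve('http://castamp.com/embed.php?c=somechannel')
#   # returns an 'rtmp://... playpath=... swfUrl=... pageUrl=...' string, or None on failure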
|
gpl-2.0
|
marscher/cython
|
Cython/Build/Tests/TestInline.py
|
20
|
2071
|
import os, tempfile
from Cython.Shadow import inline
from Cython.Build.Inline import safe_type
from Cython.TestUtils import CythonTest
try:
import numpy
has_numpy = True
except:
has_numpy = False
test_kwds = dict(force=True, quiet=True)
global_value = 100
class TestInline(CythonTest):
def setUp(self):
CythonTest.setUp(self)
self.test_kwds = dict(test_kwds)
if os.path.isdir('BUILD'):
lib_dir = os.path.join('BUILD','inline')
else:
lib_dir = tempfile.mkdtemp(prefix='cython_inline_')
self.test_kwds['lib_dir'] = lib_dir
def test_simple(self):
self.assertEquals(inline("return 1+2", **self.test_kwds), 3)
def test_types(self):
self.assertEquals(inline("""
cimport cython
return cython.typeof(a), cython.typeof(b)
""", a=1.0, b=[], **self.test_kwds), ('double', 'list object'))
def test_locals(self):
a = 1
b = 2
self.assertEquals(inline("return a+b", **self.test_kwds), 3)
def test_globals(self):
self.assertEquals(inline("return global_value + 1", **self.test_kwds), global_value + 1)
def test_no_return(self):
self.assertEquals(inline("""
a = 1
cdef double b = 2
cdef c = []
""", **self.test_kwds), dict(a=1, b=2.0, c=[]))
def test_def_node(self):
foo = inline("def foo(x): return x * x", **self.test_kwds)['foo']
self.assertEquals(foo(7), 49)
def test_pure(self):
import cython as cy
b = inline("""
b = cy.declare(float, a)
c = cy.declare(cy.pointer(cy.float), &b)
return b
""", a=3, **self.test_kwds)
self.assertEquals(type(b), float)
if has_numpy:
def test_numpy(self):
import numpy
a = numpy.ndarray((10, 20))
a[0,0] = 10
self.assertEquals(safe_type(a), 'numpy.ndarray[numpy.float64_t, ndim=2]')
self.assertEquals(inline("return a[0,0]", a=a, **self.test_kwds), 10.0)
|
apache-2.0
|
nitinitprof/odoo
|
addons/purchase_requisition/wizard/bid_line_qty.py
|
374
|
1711
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
class bid_line_qty(osv.osv_memory):
_name = "bid.line.qty"
_description = "Change Bid line quantity"
_columns = {
'qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
}
def change_qty(self, cr, uid, ids, context=None):
active_ids = context and context.get('active_ids', [])
data = self.browse(cr, uid, ids, context=context)[0]
self.pool.get('purchase.order.line').write(cr, uid, active_ids, {'quantity_bid': data.qty})
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
danielquinn/spirithunter
|
src/aspects/arbiters/celestial.py
|
1
|
3517
|
import ephem
from .base import Arbiter
from ..models import Facet, Element
class CelestialArbiter(Arbiter):
WEIGHT_DAY = 5
WEIGHT_NIGHT = 5
WEIGHT_MOON_FULL = 5
WEIGHT_MOON_NEW = 5
WEIGHT_MERCURY = 1
WEIGHT_VENUS = 1
WEIGHT_MARS = 1
WEIGHT_JUPITER = 1
WEIGHT_SATURN = 1
WEIGHT_URANUS = 1
WEIGHT_NEPTUNE = 1
def __init__(self, lat, lng):
self.observer = ephem.Observer()
# Using str() is necessary or pyephem flips out and uses crazy values
# for lat/lng, resulting in an AlwaysUp error. Unfortunately, keeping
# str() here results in a DeprecationWarning that's documented in a
# (closed and ignored) bug here:
# https://github.com/brandon-rhodes/pyephem/issues/18
self.observer.lat = str(lat)
self.observer.lon = str(lng)
def get_results(self):
r = {"elements": [], "facets": [], "nationalities": []}
if self._sun_up():
r["elements"].append((Element.ELEMENT_DAY, self.WEIGHT_DAY))
else:
r["elements"].append((Element.ELEMENT_NIGHT, self.WEIGHT_NIGHT))
if self._moon_full():
r["elements"].append(
(Element.ELEMENT_MOON_FULL, self.WEIGHT_MOON_FULL)
)
elif self._moon_new():
r["elements"].append(
(Element.ELEMENT_MOON_NEW, self.WEIGHT_MOON_NEW)
)
if self._mercury_up():
r["facets"].append((Facet.FACET_MERCURY, self.WEIGHT_MERCURY))
if self._venus_up():
r["facets"].append((Facet.FACET_VENUS, self.WEIGHT_VENUS))
if self._mars_up():
r["facets"].append((Facet.FACET_MARS, self.WEIGHT_MARS))
if self._jupiter_up():
r["facets"].append((Facet.FACET_JUPITER, self.WEIGHT_JUPITER))
if self._saturn_up():
r["facets"].append((Facet.FACET_SATURN, self.WEIGHT_SATURN))
if self._uranus_up():
r["facets"].append((Facet.FACET_URANUS, self.WEIGHT_URANUS))
if self._neptune_up():
r["facets"].append((Facet.FACET_NEPTUNE, self.WEIGHT_NEPTUNE))
return r
def _sun_up(self):
return self._check_transit(ephem.Sun)
def _mercury_up(self):
return self._check_transit(ephem.Mercury)
def _venus_up(self):
return self._check_transit(ephem.Venus)
def _mars_up(self):
return self._check_transit(ephem.Mars)
def _jupiter_up(self):
return self._check_transit(ephem.Jupiter)
def _saturn_up(self):
return self._check_transit(ephem.Saturn)
def _uranus_up(self):
return self._check_transit(ephem.Uranus)
def _neptune_up(self):
return self._check_transit(ephem.Neptune)
def _moon_new(self):
if self._check_transit(ephem.Moon):
b = ephem.Moon()
b.compute()
if b.moon_phase < 0.05:
return True
return False
def _moon_full(self):
if self._check_transit(ephem.Moon):
b = ephem.Moon()
b.compute()
if b.moon_phase > 0.95:
return True
return False
def _check_transit(self, body):
"""
Is `body` above the horizon?
"""
body = body()
next_rise = self.observer.next_rising(body).datetime()
next_set = self.observer.next_setting(body).datetime()
return (next_set - next_rise).total_seconds() < 0
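# Illustrative usage sketch (assumed coordinates, not part of the module):
#
#   arbiter = CelestialArbiter(lat=55.95, lng=-3.19)
#   results = arbiter.get_results()
#   # results['elements'] and results['facets'] hold (constant, weight) pairs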
|
agpl-3.0
|
darjeeling/django
|
django/conf/locale/it/formats.py
|
65
|
2012
|
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'd F Y' # 25 Ottobre 2006
TIME_FORMAT = 'H:i' # 14:30
DATETIME_FORMAT = 'l d F Y H:i' # Mercoledì 25 Ottobre 2006 14:30
YEAR_MONTH_FORMAT = 'F Y' # Ottobre 2006
MONTH_DAY_FORMAT = 'j/F' # 25/Ottobre
SHORT_DATE_FORMAT = 'd/m/Y' # 25/12/2009
SHORT_DATETIME_FORMAT = 'd/m/Y H:i' # 25/10/2009 14:30
FIRST_DAY_OF_WEEK = 1 # Lunedì
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d/%m/%Y', '%Y/%m/%d', # '25/10/2006', '2008/10/25'
'%d-%m-%Y', '%Y-%m-%d', # '25-10-2006', '2008-10-25'
'%d-%m-%y', '%d/%m/%y', # '25-10-06', '25/10/06'
]
DATETIME_INPUT_FORMATS = [
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M:%S.%f', # '25/10/2006 14:30:59.000200'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
'%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59'
'%d/%m/%y %H:%M:%S.%f', # '25/10/06 14:30:59.000200'
'%d/%m/%y %H:%M', # '25/10/06 14:30'
'%d/%m/%y', # '25/10/06'
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%d-%m-%Y %H:%M:%S', # '25-10-2006 14:30:59'
'%d-%m-%Y %H:%M:%S.%f', # '25-10-2006 14:30:59.000200'
'%d-%m-%Y %H:%M', # '25-10-2006 14:30'
'%d-%m-%Y', # '25-10-2006'
'%d-%m-%y %H:%M:%S', # '25-10-06 14:30:59'
'%d-%m-%y %H:%M:%S.%f', # '25-10-06 14:30:59.000200'
'%d-%m-%y %H:%M', # '25-10-06 14:30'
'%d-%m-%y', # '25-10-06'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
|
bsd-3-clause
|
codekaki/odoo
|
addons/note/tests/test_note.py
|
427
|
1686
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tests import common
class TestNote(common.TransactionCase):
def test_bug_lp_1156215(self):
"""ensure any users can create new users"""
cr, uid = self.cr, self.uid
IMD = self.registry('ir.model.data')
Users = self.registry('res.users')
_, demo_user = IMD.get_object_reference(cr, uid, 'base', 'user_demo')
_, group_id = IMD.get_object_reference(cr, uid, 'base', 'group_erp_manager')
Users.write(cr, uid, [demo_user], {
'groups_id': [(4, group_id)],
})
# must not fail
Users.create(cr, demo_user, {
'name': 'test bug lp:1156215',
'login': 'lp_1156215',
})
|
agpl-3.0
|
all-of-us/raw-data-repository
|
rdr_service/lib_fhir/fhirclient_4_0_0/models/medicinalproduct_tests.py
|
1
|
3848
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b on 2019-05-07.
# 2019, SMART Health IT.
import os
import io
import unittest
import json
from . import medicinalproduct
from .fhirdate import FHIRDate
class MedicinalProductTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("MedicinalProduct", js["resourceType"])
return medicinalproduct.MedicinalProduct(js)
def testMedicinalProduct1(self):
inst = self.instantiate_from("medicinalproduct-example.json")
self.assertIsNotNone(inst, "Must have instantiated a MedicinalProduct instance")
self.implMedicinalProduct1(inst)
js = inst.as_json()
self.assertEqual("MedicinalProduct", js["resourceType"])
inst2 = medicinalproduct.MedicinalProduct(js)
self.implMedicinalProduct1(inst2)
def implMedicinalProduct1(self, inst):
self.assertEqual(inst.id, "example")
self.assertEqual(inst.identifier[0].system, "http://ema.europa.eu/example/MPID")
self.assertEqual(inst.identifier[0].value, "{mpid}")
self.assertEqual(inst.manufacturingBusinessOperation[0].authorisationReferenceNumber.system, "http://ema.europa.eu/example/manufacturingAuthorisationReferenceNumber")
self.assertEqual(inst.manufacturingBusinessOperation[0].authorisationReferenceNumber.value, "1324TZ")
self.assertEqual(inst.manufacturingBusinessOperation[0].effectiveDate.date, FHIRDate("2013-03-15").date)
self.assertEqual(inst.manufacturingBusinessOperation[0].effectiveDate.as_json(), "2013-03-15")
self.assertEqual(inst.manufacturingBusinessOperation[0].operationType.coding[0].code, "Batchrelease")
self.assertEqual(inst.manufacturingBusinessOperation[0].operationType.coding[0].system, "http://ema.europa.eu/example/manufacturingOperationType")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.name[0].countryLanguage[0].country.coding[0].code, "EU")
self.assertEqual(inst.name[0].countryLanguage[0].country.coding[0].system, "http://ema.europa.eu/example/countryCode")
self.assertEqual(inst.name[0].countryLanguage[0].jurisdiction.coding[0].code, "EU")
self.assertEqual(inst.name[0].countryLanguage[0].jurisdiction.coding[0].system, "http://ema.europa.eu/example/jurisdictionCode")
self.assertEqual(inst.name[0].countryLanguage[0].language.coding[0].code, "EN")
self.assertEqual(inst.name[0].countryLanguage[0].language.coding[0].system, "http://ema.europa.eu/example/languageCode")
self.assertEqual(inst.name[0].namePart[0].part, "Equilidem")
self.assertEqual(inst.name[0].namePart[0].type.code, "INV")
self.assertEqual(inst.name[0].namePart[1].part, "2.5 mg")
self.assertEqual(inst.name[0].namePart[1].type.code, "STR")
self.assertEqual(inst.name[0].namePart[2].part, "film-coated tablets")
self.assertEqual(inst.name[0].namePart[2].type.code, "FRM")
self.assertEqual(inst.name[0].productName, "Equilidem 2.5 mg film-coated tablets")
self.assertEqual(inst.productClassification[0].coding[0].code, "WHOAnatomicalTherapeuticChemicalATCClassificationSystem|B01AF02")
self.assertEqual(inst.productClassification[0].coding[0].system, "http://ema.europa.eu/example/WHOAnatomicalTherapeuticChemicalATCClassificationSystem")
self.assertEqual(inst.text.status, "generated")
|
bsd-3-clause
|
landlab/landlab
|
tests/components/flow_accum/conftest.py
|
3
|
7713
|
import os
import numpy as np
import pytest
from landlab import RasterModelGrid
XX = RasterModelGrid.BAD_INDEX
@pytest.fixture
def dans_grid1():
"""
Create a 5x5 test grid.
This is a sheet flow test.
"""
mg = RasterModelGrid((5, 5), xy_spacing=(10.0, 10.0))
this_dir = os.path.abspath(os.path.dirname(__file__))
infile = os.path.join(this_dir, "test_fr_input.txt")
z = mg.node_x.copy()
A_target = (
np.array(
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[3.0, 3.0, 2.0, 1.0, 0.0],
[3.0, 3.0, 2.0, 1.0, 0.0],
[3.0, 3.0, 2.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
]
).flatten()
* 100.0
)
frcvr_target = np.array(
[
[0, 1, 2, 3, 4],
[5, 5, 6, 7, 9],
[10, 10, 11, 12, 14],
[15, 15, 16, 17, 19],
[20, 21, 22, 23, 24],
]
).flatten()
upids_target = np.array(
[
[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24],
]
).flatten()
links2rcvr_target = np.full(25, XX)
links2rcvr_target[mg.core_nodes] = np.array([9, 10, 11, 18, 19, 20, 27, 28, 29])
Q_target = A_target * 2.0 # only once Q_in is used
steepest_target = np.array(
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 1.0, 1.0, 0.0],
[0.0, 1.0, 1.0, 1.0, 0.0],
[0.0, 1.0, 1.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
]
).flatten()
mg.add_field("topographic__elevation", z, at="node", units="-")
class DansGrid(object):
pass
dans_grid = DansGrid()
dans_grid.mg = mg
dans_grid.z = z
dans_grid.infile = infile
dans_grid.A_target = A_target
dans_grid.frcvr_target = frcvr_target
dans_grid.upids_target = upids_target
dans_grid.Q_target = Q_target
dans_grid.steepest_target = steepest_target
dans_grid.links2rcvr_target = links2rcvr_target
return dans_grid
@pytest.fixture
def internal_closed():
"""
Create a 6x5 test grid, but with two internal nodes closed.
This is a sheet flow test.
"""
mg = RasterModelGrid((6, 5), xy_spacing=(10.0, 10.0))
mg.set_closed_boundaries_at_grid_edges(True, True, False, True)
mg.status_at_node[7] = mg.BC_NODE_IS_CLOSED
mg.status_at_node[16] = mg.BC_NODE_IS_CLOSED
z = mg.node_x.copy()
Q_in = np.full(25, 2.0)
A_target = (
np.array(
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 0.0, 1.0, 0.0],
[6.0, 6.0, 3.0, 1.0, 0.0],
[0.0, 0.0, 2.0, 1.0, 0.0],
[3.0, 3.0, 2.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
]
).flatten()
* 100.0
)
frcvr_target = np.array(
[
[0, 1, 2, 3, 4],
[5, 5, 7, 12, 9],
[10, 10, 11, 12, 14],
[15, 16, 11, 17, 19],
[20, 20, 21, 22, 24],
[25, 26, 27, 28, 29],
]
).flatten()
links2rcvr_target = np.full(mg.number_of_nodes, XX)
links2rcvr_target[mg.core_nodes] = np.array([9, 62, 18, 19, 20, 67, 29, 36, 37, 38])
steepest_target = np.array(
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 1.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 1.0, 1.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
]
).flatten()
steepest_target[np.array([8, 17])] = 1.0 / np.sqrt(2.0)
mg.add_field("topographic__elevation", z, at="node", units="-")
class DansGrid(object):
pass
dans_grid = DansGrid()
dans_grid.mg = mg
dans_grid.z = z
dans_grid.Q_in = Q_in
dans_grid.A_target = A_target
dans_grid.frcvr_target = frcvr_target
dans_grid.steepest_target = steepest_target
dans_grid.links2rcvr_target = links2rcvr_target
return dans_grid
@pytest.fixture
def dans_grid2():
"""
Create a 5x5 test grid.
This tests more complex routing, with diffs between D4 & D8.
"""
mg = RasterModelGrid((5, 5), xy_spacing=(10.0, 10.0))
this_dir = os.path.abspath(os.path.dirname(__file__))
infile = os.path.join(this_dir, "test_fr_input.txt")
z = np.array(
[
[7.0, 7.0, 7.0, 7.0, 7.0],
[7.0, 5.0, 3.2, 6.0, 7.0],
[7.0, 2.0, 3.0, 5.0, 7.0],
[7.0, 1.0, 1.9, 4.0, 7.0],
[7.0, 0.0, 7.0, 7.0, 7.0],
]
).flatten()
A_target_D8 = np.array(
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 100.0, 200.0, 100.0, 0.0],
[0.0, 400.0, 100.0, 100.0, 0.0],
[0.0, 600.0, 300.0, 100.0, 0.0],
[0.0, 900.0, 0.0, 0.0, 0.0],
]
).flatten()
A_target_D4 = np.array(
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 100.0, 200.0, 100.0, 0.0],
[0.0, 200.0, 400.0, 100.0, 0.0],
[0.0, 900.0, 600.0, 100.0, 0.0],
[0.0, 900.0, 0.0, 0.0, 0.0],
]
).flatten()
frcvr_target_D8 = np.array(
[
[0, 1, 2, 3, 4],
[5, 11, 11, 7, 9],
[10, 16, 16, 17, 14],
[15, 21, 21, 17, 19],
[20, 21, 22, 23, 24],
]
).flatten()
frcvr_target_D4 = np.array(
[
[0, 1, 2, 3, 4],
[5, 11, 12, 7, 9],
[10, 16, 17, 12, 14],
[15, 21, 16, 17, 19],
[20, 21, 22, 23, 24],
]
).flatten()
upids_target_D8 = np.array(
[
[0, 1, 2, 3, 4],
[5, 9, 10, 14, 15],
[19, 20, 21, 16, 11],
[6, 7, 8, 12, 17],
[13, 18, 22, 23, 24],
]
).flatten()
upids_target_D4 = np.array(
[
[0, 1, 2, 3, 4],
[5, 9, 10, 14, 15],
[19, 20, 21, 16, 11],
[6, 17, 12, 7, 8],
[13, 18, 22, 23, 24],
]
).flatten()
links2rcvr_target_D8 = np.full(25, XX)
links2rcvr_target_D8[mg.core_nodes] = np.array([14, 51, 11, 23, 59, 61, 32, 67, 29])
links2rcvr_target_D4 = np.full(25, XX)
links2rcvr_target_D4[mg.core_nodes] = np.array([14, 15, 11, 23, 24, 20, 32, 28, 29])
steepest_target_D8 = np.array(
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.3, 0.08485281, 0.28, 0.0],
[0.0, 0.1, 0.14142136, 0.21920310, 0.0],
[0.0, 0.1, 0.13435029, 0.21, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
]
).flatten()
steepest_target_D4 = np.array(
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.3, 0.02, 0.28, 0.0],
[0.0, 0.1, 0.11, 0.2, 0.0],
[0.0, 0.1, 0.09, 0.21, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
]
).flatten()
mg.add_field("topographic__elevation", z, at="node", units="-")
class DansGrid(object):
pass
dans_grid = DansGrid()
dans_grid.mg = mg
dans_grid.z = z
dans_grid.infile = infile
dans_grid.A_target_D8 = A_target_D8
dans_grid.A_target_D4 = A_target_D4
dans_grid.frcvr_target_D8 = frcvr_target_D8
dans_grid.frcvr_target_D4 = frcvr_target_D4
dans_grid.upids_target_D8 = upids_target_D8
dans_grid.upids_target_D4 = upids_target_D4
dans_grid.steepest_target_D8 = steepest_target_D8
dans_grid.steepest_target_D4 = steepest_target_D4
dans_grid.links2rcvr_target_D8 = links2rcvr_target_D8
dans_grid.links2rcvr_target_D4 = links2rcvr_target_D4
return dans_grid
|
mit
|
idlead/scikit-learn
|
examples/linear_model/plot_lasso_lars.py
|
363
|
1080
|
#!/usr/bin/env python
"""
=====================
Lasso path using LARS
=====================
Computes Lasso Path along the regularization parameter using the LARS
algorithm on the diabetes dataset. Each color represents a different
feature of the coefficient vector, and this is displayed as a function
of the regularization parameter.
"""
print(__doc__)
# Author: Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
print("Computing regularization path using the LARS ...")
alphas, _, coefs = linear_model.lars_path(X, y, method='lasso', verbose=True)
xx = np.sum(np.abs(coefs.T), axis=1)
xx /= xx[-1]
plt.plot(xx, coefs.T)
ymin, ymax = plt.ylim()
plt.vlines(xx, ymin, ymax, linestyle='dashed')
plt.xlabel('|coef| / max|coef|')
plt.ylabel('Coefficients')
plt.title('LASSO Path')
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
treejames/viewfinder
|
marketing/tornado/netutil.py
|
32
|
18082
|
#!/usr/bin/env python
#
# Copyright 2011 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Miscellaneous network utility code."""
from __future__ import absolute_import, division, print_function, with_statement
import errno
import os
import re
import socket
import ssl
import stat
from tornado.concurrent import dummy_executor, run_on_executor
from tornado.ioloop import IOLoop
from tornado.platform.auto import set_close_exec
from tornado.util import Configurable
def bind_sockets(port, address=None, family=socket.AF_UNSPEC, backlog=128, flags=None):
"""Creates listening sockets bound to the given port and address.
Returns a list of socket objects (multiple sockets are returned if
the given address maps to multiple IP addresses, which is most common
for mixed IPv4 and IPv6 use).
Address may be either an IP address or hostname. If it's a hostname,
the server will listen on all IP addresses associated with the
name. Address may be an empty string or None to listen on all
available interfaces. Family may be set to either `socket.AF_INET`
or `socket.AF_INET6` to restrict to IPv4 or IPv6 addresses, otherwise
both will be used if available.
The ``backlog`` argument has the same meaning as for
`socket.listen() <socket.socket.listen>`.
``flags`` is a bitmask of AI_* flags to `~socket.getaddrinfo`, like
``socket.AI_PASSIVE | socket.AI_NUMERICHOST``.
"""
sockets = []
if address == "":
address = None
if not socket.has_ipv6 and family == socket.AF_UNSPEC:
# Python can be compiled with --disable-ipv6, which causes
# operations on AF_INET6 sockets to fail, but does not
# automatically exclude those results from getaddrinfo
# results.
# http://bugs.python.org/issue16208
family = socket.AF_INET
if flags is None:
flags = socket.AI_PASSIVE
for res in set(socket.getaddrinfo(address, port, family, socket.SOCK_STREAM,
0, flags)):
af, socktype, proto, canonname, sockaddr = res
try:
sock = socket.socket(af, socktype, proto)
except socket.error as e:
if e.args[0] == errno.EAFNOSUPPORT:
continue
raise
set_close_exec(sock.fileno())
if os.name != 'nt':
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if af == socket.AF_INET6:
# On linux, ipv6 sockets accept ipv4 too by default,
# but this makes it impossible to bind to both
# 0.0.0.0 in ipv4 and :: in ipv6. On other systems,
# separate sockets *must* be used to listen for both ipv4
# and ipv6. For consistency, always disable ipv4 on our
# ipv6 sockets and use a separate ipv4 socket when needed.
#
# Python 2.x on windows doesn't have IPPROTO_IPV6.
if hasattr(socket, "IPPROTO_IPV6"):
sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
sock.setblocking(0)
sock.bind(sockaddr)
sock.listen(backlog)
sockets.append(sock)
return sockets
if hasattr(socket, 'AF_UNIX'):
def bind_unix_socket(file, mode=0o600, backlog=128):
"""Creates a listening unix socket.
If a socket with the given name already exists, it will be deleted.
If any other file with that name exists, an exception will be
raised.
Returns a socket object (not a list of socket objects like
`bind_sockets`)
"""
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
set_close_exec(sock.fileno())
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setblocking(0)
try:
st = os.stat(file)
except OSError as err:
if err.errno != errno.ENOENT:
raise
else:
if stat.S_ISSOCK(st.st_mode):
os.remove(file)
else:
raise ValueError("File %s exists and is not a socket" % file)
sock.bind(file)
os.chmod(file, mode)
sock.listen(backlog)
return sock
def add_accept_handler(sock, callback, io_loop=None):
"""Adds an `.IOLoop` event handler to accept new connections on ``sock``.
When a connection is accepted, ``callback(connection, address)`` will
be run (``connection`` is a socket object, and ``address`` is the
address of the other end of the connection). Note that this signature
is different from the ``callback(fd, events)`` signature used for
`.IOLoop` handlers.
"""
if io_loop is None:
io_loop = IOLoop.current()
def accept_handler(fd, events):
while True:
try:
connection, address = sock.accept()
except socket.error as e:
# EWOULDBLOCK and EAGAIN indicate we have accepted every
# connection that is available.
if e.args[0] in (errno.EWOULDBLOCK, errno.EAGAIN):
return
# ECONNABORTED indicates that there was a connection
# but it was closed while still in the accept queue.
# (observed on FreeBSD).
if e.args[0] == errno.ECONNABORTED:
continue
raise
callback(connection, address)
io_loop.add_handler(sock.fileno(), accept_handler, IOLoop.READ)
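# A hedged usage sketch for bind_sockets()/add_accept_handler() (the port,
# address and callback below are illustrative; a real server would typically
# hand the accepted connection to an IOStream):
#
# def handle_connection(connection, address):
#     print("accepted connection from %s" % (address,))
#     connection.close()
#
# sockets = bind_sockets(8888, address="127.0.0.1")
# for sock in sockets:
#     add_accept_handler(sock, handle_connection)
# IOLoop.current().start()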
def is_valid_ip(ip):
"""Returns true if the given string is a well-formed IP address.
Supports IPv4 and IPv6.
"""
try:
res = socket.getaddrinfo(ip, 0, socket.AF_UNSPEC,
socket.SOCK_STREAM,
0, socket.AI_NUMERICHOST)
return bool(res)
except socket.gaierror as e:
if e.args[0] == socket.EAI_NONAME:
return False
raise
return True
class Resolver(Configurable):
"""Configurable asynchronous DNS resolver interface.
By default, a blocking implementation is used (which simply calls
`socket.getaddrinfo`). An alternative implementation can be
chosen with the `Resolver.configure <.Configurable.configure>`
class method::
Resolver.configure('tornado.netutil.ThreadedResolver')
The implementations of this interface included with Tornado are
* `tornado.netutil.BlockingResolver`
* `tornado.netutil.ThreadedResolver`
* `tornado.netutil.OverrideResolver`
* `tornado.platform.twisted.TwistedResolver`
* `tornado.platform.caresresolver.CaresResolver`
"""
@classmethod
def configurable_base(cls):
return Resolver
@classmethod
def configurable_default(cls):
return BlockingResolver
def resolve(self, host, port, family=socket.AF_UNSPEC, callback=None):
"""Resolves an address.
The ``host`` argument is a string which may be a hostname or a
literal IP address.
Returns a `.Future` whose result is a list of (family,
address) pairs, where address is a tuple suitable to pass to
`socket.connect <socket.socket.connect>` (i.e. a ``(host,
port)`` pair for IPv4; additional fields may be present for
IPv6). If a ``callback`` is passed, it will be run with the
result as an argument when it is complete.
"""
raise NotImplementedError()
def close(self):
"""Closes the `Resolver`, freeing any resources used.
.. versionadded:: 3.1
"""
pass
class ExecutorResolver(Resolver):
"""Resolver implementation using a `concurrent.futures.Executor`.
Use this instead of `ThreadedResolver` when you require additional
control over the executor being used.
The executor will be shut down when the resolver is closed unless
``close_resolver=False``; use this if you want to reuse the same
executor elsewhere.
"""
def initialize(self, io_loop=None, executor=None, close_executor=True):
self.io_loop = io_loop or IOLoop.current()
if executor is not None:
self.executor = executor
self.close_executor = close_executor
else:
self.executor = dummy_executor
self.close_executor = False
def close(self):
if self.close_executor:
self.executor.shutdown()
self.executor = None
@run_on_executor
def resolve(self, host, port, family=socket.AF_UNSPEC):
# On Solaris, getaddrinfo fails if the given port is not found
# in /etc/services and no socket type is given, so we must pass
# one here. The socket type used here doesn't seem to actually
# matter (we discard the one we get back in the results),
# so the addresses we return should still be usable with SOCK_DGRAM.
addrinfo = socket.getaddrinfo(host, port, family, socket.SOCK_STREAM)
results = []
for family, socktype, proto, canonname, address in addrinfo:
results.append((family, address))
return results
class BlockingResolver(ExecutorResolver):
"""Default `Resolver` implementation, using `socket.getaddrinfo`.
The `.IOLoop` will be blocked during the resolution, although the
callback will not be run until the next `.IOLoop` iteration.
"""
def initialize(self, io_loop=None):
super(BlockingResolver, self).initialize(io_loop=io_loop)
class ThreadedResolver(ExecutorResolver):
"""Multithreaded non-blocking `Resolver` implementation.
Requires the `concurrent.futures` package to be installed
(available in the standard library since Python 3.2,
installable with ``pip install futures`` in older versions).
The thread pool size can be configured with::
Resolver.configure('tornado.netutil.ThreadedResolver',
num_threads=10)
.. versionchanged:: 3.1
All ``ThreadedResolvers`` share a single thread pool, whose
size is set by the first one to be created.
"""
_threadpool = None
_threadpool_pid = None
def initialize(self, io_loop=None, num_threads=10):
threadpool = ThreadedResolver._create_threadpool(num_threads)
super(ThreadedResolver, self).initialize(
io_loop=io_loop, executor=threadpool, close_executor=False)
@classmethod
def _create_threadpool(cls, num_threads):
pid = os.getpid()
if cls._threadpool_pid != pid:
# Threads cannot survive after a fork, so if our pid isn't what it
# was when we created the pool then delete it.
cls._threadpool = None
if cls._threadpool is None:
from concurrent.futures import ThreadPoolExecutor
cls._threadpool = ThreadPoolExecutor(num_threads)
cls._threadpool_pid = pid
return cls._threadpool
class OverrideResolver(Resolver):
"""Wraps a resolver with a mapping of overrides.
This can be used to make local DNS changes (e.g. for testing)
without modifying system-wide settings.
The mapping can contain either host strings or host-port pairs.
"""
def initialize(self, resolver, mapping):
self.resolver = resolver
self.mapping = mapping
def close(self):
self.resolver.close()
def resolve(self, host, port, *args, **kwargs):
if (host, port) in self.mapping:
host, port = self.mapping[(host, port)]
elif host in self.mapping:
host = self.mapping[host]
return self.resolver.resolve(host, port, *args, **kwargs)
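# A hedged usage sketch for OverrideResolver (hostnames and ports are
# illustrative); it rewrites the requested host/port before delegating to the
# wrapped resolver:
#
# resolver = OverrideResolver(resolver=BlockingResolver(),
#                             mapping={"example.com": "127.0.0.1",
#                                      ("example.com", 80): ("127.0.0.1", 8080)})
# future = resolver.resolve("example.com", 80)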
# These are the keyword arguments to ssl.wrap_socket that must be translated
# to their SSLContext equivalents (the other arguments are still passed
# to SSLContext.wrap_socket).
_SSL_CONTEXT_KEYWORDS = frozenset(['ssl_version', 'certfile', 'keyfile',
'cert_reqs', 'ca_certs', 'ciphers'])
def ssl_options_to_context(ssl_options):
"""Try to convert an ``ssl_options`` dictionary to an
`~ssl.SSLContext` object.
The ``ssl_options`` dictionary contains keywords to be passed to
`ssl.wrap_socket`. In Python 3.2+, `ssl.SSLContext` objects can
be used instead. This function converts the dict form to its
`~ssl.SSLContext` equivalent, and may be used when a component which
accepts both forms needs to upgrade to the `~ssl.SSLContext` version
to use features like SNI or NPN.
"""
if isinstance(ssl_options, dict):
assert all(k in _SSL_CONTEXT_KEYWORDS for k in ssl_options), ssl_options
if (not hasattr(ssl, 'SSLContext') or
isinstance(ssl_options, ssl.SSLContext)):
return ssl_options
context = ssl.SSLContext(
ssl_options.get('ssl_version', ssl.PROTOCOL_SSLv23))
if 'certfile' in ssl_options:
context.load_cert_chain(ssl_options['certfile'], ssl_options.get('keyfile', None))
if 'cert_reqs' in ssl_options:
context.verify_mode = ssl_options['cert_reqs']
if 'ca_certs' in ssl_options:
context.load_verify_locations(ssl_options['ca_certs'])
if 'ciphers' in ssl_options:
context.set_ciphers(ssl_options['ciphers'])
return context
def ssl_wrap_socket(socket, ssl_options, server_hostname=None, **kwargs):
"""Returns an ``ssl.SSLSocket`` wrapping the given socket.
``ssl_options`` may be either a dictionary (as accepted by
`ssl_options_to_context`) or an `ssl.SSLContext` object.
Additional keyword arguments are passed to ``wrap_socket``
(either the `~ssl.SSLContext` method or the `ssl` module function
as appropriate).
"""
context = ssl_options_to_context(ssl_options)
if hasattr(ssl, 'SSLContext') and isinstance(context, ssl.SSLContext):
if server_hostname is not None and getattr(ssl, 'HAS_SNI'):
# Python doesn't have server-side SNI support so we can't
# really unittest this, but it can be manually tested with
# python3.2 -m tornado.httpclient https://sni.velox.ch
return context.wrap_socket(socket, server_hostname=server_hostname,
**kwargs)
else:
return context.wrap_socket(socket, **kwargs)
else:
return ssl.wrap_socket(socket, **dict(context, **kwargs))
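# A hedged usage sketch for ssl_wrap_socket() with the dict form of
# ``ssl_options`` (the file paths and the plain socket are illustrative):
#
# ssl_options = {"certfile": "/path/to/server.crt",
#                "keyfile": "/path/to/server.key"}
# ssl_sock = ssl_wrap_socket(plain_sock, ssl_options, server_side=True,
#                            do_handshake_on_connect=False)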
if hasattr(ssl, 'match_hostname') and hasattr(ssl, 'CertificateError'): # python 3.2+
ssl_match_hostname = ssl.match_hostname
SSLCertificateError = ssl.CertificateError
else:
# match_hostname was added to the standard library ssl module in python 3.2.
# The following code was backported for older releases and copied from
# https://bitbucket.org/brandon/backports.ssl_match_hostname
class SSLCertificateError(ValueError):
pass
def _dnsname_to_pat(dn, max_wildcards=1):
pats = []
for frag in dn.split(r'.'):
if frag.count('*') > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
# than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise SSLCertificateError(
"too many wildcards in certificate DNS name: " + repr(dn))
if frag == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
else:
# Otherwise, '*' matches any dotless fragment.
frag = re.escape(frag)
pats.append(frag.replace(r'\*', '[^.]*'))
return re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
def ssl_match_hostname(cert, hostname):
"""Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 rules
are mostly followed, but IP addresses are not accepted for *hostname*.
CertificateError is raised on failure. On success, the function
returns nothing.
"""
if not cert:
raise ValueError("empty or no certificate")
dnsnames = []
san = cert.get('subjectAltName', ())
for key, value in san:
if key == 'DNS':
if _dnsname_to_pat(value).match(hostname):
return
dnsnames.append(value)
if not dnsnames:
# The subject is only checked when there is no dNSName entry
# in subjectAltName
for sub in cert.get('subject', ()):
for key, value in sub:
# XXX according to RFC 2818, the most specific Common Name
# must be used.
if key == 'commonName':
if _dnsname_to_pat(value).match(hostname):
return
dnsnames.append(value)
if len(dnsnames) > 1:
raise SSLCertificateError("hostname %r "
"doesn't match either of %s"
% (hostname, ', '.join(map(repr, dnsnames))))
elif len(dnsnames) == 1:
raise SSLCertificateError("hostname %r "
"doesn't match %r"
% (hostname, dnsnames[0]))
else:
raise SSLCertificateError("no appropriate commonName or "
"subjectAltName fields were found")
|
apache-2.0
|
sunqm/pyscf
|
examples/scf/41-hf_with_given_densityfit_ints.py
|
2
|
1697
|
#!/usr/bin/env python
#
# Author: Qiming Sun <[email protected]>
#
'''
Input Cholesky decomposed integrals for SCF module by overwriting the _cderi attribute.
See also:
examples/df/40-precompute_df_ints.py
'''
import tempfile
import h5py
from pyscf import gto, df, scf
mol = gto.M(atom='H 0 0 0; F 0 0 1', basis='ccpvdz')
# Integrals in memory
int3c = df.incore.cholesky_eri(mol, auxbasis='ccpvdz-fit')
# Integrals on disk
ftmp = tempfile.NamedTemporaryFile()
df.outcore.cholesky_eri(mol, ftmp.name, auxbasis='ccpvdz-fit')
fake_mol = gto.M()
fake_mol.nelectron = 10 # Note: you need to define the problem size
#
# Redefine the 2-electron integrals by overwriting mf._cderi with the given
# 3-center integrals. You need to create a density-fitting SCF object; the
# regular SCF object cannot hold the 3-center integrals.
#
mf = scf.density_fit(scf.RHF(fake_mol))
mf.get_hcore = lambda *args: (mol.intor('cint1e_kin_sph') +
mol.intor('cint1e_nuc_sph'))
mf.get_ovlp = lambda *args: mol.intor('cint1e_ovlp_sph')
mf._cderi = int3c
mf.init_guess = '1e' # Initial guess from Hcore
mf.kernel()
#
# Assuming the 3-center integrals are too large to be held in memory, there
# is a hacky way to input the integrals. The h5py dataset can be accessed in
# the same way as the numpy ndarray.
#
with h5py.File(ftmp.name, 'r') as file1:
mf = scf.density_fit(scf.RHF(fake_mol))
mf._cderi = file1['j3c']
mf.get_hcore = lambda *args: (mol.intor('cint1e_kin_sph') +
mol.intor('cint1e_nuc_sph'))
mf.get_ovlp = lambda *args: mol.intor('cint1e_ovlp_sph')
mf.init_guess = '1e' # Initial guess from Hcore
mf.kernel()
|
apache-2.0
|
rperier/linux-rockchip
|
scripts/gdb/linux/tasks.py
|
630
|
2892
|
#
# gdb helper commands and functions for Linux kernel debugging
#
# task & thread tools
#
# Copyright (c) Siemens AG, 2011-2013
#
# Authors:
# Jan Kiszka <[email protected]>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
from linux import utils
task_type = utils.CachedType("struct task_struct")
def task_lists():
task_ptr_type = task_type.get_type().pointer()
init_task = gdb.parse_and_eval("init_task").address
t = g = init_task
while True:
while True:
yield t
t = utils.container_of(t['thread_group']['next'],
task_ptr_type, "thread_group")
if t == g:
break
t = g = utils.container_of(g['tasks']['next'],
task_ptr_type, "tasks")
if t == init_task:
return
def get_task_by_pid(pid):
for task in task_lists():
if int(task['pid']) == pid:
return task
return None
class LxTaskByPidFunc(gdb.Function):
"""Find Linux task by PID and return the task_struct variable.
$lx_task_by_pid(PID): Given PID, iterate over all tasks of the target and
return that task_struct variable which PID matches."""
def __init__(self):
super(LxTaskByPidFunc, self).__init__("lx_task_by_pid")
def invoke(self, pid):
task = get_task_by_pid(pid)
if task:
return task.dereference()
else:
raise gdb.GdbError("No task of PID " + str(pid))
LxTaskByPidFunc()
class LxPs(gdb.Command):
"""Dump Linux tasks."""
def __init__(self):
super(LxPs, self).__init__("lx-ps", gdb.COMMAND_DATA)
def invoke(self, arg, from_tty):
for task in task_lists():
gdb.write("{address} {pid} {comm}\n".format(
address=task,
pid=task["pid"],
comm=task["comm"].string()))
LxPs()
thread_info_type = utils.CachedType("struct thread_info")
ia64_task_size = None
def get_thread_info(task):
thread_info_ptr_type = thread_info_type.get_type().pointer()
if utils.is_target_arch("ia64"):
global ia64_task_size
if ia64_task_size is None:
ia64_task_size = gdb.parse_and_eval("sizeof(struct task_struct)")
thread_info_addr = task.address + ia64_task_size
thread_info = thread_info_addr.cast(thread_info_ptr_type)
else:
thread_info = task['stack'].cast(thread_info_ptr_type)
return thread_info.dereference()
class LxThreadInfoFunc (gdb.Function):
"""Calculate Linux thread_info from task variable.
$lx_thread_info(TASK): Given TASK, return the corresponding thread_info
variable."""
def __init__(self):
super(LxThreadInfoFunc, self).__init__("lx_thread_info")
def invoke(self, task):
return get_thread_info(task)
LxThreadInfoFunc()
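# A hedged sketch of how these helpers might be used from a gdb session
# attached to a vmlinux image (the exact loading mechanism and output depend
# on the kernel build; commands are shown as comments only):
#
# (gdb) lx-ps                                   # address, pid and comm of every task
# (gdb) p $lx_task_by_pid(1)                    # task_struct of PID 1
# (gdb) p $lx_thread_info($lx_task_by_pid(1))   # its thread_info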
|
gpl-2.0
|
archen/django
|
tests/custom_pk/fields.py
|
33
|
1629
|
import random
import string
from django.db import models
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class MyWrapper(object):
def __init__(self, value):
self.value = value
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.value)
def __str__(self):
return self.value
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.value == other.value
return self.value == other
class MyAutoField(six.with_metaclass(models.SubfieldBase, models.CharField)):
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 10
super(MyAutoField, self).__init__(*args, **kwargs)
def pre_save(self, instance, add):
value = getattr(instance, self.attname, None)
if not value:
value = MyWrapper(''.join(random.sample(string.ascii_lowercase, 10)))
setattr(instance, self.attname, value)
return value
def to_python(self, value):
if not value:
return
if not isinstance(value, MyWrapper):
value = MyWrapper(value)
return value
def get_db_prep_save(self, value, connection):
if not value:
return
if isinstance(value, MyWrapper):
return six.text_type(value)
return value
def get_db_prep_value(self, value, connection, prepared=False):
if not value:
return
if isinstance(value, MyWrapper):
return six.text_type(value)
return value
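# A hedged sketch of a model using MyAutoField as its primary key (the model
# below is illustrative, not part of this module):
#
# class Employee(models.Model):
#     code = MyAutoField(primary_key=True, db_index=True)
#
# pre_save() fills the field with a MyWrapper around a random 10-letter string
# when no value was supplied, and get_db_prep_save()/get_db_prep_value()
# serialise the wrapper back to text for the database.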
|
bsd-3-clause
|
Johnetordoff/osf.io
|
osf/migrations/0126_update_social_data_format.py
|
11
|
2570
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-07-25 20:39
from __future__ import unicode_literals
import logging
from tqdm import tqdm
from django.db import migrations, connection
logger = logging.getLogger(__name__)
FIELDS_TO_MIGRATE = [
'github',
'linkedIn',
'twitter'
]
class Migration(migrations.Migration):
def update_social_fields(state, schema):
for field in FIELDS_TO_MIGRATE:
sql = """
UPDATE osf_osfuser
SET social = social || json_build_object(
'{0}', CASE WHEN (osf_osfuser.social ->> '{0}') = '' THEN '[]'
WHEN (osf_osfuser.social ->> '{0}') IS NOT NULL
AND json_typeof(osf_osfuser.social::json -> '{0}') != 'array'
THEN json_build_array(osf_osfuser.social ->> '{0}')
ELSE (osf_osfuser.social -> '{0}')::json
END
)::jsonb
WHERE osf_osfuser.social ? '{0}';
""".format(field)
with connection.cursor() as cursor:
logger.info('Setting social fields for {}...'.format(field))
cursor.execute(sql)
def reset_social_fields(state, schema):
OSFUser = state.get_model('osf', 'osfuser')
users_with_social = OSFUser.objects.filter(social__has_any_keys=FIELDS_TO_MIGRATE)
users_to_update = users_with_social.count()
logger.info('Updating social fields for {} users'.format(users_to_update))
progress_bar = tqdm(total=users_to_update or 100)
users_updated = 0
for user in users_with_social:
old_social = {}
for key, value in user.social.items():
if key in FIELDS_TO_MIGRATE:
if len(value) > 1:
raise ValueError('Current social list field has more than one value, cannot reset to just one value.')
old_social[key] = value[0]
else:
old_social[key] = value
user.social = old_social
user.save()
users_updated += 1
progress_bar.update(users_updated)
progress_bar.close()
logger.info('Updated social field for {} users'.format(users_updated))
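# Illustrative effect of the forward migration on a single user's ``social``
# JSON (values are made up): each migrated key is wrapped in an array unless
# it already is one, and empty strings become empty arrays.
#
# before: {"github": "octocat", "twitter": ["dev"], "linkedIn": ""}
# after:  {"github": ["octocat"], "twitter": ["dev"], "linkedIn": []}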
dependencies = [
('osf', '0125_merge_20180824_1856'),
]
operations = [
migrations.RunPython(update_social_fields, reset_social_fields)
]
|
apache-2.0
|
lepistone/odoo
|
addons/hr_holidays/report/holidays_summary_report.py
|
42
|
10487
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
import time
import openerp
from openerp.osv import fields, osv
from openerp.report.interface import report_rml
from openerp.report.interface import toxml
from openerp.report import report_sxw
from openerp.tools import ustr
from openerp.tools.translate import _
from openerp.tools import to_xml
def lengthmonth(year, month):
if month == 2 and ((year % 4 == 0) and ((year % 100 != 0) or (year % 400 == 0))):
return 29
return [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31][month]
def strToDate(dt):
if dt:
dt_date=datetime.date(int(dt[0:4]),int(dt[5:7]),int(dt[8:10]))
return dt_date
else:
return
def emp_create_xml(self, cr, uid, dept, holiday_type, row_id, empid, name, som, eom):
display={}
if dept==0:
count=0
registry = openerp.registry(cr.dbname)
p_id = registry['hr.holidays'].search(cr, uid, [('employee_id','in',[empid,False]), ('type', '=', 'remove')])
ids_date = registry['hr.holidays'].read(cr, uid, p_id, ['date_from','date_to','holiday_status_id','state'])
for index in range(1,61):
diff=index-1
current=som+datetime.timedelta(diff)
for item in ids_date:
if current >= strToDate(item['date_from']) and current <= strToDate(item['date_to']):
if item['state'] in holiday_type:
display[index]=item['holiday_status_id'][0]
count=count +1
else:
display[index]=' '
break
else:
display[index]=' '
else:
for index in range(1,61):
display[index]=' '
count=''
data_xml=['<info id="%d" number="%d" val="%s" />' % (row_id,x,display[x]) for x in range(1,len(display)+1) ]
# Computing the xml
xml = '''
%s
<employee row="%d" id="%d" name="%s" sum="%s">
</employee>
''' % (data_xml,row_id,dept, ustr(toxml(name)),count)
return xml
class report_custom(report_rml):
def create_xml(self, cr, uid, ids, data, context):
registry = openerp.registry(cr.dbname)
obj_dept = registry['hr.department']
obj_emp = registry['hr.employee']
depts=[]
emp_id={}
rpt_obj = registry['hr.holidays']
rml_obj=report_sxw.rml_parse(cr, uid, rpt_obj._name,context)
cr.execute("SELECT name FROM res_company")
res=cr.fetchone()[0]
date_xml=[]
date_today=time.strftime('%Y-%m-%d %H:%M:%S')
date_xml +=['<res name="%s" today="%s" />' % (to_xml(res),date_today)]
cr.execute("SELECT id, name, color_name FROM hr_holidays_status ORDER BY id")
legend=cr.fetchall()
today=datetime.datetime.today()
first_date=data['form']['date_from']
som = strToDate(first_date)
eom = som+datetime.timedelta(59)
day_diff=eom-som
name = ''
if len(data['form'].get('emp', ())) == 1:
name = obj_emp.read(cr, uid, data['form']['emp'][0], ['name'])['name']
if data['form']['holiday_type']!='both':
type=data['form']['holiday_type']
if data['form']['holiday_type']=='Confirmed':
holiday_type=('confirm')
else:
holiday_type=('validate')
else:
type="Confirmed and Approved"
holiday_type=('confirm','validate')
date_xml.append('<from>%s</from>\n'% (str(rml_obj.formatLang(som.strftime("%Y-%m-%d"),date=True))))
date_xml.append('<to>%s</to>\n' %(str(rml_obj.formatLang(eom.strftime("%Y-%m-%d"),date=True))))
date_xml.append('<type>%s</type>'%(type))
date_xml.append('<name>%s</name>'%(name))
# date_xml=[]
for l in range(0,len(legend)):
date_xml += ['<legend row="%d" id="%d" name="%s" color="%s" />' % (l+1,legend[l][0],_(legend[l][1]),legend[l][2])]
date_xml += ['<date month="%s" year="%d" />' % (ustr(som.strftime('%B')), som.year),'<days>']
cell=1
if day_diff.days>=30:
date_xml += ['<dayy number="%d" name="%s" cell="%d"/>' % (x, _(som.replace(day=x).strftime('%a')),x-som.day+1) for x in range(som.day, lengthmonth(som.year, som.month)+1)]
else:
if day_diff.days>=(lengthmonth(som.year, som.month)-som.day):
date_xml += ['<dayy number="%d" name="%s" cell="%d"/>' % (x, _(som.replace(day=x).strftime('%a')),x-som.day+1) for x in range(som.day, lengthmonth(som.year, som.month)+1)]
else:
date_xml += ['<dayy number="%d" name="%s" cell="%d"/>' % (x, _(som.replace(day=x).strftime('%a')),x-som.day+1) for x in range(som.day, eom.day+1)]
cell=x-som.day+1
day_diff1=day_diff.days-cell+1
width_dict={}
month_dict={}
i=1
j=1
year=som.year
month=som.month
month_dict[j]=som.strftime('%B')
width_dict[j]=cell
while day_diff1>0:
if month+i<=12:
if day_diff1 > lengthmonth(year,i+month): # Compare against the real month length (not 30), otherwise dates like 01-01-2009 cause problems
som1=datetime.date(year,month+i,1)
date_xml += ['<dayy number="%d" name="%s" cell="%d"/>' % (x, _(som1.replace(day=x).strftime('%a')),cell+x) for x in range(1, lengthmonth(year,i+month)+1)]
i=i+1
j=j+1
month_dict[j]=som1.strftime('%B')
cell=cell+x
width_dict[j]=x
else:
som1=datetime.date(year,month+i,1)
date_xml += ['<dayy number="%d" name="%s" cell="%d"/>' % (x, _(som1.replace(day=x).strftime('%a')),cell+x) for x in range(1, eom.day+1)]
i=i+1
j=j+1
month_dict[j]=som1.strftime('%B')
cell=cell+x
width_dict[j]=x
day_diff1=day_diff1-x
else:
years=year+1
year=years
month=0
i=1
if day_diff1>=30:
som1=datetime.date(years,i,1)
date_xml += ['<dayy number="%d" name="%s" cell="%d"/>' % (x, _(som1.replace(day=x).strftime('%a')),cell+x) for x in range(1, lengthmonth(years,i)+1)]
i=i+1
j=j+1
month_dict[j]=som1.strftime('%B')
cell=cell+x
width_dict[j]=x
else:
som1=datetime.date(years,i,1)
i=i+1
j=j+1
month_dict[j]=som1.strftime('%B')
date_xml += ['<dayy number="%d" name="%s" cell="%d"/>' % (x, _(som1.replace(day=x).strftime('%a')),cell+x) for x in range(1, eom.day+1)]
cell=cell+x
width_dict[j]=x
day_diff1=day_diff1-x
date_xml.append('</days>')
date_xml.append('<cols>3.5cm%s,0.4cm</cols>\n' % (',0.4cm' * (60)))
date_xml = ''.join(date_xml)
st='<cols_months>3.5cm'
for m in range(1,len(width_dict)+1):
st+=',' + str(0.4 *width_dict[m])+'cm'
st+=',0.4cm</cols_months>\n'
months_xml =['<months number="%d" name="%s"/>' % (x, _(month_dict[x])) for x in range(1,len(month_dict)+1) ]
months_xml.append(st)
emp_xml=''
row_id=1
if data['model']=='hr.employee':
for id in data['form']['emp']:
items = obj_emp.read(cr, uid, id, ['id','name'])
emp_xml += emp_create_xml(self, cr, uid, 0, holiday_type, row_id, items['id'], items['name'], som, eom)
row_id = row_id +1
elif data['model']=='ir.ui.menu':
for id in data['form']['depts']:
dept = obj_dept.browse(cr, uid, id, context=context)
cr.execute("""SELECT id FROM hr_employee \
WHERE department_id = %s""", (id,))
emp_ids = [x[0] for x in cr.fetchall()]
if emp_ids==[]:
continue
dept_done=0
for item in obj_emp.read(cr, uid, emp_ids, ['id', 'name']):
if dept_done==0:
emp_xml += emp_create_xml(self, cr, uid, 1, holiday_type, row_id, dept.id, dept.name, som, eom)
row_id = row_id +1
dept_done=1
emp_xml += emp_create_xml(self, cr, uid, 0, holiday_type, row_id, item['id'], item['name'], som, eom)
row_id = row_id +1
header_xml = '''
<header>
<date>%s</date>
<company>%s</company>
</header>
''' % (str(rml_obj.formatLang(time.strftime("%Y-%m-%d"),date=True))+' ' + str(time.strftime("%H:%M")),to_xml(registry['res.users'].browse(cr,uid,uid).company_id.name))
# Computing the xml
xml='''<?xml version="1.0" encoding="UTF-8" ?>
<report>
%s
%s
%s
%s
</report>
''' % (header_xml,months_xml,date_xml, ustr(emp_xml))
return xml
report_custom('report.holidays.summary', 'hr.holidays', '', 'addons/hr_holidays/report/holidays_summary.xsl')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
ncos/lisa
|
src/lisa_drive/scripts/venv/lib/python3.5/site-packages/pip-10.0.1-py3.5.egg/pip/_vendor/__init__.py
|
5
|
4841
|
"""
pip._vendor is for vendoring dependencies of pip to prevent needing pip to
depend on something external.
Files inside of pip._vendor should be considered immutable and should only be
updated to versions from upstream.
"""
from __future__ import absolute_import
import glob
import os.path
import sys
# Downstream redistributors which have debundled our dependencies should also
# patch this value to be true. This will trigger the additional patching
# to cause things like "six" to be importable under the pip._vendor namespace.
DEBUNDLED = False
# By default, look in this directory for a bunch of .whl files which we will
# add to the beginning of sys.path before attempting to import anything. This
# is done to support downstream re-distributors like Debian and Fedora who
# wish to create their own Wheels for our dependencies to aid in debundling.
WHEEL_DIR = os.path.abspath(os.path.dirname(__file__))
# Define a small helper function to alias our vendored modules to the real ones
# if the vendored ones do not exist. This idea of this was taken from
# https://github.com/kennethreitz/requests/pull/2567.
def vendored(modulename):
vendored_name = "{0}.{1}".format(__name__, modulename)
try:
__import__(vendored_name, globals(), locals(), level=0)
except ImportError:
try:
__import__(modulename, globals(), locals(), level=0)
except ImportError:
# We can just silently allow import failures to pass here. If we
# got to this point it means that ``import pip._vendor.whatever``
# failed and so did ``import whatever``. Since we're importing this
# upfront in an attempt to alias imports, not erroring here will
# just mean we get a regular import error whenever pip *actually*
# tries to import one of these modules to use it, which actually
# gives us a better error message than we would have otherwise
# gotten.
pass
else:
sys.modules[vendored_name] = sys.modules[modulename]
base, head = vendored_name.rsplit(".", 1)
setattr(sys.modules[base], head, sys.modules[modulename])
# If we're operating in a debundled setup, then we want to go ahead and trigger
# the aliasing of our vendored libraries as well as looking for wheels to add
# to our sys.path. This will cause all of this code to be a no-op typically
# however downstream redistributors can enable it in a consistent way across
# all platforms.
if DEBUNDLED:
# Actually look inside of WHEEL_DIR to find .whl files and add them to the
# front of our sys.path.
sys.path[:] = glob.glob(os.path.join(WHEEL_DIR, "*.whl")) + sys.path
# Actually alias all of our vendored dependencies.
vendored("cachecontrol")
vendored("colorama")
vendored("distlib")
vendored("distro")
vendored("html5lib")
vendored("lockfile")
vendored("six")
vendored("six.moves")
vendored("six.moves.urllib")
vendored("six.moves.urllib.parse")
vendored("packaging")
vendored("packaging.version")
vendored("packaging.specifiers")
vendored("pkg_resources")
vendored("progress")
vendored("pytoml")
vendored("retrying")
vendored("requests")
vendored("requests.packages")
vendored("requests.packages.urllib3")
vendored("requests.packages.urllib3._collections")
vendored("requests.packages.urllib3.connection")
vendored("requests.packages.urllib3.connectionpool")
vendored("requests.packages.urllib3.contrib")
vendored("requests.packages.urllib3.contrib.ntlmpool")
vendored("requests.packages.urllib3.contrib.pyopenssl")
vendored("requests.packages.urllib3.exceptions")
vendored("requests.packages.urllib3.fields")
vendored("requests.packages.urllib3.filepost")
vendored("requests.packages.urllib3.packages")
vendored("requests.packages.urllib3.packages.ordered_dict")
vendored("requests.packages.urllib3.packages.six")
vendored("requests.packages.urllib3.packages.ssl_match_hostname")
vendored("requests.packages.urllib3.packages.ssl_match_hostname."
"_implementation")
vendored("requests.packages.urllib3.poolmanager")
vendored("requests.packages.urllib3.request")
vendored("requests.packages.urllib3.response")
vendored("requests.packages.urllib3.util")
vendored("requests.packages.urllib3.util.connection")
vendored("requests.packages.urllib3.util.request")
vendored("requests.packages.urllib3.util.response")
vendored("requests.packages.urllib3.util.retry")
vendored("requests.packages.urllib3.util.ssl_")
vendored("requests.packages.urllib3.util.timeout")
vendored("requests.packages.urllib3.util.url")
|
mit
|
santisiri/popego
|
envs/ALPHA-POPEGO/lib/python2.5/site-packages/utils-0.2-py2.5.egg/utils/reflection.py
|
4
|
2749
|
# -*- coding: utf-8 -*-
__docformat__='restructuredtext'
import types, os
def pathToObject(path):
if ':' in path:
modulePath,attrs = path.split(':',1)
needsCall = False
if attrs.endswith('()'):
attrs = attrs[:-2]
needsCall = True
attrs = attrs.split(".")
obj = __import__(modulePath, fromlist=['__name__'])
for attr in attrs:
obj = getattr(obj, attr)
if needsCall:
obj = obj()
else:
obj = __import__(path, fromlist=['__name__'])
return obj
def importModule(modulepath):
""" Imports a module given a modulepath """
return __import__(modulepath, fromlist=['__name__'])
def findClasses(module, predicate=lambda name, klass: True):
"""
Finds classes in a module. Returns [(ClassName, Class)]
If ``predicate`` is given, returns classes matching
that predicate. A predicate receives a module attr name and a class.
The ``module`` parameter can be a module or a modulepath
"""
if isinstance(module, str):
module = importModule(module)
return [(name, obj) for name, obj in vars(module).items()
if isClass(obj) and predicate(name, obj)]
def isValidClass(classpath):
"""
Given a ``classpath`` (format: my.module.path:MyClassName), it indicates
if the class exists (and it's a class)
"""
modulepath, classname = classpath.split(':',1)
try:
m = importModule(modulepath)
print "this is %s" % getattr(m, classname)
return isClass(getattr(m, classname))
except ImportError:
return False
def getObject(objpath):
"""
Given an ``objpath`` (format: my.module.path:moduleattr), it returns
the obj corresponding to that path
"""
modulepath, attrname = objpath.split(':',1)
m = importModule(modulepath)
return getattr(m, attrname)
def isClass(obj):
""" Indicates if ``obj`` is a class """
objtype = type(obj)
return objtype is types.ClassType or issubclass(objtype, type)
def findSubModules(module):
"""
Receives a ``module`` and returns the relative names of any direct submodules, if they exist.
``module`` can be a module path
"""
if isinstance(module, str):
module = importModule(module)
if not module.__file__.endswith('__init__.pyc'):
return []
basepath = os.path.abspath(os.path.dirname(module.__file__))
modules = []
for filename in os.listdir(basepath):
if filename != "__init__.py" and filename.endswith('.py'):
modules.append(filename[:-3])
elif os.path.isfile(os.path.join(basepath, filename, '__init__.py')):
modules.append(filename)
return modules
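# A hedged usage sketch (module and attribute names are illustrative):
#
# join_func = pathToObject('os.path:join')          # -> os.path.join
# tmp_dir = pathToObject('tempfile:gettempdir()')   # trailing () triggers a call
# dict_classes = findClasses('collections',
#                            lambda name, klass: name.endswith('Dict'))
# submods = findSubModules('xml')                   # e.g. ['dom', 'sax', ...]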
|
bsd-3-clause
|
CopeX/odoo
|
addons/account/tests/test_reconciliation.py
|
179
|
14166
|
from openerp.tests.common import TransactionCase
import time
class TestReconciliation(TransactionCase):
"""Tests for reconciliation (account.tax)
Checks that reconciling a sale or purchase invoice in a different currency
produces balanced journal items.
"""
def setUp(self):
super(TestReconciliation, self).setUp()
self.account_invoice_model = self.registry('account.invoice')
self.account_invoice_line_model = self.registry('account.invoice.line')
self.acc_bank_stmt_model = self.registry('account.bank.statement')
self.acc_bank_stmt_line_model = self.registry('account.bank.statement.line')
self.res_currency_model = self.registry('res.currency')
self.res_currency_rate_model = self.registry('res.currency.rate')
self.partner_agrolait_id = self.registry("ir.model.data").get_object_reference(self.cr, self.uid, "base", "res_partner_2")[1]
self.currency_swiss_id = self.registry("ir.model.data").get_object_reference(self.cr, self.uid, "base", "CHF")[1]
self.currency_usd_id = self.registry("ir.model.data").get_object_reference(self.cr, self.uid, "base", "USD")[1]
self.account_rcv_id = self.registry("ir.model.data").get_object_reference(self.cr, self.uid, "account", "a_recv")[1]
self.account_fx_income_id = self.registry("ir.model.data").get_object_reference(self.cr, self.uid, "account", "income_fx_income")[1]
self.account_fx_expense_id = self.registry("ir.model.data").get_object_reference(self.cr, self.uid, "account", "income_fx_expense")[1]
self.product_id = self.registry("ir.model.data").get_object_reference(self.cr, self.uid, "product", "product_product_4")[1]
self.bank_journal_usd_id = self.registry("ir.model.data").get_object_reference(self.cr, self.uid, "account", "bank_journal_usd")[1]
self.account_usd_id = self.registry("ir.model.data").get_object_reference(self.cr, self.uid, "account", "usd_bnk")[1]
self.company_id = self.registry("ir.model.data").get_object_reference(self.cr, self.uid, "base", "main_company")[1]
#set expense_currency_exchange_account_id and income_currency_exchange_account_id to the according accounts
self.registry("res.company").write(self.cr, self.uid, [self.company_id], {'expense_currency_exchange_account_id': self.account_fx_expense_id, 'income_currency_exchange_account_id':self.account_fx_income_id})
def test_balanced_customer_invoice(self):
cr, uid = self.cr, self.uid
#we create an invoice in CHF
invoice_id = self.account_invoice_model.create(cr, uid, {'partner_id': self.partner_agrolait_id,
'reference_type': 'none',
'currency_id': self.currency_swiss_id,
'name': 'invoice to client',
'account_id': self.account_rcv_id,
'type': 'out_invoice',
'date_invoice': time.strftime('%Y')+'-07-01', # to use USD rate rateUSDbis
})
self.account_invoice_line_model.create(cr, uid, {'product_id': self.product_id,
'quantity': 1,
'price_unit': 100,
'invoice_id': invoice_id,
'name': 'product that cost 100',})
#validate purchase
self.registry('account.invoice').signal_workflow(cr, uid, [invoice_id], 'invoice_open')
invoice_record = self.account_invoice_model.browse(cr, uid, [invoice_id])
#we pay half of it on a journal with currency in dollar (bank statement)
bank_stmt_id = self.acc_bank_stmt_model.create(cr, uid, {
'journal_id': self.bank_journal_usd_id,
'date': time.strftime('%Y')+'-07-15',
})
bank_stmt_line_id = self.acc_bank_stmt_line_model.create(cr, uid, {'name': 'half payment',
'statement_id': bank_stmt_id,
'partner_id': self.partner_agrolait_id,
'amount': 42,
'amount_currency': 50,
'currency_id': self.currency_swiss_id,
'date': time.strftime('%Y')+'-07-15',})
#reconcile the payment with the invoice
for l in invoice_record.move_id.line_id:
if l.account_id.id == self.account_rcv_id:
line_id = l
break
self.acc_bank_stmt_line_model.process_reconciliation(cr, uid, bank_stmt_line_id, [
{'counterpart_move_line_id': line_id.id, 'credit':50, 'debit':0, 'name': line_id.name,}])
#we check that the line is balanced (bank statement line)
move_line_ids = self.acc_bank_stmt_model.browse(cr,uid,bank_stmt_id).move_line_ids
self.assertEquals(len(move_line_ids), 3)
checked_line = 0
for move_line in move_line_ids:
if move_line.account_id.id == self.account_usd_id:
self.assertEquals(move_line.debit, 27.47)
self.assertEquals(move_line.credit, 0.0)
self.assertEquals(move_line.amount_currency, 42)
self.assertEquals(move_line.currency_id.id, self.currency_usd_id)
checked_line += 1
continue
if move_line.account_id.id == self.account_rcv_id:
self.assertEquals(move_line.debit, 0.0)
self.assertEquals(move_line.credit, 38.21)
self.assertEquals(move_line.amount_currency, -50)
self.assertEquals(move_line.currency_id.id, self.currency_swiss_id)
checked_line += 1
continue
if move_line.account_id.id == self.account_fx_expense_id:
self.assertEquals(move_line.debit, 10.74)
self.assertEquals(move_line.credit, 0.0)
checked_line += 1
continue
self.assertEquals(checked_line, 3)
def test_balanced_supplier_invoice(self):
cr, uid = self.cr, self.uid
#we create a supplier invoice in CHF
invoice_id = self.account_invoice_model.create(cr, uid, {'partner_id': self.partner_agrolait_id,
'reference_type': 'none',
'currency_id': self.currency_swiss_id,
'name': 'invoice to client',
'account_id': self.account_rcv_id,
'type': 'in_invoice',
'date_invoice': time.strftime('%Y')+'-07-01',
})
self.account_invoice_line_model.create(cr, uid, {'product_id': self.product_id,
'quantity': 1,
'price_unit': 100,
'invoice_id': invoice_id,
'name': 'product that cost 100',})
#validate purchase
self.registry('account.invoice').signal_workflow(cr, uid, [invoice_id], 'invoice_open')
invoice_record = self.account_invoice_model.browse(cr, uid, [invoice_id])
#we pay half of it on a journal with currency in dollar (bank statement)
bank_stmt_id = self.acc_bank_stmt_model.create(cr, uid, {
'journal_id': self.bank_journal_usd_id,
'date': time.strftime('%Y')+'-07-15',
})
bank_stmt_line_id = self.acc_bank_stmt_line_model.create(cr, uid, {'name': 'half payment',
'statement_id': bank_stmt_id,
'partner_id': self.partner_agrolait_id,
'amount': -42,
'amount_currency': -50,
'currency_id': self.currency_swiss_id,
'date': time.strftime('%Y')+'-07-15',})
#reconcile the payment with the invoice
for l in invoice_record.move_id.line_id:
if l.account_id.id == self.account_rcv_id:
line_id = l
break
self.acc_bank_stmt_line_model.process_reconciliation(cr, uid, bank_stmt_line_id, [
{'counterpart_move_line_id': line_id.id, 'credit':0, 'debit':50, 'name': line_id.name,}])
#we check that the line is balanced (bank statement line)
move_line_ids = self.acc_bank_stmt_model.browse(cr,uid,bank_stmt_id).move_line_ids
self.assertEquals(len(move_line_ids), 3)
checked_line = 0
for move_line in move_line_ids:
if move_line.account_id.id == self.account_usd_id:
self.assertEquals(move_line.debit, 0.0)
self.assertEquals(move_line.credit, 27.47)
self.assertEquals(move_line.amount_currency, -42)
self.assertEquals(move_line.currency_id.id, self.currency_usd_id)
checked_line += 1
continue
if move_line.account_id.id == self.account_rcv_id:
self.assertEquals(move_line.debit, 38.21)
self.assertEquals(move_line.credit, 0.0)
self.assertEquals(move_line.amount_currency, 50)
self.assertEquals(move_line.currency_id.id, self.currency_swiss_id)
checked_line += 1
continue
if move_line.account_id.id == self.account_fx_income_id:
self.assertEquals(move_line.debit, 0.0)
self.assertEquals(move_line.credit, 10.74)
checked_line += 1
continue
self.assertEquals(checked_line, 3)
def test_balanced_exchanges_gain_loss(self):
# The point of this test is to show that we handle correctly the gain/loss exchanges during reconciliations in foreign currencies.
# For instance, with a company set in EUR, and a USD rate set to 0.033,
# the reconciliation of an invoice of 2.00 USD (60.61 EUR) and a bank statement of two lines of 1.00 USD (30.30 EUR)
# will lead to an exchange loss, that should be handled correctly within the journal items.
cr, uid = self.cr, self.uid
# We update the currency rate of the currency USD in order to force the gain/loss exchanges in next steps
self.res_currency_rate_model.create(cr, uid, {
'name': time.strftime('%Y-%m-%d') + ' 00:00:00',
'currency_id': self.currency_usd_id,
'rate': 0.033,
})
# We create a customer invoice of 2.00 USD
invoice_id = self.account_invoice_model.create(cr, uid, {
'partner_id': self.partner_agrolait_id,
'currency_id': self.currency_usd_id,
'name': 'Foreign invoice with exchange gain',
'account_id': self.account_rcv_id,
'type': 'out_invoice',
'date_invoice': time.strftime('%Y-%m-%d'),
'journal_id': self.bank_journal_usd_id,
'invoice_line': [
(0, 0, {
'name': 'line that will lead to an exchange gain',
'quantity': 1,
'price_unit': 2,
})
]
})
self.registry('account.invoice').signal_workflow(cr, uid, [invoice_id], 'invoice_open')
invoice = self.account_invoice_model.browse(cr, uid, invoice_id)
# We create a bank statement with two lines of 1.00 USD each.
bank_stmt_id = self.acc_bank_stmt_model.create(cr, uid, {
'journal_id': self.bank_journal_usd_id,
'date': time.strftime('%Y-%m-%d'),
'line_ids': [
(0, 0, {
'name': 'half payment',
'partner_id': self.partner_agrolait_id,
'amount': 1.0,
'date': time.strftime('%Y-%m-%d')
}),
(0, 0, {
'name': 'second half payment',
'partner_id': self.partner_agrolait_id,
'amount': 1.0,
'date': time.strftime('%Y-%m-%d')
})
]
})
statement = self.acc_bank_stmt_model.browse(cr, uid, bank_stmt_id)
# We process the reconciliation of the invoice line with the two bank statement lines
line_id = None
for l in invoice.move_id.line_id:
if l.account_id.id == self.account_rcv_id:
line_id = l
break
for statement_line in statement.line_ids:
self.acc_bank_stmt_line_model.process_reconciliation(cr, uid, statement_line.id, [
{'counterpart_move_line_id': line_id.id, 'credit': 1.0, 'debit': 0.0, 'name': line_id.name}
])
# The invoice should be paid, as the payments totally cover its total
self.assertEquals(invoice.state, 'paid', 'The invoice should be paid by now')
reconcile = None
for payment in invoice.payment_ids:
reconcile = payment.reconcile_id
break
# The invoice should be reconciled (entirely, not a partial reconciliation)
self.assertTrue(reconcile, 'The invoice should be totally reconciled')
result = {}
exchange_loss_line = None
for line in reconcile.line_id:
res_account = result.setdefault(line.account_id, {'debit': 0.0, 'credit': 0.0, 'count': 0})
res_account['debit'] = res_account['debit'] + line.debit
res_account['credit'] = res_account['credit'] + line.credit
res_account['count'] += 1
if line.credit == 0.01:
exchange_loss_line = line
# We should be able to find a move line of 0.01 EUR on the Debtors account, being the cent we lost during the currency exchange
self.assertTrue(exchange_loss_line, 'There should be one move line of 0.01 EUR in credit')
# The journal items of the reconciliation should have their debit and credit total equal
# Besides, the total debit and total credit should be 60.61 EUR (2.00 USD)
self.assertEquals(sum([res['debit'] for res in result.values()]), 60.61)
self.assertEquals(sum([res['credit'] for res in result.values()]), 60.61)
counterpart_exchange_loss_line = None
for line in exchange_loss_line.move_id.line_id:
if line.account_id.id == self.account_fx_expense_id:
counterpart_exchange_loss_line = line
# We should be able to find a move line of 0.01 EUR on the Foreign Exchange Loss account
self.assertTrue(counterpart_exchange_loss_line, 'There should be one move line of 0.01 EUR on account "Foreign Exchange Loss"')
|
agpl-3.0
|
kutuhal/oracle-r12-accounting
|
lib/django/contrib/gis/geos/prototypes/coordseq.py
|
103
|
3171
|
from ctypes import POINTER, c_double, c_int, c_uint
from django.contrib.gis.geos.libgeos import CS_PTR, GEOM_PTR
from django.contrib.gis.geos.prototypes.errcheck import (
GEOSException, last_arg_byref,
)
from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc
# ## Error-checking routines specific to coordinate sequences. ##
def check_cs_ptr(result, func, cargs):
"Error checking on routines that return Geometries."
if not result:
raise GEOSException(
'Error encountered checking Coordinate Sequence returned from GEOS '
'C function "%s".' % func.__name__
)
return result
def check_cs_op(result, func, cargs):
"Checks the status code of a coordinate sequence operation."
if result == 0:
raise GEOSException('Could not set value on coordinate sequence')
else:
return result
def check_cs_get(result, func, cargs):
"Checking the coordinate sequence retrieval."
check_cs_op(result, func, cargs)
# Object in by reference, return its value.
return last_arg_byref(cargs)
# ## Coordinate sequence prototype generation functions. ##
def cs_int(func):
"For coordinate sequence routines that return an integer."
func.argtypes = [CS_PTR, POINTER(c_uint)]
func.restype = c_int
func.errcheck = check_cs_get
return func
def cs_operation(func, ordinate=False, get=False):
"For coordinate sequence operations."
if get:
# Get routines get double parameter passed-in by reference.
func.errcheck = check_cs_get
dbl_param = POINTER(c_double)
else:
func.errcheck = check_cs_op
dbl_param = c_double
if ordinate:
# Get/Set ordinate routines have an extra uint parameter.
func.argtypes = [CS_PTR, c_uint, c_uint, dbl_param]
else:
func.argtypes = [CS_PTR, c_uint, dbl_param]
func.restype = c_int
return func
def cs_output(func, argtypes):
"For routines that return a coordinate sequence."
func.argtypes = argtypes
func.restype = CS_PTR
func.errcheck = check_cs_ptr
return func
# ## Coordinate Sequence ctypes prototypes ##
# Coordinate Sequence constructors & cloning.
cs_clone = cs_output(GEOSFunc('GEOSCoordSeq_clone'), [CS_PTR])
create_cs = cs_output(GEOSFunc('GEOSCoordSeq_create'), [c_uint, c_uint])
get_cs = cs_output(GEOSFunc('GEOSGeom_getCoordSeq'), [GEOM_PTR])
# Getting, setting ordinate
cs_getordinate = cs_operation(GEOSFunc('GEOSCoordSeq_getOrdinate'), ordinate=True, get=True)
cs_setordinate = cs_operation(GEOSFunc('GEOSCoordSeq_setOrdinate'), ordinate=True)
# For getting, x, y, z
cs_getx = cs_operation(GEOSFunc('GEOSCoordSeq_getX'), get=True)
cs_gety = cs_operation(GEOSFunc('GEOSCoordSeq_getY'), get=True)
cs_getz = cs_operation(GEOSFunc('GEOSCoordSeq_getZ'), get=True)
# For setting, x, y, z
cs_setx = cs_operation(GEOSFunc('GEOSCoordSeq_setX'))
cs_sety = cs_operation(GEOSFunc('GEOSCoordSeq_setY'))
cs_setz = cs_operation(GEOSFunc('GEOSCoordSeq_setZ'))
# These routines return size & dimensions.
cs_getsize = cs_int(GEOSFunc('GEOSCoordSeq_getSize'))
cs_getdims = cs_int(GEOSFunc('GEOSCoordSeq_getDimensions'))
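# A hedged sketch of exercising these prototypes directly (normally they are
# only called by django.contrib.gis.geos internals); variable names are
# illustrative:
#
# from ctypes import byref, c_double, c_uint
# cs = create_cs(3, 2)                   # sequence of 3 points, 2 dimensions
# cs_setx(cs, 0, 1.5)                    # set x of point 0
# cs_sety(cs, 0, 2.5)                    # set y of point 0
# x = cs_getx(cs, 0, byref(c_double()))  # errcheck returns the by-ref double
# size = cs_getsize(cs, byref(c_uint()))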
|
bsd-3-clause
|
DataDog/moto
|
tests/test_ec2/test_tags.py
|
2
|
8756
|
from __future__ import unicode_literals
import itertools
import boto
from boto.exception import EC2ResponseError
import sure # noqa
from moto import mock_ec2
from nose.tools import assert_raises
@mock_ec2
def test_add_tag():
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd')
instance = reservation.instances[0]
instance.add_tag("a key", "some value")
chain = itertools.chain.from_iterable
existing_instances = list(chain([res.instances for res in conn.get_all_instances()]))
existing_instances.should.have.length_of(1)
existing_instance = existing_instances[0]
existing_instance.tags["a key"].should.equal("some value")
@mock_ec2
def test_remove_tag():
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd')
instance = reservation.instances[0]
instance.add_tag("a key", "some value")
tags = conn.get_all_tags()
tag = tags[0]
tag.name.should.equal("a key")
tag.value.should.equal("some value")
instance.remove_tag("a key")
conn.get_all_tags().should.have.length_of(0)
instance.add_tag("a key", "some value")
conn.get_all_tags().should.have.length_of(1)
instance.remove_tag("a key", "some value")
@mock_ec2
def test_get_all_tags():
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd')
instance = reservation.instances[0]
instance.add_tag("a key", "some value")
tags = conn.get_all_tags()
tag = tags[0]
tag.name.should.equal("a key")
tag.value.should.equal("some value")
@mock_ec2
def test_create_tags():
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd')
instance = reservation.instances[0]
tag_dict = {'a key': 'some value',
'another key': 'some other value',
'blank key': ''}
conn.create_tags(instance.id, tag_dict)
tags = conn.get_all_tags()
set([key for key in tag_dict]).should.equal(set([tag.name for tag in tags]))
set([tag_dict[key] for key in tag_dict]).should.equal(set([tag.value for tag in tags]))
@mock_ec2
def test_tag_limit_exceeded():
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd')
instance = reservation.instances[0]
tag_dict = {'01': '',
'02': '',
'03': '',
'04': '',
'05': '',
'06': '',
'07': '',
'08': '',
'09': '',
'10': '',
'11': ''}
with assert_raises(EC2ResponseError) as cm:
conn.create_tags(instance.id, tag_dict)
cm.exception.code.should.equal('TagLimitExceeded')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
instance.add_tag("a key", "a value")
with assert_raises(EC2ResponseError) as cm:
conn.create_tags(instance.id, tag_dict)
cm.exception.code.should.equal('TagLimitExceeded')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
tags = conn.get_all_tags()
tag = tags[0]
tags.should.have.length_of(1)
tag.name.should.equal("a key")
tag.value.should.equal("a value")
@mock_ec2
def test_invalid_parameter_tag_null():
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd')
instance = reservation.instances[0]
with assert_raises(EC2ResponseError) as cm:
instance.add_tag("a key", None)
cm.exception.code.should.equal('InvalidParameterValue')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@mock_ec2
def test_invalid_id():
conn = boto.connect_ec2('the_key', 'the_secret')
with assert_raises(EC2ResponseError) as cm:
conn.create_tags('ami-blah', {'key': 'tag'})
cm.exception.code.should.equal('InvalidID')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
with assert_raises(EC2ResponseError) as cm:
conn.create_tags('blah-blah', {'key': 'tag'})
cm.exception.code.should.equal('InvalidID')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@mock_ec2
def test_get_all_tags_resource_id_filter():
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd')
instance = reservation.instances[0]
instance.add_tag("an instance key", "some value")
image_id = conn.create_image(instance.id, "test-ami", "this is a test ami")
image = conn.get_image(image_id)
image.add_tag("an image key", "some value")
tags = conn.get_all_tags(filters={'resource-id': instance.id})
tag = tags[0]
tags.should.have.length_of(1)
tag.res_id.should.equal(instance.id)
tag.res_type.should.equal('instance')
tag.name.should.equal("an instance key")
tag.value.should.equal("some value")
tags = conn.get_all_tags(filters={'resource-id': image_id})
tag = tags[0]
tags.should.have.length_of(1)
tag.res_id.should.equal(image_id)
tag.res_type.should.equal('image')
tag.name.should.equal("an image key")
tag.value.should.equal("some value")
@mock_ec2
def test_get_all_tags_resource_type_filter():
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd')
instance = reservation.instances[0]
instance.add_tag("an instance key", "some value")
image_id = conn.create_image(instance.id, "test-ami", "this is a test ami")
image = conn.get_image(image_id)
image.add_tag("an image key", "some value")
tags = conn.get_all_tags(filters={'resource-type': 'instance'})
tag = tags[0]
tags.should.have.length_of(1)
tag.res_id.should.equal(instance.id)
tag.res_type.should.equal('instance')
tag.name.should.equal("an instance key")
tag.value.should.equal("some value")
tags = conn.get_all_tags(filters={'resource-type': 'image'})
tag = tags[0]
tags.should.have.length_of(1)
tag.res_id.should.equal(image_id)
tag.res_type.should.equal('image')
tag.name.should.equal("an image key")
tag.value.should.equal("some value")
@mock_ec2
def test_get_all_tags_key_filter():
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd')
instance = reservation.instances[0]
instance.add_tag("an instance key", "some value")
image_id = conn.create_image(instance.id, "test-ami", "this is a test ami")
image = conn.get_image(image_id)
image.add_tag("an image key", "some value")
tags = conn.get_all_tags(filters={'key': 'an instance key'})
tag = tags[0]
tags.should.have.length_of(1)
tag.res_id.should.equal(instance.id)
tag.res_type.should.equal('instance')
tag.name.should.equal("an instance key")
tag.value.should.equal("some value")
@mock_ec2
def test_get_all_tags_value_filter():
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd')
instance = reservation.instances[0]
instance.add_tag("an instance key", "some value")
reservation_b = conn.run_instances('ami-1234abcd')
instance_b = reservation_b.instances[0]
instance_b.add_tag("an instance key", "some other value")
reservation_c = conn.run_instances('ami-1234abcd')
instance_c = reservation_c.instances[0]
instance_c.add_tag("an instance key", "other value*")
reservation_d = conn.run_instances('ami-1234abcd')
instance_d = reservation_d.instances[0]
instance_d.add_tag("an instance key", "other value**")
reservation_e = conn.run_instances('ami-1234abcd')
instance_e = reservation_e.instances[0]
instance_e.add_tag("an instance key", "other value*?")
image_id = conn.create_image(instance.id, "test-ami", "this is a test ami")
image = conn.get_image(image_id)
image.add_tag("an image key", "some value")
tags = conn.get_all_tags(filters={'value': 'some value'})
tags.should.have.length_of(2)
tags = conn.get_all_tags(filters={'value': 'some*value'})
tags.should.have.length_of(3)
tags = conn.get_all_tags(filters={'value': '*some*value'})
tags.should.have.length_of(3)
tags = conn.get_all_tags(filters={'value': '*some*value*'})
tags.should.have.length_of(3)
tags = conn.get_all_tags(filters={'value': '*value\*'})
tags.should.have.length_of(1)
tags = conn.get_all_tags(filters={'value': '*value\*\*'})
tags.should.have.length_of(1)
tags = conn.get_all_tags(filters={'value': '*value\*\?'})
tags.should.have.length_of(1)
|
apache-2.0
|
vecnet/vnetsource
|
simulation_models/model_id.py
|
2
|
1655
|
"""
The ID constants for all known simulation models.
Example::
from simulation_models import model_id
def my_function(sim_model, ...):
if sim_model == model_id.OPEN_MALARIA:
# do something special for OpenMalaria
...
else:
# handle all other simulation models in the same way
...
"""
# This module has no import statements, so it can be safely used in a Django project settings file.
# Short strings are used to facilitate debugging
EMOD = 'EMOD' #: EMOD
OPEN_MALARIA = 'OM' #: OpenMalaria http://code.google.com/p/openmalaria/wiki/Start
ALL_KNOWN = (EMOD, OPEN_MALARIA) #: List of all known IDs.
_set_of_all_known = set(ALL_KNOWN)
MAX_LENGTH = 10 #: For use when storing IDs in a database field, e.g. CharField(max_length=model_id.MAX_LENGTH)
def is_valid(mod_id):
"""
Is an ID a known value?
:param str mod_id: The model ID to check
:returns: True or False
:raises: TypeError if mod_id is not a string.
"""
if not isinstance(mod_id, basestring):
raise TypeError('Expected string or unicode, received %s' % str(type(mod_id)))
return mod_id in _set_of_all_known
def parse(text):
"""
Parse a model ID from a text string.
    Handles both unicode and ASCII strings.
Leading and trailing whitespace is ignored.
Letter case is also ignored.
:param str text: The string with a model ID.
:returns: a model ID or None if the string isn't a valid ID.
"""
text_lower = text.strip().lower()
for id_ in ALL_KNOWN:
if text_lower == id_.lower():
return id_
return None
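# A minimal usage sketch (assumed, not part of the original module) of how
# parse() and is_valid() behave for the IDs defined above; the sample inputs
# are illustrative only.
if __name__ == '__main__':
    assert parse('  emod  ') == EMOD      # whitespace and letter case are ignored
    assert parse('not-a-model') is None   # unknown text yields None
    assert is_valid(OPEN_MALARIA)         # 'OM' is a known ID
    assert not is_valid('XYZ')            # unknown strings are rejected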
|
mpl-2.0
|
wolverineav/horizon
|
openstack_dashboard/test/integration_tests/regions/baseregion.py
|
13
|
2649
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack_dashboard.test.integration_tests import basewebobject
class BaseRegion(basewebobject.BaseWebObject):
"""Base class for region module
* there is necessity to override some basic methods for obtaining elements
as in content of regions it is required to do relative searches
* self.driver cannot be easily replaced with self.src_elem because that
would result in functionality loss, self.driver is WebDriver and
src_elem is WebElement its usage is different.
* this does not mean that self.src_elem cannot be self.driver
"""
_default_src_locator = None
# private methods
def __init__(self, driver, conf, src_elem=None):
super(BaseRegion, self).__init__(driver, conf)
if self._default_src_locator:
root = src_elem or driver
src_elem = root.find_element(*self._default_src_locator)
self.src_elem = src_elem or driver
        # Maps names of dynamic properties to the regions or web elements
        # they resolve to; see __getattr__ below.
self._dynamic_properties = {}
def __getattr__(self, name):
"""It is not possible to create property bounded just to object
and not class at runtime, therefore it is necessary to
override __getattr__ and make fake 'properties' by storing them in
the protected attribute _dynamic_attributes and returning result
of the method associated with the specified attribute.
This way the feeling of having regions accessed as 'properties'
is created, which is one of the requirement of page object pattern.
"""
try:
return self._dynamic_properties[name]
except KeyError:
msg = "'{0}' object has no attribute '{1}'"
raise AttributeError(msg.format(type(self).__name__, name))
def _get_element(self, *locator):
return self.src_elem.find_element(*locator)
def _get_elements(self, *locator):
return self.src_elem.find_elements(*locator)
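# A minimal sketch, not part of the original module, of how a concrete region
# subclass typically uses _default_src_locator and _dynamic_properties;
# ExampleFormRegion, its locators and the 'submit_button' name are
# hypothetical and exist only to illustrate the mechanism.
from selenium.webdriver.common.by import By
class ExampleFormRegion(BaseRegion):
    _default_src_locator = (By.CSS_SELECTOR, 'form')
    _submit_locator = (By.CSS_SELECTOR, 'button[type="submit"]')
    def __init__(self, driver, conf, src_elem=None):
        super(ExampleFormRegion, self).__init__(driver, conf, src_elem)
        # the stored element is returned directly by BaseRegion.__getattr__,
        # so callers can write `region.submit_button.click()`
        self._dynamic_properties['submit_button'] = self._get_element(
            *self._submit_locator)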
|
apache-2.0
|
EraYaN/CouchPotatoServer
|
libs/rtorrent/file.py
|
174
|
3566
|
# Copyright (c) 2013 Chris Lucas, <[email protected]>
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# from rtorrent.rpc import Method
import rtorrent.rpc
from rtorrent.common import safe_repr
Method = rtorrent.rpc.Method
class File:
"""Represents an individual file within a L{Torrent} instance."""
def __init__(self, _rt_obj, info_hash, index, **kwargs):
self._rt_obj = _rt_obj
self.info_hash = info_hash # : info hash for the torrent the file is associated with
self.index = index # : The position of the file within the file list
for k in kwargs.keys():
setattr(self, k, kwargs.get(k, None))
self.rpc_id = "{0}:f{1}".format(
self.info_hash, self.index) # : unique id to pass to rTorrent
def update(self):
"""Refresh file data
@note: All fields are stored as attributes to self.
@return: None
"""
multicall = rtorrent.rpc.Multicall(self)
retriever_methods = [m for m in methods
if m.is_retriever() and m.is_available(self._rt_obj)]
for method in retriever_methods:
multicall.add(method, self.rpc_id)
multicall.call()
def __repr__(self):
return safe_repr("File(index={0} path=\"{1}\")", self.index, self.path)
methods = [
# RETRIEVERS
Method(File, 'get_last_touched', 'f.get_last_touched'),
Method(File, 'get_range_second', 'f.get_range_second'),
Method(File, 'get_size_bytes', 'f.get_size_bytes'),
Method(File, 'get_priority', 'f.get_priority'),
Method(File, 'get_match_depth_next', 'f.get_match_depth_next'),
Method(File, 'is_resize_queued', 'f.is_resize_queued',
boolean=True,
),
Method(File, 'get_range_first', 'f.get_range_first'),
Method(File, 'get_match_depth_prev', 'f.get_match_depth_prev'),
Method(File, 'get_path', 'f.get_path'),
Method(File, 'get_completed_chunks', 'f.get_completed_chunks'),
Method(File, 'get_path_components', 'f.get_path_components'),
Method(File, 'is_created', 'f.is_created',
boolean=True,
),
Method(File, 'is_open', 'f.is_open',
boolean=True,
),
Method(File, 'get_size_chunks', 'f.get_size_chunks'),
Method(File, 'get_offset', 'f.get_offset'),
Method(File, 'get_frozen_path', 'f.get_frozen_path'),
Method(File, 'get_path_depth', 'f.get_path_depth'),
Method(File, 'is_create_queued', 'f.is_create_queued',
boolean=True,
),
# MODIFIERS
]
|
gpl-3.0
|
sanjeevtripurari/hue
|
desktop/core/ext-py/django-openid-auth-0.5/django_openid_auth/urls.py
|
44
|
1696
|
# django-openid-auth - OpenID integration for django.contrib.auth
#
# Copyright (C) 2007 Simon Willison
# Copyright (C) 2008-2013 Canonical Ltd.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from django.conf.urls.defaults import *
urlpatterns = patterns('django_openid_auth.views',
url(r'^login/$', 'login_begin', name='openid-login'),
url(r'^complete/$', 'login_complete', name='openid-complete'),
url(r'^logo.gif$', 'logo', name='openid-logo'),
)
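# A minimal sketch (not part of this package) of how these patterns are
# typically mounted from a project's ROOT_URLCONF; the '^openid/' prefix is
# an illustrative choice, not a requirement:
#
#     urlpatterns = patterns('',
#         (r'^openid/', include('django_openid_auth.urls')),
#     )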
|
apache-2.0
|
andela-ooladayo/django
|
django/contrib/sessions/middleware.py
|
104
|
2642
|
import time
from importlib import import_module
from django.conf import settings
from django.utils.cache import patch_vary_headers
from django.utils.http import cookie_date
class SessionMiddleware(object):
def __init__(self):
engine = import_module(settings.SESSION_ENGINE)
self.SessionStore = engine.SessionStore
def process_request(self, request):
session_key = request.COOKIES.get(settings.SESSION_COOKIE_NAME)
request.session = self.SessionStore(session_key)
def process_response(self, request, response):
"""
If request.session was modified, or if the configuration is to save the
session every time, save the changes and set a session cookie or delete
the session cookie if the session has been emptied.
"""
try:
accessed = request.session.accessed
modified = request.session.modified
empty = request.session.is_empty()
except AttributeError:
pass
else:
# First check if we need to delete this cookie.
# The session should be deleted only if the session is entirely empty
if settings.SESSION_COOKIE_NAME in request.COOKIES and empty:
response.delete_cookie(settings.SESSION_COOKIE_NAME,
domain=settings.SESSION_COOKIE_DOMAIN)
else:
if accessed:
patch_vary_headers(response, ('Cookie',))
if modified or settings.SESSION_SAVE_EVERY_REQUEST:
if request.session.get_expire_at_browser_close():
max_age = None
expires = None
else:
max_age = request.session.get_expiry_age()
expires_time = time.time() + max_age
expires = cookie_date(expires_time)
# Save the session data and refresh the client cookie.
# Skip session save for 500 responses, refs #3881.
if response.status_code != 500:
request.session.save()
response.set_cookie(settings.SESSION_COOKIE_NAME,
request.session.session_key, max_age=max_age,
expires=expires, domain=settings.SESSION_COOKIE_DOMAIN,
path=settings.SESSION_COOKIE_PATH,
secure=settings.SESSION_COOKIE_SECURE or None,
httponly=settings.SESSION_COOKIE_HTTPONLY or None)
return response
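# A minimal sketch (illustrative, not part of this module) of the settings the
# middleware relies on; the values shown are common defaults, not project
# recommendations:
#
#     MIDDLEWARE_CLASSES = (
#         'django.contrib.sessions.middleware.SessionMiddleware',
#         # ...
#     )
#     SESSION_ENGINE = 'django.contrib.sessions.backends.db'   # used in __init__
#     SESSION_SAVE_EVERY_REQUEST = False  # save only when the session was modified
#     SESSION_COOKIE_AGE = 1209600        # feeds get_expiry_age() above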
|
bsd-3-clause
|
40223149/2015cd_midterm
|
static/Brython3.1.1-20150328-091302/Lib/xml/etree/ElementPath.py
|
785
|
9477
|
#
# ElementTree
# $Id: ElementPath.py 3375 2008-02-13 08:05:08Z fredrik $
#
# limited xpath support for element trees
#
# history:
# 2003-05-23 fl created
# 2003-05-28 fl added support for // etc
# 2003-08-27 fl fixed parsing of periods in element names
# 2007-09-10 fl new selection engine
# 2007-09-12 fl fixed parent selector
# 2007-09-13 fl added iterfind; changed findall to return a list
# 2007-11-30 fl added namespaces support
# 2009-10-30 fl added child element value filter
#
# Copyright (c) 2003-2009 by Fredrik Lundh. All rights reserved.
#
# [email protected]
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2009 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/psf/license for licensing details.
##
# Implementation module for XPath support. There's usually no reason
# to import this module directly; the <b>ElementTree</b> does this for
# you, if needed.
##
import re
xpath_tokenizer_re = re.compile(
"("
"'[^']*'|\"[^\"]*\"|"
"::|"
"//?|"
"\.\.|"
"\(\)|"
"[/.*:\[\]\(\)@=])|"
"((?:\{[^}]+\})?[^/\[\]\(\)@=\s]+)|"
"\s+"
)
def xpath_tokenizer(pattern, namespaces=None):
for token in xpath_tokenizer_re.findall(pattern):
tag = token[1]
if tag and tag[0] != "{" and ":" in tag:
try:
prefix, uri = tag.split(":", 1)
if not namespaces:
raise KeyError
yield token[0], "{%s}%s" % (namespaces[prefix], uri)
except KeyError:
raise SyntaxError("prefix %r not found in prefix map" % prefix)
else:
yield token
def get_parent_map(context):
parent_map = context.parent_map
if parent_map is None:
context.parent_map = parent_map = {}
for p in context.root.iter():
for e in p:
parent_map[e] = p
return parent_map
def prepare_child(next, token):
tag = token[1]
def select(context, result):
for elem in result:
for e in elem:
if e.tag == tag:
yield e
return select
def prepare_star(next, token):
def select(context, result):
for elem in result:
for e in elem:
yield e
return select
def prepare_self(next, token):
def select(context, result):
for elem in result:
yield elem
return select
def prepare_descendant(next, token):
token = next()
if token[0] == "*":
tag = "*"
elif not token[0]:
tag = token[1]
else:
raise SyntaxError("invalid descendant")
def select(context, result):
for elem in result:
for e in elem.iter(tag):
if e is not elem:
yield e
return select
def prepare_parent(next, token):
def select(context, result):
# FIXME: raise error if .. is applied at toplevel?
parent_map = get_parent_map(context)
result_map = {}
for elem in result:
if elem in parent_map:
parent = parent_map[elem]
if parent not in result_map:
result_map[parent] = None
yield parent
return select
def prepare_predicate(next, token):
# FIXME: replace with real parser!!! refs:
# http://effbot.org/zone/simple-iterator-parser.htm
# http://javascript.crockford.com/tdop/tdop.html
signature = []
predicate = []
while 1:
token = next()
if token[0] == "]":
break
if token[0] and token[0][:1] in "'\"":
token = "'", token[0][1:-1]
signature.append(token[0] or "-")
predicate.append(token[1])
signature = "".join(signature)
# use signature to determine predicate type
if signature == "@-":
# [@attribute] predicate
key = predicate[1]
def select(context, result):
for elem in result:
if elem.get(key) is not None:
yield elem
return select
if signature == "@-='":
# [@attribute='value']
key = predicate[1]
value = predicate[-1]
def select(context, result):
for elem in result:
if elem.get(key) == value:
yield elem
return select
if signature == "-" and not re.match("\d+$", predicate[0]):
# [tag]
tag = predicate[0]
def select(context, result):
for elem in result:
if elem.find(tag) is not None:
yield elem
return select
if signature == "-='" and not re.match("\d+$", predicate[0]):
# [tag='value']
tag = predicate[0]
value = predicate[-1]
def select(context, result):
for elem in result:
for e in elem.findall(tag):
if "".join(e.itertext()) == value:
yield elem
break
return select
if signature == "-" or signature == "-()" or signature == "-()-":
# [index] or [last()] or [last()-index]
if signature == "-":
index = int(predicate[0]) - 1
else:
if predicate[0] != "last":
raise SyntaxError("unsupported function")
if signature == "-()-":
try:
index = int(predicate[2]) - 1
except ValueError:
raise SyntaxError("unsupported expression")
else:
index = -1
def select(context, result):
parent_map = get_parent_map(context)
for elem in result:
try:
parent = parent_map[elem]
# FIXME: what if the selector is "*" ?
elems = list(parent.findall(elem.tag))
if elems[index] is elem:
yield elem
except (IndexError, KeyError):
pass
return select
raise SyntaxError("invalid predicate")
ops = {
"": prepare_child,
"*": prepare_star,
".": prepare_self,
"..": prepare_parent,
"//": prepare_descendant,
"[": prepare_predicate,
}
_cache = {}
class _SelectorContext:
parent_map = None
def __init__(self, root):
self.root = root
# --------------------------------------------------------------------
##
# Generate all matching objects.
def iterfind(elem, path, namespaces=None):
# compile selector pattern
if path[-1:] == "/":
path = path + "*" # implicit all (FIXME: keep this?)
try:
selector = _cache[path]
except KeyError:
if len(_cache) > 100:
_cache.clear()
if path[:1] == "/":
raise SyntaxError("cannot use absolute path on element")
next = iter(xpath_tokenizer(path, namespaces)).__next__
token = next()
selector = []
while 1:
try:
selector.append(ops[token[0]](next, token))
except StopIteration:
raise SyntaxError("invalid path")
try:
token = next()
if token[0] == "/":
token = next()
except StopIteration:
break
_cache[path] = selector
# execute selector pattern
result = [elem]
context = _SelectorContext(elem)
for select in selector:
result = select(context, result)
return result
##
# Find first matching object.
def find(elem, path, namespaces=None):
try:
return next(iterfind(elem, path, namespaces))
except StopIteration:
return None
##
# Find all matching objects.
def findall(elem, path, namespaces=None):
return list(iterfind(elem, path, namespaces))
##
# Find text for first matching object.
def findtext(elem, path, default=None, namespaces=None):
try:
elem = next(iterfind(elem, path, namespaces))
return elem.text or ""
except StopIteration:
return default
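# A minimal usage sketch (assumed, not part of the original module) exercising
# the XPath subset implemented above through xml.etree.ElementTree; the sample
# document is illustrative only.
if __name__ == '__main__':
    from xml.etree import ElementTree as ET
    root = ET.fromstring(
        "<doc><item name='a'>1</item><item name='b'>2</item></doc>")
    assert findtext(root, "item") == "1"                      # first child match
    assert [e.get("name") for e in findall(root, ".//item")] == ["a", "b"]
    assert find(root, "item[@name='b']").text == "2"          # attribute predicate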
|
gpl-3.0
|
polyaxon/polyaxon
|
sdks/python/http_client/v1/polyaxon_sdk/models/v1_preset.py
|
1
|
8969
|
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Polyaxon SDKs and REST API specification.
Polyaxon SDKs and REST API specification. # noqa: E501
The version of the OpenAPI document: 1.10.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from polyaxon_sdk.configuration import Configuration
class V1Preset(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'uuid': 'str',
'name': 'str',
'description': 'str',
'tags': 'list[str]',
'created_at': 'datetime',
'updated_at': 'datetime',
'frozen': 'bool',
'live_state': 'int',
'content': 'str'
}
attribute_map = {
'uuid': 'uuid',
'name': 'name',
'description': 'description',
'tags': 'tags',
'created_at': 'created_at',
'updated_at': 'updated_at',
'frozen': 'frozen',
'live_state': 'live_state',
'content': 'content'
}
def __init__(self, uuid=None, name=None, description=None, tags=None, created_at=None, updated_at=None, frozen=None, live_state=None, content=None, local_vars_configuration=None): # noqa: E501
"""V1Preset - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._uuid = None
self._name = None
self._description = None
self._tags = None
self._created_at = None
self._updated_at = None
self._frozen = None
self._live_state = None
self._content = None
self.discriminator = None
if uuid is not None:
self.uuid = uuid
if name is not None:
self.name = name
if description is not None:
self.description = description
if tags is not None:
self.tags = tags
if created_at is not None:
self.created_at = created_at
if updated_at is not None:
self.updated_at = updated_at
if frozen is not None:
self.frozen = frozen
if live_state is not None:
self.live_state = live_state
if content is not None:
self.content = content
@property
def uuid(self):
"""Gets the uuid of this V1Preset. # noqa: E501
:return: The uuid of this V1Preset. # noqa: E501
:rtype: str
"""
return self._uuid
@uuid.setter
def uuid(self, uuid):
"""Sets the uuid of this V1Preset.
:param uuid: The uuid of this V1Preset. # noqa: E501
:type: str
"""
self._uuid = uuid
@property
def name(self):
"""Gets the name of this V1Preset. # noqa: E501
:return: The name of this V1Preset. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1Preset.
:param name: The name of this V1Preset. # noqa: E501
:type: str
"""
self._name = name
@property
def description(self):
"""Gets the description of this V1Preset. # noqa: E501
:return: The description of this V1Preset. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this V1Preset.
:param description: The description of this V1Preset. # noqa: E501
:type: str
"""
self._description = description
@property
def tags(self):
"""Gets the tags of this V1Preset. # noqa: E501
:return: The tags of this V1Preset. # noqa: E501
:rtype: list[str]
"""
return self._tags
@tags.setter
def tags(self, tags):
"""Sets the tags of this V1Preset.
:param tags: The tags of this V1Preset. # noqa: E501
:type: list[str]
"""
self._tags = tags
@property
def created_at(self):
"""Gets the created_at of this V1Preset. # noqa: E501
:return: The created_at of this V1Preset. # noqa: E501
:rtype: datetime
"""
return self._created_at
@created_at.setter
def created_at(self, created_at):
"""Sets the created_at of this V1Preset.
:param created_at: The created_at of this V1Preset. # noqa: E501
:type: datetime
"""
self._created_at = created_at
@property
def updated_at(self):
"""Gets the updated_at of this V1Preset. # noqa: E501
:return: The updated_at of this V1Preset. # noqa: E501
:rtype: datetime
"""
return self._updated_at
@updated_at.setter
def updated_at(self, updated_at):
"""Sets the updated_at of this V1Preset.
:param updated_at: The updated_at of this V1Preset. # noqa: E501
:type: datetime
"""
self._updated_at = updated_at
@property
def frozen(self):
"""Gets the frozen of this V1Preset. # noqa: E501
:return: The frozen of this V1Preset. # noqa: E501
:rtype: bool
"""
return self._frozen
@frozen.setter
def frozen(self, frozen):
"""Sets the frozen of this V1Preset.
:param frozen: The frozen of this V1Preset. # noqa: E501
:type: bool
"""
self._frozen = frozen
@property
def live_state(self):
"""Gets the live_state of this V1Preset. # noqa: E501
:return: The live_state of this V1Preset. # noqa: E501
:rtype: int
"""
return self._live_state
@live_state.setter
def live_state(self, live_state):
"""Sets the live_state of this V1Preset.
:param live_state: The live_state of this V1Preset. # noqa: E501
:type: int
"""
self._live_state = live_state
@property
def content(self):
"""Gets the content of this V1Preset. # noqa: E501
:return: The content of this V1Preset. # noqa: E501
:rtype: str
"""
return self._content
@content.setter
def content(self, content):
"""Sets the content of this V1Preset.
:param content: The content of this V1Preset. # noqa: E501
:type: str
"""
self._content = content
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1Preset):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1Preset):
return True
return self.to_dict() != other.to_dict()
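# A minimal usage sketch (not part of the generated client) showing how the
# model serializes through to_dict(); the field values are illustrative only.
if __name__ == "__main__":
    preset = V1Preset(name="gpu-small", tags=["gpu", "small"], frozen=False)
    assert preset.to_dict()["name"] == "gpu-small"
    assert preset.to_dict()["tags"] == ["gpu", "small"]
    print(preset)  # __repr__ pretty-prints the dict representation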
|
apache-2.0
|
ppmt/Crust
|
flask/lib/python2.7/site-packages/setuptools/command/easy_install.py
|
18
|
85008
|
#!/usr/bin/env python
"""
Easy Install
------------
A tool for doing automatic download/extract/build of distutils-based Python
packages. For detailed documentation, see the accompanying EasyInstall.txt
file, or visit the `EasyInstall home page`__.
__ https://pythonhosted.org/setuptools/easy_install.html
"""
from glob import glob
from distutils.util import get_platform
from distutils.util import convert_path, subst_vars
from distutils.errors import DistutilsArgError, DistutilsOptionError, \
DistutilsError, DistutilsPlatformError
from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS
from distutils import log, dir_util
from distutils.command.build_scripts import first_line_re
import sys
import os
import zipimport
import shutil
import tempfile
import zipfile
import re
import stat
import random
import platform
import textwrap
import warnings
import site
import struct
from setuptools import Command, _dont_write_bytecode
from setuptools.sandbox import run_setup
from setuptools.py31compat import get_path, get_config_vars
from setuptools.command import setopt
from setuptools.archive_util import unpack_archive
from setuptools.package_index import PackageIndex
from setuptools.package_index import URL_SCHEME
from setuptools.command import bdist_egg, egg_info
from setuptools.compat import (iteritems, maxsize, basestring, unicode,
reraise, PY2, PY3)
from pkg_resources import (
yield_lines, normalize_path, resource_string, ensure_directory,
get_distribution, find_distributions, Environment, Requirement,
Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound,
VersionConflict, DEVELOP_DIST,
)
import pkg_resources
sys_executable = os.environ.get('__PYVENV_LAUNCHER__',
os.path.normpath(sys.executable))
__all__ = [
'samefile', 'easy_install', 'PthDistributions', 'extract_wininst_cfg',
'main', 'get_exe_prefixes',
]
def is_64bit():
return struct.calcsize("P") == 8
def samefile(p1, p2):
both_exist = os.path.exists(p1) and os.path.exists(p2)
use_samefile = hasattr(os.path, 'samefile') and both_exist
if use_samefile:
return os.path.samefile(p1, p2)
norm_p1 = os.path.normpath(os.path.normcase(p1))
norm_p2 = os.path.normpath(os.path.normcase(p2))
return norm_p1 == norm_p2
if PY2:
def _to_ascii(s):
return s
def isascii(s):
try:
unicode(s, 'ascii')
return True
except UnicodeError:
return False
else:
def _to_ascii(s):
return s.encode('ascii')
def isascii(s):
try:
s.encode('ascii')
return True
except UnicodeError:
return False
class easy_install(Command):
"""Manage a download/build/install process"""
description = "Find/get/install Python packages"
command_consumes_arguments = True
user_options = [
('prefix=', None, "installation prefix"),
("zip-ok", "z", "install package as a zipfile"),
("multi-version", "m", "make apps have to require() a version"),
("upgrade", "U", "force upgrade (searches PyPI for latest versions)"),
("install-dir=", "d", "install package to DIR"),
("script-dir=", "s", "install scripts to DIR"),
("exclude-scripts", "x", "Don't install scripts"),
("always-copy", "a", "Copy all needed packages to install dir"),
("index-url=", "i", "base URL of Python Package Index"),
("find-links=", "f", "additional URL(s) to search for packages"),
("build-directory=", "b",
"download/extract/build in DIR; keep the results"),
('optimize=', 'O',
"also compile with optimization: -O1 for \"python -O\", "
"-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
('record=', None,
"filename in which to record list of installed files"),
('always-unzip', 'Z', "don't install as a zipfile, no matter what"),
('site-dirs=', 'S', "list of directories where .pth files work"),
('editable', 'e', "Install specified packages in editable form"),
('no-deps', 'N', "don't install dependencies"),
('allow-hosts=', 'H', "pattern(s) that hostnames must match"),
('local-snapshots-ok', 'l',
"allow building eggs from local checkouts"),
('version', None, "print version information and exit"),
('install-layout=', None, "installation layout to choose (known values: deb)"),
('force-installation-into-system-dir', '0', "force installation into /usr"),
('no-find-links', None,
"Don't load find-links defined in packages being installed")
]
boolean_options = [
'zip-ok', 'multi-version', 'exclude-scripts', 'upgrade', 'always-copy',
'editable',
'no-deps', 'local-snapshots-ok', 'version', 'force-installation-into-system-dir'
]
if site.ENABLE_USER_SITE:
help_msg = "install in user site-package '%s'" % site.USER_SITE
user_options.append(('user', None, help_msg))
boolean_options.append('user')
negative_opt = {'always-unzip': 'zip-ok'}
create_index = PackageIndex
def initialize_options(self):
if site.ENABLE_USER_SITE:
whereami = os.path.abspath(__file__)
self.user = whereami.startswith(site.USER_SITE)
else:
self.user = 0
self.zip_ok = self.local_snapshots_ok = None
self.install_dir = self.script_dir = self.exclude_scripts = None
self.index_url = None
self.find_links = None
self.build_directory = None
self.args = None
self.optimize = self.record = None
self.upgrade = self.always_copy = self.multi_version = None
self.editable = self.no_deps = self.allow_hosts = None
self.root = self.prefix = self.no_report = None
self.version = None
self.install_purelib = None # for pure module distributions
self.install_platlib = None # non-pure (dists w/ extensions)
self.install_headers = None # for C/C++ headers
self.install_lib = None # set to either purelib or platlib
self.install_scripts = None
self.install_data = None
self.install_base = None
self.install_platbase = None
if site.ENABLE_USER_SITE:
self.install_userbase = site.USER_BASE
self.install_usersite = site.USER_SITE
else:
self.install_userbase = None
self.install_usersite = None
self.no_find_links = None
# Options not specifiable via command line
self.package_index = None
self.pth_file = self.always_copy_from = None
self.site_dirs = None
self.installed_projects = {}
self.sitepy_installed = False
# enable custom installation, known values: deb
self.install_layout = None
self.force_installation_into_system_dir = None
self.multiarch = None
# Always read easy_install options, even if we are subclassed, or have
# an independent instance created. This ensures that defaults will
# always come from the standard configuration file(s)' "easy_install"
# section, even if this is a "develop" or "install" command, or some
# other embedding.
self._dry_run = None
self.verbose = self.distribution.verbose
self.distribution._set_command_options(
self, self.distribution.get_option_dict('easy_install')
)
def delete_blockers(self, blockers):
for filename in blockers:
if os.path.exists(filename) or os.path.islink(filename):
log.info("Deleting %s", filename)
if not self.dry_run:
if (os.path.isdir(filename) and
not os.path.islink(filename)):
rmtree(filename)
else:
os.unlink(filename)
def finalize_options(self):
if self.version:
print('setuptools %s' % get_distribution('setuptools').version)
sys.exit()
py_version = sys.version.split()[0]
prefix, exec_prefix = get_config_vars('prefix', 'exec_prefix')
self.config_vars = {
'dist_name': self.distribution.get_name(),
'dist_version': self.distribution.get_version(),
'dist_fullname': self.distribution.get_fullname(),
'py_version': py_version,
'py_version_short': py_version[0:3],
'py_version_nodot': py_version[0] + py_version[2],
'sys_prefix': prefix,
'prefix': prefix,
'sys_exec_prefix': exec_prefix,
'exec_prefix': exec_prefix,
# Only python 3.2+ has abiflags
'abiflags': getattr(sys, 'abiflags', ''),
}
if site.ENABLE_USER_SITE:
self.config_vars['userbase'] = self.install_userbase
self.config_vars['usersite'] = self.install_usersite
# fix the install_dir if "--user" was used
# XXX: duplicate of the code in the setup command
if self.user and site.ENABLE_USER_SITE:
self.create_home_path()
if self.install_userbase is None:
raise DistutilsPlatformError(
"User base directory is not specified")
self.install_base = self.install_platbase = self.install_userbase
if os.name == 'posix':
self.select_scheme("unix_user")
else:
self.select_scheme(os.name + "_user")
self.expand_basedirs()
self.expand_dirs()
if self.install_layout:
if not self.install_layout.lower() in ['deb']:
raise DistutilsOptionError("unknown value for --install-layout")
import sysconfig
if sys.version_info[:2] >= (3, 3):
self.multiarch = sysconfig.get_config_var('MULTIARCH')
self._expand('install_dir', 'script_dir', 'build_directory',
'site_dirs')
# If a non-default installation directory was specified, default the
# script directory to match it.
if self.script_dir is None:
self.script_dir = self.install_dir
if self.no_find_links is None:
self.no_find_links = False
# Let install_dir get set by install_lib command, which in turn
# gets its info from the install command, and takes into account
# --prefix and --home and all that other crud.
self.set_undefined_options(
'install_lib', ('install_dir', 'install_dir')
)
# Likewise, set default script_dir from 'install_scripts.install_dir'
self.set_undefined_options(
'install_scripts', ('install_dir', 'script_dir')
)
if self.user and self.install_purelib:
self.install_dir = self.install_purelib
self.script_dir = self.install_scripts
if self.prefix == '/usr' and not self.force_installation_into_system_dir:
raise DistutilsOptionError("""installation into /usr
Trying to install into the system-managed parts of the file system. Please
consider installing to another location, or use the option
--force-installation-into-system-dir to override this check.
""")
# default --record from the install command
self.set_undefined_options('install', ('record', 'record'))
# Should this be moved to the if statement below? It's not used
# elsewhere
normpath = map(normalize_path, sys.path)
self.all_site_dirs = get_site_dirs()
if self.site_dirs is not None:
site_dirs = [
os.path.expanduser(s.strip()) for s in
self.site_dirs.split(',')
]
for d in site_dirs:
if not os.path.isdir(d):
log.warn("%s (in --site-dirs) does not exist", d)
elif normalize_path(d) not in normpath:
raise DistutilsOptionError(
d + " (in --site-dirs) is not on sys.path"
)
else:
self.all_site_dirs.append(normalize_path(d))
if not self.editable:
self.check_site_dir()
self.index_url = self.index_url or "https://pypi.python.org/simple"
self.shadow_path = self.all_site_dirs[:]
for path_item in self.install_dir, normalize_path(self.script_dir):
if path_item not in self.shadow_path:
self.shadow_path.insert(0, path_item)
if self.allow_hosts is not None:
hosts = [s.strip() for s in self.allow_hosts.split(',')]
else:
hosts = ['*']
if self.package_index is None:
self.package_index = self.create_index(
self.index_url, search_path=self.shadow_path, hosts=hosts,
)
self.local_index = Environment(self.shadow_path + sys.path)
if self.find_links is not None:
if isinstance(self.find_links, basestring):
self.find_links = self.find_links.split()
else:
self.find_links = []
if self.local_snapshots_ok:
self.package_index.scan_egg_links(self.shadow_path + sys.path)
if not self.no_find_links:
self.package_index.add_find_links(self.find_links)
self.set_undefined_options('install_lib', ('optimize', 'optimize'))
if not isinstance(self.optimize, int):
try:
self.optimize = int(self.optimize)
if not (0 <= self.optimize <= 2):
raise ValueError
except ValueError:
raise DistutilsOptionError("--optimize must be 0, 1, or 2")
if self.editable and not self.build_directory:
raise DistutilsArgError(
"Must specify a build directory (-b) when using --editable"
)
if not self.args:
raise DistutilsArgError(
"No urls, filenames, or requirements specified (see --help)")
self.outputs = []
def _expand_attrs(self, attrs):
for attr in attrs:
val = getattr(self, attr)
if val is not None:
if os.name == 'posix' or os.name == 'nt':
val = os.path.expanduser(val)
val = subst_vars(val, self.config_vars)
setattr(self, attr, val)
def expand_basedirs(self):
"""Calls `os.path.expanduser` on install_base, install_platbase and
root."""
self._expand_attrs(['install_base', 'install_platbase', 'root'])
def expand_dirs(self):
"""Calls `os.path.expanduser` on install dirs."""
self._expand_attrs(['install_purelib', 'install_platlib',
'install_lib', 'install_headers',
'install_scripts', 'install_data', ])
def run(self):
if self.verbose != self.distribution.verbose:
log.set_verbosity(self.verbose)
try:
for spec in self.args:
self.easy_install(spec, not self.no_deps)
if self.record:
outputs = self.outputs
if self.root: # strip any package prefix
root_len = len(self.root)
for counter in range(len(outputs)):
outputs[counter] = outputs[counter][root_len:]
from distutils import file_util
self.execute(
file_util.write_file, (self.record, outputs),
"writing list of installed files to '%s'" %
self.record
)
self.warn_deprecated_options()
finally:
log.set_verbosity(self.distribution.verbose)
def pseudo_tempname(self):
"""Return a pseudo-tempname base in the install directory.
This code is intentionally naive; if a malicious party can write to
the target directory you're already in deep doodoo.
"""
try:
pid = os.getpid()
except:
pid = random.randint(0, maxsize)
return os.path.join(self.install_dir, "test-easy-install-%s" % pid)
def warn_deprecated_options(self):
pass
def check_site_dir(self):
"""Verify that self.install_dir is .pth-capable dir, if needed"""
instdir = normalize_path(self.install_dir)
pth_file = os.path.join(instdir, 'easy-install.pth')
# Is it a configured, PYTHONPATH, implicit, or explicit site dir?
is_site_dir = instdir in self.all_site_dirs
if not is_site_dir and not self.multi_version:
# No? Then directly test whether it does .pth file processing
is_site_dir = self.check_pth_processing()
else:
# make sure we can write to target dir
testfile = self.pseudo_tempname() + '.write-test'
test_exists = os.path.exists(testfile)
try:
if test_exists:
os.unlink(testfile)
open(testfile, 'w').close()
os.unlink(testfile)
except (OSError, IOError):
self.cant_write_to_target()
if not is_site_dir and not self.multi_version:
# Can't install non-multi to non-site dir
raise DistutilsError(self.no_default_version_msg())
if is_site_dir:
if self.pth_file is None:
self.pth_file = PthDistributions(pth_file, self.all_site_dirs)
else:
self.pth_file = None
PYTHONPATH = os.environ.get('PYTHONPATH', '').split(os.pathsep)
if instdir not in map(normalize_path, [_f for _f in PYTHONPATH if _f]):
# only PYTHONPATH dirs need a site.py, so pretend it's there
self.sitepy_installed = True
elif self.multi_version and not os.path.exists(pth_file):
self.sitepy_installed = True # don't need site.py in this case
self.pth_file = None # and don't create a .pth file
self.install_dir = instdir
def cant_write_to_target(self):
template = """can't create or remove files in install directory
The following error occurred while trying to add or remove files in the
installation directory:
%s
The installation directory you specified (via --install-dir, --prefix, or
the distutils default setting) was:
%s
"""
msg = template % (sys.exc_info()[1], self.install_dir,)
if not os.path.exists(self.install_dir):
msg += """
This directory does not currently exist. Please create it and try again, or
choose a different installation directory (using the -d or --install-dir
option).
"""
else:
msg += """
Perhaps your account does not have write access to this directory? If the
installation directory is a system-owned directory, you may need to sign in
as the administrator or "root" account. If you do not have administrative
access to this machine, you may wish to choose a different installation
directory, preferably one that is listed in your PYTHONPATH environment
variable.
For information on other options, you may wish to consult the
documentation at:
https://pythonhosted.org/setuptools/easy_install.html
Please make the appropriate changes for your system and try again.
"""
raise DistutilsError(msg)
def check_pth_processing(self):
"""Empirically verify whether .pth files are supported in inst. dir"""
instdir = self.install_dir
log.info("Checking .pth file support in %s", instdir)
pth_file = self.pseudo_tempname() + ".pth"
ok_file = pth_file + '.ok'
ok_exists = os.path.exists(ok_file)
try:
if ok_exists:
os.unlink(ok_file)
dirname = os.path.dirname(ok_file)
if not os.path.exists(dirname):
os.makedirs(dirname)
f = open(pth_file, 'w')
except (OSError, IOError):
self.cant_write_to_target()
else:
try:
f.write("import os; f = open(%r, 'w'); f.write('OK'); "
"f.close()\n" % (ok_file,))
f.close()
f = None
executable = sys.executable
if os.name == 'nt':
dirname, basename = os.path.split(executable)
alt = os.path.join(dirname, 'pythonw.exe')
if (basename.lower() == 'python.exe' and
os.path.exists(alt)):
# use pythonw.exe to avoid opening a console window
executable = alt
from distutils.spawn import spawn
spawn([executable, '-E', '-c', 'pass'], 0)
if os.path.exists(ok_file):
log.info(
"TEST PASSED: %s appears to support .pth files",
instdir
)
return True
finally:
if f:
f.close()
if os.path.exists(ok_file):
os.unlink(ok_file)
if os.path.exists(pth_file):
os.unlink(pth_file)
if not self.multi_version:
log.warn("TEST FAILED: %s does NOT support .pth files", instdir)
return False
def install_egg_scripts(self, dist):
"""Write all the scripts for `dist`, unless scripts are excluded"""
if not self.exclude_scripts and dist.metadata_isdir('scripts'):
for script_name in dist.metadata_listdir('scripts'):
if dist.metadata_isdir('scripts/' + script_name):
# The "script" is a directory, likely a Python 3
# __pycache__ directory, so skip it.
continue
self.install_script(
dist, script_name,
dist.get_metadata('scripts/' + script_name)
)
self.install_wrapper_scripts(dist)
def add_output(self, path):
if os.path.isdir(path):
for base, dirs, files in os.walk(path):
for filename in files:
self.outputs.append(os.path.join(base, filename))
else:
self.outputs.append(path)
def not_editable(self, spec):
if self.editable:
raise DistutilsArgError(
"Invalid argument %r: you can't use filenames or URLs "
"with --editable (except via the --find-links option)."
% (spec,)
)
def check_editable(self, spec):
if not self.editable:
return
if os.path.exists(os.path.join(self.build_directory, spec.key)):
raise DistutilsArgError(
"%r already exists in %s; can't do a checkout there" %
(spec.key, self.build_directory)
)
def easy_install(self, spec, deps=False):
tmpdir = tempfile.mkdtemp(prefix="easy_install-")
download = None
if not self.editable:
self.install_site_py()
try:
if not isinstance(spec, Requirement):
if URL_SCHEME(spec):
# It's a url, download it to tmpdir and process
self.not_editable(spec)
download = self.package_index.download(spec, tmpdir)
return self.install_item(None, download, tmpdir, deps,
True)
elif os.path.exists(spec):
# Existing file or directory, just process it directly
self.not_editable(spec)
return self.install_item(None, spec, tmpdir, deps, True)
else:
spec = parse_requirement_arg(spec)
self.check_editable(spec)
dist = self.package_index.fetch_distribution(
spec, tmpdir, self.upgrade, self.editable,
not self.always_copy, self.local_index
)
if dist is None:
msg = "Could not find suitable distribution for %r" % spec
if self.always_copy:
msg += " (--always-copy skips system and development eggs)"
raise DistutilsError(msg)
elif dist.precedence == DEVELOP_DIST:
# .egg-info dists don't need installing, just process deps
self.process_distribution(spec, dist, deps, "Using")
return dist
else:
return self.install_item(spec, dist.location, tmpdir, deps)
finally:
if os.path.exists(tmpdir):
rmtree(tmpdir)
def install_item(self, spec, download, tmpdir, deps, install_needed=False):
# Installation is also needed if file in tmpdir or is not an egg
install_needed = install_needed or self.always_copy
install_needed = install_needed or os.path.dirname(download) == tmpdir
install_needed = install_needed or not download.endswith('.egg')
install_needed = install_needed or (
self.always_copy_from is not None and
os.path.dirname(normalize_path(download)) ==
normalize_path(self.always_copy_from)
)
if spec and not install_needed:
# at this point, we know it's a local .egg, we just don't know if
# it's already installed.
for dist in self.local_index[spec.project_name]:
if dist.location == download:
break
else:
install_needed = True # it's not in the local index
log.info("Processing %s", os.path.basename(download))
if install_needed:
dists = self.install_eggs(spec, download, tmpdir)
for dist in dists:
self.process_distribution(spec, dist, deps)
else:
dists = [self.egg_distribution(download)]
self.process_distribution(spec, dists[0], deps, "Using")
if spec is not None:
for dist in dists:
if dist in spec:
return dist
def select_scheme(self, name):
"""Sets the install directories by applying the install schemes."""
# it's the caller's problem if they supply a bad name!
scheme = INSTALL_SCHEMES[name]
for key in SCHEME_KEYS:
attrname = 'install_' + key
if getattr(self, attrname) is None:
setattr(self, attrname, scheme[key])
def process_distribution(self, requirement, dist, deps=True, *info):
self.update_pth(dist)
self.package_index.add(dist)
if dist in self.local_index[dist.key]:
self.local_index.remove(dist)
self.local_index.add(dist)
self.install_egg_scripts(dist)
self.installed_projects[dist.key] = dist
log.info(self.installation_report(requirement, dist, *info))
if (dist.has_metadata('dependency_links.txt') and
not self.no_find_links):
self.package_index.add_find_links(
dist.get_metadata_lines('dependency_links.txt')
)
if not deps and not self.always_copy:
return
elif requirement is not None and dist.key != requirement.key:
log.warn("Skipping dependencies for %s", dist)
return # XXX this is not the distribution we were looking for
elif requirement is None or dist not in requirement:
# if we wound up with a different version, resolve what we've got
distreq = dist.as_requirement()
requirement = requirement or distreq
requirement = Requirement(
distreq.project_name, distreq.specs, requirement.extras
)
log.info("Processing dependencies for %s", requirement)
try:
distros = WorkingSet([]).resolve(
[requirement], self.local_index, self.easy_install
)
except DistributionNotFound:
e = sys.exc_info()[1]
raise DistutilsError(
"Could not find required distribution %s" % e.args
)
except VersionConflict:
e = sys.exc_info()[1]
raise DistutilsError(
"Installed distribution %s conflicts with requirement %s"
% e.args
)
if self.always_copy or self.always_copy_from:
# Force all the relevant distros to be copied or activated
for dist in distros:
if dist.key not in self.installed_projects:
self.easy_install(dist.as_requirement())
log.info("Finished processing dependencies for %s", requirement)
def should_unzip(self, dist):
if self.zip_ok is not None:
return not self.zip_ok
if dist.has_metadata('not-zip-safe'):
return True
if not dist.has_metadata('zip-safe'):
return True
return False
def maybe_move(self, spec, dist_filename, setup_base):
dst = os.path.join(self.build_directory, spec.key)
if os.path.exists(dst):
msg = ("%r already exists in %s; build directory %s will not be "
"kept")
log.warn(msg, spec.key, self.build_directory, setup_base)
return setup_base
if os.path.isdir(dist_filename):
setup_base = dist_filename
else:
if os.path.dirname(dist_filename) == setup_base:
os.unlink(dist_filename) # get it out of the tmp dir
contents = os.listdir(setup_base)
if len(contents) == 1:
dist_filename = os.path.join(setup_base, contents[0])
if os.path.isdir(dist_filename):
# if the only thing there is a directory, move it instead
setup_base = dist_filename
ensure_directory(dst)
shutil.move(setup_base, dst)
return dst
def install_wrapper_scripts(self, dist):
if not self.exclude_scripts:
for args in get_script_args(dist):
self.write_script(*args)
def install_script(self, dist, script_name, script_text, dev_path=None):
"""Generate a legacy script wrapper and install it"""
spec = str(dist.as_requirement())
is_script = is_python_script(script_text, script_name)
if is_script:
script_text = (get_script_header(script_text) +
self._load_template(dev_path) % locals())
self.write_script(script_name, _to_ascii(script_text), 'b')
@staticmethod
def _load_template(dev_path):
"""
There are a couple of template scripts in the package. This
function loads one of them and prepares it for use.
"""
# See https://bitbucket.org/pypa/setuptools/issue/134 for info
# on script file naming and downstream issues with SVR4
name = 'script.tmpl'
if dev_path:
name = name.replace('.tmpl', ' (dev).tmpl')
raw_bytes = resource_string('setuptools', name)
return raw_bytes.decode('utf-8')
def write_script(self, script_name, contents, mode="t", blockers=()):
"""Write an executable file to the scripts directory"""
self.delete_blockers( # clean up old .py/.pyw w/o a script
[os.path.join(self.script_dir, x) for x in blockers]
)
log.info("Installing %s script to %s", script_name, self.script_dir)
target = os.path.join(self.script_dir, script_name)
self.add_output(target)
mask = current_umask()
if not self.dry_run:
ensure_directory(target)
if os.path.exists(target):
os.unlink(target)
f = open(target, "w" + mode)
f.write(contents)
f.close()
chmod(target, 0o777 - mask)
def install_eggs(self, spec, dist_filename, tmpdir):
# .egg dirs or files are already built, so just return them
if dist_filename.lower().endswith('.egg'):
return [self.install_egg(dist_filename, tmpdir)]
elif dist_filename.lower().endswith('.exe'):
return [self.install_exe(dist_filename, tmpdir)]
# Anything else, try to extract and build
setup_base = tmpdir
if os.path.isfile(dist_filename) and not dist_filename.endswith('.py'):
unpack_archive(dist_filename, tmpdir, self.unpack_progress)
elif os.path.isdir(dist_filename):
setup_base = os.path.abspath(dist_filename)
if (setup_base.startswith(tmpdir) # something we downloaded
and self.build_directory and spec is not None):
setup_base = self.maybe_move(spec, dist_filename, setup_base)
# Find the setup.py file
setup_script = os.path.join(setup_base, 'setup.py')
if not os.path.exists(setup_script):
setups = glob(os.path.join(setup_base, '*', 'setup.py'))
if not setups:
raise DistutilsError(
"Couldn't find a setup script in %s" %
os.path.abspath(dist_filename)
)
if len(setups) > 1:
raise DistutilsError(
"Multiple setup scripts in %s" %
os.path.abspath(dist_filename)
)
setup_script = setups[0]
# Now run it, and return the result
if self.editable:
log.info(self.report_editable(spec, setup_script))
return []
else:
return self.build_and_install(setup_script, setup_base)
def egg_distribution(self, egg_path):
if os.path.isdir(egg_path):
metadata = PathMetadata(egg_path, os.path.join(egg_path,
'EGG-INFO'))
else:
metadata = EggMetadata(zipimport.zipimporter(egg_path))
return Distribution.from_filename(egg_path, metadata=metadata)
def install_egg(self, egg_path, tmpdir):
destination = os.path.join(self.install_dir,
os.path.basename(egg_path))
destination = os.path.abspath(destination)
if not self.dry_run:
ensure_directory(destination)
dist = self.egg_distribution(egg_path)
if not samefile(egg_path, destination):
if os.path.isdir(destination) and not os.path.islink(destination):
dir_util.remove_tree(destination, dry_run=self.dry_run)
elif os.path.exists(destination):
self.execute(os.unlink, (destination,), "Removing " +
destination)
try:
new_dist_is_zipped = False
if os.path.isdir(egg_path):
if egg_path.startswith(tmpdir):
f, m = shutil.move, "Moving"
else:
f, m = shutil.copytree, "Copying"
elif self.should_unzip(dist):
self.mkpath(destination)
f, m = self.unpack_and_compile, "Extracting"
else:
new_dist_is_zipped = True
if egg_path.startswith(tmpdir):
f, m = shutil.move, "Moving"
else:
f, m = shutil.copy2, "Copying"
self.execute(f, (egg_path, destination),
(m + " %s to %s") %
(os.path.basename(egg_path),
os.path.dirname(destination)))
update_dist_caches(destination,
fix_zipimporter_caches=new_dist_is_zipped)
except:
update_dist_caches(destination, fix_zipimporter_caches=False)
raise
self.add_output(destination)
return self.egg_distribution(destination)
def install_exe(self, dist_filename, tmpdir):
# See if it's valid, get data
cfg = extract_wininst_cfg(dist_filename)
if cfg is None:
raise DistutilsError(
"%s is not a valid distutils Windows .exe" % dist_filename
)
# Create a dummy distribution object until we build the real distro
dist = Distribution(
None,
project_name=cfg.get('metadata', 'name'),
version=cfg.get('metadata', 'version'), platform=get_platform(),
)
# Convert the .exe to an unpacked egg
egg_path = dist.location = os.path.join(tmpdir, dist.egg_name() +
'.egg')
egg_tmp = egg_path + '.tmp'
_egg_info = os.path.join(egg_tmp, 'EGG-INFO')
pkg_inf = os.path.join(_egg_info, 'PKG-INFO')
ensure_directory(pkg_inf) # make sure EGG-INFO dir exists
dist._provider = PathMetadata(egg_tmp, _egg_info) # XXX
self.exe_to_egg(dist_filename, egg_tmp)
# Write EGG-INFO/PKG-INFO
if not os.path.exists(pkg_inf):
f = open(pkg_inf, 'w')
f.write('Metadata-Version: 1.0\n')
for k, v in cfg.items('metadata'):
if k != 'target_version':
f.write('%s: %s\n' % (k.replace('_', '-').title(), v))
f.close()
script_dir = os.path.join(_egg_info, 'scripts')
self.delete_blockers( # delete entry-point scripts to avoid duping
[os.path.join(script_dir, args[0]) for args in
get_script_args(dist)]
)
# Build .egg file from tmpdir
bdist_egg.make_zipfile(
egg_path, egg_tmp, verbose=self.verbose, dry_run=self.dry_run
)
# install the .egg
return self.install_egg(egg_path, tmpdir)
def exe_to_egg(self, dist_filename, egg_tmp):
"""Extract a bdist_wininst to the directories an egg would use"""
# Check for .pth file and set up prefix translations
prefixes = get_exe_prefixes(dist_filename)
to_compile = []
native_libs = []
top_level = {}
def process(src, dst):
s = src.lower()
for old, new in prefixes:
if s.startswith(old):
src = new + src[len(old):]
parts = src.split('/')
dst = os.path.join(egg_tmp, *parts)
dl = dst.lower()
if dl.endswith('.pyd') or dl.endswith('.dll'):
parts[-1] = bdist_egg.strip_module(parts[-1])
top_level[os.path.splitext(parts[0])[0]] = 1
native_libs.append(src)
elif dl.endswith('.py') and old != 'SCRIPTS/':
top_level[os.path.splitext(parts[0])[0]] = 1
to_compile.append(dst)
return dst
if not src.endswith('.pth'):
log.warn("WARNING: can't process %s", src)
return None
# extract, tracking .pyd/.dll->native_libs and .py -> to_compile
unpack_archive(dist_filename, egg_tmp, process)
stubs = []
for res in native_libs:
if res.lower().endswith('.pyd'): # create stubs for .pyd's
parts = res.split('/')
resource = parts[-1]
parts[-1] = bdist_egg.strip_module(parts[-1]) + '.py'
pyfile = os.path.join(egg_tmp, *parts)
to_compile.append(pyfile)
stubs.append(pyfile)
bdist_egg.write_stub(resource, pyfile)
self.byte_compile(to_compile) # compile .py's
bdist_egg.write_safety_flag(
os.path.join(egg_tmp, 'EGG-INFO'),
bdist_egg.analyze_egg(egg_tmp, stubs)) # write zip-safety flag
for name in 'top_level', 'native_libs':
if locals()[name]:
txt = os.path.join(egg_tmp, 'EGG-INFO', name + '.txt')
if not os.path.exists(txt):
f = open(txt, 'w')
f.write('\n'.join(locals()[name]) + '\n')
f.close()
def installation_report(self, req, dist, what="Installed"):
"""Helpful installation message for display to package users"""
msg = "\n%(what)s %(eggloc)s%(extras)s"
if self.multi_version and not self.no_report:
msg += """
Because this distribution was installed --multi-version, before you can
import modules from this package in an application, you will need to
'import pkg_resources' and then use a 'require()' call similar to one of
these examples, in order to select the desired version:
pkg_resources.require("%(name)s") # latest installed version
pkg_resources.require("%(name)s==%(version)s") # this exact version
pkg_resources.require("%(name)s>=%(version)s") # this version or higher
"""
if self.install_dir not in map(normalize_path, sys.path):
msg += """
Note also that the installation directory must be on sys.path at runtime for
this to work. (e.g. by being the application's script directory, by being on
PYTHONPATH, or by being added to sys.path by your code.)
"""
eggloc = dist.location
name = dist.project_name
version = dist.version
extras = '' # TODO: self.report_extras(req, dist)
return msg % locals()
def report_editable(self, spec, setup_script):
dirname = os.path.dirname(setup_script)
python = sys.executable
return """\nExtracted editable version of %(spec)s to %(dirname)s
If it uses setuptools in its setup script, you can activate it in
"development" mode by going to that directory and running::
%(python)s setup.py develop
See the setuptools documentation for the "develop" command for more info.
""" % locals()
def run_setup(self, setup_script, setup_base, args):
sys.modules.setdefault('distutils.command.bdist_egg', bdist_egg)
sys.modules.setdefault('distutils.command.egg_info', egg_info)
args = list(args)
if self.verbose > 2:
v = 'v' * (self.verbose - 1)
args.insert(0, '-' + v)
elif self.verbose < 2:
args.insert(0, '-q')
if self.dry_run:
args.insert(0, '-n')
log.info(
"Running %s %s", setup_script[len(setup_base) + 1:], ' '.join(args)
)
try:
run_setup(setup_script, args)
except SystemExit:
v = sys.exc_info()[1]
raise DistutilsError("Setup script exited with %s" % (v.args[0],))
def build_and_install(self, setup_script, setup_base):
args = ['bdist_egg', '--dist-dir']
dist_dir = tempfile.mkdtemp(
prefix='egg-dist-tmp-', dir=os.path.dirname(setup_script)
)
try:
self._set_fetcher_options(os.path.dirname(setup_script))
args.append(dist_dir)
self.run_setup(setup_script, setup_base, args)
all_eggs = Environment([dist_dir])
eggs = []
for key in all_eggs:
for dist in all_eggs[key]:
eggs.append(self.install_egg(dist.location, setup_base))
if not eggs and not self.dry_run:
log.warn("No eggs found in %s (setup script problem?)",
dist_dir)
return eggs
finally:
rmtree(dist_dir)
log.set_verbosity(self.verbose) # restore our log verbosity
def _set_fetcher_options(self, base):
"""
When easy_install is about to run bdist_egg on a source dist, that
source dist might have 'setup_requires' directives, requiring
additional fetching. Ensure the fetcher options given to easy_install
are available to that command as well.
"""
# find the fetch options from easy_install and write them out
# to the setup.cfg file.
ei_opts = self.distribution.get_option_dict('easy_install').copy()
fetch_directives = (
'find_links', 'site_dirs', 'index_url', 'optimize',
'site_dirs', 'allow_hosts',
)
fetch_options = {}
for key, val in ei_opts.items():
if key not in fetch_directives:
continue
fetch_options[key.replace('_', '-')] = val[1]
# create a settings dictionary suitable for `edit_config`
settings = dict(easy_install=fetch_options)
cfg_filename = os.path.join(base, 'setup.cfg')
setopt.edit_config(cfg_filename, settings)
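# Illustrative sketch (added comment, not part of the original module): if
# easy_install was invoked with, say, --find-links and --index-url, the
# settings dict built above would look roughly like
#     {'easy_install': {'find-links': '...', 'index-url': '...'}}
# and setopt.edit_config would persist those keys into the [easy_install]
# section of the source dist's setup.cfg, so a nested 'setup_requires'
# fetch uses the same sources.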
def update_pth(self, dist):
if self.pth_file is None:
return
for d in self.pth_file[dist.key]: # drop old entries
if self.multi_version or d.location != dist.location:
log.info("Removing %s from easy-install.pth file", d)
self.pth_file.remove(d)
if d.location in self.shadow_path:
self.shadow_path.remove(d.location)
if not self.multi_version:
if dist.location in self.pth_file.paths:
log.info(
"%s is already the active version in easy-install.pth",
dist
)
else:
log.info("Adding %s to easy-install.pth file", dist)
self.pth_file.add(dist) # add new entry
if dist.location not in self.shadow_path:
self.shadow_path.append(dist.location)
if not self.dry_run:
self.pth_file.save()
if dist.key == 'setuptools':
# Ensure that setuptools itself never becomes unavailable!
# XXX should this check for latest version?
filename = os.path.join(self.install_dir, 'setuptools.pth')
if os.path.islink(filename):
os.unlink(filename)
f = open(filename, 'wt')
f.write(self.pth_file.make_relative(dist.location) + '\n')
f.close()
def unpack_progress(self, src, dst):
# Progress filter for unpacking
log.debug("Unpacking %s to %s", src, dst)
return dst # only unpack-and-compile skips files for dry run
def unpack_and_compile(self, egg_path, destination):
to_compile = []
to_chmod = []
def pf(src, dst):
if dst.endswith('.py') and not src.startswith('EGG-INFO/'):
to_compile.append(dst)
elif dst.endswith('.dll') or dst.endswith('.so'):
to_chmod.append(dst)
self.unpack_progress(src, dst)
return not self.dry_run and dst or None
unpack_archive(egg_path, destination, pf)
self.byte_compile(to_compile)
if not self.dry_run:
for f in to_chmod:
mode = ((os.stat(f)[stat.ST_MODE]) | 0o555) & 0o7755
chmod(f, mode)
def byte_compile(self, to_compile):
if _dont_write_bytecode:
self.warn('byte-compiling is disabled, skipping.')
return
from distutils.util import byte_compile
try:
# try to make the byte compile messages quieter
log.set_verbosity(self.verbose - 1)
byte_compile(to_compile, optimize=0, force=1, dry_run=self.dry_run)
if self.optimize:
byte_compile(
to_compile, optimize=self.optimize, force=1,
dry_run=self.dry_run
)
finally:
log.set_verbosity(self.verbose) # restore original verbosity
def no_default_version_msg(self):
template = """bad install directory or PYTHONPATH
You are attempting to install a package to a directory that is not
on PYTHONPATH and which Python does not read ".pth" files from. The
installation directory you specified (via --install-dir, --prefix, or
the distutils default setting) was:
%s
and your PYTHONPATH environment variable currently contains:
%r
Here are some of your options for correcting the problem:
* You can choose a different installation directory, i.e., one that is
on PYTHONPATH or supports .pth files
* You can add the installation directory to the PYTHONPATH environment
variable. (It must then also be on PYTHONPATH whenever you run
Python and want to use the package(s) you are installing.)
* You can set up the installation directory to support ".pth" files by
using one of the approaches described here:
https://pythonhosted.org/setuptools/easy_install.html#custom-installation-locations
Please make the appropriate changes for your system and try again."""
return template % (self.install_dir, os.environ.get('PYTHONPATH', ''))
def install_site_py(self):
"""Make sure there's a site.py in the target dir, if needed"""
if self.sitepy_installed:
return # already did it, or don't need to
sitepy = os.path.join(self.install_dir, "site.py")
source = resource_string("setuptools", "site-patch.py")
current = ""
if os.path.exists(sitepy):
log.debug("Checking existing site.py in %s", self.install_dir)
f = open(sitepy, 'rb')
current = f.read()
# we want str, not bytes
if PY3:
current = current.decode()
f.close()
if not current.startswith('def __boot():'):
raise DistutilsError(
"%s is not a setuptools-generated site.py; please"
" remove it." % sitepy
)
if current != source:
log.info("Creating %s", sitepy)
if not self.dry_run:
ensure_directory(sitepy)
f = open(sitepy, 'wb')
f.write(source)
f.close()
self.byte_compile([sitepy])
self.sitepy_installed = True
def create_home_path(self):
"""Create directories under ~."""
if not self.user:
return
home = convert_path(os.path.expanduser("~"))
for name, path in iteritems(self.config_vars):
if path.startswith(home) and not os.path.isdir(path):
self.debug_print("os.makedirs('%s', 0o700)" % path)
os.makedirs(path, 0o700)
if sys.version[:3] in ('2.3', '2.4', '2.5') or 'real_prefix' in sys.__dict__:
sitedir_name = 'site-packages'
else:
sitedir_name = 'dist-packages'
INSTALL_SCHEMES = dict(
posix=dict(
install_dir='$base/lib/python$py_version_short/site-packages',
script_dir='$base/bin',
),
unix_local = dict(
install_dir = '$base/local/lib/python$py_version_short/%s' % sitedir_name,
script_dir = '$base/local/bin',
),
posix_local = dict(
install_dir = '$base/local/lib/python$py_version_short/%s' % sitedir_name,
script_dir = '$base/local/bin',
),
deb_system = dict(
install_dir = '$base/lib/python3/%s' % sitedir_name,
script_dir = '$base/bin',
),
)
DEFAULT_SCHEME = dict(
install_dir='$base/Lib/site-packages',
script_dir='$base/Scripts',
)
def _expand(self, *attrs):
config_vars = self.get_finalized_command('install').config_vars
if self.prefix or self.install_layout:
if self.install_layout and self.install_layout.lower() in ['deb']:
scheme_name = "deb_system"
self.prefix = '/usr'
elif self.prefix or 'real_prefix' in sys.__dict__:
scheme_name = os.name
else:
scheme_name = "posix_local"
# Set default install_dir/scripts from --prefix
config_vars = config_vars.copy()
config_vars['base'] = self.prefix
scheme = self.INSTALL_SCHEMES.get(scheme_name,self.DEFAULT_SCHEME)
for attr, val in scheme.items():
if getattr(self, attr, None) is None:
setattr(self, attr, val)
from distutils.util import subst_vars
for attr in attrs:
val = getattr(self, attr)
if val is not None:
val = subst_vars(val, config_vars)
if os.name == 'posix':
val = os.path.expanduser(val)
setattr(self, attr, val)
def get_site_dirs():
# return a list of 'site' dirs
sitedirs = [_f for _f in os.environ.get('PYTHONPATH',
'').split(os.pathsep) if _f]
prefixes = [sys.prefix]
if sys.exec_prefix != sys.prefix:
prefixes.append(sys.exec_prefix)
for prefix in prefixes:
if prefix:
if sys.platform in ('os2emx', 'riscos'):
sitedirs.append(os.path.join(prefix, "Lib", "site-packages"))
elif os.sep == '/':
sitedirs.extend([os.path.join(prefix,
"lib",
"python" + sys.version[:3],
"site-packages"),
os.path.join(prefix, "lib", "site-python")])
else:
if sys.version[:3] in ('2.3', '2.4', '2.5'):
sdir = "site-packages"
else:
sdir = "dist-packages"
sitedirs.extend(
[os.path.join(prefix, "local/lib", "python" + sys.version[:3], sdir),
os.path.join(prefix, "lib", "python" + sys.version[:3], sdir)]
)
if sys.platform == 'darwin':
# for framework builds *only* we add the standard Apple
# locations. Currently only per-user, but /Library and
# /Network/Library could be added too
if 'Python.framework' in prefix:
home = os.environ.get('HOME')
if home:
sitedirs.append(
os.path.join(home,
'Library',
'Python',
sys.version[:3],
'site-packages'))
lib_paths = get_path('purelib'), get_path('platlib')
for site_lib in lib_paths:
if site_lib not in sitedirs:
sitedirs.append(site_lib)
if site.ENABLE_USER_SITE:
sitedirs.append(site.USER_SITE)
sitedirs = list(map(normalize_path, sitedirs))
return sitedirs
def expand_paths(inputs):
"""Yield sys.path directories that might contain "old-style" packages"""
seen = {}
for dirname in inputs:
dirname = normalize_path(dirname)
if dirname in seen:
continue
seen[dirname] = 1
if not os.path.isdir(dirname):
continue
files = os.listdir(dirname)
yield dirname, files
for name in files:
if not name.endswith('.pth'):
# We only care about the .pth files
continue
if name in ('easy-install.pth', 'setuptools.pth'):
# Ignore .pth files that we control
continue
# Read the .pth file
f = open(os.path.join(dirname, name))
lines = list(yield_lines(f))
f.close()
# Yield existing non-dupe, non-import directory lines from it
for line in lines:
if not line.startswith("import"):
line = normalize_path(line.rstrip())
if line not in seen:
seen[line] = 1
if not os.path.isdir(line):
continue
yield line, os.listdir(line)
def extract_wininst_cfg(dist_filename):
"""Extract configuration data from a bdist_wininst .exe
Returns a ConfigParser.RawConfigParser, or None
"""
f = open(dist_filename, 'rb')
try:
endrec = zipfile._EndRecData(f)
if endrec is None:
return None
prepended = (endrec[9] - endrec[5]) - endrec[6]
if prepended < 12: # no wininst data here
return None
f.seek(prepended - 12)
from setuptools.compat import StringIO, ConfigParser
import struct
tag, cfglen, bmlen = struct.unpack("<iii", f.read(12))
if tag not in (0x1234567A, 0x1234567B):
return None # not a valid tag
f.seek(prepended - (12 + cfglen))
cfg = ConfigParser.RawConfigParser(
{'version': '', 'target_version': ''})
try:
part = f.read(cfglen)
# part is in bytes, but we need to read up to the first null
# byte.
if PY3:
null_byte = bytes([0])
else:
null_byte = chr(0)
config = part.split(null_byte, 1)[0]
# Now the config is in bytes, but for RawConfigParser, it should
# be text, so decode it.
config = config.decode(sys.getfilesystemencoding())
cfg.readfp(StringIO(config))
except ConfigParser.Error:
return None
if not cfg.has_section('metadata') or not cfg.has_section('Setup'):
return None
return cfg
finally:
f.close()
def get_exe_prefixes(exe_filename):
"""Get exe->egg path translations for a given .exe file"""
prefixes = [
('PURELIB/', ''), ('PLATLIB/pywin32_system32', ''),
('PLATLIB/', ''),
('SCRIPTS/', 'EGG-INFO/scripts/'),
('DATA/lib/site-packages', ''),
]
z = zipfile.ZipFile(exe_filename)
try:
for info in z.infolist():
name = info.filename
parts = name.split('/')
if len(parts) == 3 and parts[2] == 'PKG-INFO':
if parts[1].endswith('.egg-info'):
prefixes.insert(0, ('/'.join(parts[:2]), 'EGG-INFO/'))
break
if len(parts) != 2 or not name.endswith('.pth'):
continue
if name.endswith('-nspkg.pth'):
continue
if parts[0].upper() in ('PURELIB', 'PLATLIB'):
contents = z.read(name)
if PY3:
contents = contents.decode()
for pth in yield_lines(contents):
pth = pth.strip().replace('\\', '/')
if not pth.startswith('import'):
prefixes.append((('%s/%s/' % (parts[0], pth)), ''))
finally:
z.close()
prefixes = [(x.lower(), y) for x, y in prefixes]
prefixes.sort()
prefixes.reverse()
return prefixes
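# Illustrative note (added, not in the original source): with the default
# prefixes above, a member named 'PURELIB/foo/bar.py' inside the .exe would be
# remapped to 'foo/bar.py' in the egg, while 'SCRIPTS/frob.py' would land at
# 'EGG-INFO/scripts/frob.py' (see exe_to_egg's process helper, which applies
# these lowercased prefixes with startswith).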
def parse_requirement_arg(spec):
try:
return Requirement.parse(spec)
except ValueError:
raise DistutilsError(
"Not a URL, existing file, or requirement spec: %r" % (spec,)
)
class PthDistributions(Environment):
"""A .pth file with Distribution paths in it"""
dirty = False
def __init__(self, filename, sitedirs=()):
self.filename = filename
self.sitedirs = list(map(normalize_path, sitedirs))
self.basedir = normalize_path(os.path.dirname(self.filename))
self._load()
Environment.__init__(self, [], None, None)
for path in yield_lines(self.paths):
list(map(self.add, find_distributions(path, True)))
def _load(self):
self.paths = []
saw_import = False
seen = dict.fromkeys(self.sitedirs)
if os.path.isfile(self.filename):
f = open(self.filename, 'rt')
for line in f:
if line.startswith('import'):
saw_import = True
continue
path = line.rstrip()
self.paths.append(path)
if not path.strip() or path.strip().startswith('#'):
continue
# skip non-existent paths, in case somebody deleted a package
# manually, and duplicate paths as well
path = self.paths[-1] = normalize_path(
os.path.join(self.basedir, path)
)
if not os.path.exists(path) or path in seen:
self.paths.pop() # skip it
self.dirty = True # we cleaned up, so we're dirty now :)
continue
seen[path] = 1
f.close()
if self.paths and not saw_import:
self.dirty = True # ensure anything we touch has import wrappers
while self.paths and not self.paths[-1].strip():
self.paths.pop()
def save(self):
"""Write changed .pth file back to disk"""
if not self.dirty:
return
data = '\n'.join(map(self.make_relative, self.paths))
if data:
log.debug("Saving %s", self.filename)
data = (
"import sys; sys.__plen = len(sys.path)\n"
"%s\n"
"import sys; new=sys.path[sys.__plen:];"
" del sys.path[sys.__plen:];"
" p=getattr(sys,'__egginsert',0); sys.path[p:p]=new;"
" sys.__egginsert = p+len(new)\n"
) % data
if os.path.islink(self.filename):
os.unlink(self.filename)
f = open(self.filename, 'wt')
f.write(data)
f.close()
elif os.path.exists(self.filename):
log.debug("Deleting empty %s", self.filename)
os.unlink(self.filename)
self.dirty = False
def add(self, dist):
"""Add `dist` to the distribution map"""
if (dist.location not in self.paths and (
dist.location not in self.sitedirs or
dist.location == os.getcwd() # account for '.' being in PYTHONPATH
)):
self.paths.append(dist.location)
self.dirty = True
Environment.add(self, dist)
def remove(self, dist):
"""Remove `dist` from the distribution map"""
while dist.location in self.paths:
self.paths.remove(dist.location)
self.dirty = True
Environment.remove(self, dist)
def make_relative(self, path):
npath, last = os.path.split(normalize_path(path))
baselen = len(self.basedir)
parts = [last]
sep = os.altsep == '/' and '/' or os.sep
while len(npath) >= baselen:
if npath == self.basedir:
parts.append(os.curdir)
parts.reverse()
return sep.join(parts)
npath, last = os.path.split(npath)
parts.append(last)
else:
return path
def _first_line_re():
"""
Return a regular expression based on first_line_re suitable for matching
strings.
"""
if isinstance(first_line_re.pattern, str):
return first_line_re
# first_line_re in Python >=3.1.4 and >=3.2.1 is a bytes pattern.
return re.compile(first_line_re.pattern.decode())
def get_script_header(script_text, executable=sys_executable, wininst=False):
"""Create a #! line, getting options (if any) from script_text"""
first = (script_text + '\n').splitlines()[0]
match = _first_line_re().match(first)
options = ''
if match:
options = match.group(1) or ''
if options:
options = ' ' + options
if wininst:
executable = "python.exe"
else:
executable = nt_quote_arg(executable)
hdr = "#!%(executable)s%(options)s\n" % locals()
if not isascii(hdr):
# Non-ascii path to sys.executable, use -x to prevent warnings
if options:
if options.strip().startswith('-'):
options = ' -x' + options.strip()[1:]
# else: punt, we can't do it, let the warning happen anyway
else:
options = ' -x'
executable = fix_jython_executable(executable, options)
hdr = "#!%(executable)s%(options)s\n" % locals()
return hdr
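# Example (added for illustration; the interpreter path is hypothetical): for
# a plain ASCII interpreter path and a script without an options-bearing #!
# line,
#     get_script_header('', '/usr/bin/python')  ->  '#!/usr/bin/python\n'
# while wininst=True forces the header to '#!python.exe\n' regardless of the
# executable argument.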
def auto_chmod(func, arg, exc):
if func is os.remove and os.name == 'nt':
chmod(arg, stat.S_IWRITE)
return func(arg)
et, ev, _ = sys.exc_info()
reraise(et, (ev[0], ev[1] + (" %s %s" % (func, arg))))
def update_dist_caches(dist_path, fix_zipimporter_caches):
"""
Fix any globally cached `dist_path` related data
`dist_path` should be a path of a newly installed egg distribution (zipped
or unzipped).
sys.path_importer_cache contains finder objects that have been cached when
importing data from the original distribution. Any such finders need to be
cleared since the replacement distribution might be packaged differently,
e.g. a zipped egg distribution might get replaced with an unzipped egg
folder or vice versa. Having the old finders cached may then cause Python
to attempt loading modules from the replacement distribution using an
incorrect loader.
zipimport.zipimporter objects are Python loaders charged with importing
data packaged inside zip archives. If stale loaders referencing the
original distribution are left behind, they can fail to load modules from
the replacement distribution. E.g. if an old zipimport.zipimporter instance
is used to load data from a new zipped egg archive, it may cause the
operation to attempt to locate the requested data in the wrong location -
one indicated by the original distribution's zip archive directory
information. Such an operation may then fail outright, e.g. report having
read a 'bad local file header', or even worse, it may fail silently &
return invalid data.
zipimport._zip_directory_cache contains cached zip archive directory
information for all existing zipimport.zipimporter instances and all such
instances connected to the same archive share the same cached directory
information.
If asked, and the underlying Python implementation allows it, we can fix
all existing zipimport.zipimporter instances instead of having to track
them down and remove them one by one, by updating their shared cached zip
archive directory information. This, of course, assumes that the
replacement distribution is packaged as a zipped egg.
If not asked to fix existing zipimport.zipimporter instances, we still do
our best to clear any remaining zipimport.zipimporter related cached data
that might somehow later get used when attempting to load data from the new
distribution and thus cause such load operations to fail. Note that when
tracking down such remaining stale data, we can not catch every conceivable
usage from here, and we clear only those that we know of and have found to
cause problems if left alive. Any remaining caches should be updated by
whomever is in charge of maintaining them, i.e. they should be ready to
handle us replacing their zip archives with new distributions at runtime.
"""
# There are several other known sources of stale zipimport.zipimporter
# instances that we do not clear here, but might if ever given a reason to
# do so:
# * Global setuptools pkg_resources.working_set (a.k.a. 'master working
# set') may contain distributions which may in turn contain their
# zipimport.zipimporter loaders.
# * Several zipimport.zipimporter loaders held by local variables further
# up the function call stack when running the setuptools installation.
# * Already loaded modules may have their __loader__ attribute set to the
# exact loader instance used when importing them. Python 3.4 docs state
# that this information is intended mostly for introspection and so is
# not expected to cause us problems.
normalized_path = normalize_path(dist_path)
_uncache(normalized_path, sys.path_importer_cache)
if fix_zipimporter_caches:
_replace_zip_directory_cache_data(normalized_path)
else:
# Here, even though we do not want to fix existing and now stale
# zipimporter cache information, we still want to remove it. Related to
# Python's zip archive directory information cache, we clear each of
# its stale entries in two phases:
# 1. Clear the entry so attempting to access zip archive information
# via any existing stale zipimport.zipimporter instances fails.
# 2. Remove the entry from the cache so any newly constructed
# zipimport.zipimporter instances do not end up using old stale
# zip archive directory information.
# This whole stale data removal step does not seem strictly necessary,
# but has been left in because it was done before we started replacing
# the zip archive directory information cache content if possible, and
# there are no relevant unit tests that we can depend on to tell us if
# this is really needed.
_remove_and_clear_zip_directory_cache_data(normalized_path)
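# Usage sketch (added; the egg path below is hypothetical): after replacing an
# installed egg in place, install_egg calls something equivalent to
#     update_dist_caches('/site-packages/Foo-1.0-py2.7.egg',
#                        fix_zipimporter_caches=True)
# so that stale sys.path_importer_cache finders are dropped and, when the
# implementation allows it, zipimport._zip_directory_cache entries are
# refreshed for the new archive.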
def _collect_zipimporter_cache_entries(normalized_path, cache):
"""
Return zipimporter cache entry keys related to a given normalized path.
Alternative path spellings (e.g. those using different character case or
those using alternative path separators) related to the same path are
included. Any sub-path entries are included as well, i.e. those
corresponding to zip archives embedded in other zip archives.
"""
result = []
prefix_len = len(normalized_path)
for p in cache:
np = normalize_path(p)
if (np.startswith(normalized_path) and
np[prefix_len:prefix_len + 1] in (os.sep, '')):
result.append(p)
return result
def _update_zipimporter_cache(normalized_path, cache, updater=None):
"""
Update zipimporter cache data for a given normalized path.
Any sub-path entries are processed as well, i.e. those corresponding to zip
archives embedded in other zip archives.
The given updater is a callable taking a cache entry key and the original entry
(after the entry has already been removed from the cache), and is expected to
update the entry and possibly return a new one to be inserted in its place.
Returning None indicates that the entry should not be replaced with a new
one. If no updater is given, the cache entries are simply removed without
any additional processing, the same as if the updater simply returned None.
"""
for p in _collect_zipimporter_cache_entries(normalized_path, cache):
# N.B. pypy's custom zipimport._zip_directory_cache implementation does
# not support the complete dict interface:
# * Does not support item assignment, thus not allowing this function
# to be used only for removing existing cache entries.
# * Does not support the dict.pop() method, forcing us to use the
# get/del patterns instead. For more detailed information see the
# following links:
# https://bitbucket.org/pypa/setuptools/issue/202/more-robust-zipimporter-cache-invalidation#comment-10495960
# https://bitbucket.org/pypy/pypy/src/dd07756a34a41f674c0cacfbc8ae1d4cc9ea2ae4/pypy/module/zipimport/interp_zipimport.py#cl-99
old_entry = cache[p]
del cache[p]
new_entry = updater and updater(p, old_entry)
if new_entry is not None:
cache[p] = new_entry
def _uncache(normalized_path, cache):
_update_zipimporter_cache(normalized_path, cache)
def _remove_and_clear_zip_directory_cache_data(normalized_path):
def clear_and_remove_cached_zip_archive_directory_data(path, old_entry):
old_entry.clear()
_update_zipimporter_cache(
normalized_path, zipimport._zip_directory_cache,
updater=clear_and_remove_cached_zip_archive_directory_data)
# PyPy Python implementation does not allow directly writing to the
# zipimport._zip_directory_cache and so prevents us from attempting to correct
# its content. The best we can do there is clear the problematic cache content
# and have PyPy repopulate it as needed. The downside is that if there are any
# stale zipimport.zipimporter instances lying around, attempting to use them
# will fail due to not having their zip archive directory information available
# instead of being automatically corrected to use the new correct zip archive
# directory information.
if '__pypy__' in sys.builtin_module_names:
_replace_zip_directory_cache_data = \
_remove_and_clear_zip_directory_cache_data
else:
def _replace_zip_directory_cache_data(normalized_path):
def replace_cached_zip_archive_directory_data(path, old_entry):
# N.B. In theory, we could load the zip directory information just
# once for all updated path spellings, and then copy it locally and
# update its contained path strings to contain the correct
# spelling, but that seems like a way too invasive move (this cache
# structure is not officially documented anywhere and could in
# theory change with new Python releases) for no significant
# benefit.
old_entry.clear()
zipimport.zipimporter(path)
old_entry.update(zipimport._zip_directory_cache[path])
return old_entry
_update_zipimporter_cache(
normalized_path, zipimport._zip_directory_cache,
updater=replace_cached_zip_archive_directory_data)
def is_python(text, filename='<string>'):
"Is this string a valid Python script?"
try:
compile(text, filename, 'exec')
except (SyntaxError, TypeError):
return False
else:
return True
def is_sh(executable):
"""Determine if the specified executable is a .sh (contains a #! line)"""
try:
fp = open(executable)
magic = fp.read(2)
fp.close()
except (OSError, IOError):
return executable
return magic == '#!'
def nt_quote_arg(arg):
"""Quote a command line argument according to Windows parsing rules"""
result = []
needquote = False
nb = 0
needquote = (" " in arg) or ("\t" in arg)
if needquote:
result.append('"')
for c in arg:
if c == '\\':
nb += 1
elif c == '"':
# double preceding backslashes, then add a \"
result.append('\\' * (nb * 2) + '\\"')
nb = 0
else:
if nb:
result.append('\\' * nb)
nb = 0
result.append(c)
if nb:
result.append('\\' * nb)
if needquote:
result.append('\\' * nb) # double the trailing backslashes
result.append('"')
return ''.join(result)
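# Example (added for illustration): arguments containing whitespace are
# wrapped in double quotes and trailing backslashes are doubled so they do not
# escape the closing quote, e.g.
#     nt_quote_arg('C:\\Program Files\\python.exe')
#       ->  '"C:\\Program Files\\python.exe"'
# while an argument without spaces, tabs or quotes passes through unchanged.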
def is_python_script(script_text, filename):
"""Is this text, as a whole, a Python script? (as opposed to shell/bat/etc.
"""
if filename.endswith('.py') or filename.endswith('.pyw'):
return True # extension says it's Python
if is_python(script_text, filename):
return True # it's syntactically valid Python
if script_text.startswith('#!'):
# It begins with a '#!' line, so check if 'python' is in it somewhere
return 'python' in script_text.splitlines()[0].lower()
return False # Not any Python I can recognize
try:
from os import chmod as _chmod
except ImportError:
# Jython compatibility
def _chmod(*args):
pass
def chmod(path, mode):
log.debug("changing mode of %s to %o", path, mode)
try:
_chmod(path, mode)
except os.error:
e = sys.exc_info()[1]
log.debug("chmod failed: %s", e)
def fix_jython_executable(executable, options):
if sys.platform.startswith('java') and is_sh(executable):
# Workaround for Jython is not needed on Linux systems.
import java
if java.lang.System.getProperty("os.name") == "Linux":
return executable
# Workaround Jython's sys.executable being a .sh (an invalid
# shebang line interpreter)
if options:
# Can't apply the workaround, leave it broken
log.warn(
"WARNING: Unable to adapt shebang line for Jython,"
" the following script is NOT executable\n"
" see http://bugs.jython.org/issue1112 for"
" more information.")
else:
return '/usr/bin/env %s' % executable
return executable
class ScriptWriter(object):
"""
Encapsulates behavior around writing entry point scripts for console and
gui apps.
"""
template = textwrap.dedent("""
# EASY-INSTALL-ENTRY-SCRIPT: %(spec)r,%(group)r,%(name)r
__requires__ = %(spec)r
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point(%(spec)r, %(group)r, %(name)r)()
)
""").lstrip()
@classmethod
def get_script_args(cls, dist, executable=sys_executable, wininst=False):
"""
Yield write_script() argument tuples for a distribution's entrypoints
"""
gen_class = cls.get_writer(wininst)
spec = str(dist.as_requirement())
header = get_script_header("", executable, wininst)
for type_ in 'console', 'gui':
group = type_ + '_scripts'
for name, ep in dist.get_entry_map(group).items():
script_text = gen_class.template % locals()
for res in gen_class._get_script_args(type_, name, header,
script_text):
yield res
@classmethod
def get_writer(cls, force_windows):
if force_windows or sys.platform == 'win32':
return WindowsScriptWriter.get_writer()
return cls
@classmethod
def _get_script_args(cls, type_, name, header, script_text):
# Simply write the stub with no extension.
yield (name, header + script_text)
class WindowsScriptWriter(ScriptWriter):
@classmethod
def get_writer(cls):
"""
Get a script writer suitable for Windows
"""
writer_lookup = dict(
executable=WindowsExecutableLauncherWriter,
natural=cls,
)
# for compatibility, use the executable launcher by default
launcher = os.environ.get('SETUPTOOLS_LAUNCHER', 'executable')
return writer_lookup[launcher]
@classmethod
def _get_script_args(cls, type_, name, header, script_text):
"For Windows, add a .py extension"
ext = dict(console='.pya', gui='.pyw')[type_]
if ext not in os.environ['PATHEXT'].lower().split(';'):
warnings.warn("%s not listed in PATHEXT; scripts will not be "
"recognized as executables." % ext, UserWarning)
old = ['.pya', '.py', '-script.py', '.pyc', '.pyo', '.pyw', '.exe']
old.remove(ext)
header = cls._adjust_header(type_, header)
blockers = [name + x for x in old]
yield name + ext, header + script_text, 't', blockers
@staticmethod
def _adjust_header(type_, orig_header):
"""
Make sure 'pythonw' is used for gui and 'python' is used for
console (regardless of what sys.executable is).
"""
pattern = 'pythonw.exe'
repl = 'python.exe'
if type_ == 'gui':
pattern, repl = repl, pattern
pattern_ob = re.compile(re.escape(pattern), re.IGNORECASE)
new_header = pattern_ob.sub(string=orig_header, repl=repl)
clean_header = new_header[2:-1].strip('"')
if sys.platform == 'win32' and not os.path.exists(clean_header):
# the adjusted version doesn't exist, so return the original
return orig_header
return new_header
class WindowsExecutableLauncherWriter(WindowsScriptWriter):
@classmethod
def _get_script_args(cls, type_, name, header, script_text):
"""
For Windows, add a .py extension and an .exe launcher
"""
if type_ == 'gui':
launcher_type = 'gui'
ext = '-script.pyw'
old = ['.pyw']
else:
launcher_type = 'cli'
ext = '-script.py'
old = ['.py', '.pyc', '.pyo']
hdr = cls._adjust_header(type_, header)
blockers = [name + x for x in old]
yield (name + ext, hdr + script_text, 't', blockers)
yield (
name + '.exe', get_win_launcher(launcher_type),
'b' # write in binary mode
)
if not is_64bit():
# install a manifest for the launcher to prevent Windows
# from detecting it as an installer (which it will for
# launchers like easy_install.exe). Consider only
# adding a manifest for launchers detected as installers.
# See Distribute #143 for details.
m_name = name + '.exe.manifest'
yield (m_name, load_launcher_manifest(name), 't')
# for backward-compatibility
get_script_args = ScriptWriter.get_script_args
def get_win_launcher(type):
"""
Load the Windows launcher (executable) suitable for launching a script.
`type` should be either 'cli' or 'gui'
Returns the executable as a byte string.
"""
launcher_fn = '%s.exe' % type
if platform.machine().lower() == 'arm':
launcher_fn = launcher_fn.replace(".", "-arm.")
if is_64bit():
launcher_fn = launcher_fn.replace(".", "-64.")
else:
launcher_fn = launcher_fn.replace(".", "-32.")
return resource_string('setuptools', launcher_fn)
def load_launcher_manifest(name):
manifest = pkg_resources.resource_string(__name__, 'launcher manifest.xml')
if PY2:
return manifest % vars()
else:
return manifest.decode('utf-8') % vars()
def rmtree(path, ignore_errors=False, onerror=auto_chmod):
"""Recursively delete a directory tree.
This code is taken from the Python 2.4 version of 'shutil', because
the 2.3 version doesn't really work right.
"""
if ignore_errors:
def onerror(*args):
pass
elif onerror is None:
def onerror(*args):
raise
names = []
try:
names = os.listdir(path)
except os.error:
onerror(os.listdir, path, sys.exc_info())
for name in names:
fullname = os.path.join(path, name)
try:
mode = os.lstat(fullname).st_mode
except os.error:
mode = 0
if stat.S_ISDIR(mode):
rmtree(fullname, ignore_errors, onerror)
else:
try:
os.remove(fullname)
except os.error:
onerror(os.remove, fullname, sys.exc_info())
try:
os.rmdir(path)
except os.error:
onerror(os.rmdir, path, sys.exc_info())
def current_umask():
tmp = os.umask(0o022)
os.umask(tmp)
return tmp
def bootstrap():
# This function is called when setuptools*.egg is run using /bin/sh
import setuptools
argv0 = os.path.dirname(setuptools.__path__[0])
sys.argv[0] = argv0
sys.argv.append(argv0)
main()
def main(argv=None, **kw):
from setuptools import setup
from setuptools.dist import Distribution
import distutils.core
USAGE = """\
usage: %(script)s [options] requirement_or_url ...
or: %(script)s --help
"""
def gen_usage(script_name):
return USAGE % dict(
script=os.path.basename(script_name),
)
def with_ei_usage(f):
old_gen_usage = distutils.core.gen_usage
try:
distutils.core.gen_usage = gen_usage
return f()
finally:
distutils.core.gen_usage = old_gen_usage
class DistributionWithoutHelpCommands(Distribution):
common_usage = ""
def _show_help(self, *args, **kw):
with_ei_usage(lambda: Distribution._show_help(self, *args, **kw))
if argv is None:
argv = sys.argv[1:]
with_ei_usage(
lambda: setup(
script_args=['-q', 'easy_install', '-v'] + argv,
script_name=sys.argv[0] or 'easy_install',
distclass=DistributionWithoutHelpCommands, **kw
)
)
|
gpl-2.0
|
factorlibre/OCB
|
addons/edi/models/res_company.py
|
437
|
3186
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011-2012 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class res_company(osv.osv):
"""Helper subclass for res.company providing util methods for working with
companies in the context of EDI import/export. The res.company object
itself is not EDI-exportable"""
_inherit = "res.company"
def edi_export_address(self, cr, uid, company, edi_address_struct=None, context=None):
"""Returns a dict representation of the address of the company record, suitable for
inclusion in an EDI document, and matching the given edi_address_struct if provided.
The first found address is returned, in order of preference: invoice, contact, default.
:param browse_record company: company to export
:return: dict containing the address representation for the company record, or
an empty dict if no address can be found
"""
res_partner = self.pool.get('res.partner')
addresses = res_partner.address_get(cr, uid, [company.partner_id.id], ['default', 'contact', 'invoice'])
addr_id = addresses['invoice'] or addresses['contact'] or addresses['default']
result = {}
if addr_id:
address = res_partner.browse(cr, uid, addr_id, context=context)
result = res_partner.edi_export(cr, uid, [address], edi_struct=edi_address_struct, context=context)[0]
if company.logo:
result['logo'] = company.logo # already base64-encoded
if company.paypal_account:
result['paypal_account'] = company.paypal_account
# bank info: include only bank accounts that are supposed to be displayed in document footers
res_partner_bank = self.pool.get('res.partner.bank')
bank_ids = res_partner_bank.search(cr, uid, [('company_id','=',company.id),('footer','=',True)], context=context)
if bank_ids:
result['bank_ids'] = res_partner.edi_m2m(cr, uid,
res_partner_bank.browse(cr, uid, bank_ids, context=context),
context=context)
return result
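# Illustrative shape of the returned dict (added comment; the actual address
# keys come from res.partner's own edi_export, so the field names below are
# only an assumption):
#     {'name': 'My Company', 'street': '...', 'city': '...',
#      'logo': '<base64 image data>', 'paypal_account': 'paypal@example.com',
#      'bank_ids': [<edi m2m references to the footer bank accounts>]}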
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
buddly27/champollion
|
source/champollion/parser/js_data.py
|
1
|
2654
|
# :coding: utf-8
import re
import functools
from .helper import collapse_all
from .helper import get_docstring
#: Regular Expression pattern for data
_DATA_PATTERN = re.compile(
r"(?P<start_regex>(\n|^)) *(?P<export>export +)?(?P<default>default +)?"
r"(?P<type>(const|let|var)) (?P<name>[\w._-]+) *= *(?P<value>.+?;)",
re.DOTALL
)
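# Example (added for illustration): a line such as
#     export const DATA = 42;
# matches with export='export ', type='const', name='DATA' and value='42;';
# the trailing semi-colon is stripped again in fetch_environment below.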
def fetch_environment(content, module_id):
"""Return data environment dictionary from *content*.
*module_id* represent the identifier of the module.
The environment is in the form of::
{
"moduleName.DATA": {
"id": "moduleName.DATA",
"module_id": "moduleName",
"exported": False,
"default": False,
"name": "DATA",
"value": "42",
"type": "const",
"line_number": 2,
"description": "Variable doc.\\n\\nDetailed description."
}
}
"""
environment = {}
lines = content.split("\n")
# Comments are filtered out during the collapse process so that the entire
# value (including semi-colons and docstrings!) is preserved
content, collapsed_content = collapse_all(content, filter_comment=True)
for match in _DATA_PATTERN.finditer(content):
data_id = ".".join([module_id, match.group("name")])
line_number = (
content[:match.start()].count("\n") +
match.group("start_regex").count("\n") + 1
)
value = match.group("value")
if "{}" in value and line_number in collapsed_content.keys():
value = value.replace("{}", collapsed_content[line_number])
# Do not keep semi-colon in value
if value.endswith(";"):
value = value[:-1]
data_environment = {
"id": data_id,
"module_id": module_id,
"exported": match.group("export") is not None,
"default": match.group("default") is not None,
"name": match.group("name"),
"value": functools.reduce(_clean_value, value.split('\n')).strip(),
"type": match.group("type"),
"line_number": line_number,
"description": get_docstring(line_number, lines)
}
environment[data_id] = data_environment
return environment
def _clean_value(line1, line2):
"""Clean up variable value for display."""
_line1 = line1.strip()
_line2 = line2.strip()
# Add a trailing space to make the code easier to read
if _line1[-1:] in ["{", "}", "(", ")", "[", "]", ";", ","]:
_line1 += " "
return _line1 + _line2
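# Example (added for illustration): reducing a multi-line value such as
#     "{\n    key: 'value',\n}"
# with functools.reduce(_clean_value, value.split('\n')) yields
#     "{ key: 'value', }"
# i.e. lines are stripped and re-joined, keeping a space after braces,
# brackets, parentheses, semi-colons and commas.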
|
apache-2.0
|
ooici/marine-integrations
|
mi/dataset/driver/dofst_k/wfp/test/test_driver.py
|
1
|
24458
|
"""
@package mi.dataset.driver.dofst_k.wfp.test.test_driver
@file marine-integrations/mi/dataset/driver/dofst_k/wfp/driver.py
@author Emily Hahn
@brief Test cases for dofst_k_wfp driver
USAGE:
Make tests verbose and provide stdout
* From the IDK
$ bin/dsa/test_driver
$ bin/dsa/test_driver -i [-t testname]
$ bin/dsa/test_driver -q [-t testname]
"""
__author__ = 'Emily Hahn'
__license__ = 'Apache 2.0'
import unittest
from nose.plugins.attrib import attr
from pyon.agent.agent import ResourceAgentState
from interface.objects import ResourceAgentErrorEvent
from mi.core.log import get_logger ; log = get_logger()
from mi.idk.exceptions import SampleTimeout
from mi.idk.dataset.unit_test import DataSetTestCase
from mi.idk.dataset.unit_test import DataSetIntegrationTestCase
from mi.idk.dataset.unit_test import DataSetQualificationTestCase
from mi.dataset.dataset_driver import DriverParameter
from mi.dataset.dataset_driver import DataSourceConfigKey, DataSetDriverConfigKeys
from mi.dataset.driver.dofst_k.wfp.driver import DofstKWfpDataSetDriver, DataTypeKey
from mi.dataset.parser.dofst_k_wfp_particles import\
DataParticleType,\
DofstKWfpRecoveredDataParticle,\
DofstKWfpRecoveredMetadataParticle,\
DofstKWfpTelemeteredDataParticle,\
DofstKWfpTelemeteredMetadataParticle
from mi.dataset.parser.wfp_c_file_common import StateKey
TELEM_TEST_DIR = '/tmp/dsatest'
RECOV_TEST_DIR = '/tmp/dsatest2'
# Fill in driver details
DataSetTestCase.initialize(
driver_module='mi.dataset.driver.dofst_k.wfp.driver',
driver_class='DofstKWfpDataSetDriver',
agent_resource_id = '123xyz',
agent_name = 'Agent007',
agent_packet_config = DofstKWfpDataSetDriver.stream_config(),
startup_config = {
DataSourceConfigKey.RESOURCE_ID: 'dofst_k_wfp',
DataSourceConfigKey.HARVESTER:
{
DataTypeKey.DOFST_K_WFP_TELEMETERED:
{
DataSetDriverConfigKeys.DIRECTORY: TELEM_TEST_DIR,
DataSetDriverConfigKeys.PATTERN: 'C*.DAT',
DataSetDriverConfigKeys.FREQUENCY: 1,
},
DataTypeKey.DOFST_K_WFP_RECOVERED:
{
DataSetDriverConfigKeys.DIRECTORY: RECOV_TEST_DIR,
DataSetDriverConfigKeys.PATTERN: 'C*.DAT',
DataSetDriverConfigKeys.FREQUENCY: 1,
}
},
DataSourceConfigKey.PARSER: {
DataTypeKey.DOFST_K_WFP_TELEMETERED: {},
DataTypeKey.DOFST_K_WFP_RECOVERED: {}
}
}
)
RECOV_PARTICLES = (DofstKWfpRecoveredDataParticle, DofstKWfpRecoveredMetadataParticle)
TELEM_PARTICLES = (DofstKWfpTelemeteredDataParticle, DofstKWfpTelemeteredMetadataParticle)
###############################################################################
# INTEGRATION TESTS #
# Device specific integration tests are for #
# testing device specific capabilities #
###############################################################################
@attr('INT', group='mi')
class IntegrationTest(DataSetIntegrationTestCase):
def test_get(self):
"""
Test that we can get data from files. Verify that the driver
sampling can be started and stopped
"""
# Start sampling and watch for an exception
self.driver.start_sampling()
self.clear_async_data()
self.create_sample_data_set_dir('first.DAT', TELEM_TEST_DIR, "C0000001.DAT")
self.assert_data(TELEM_PARTICLES, 'tel_first.result.yml', count=4, timeout=10)
self.clear_async_data()
self.create_sample_data_set_dir('second.DAT', TELEM_TEST_DIR, "C0000002.DAT")
self.assert_data(TELEM_PARTICLES, 'tel_second.result.yml', count=7, timeout=10)
self.clear_async_data()
self.create_sample_data_set_dir('first.DAT', RECOV_TEST_DIR, "C0000001.DAT")
self.assert_data(RECOV_PARTICLES, 'rec_first.result.yml', count=4, timeout=10)
self.clear_async_data()
self.create_sample_data_set_dir('second.DAT', RECOV_TEST_DIR, "C0000002.DAT")
self.assert_data(RECOV_PARTICLES, 'rec_second.result.yml', count=7, timeout=10)
def test_mid_state(self):
"""
Test the ability to start sampling with a state in the middle of a file.
Since recovered and telemetered are on the same stream, there is no way to know which
order samples will arrive in, so test telemetered first, then recovered.
"""
path_1 = self.create_sample_data_set_dir('first.DAT', TELEM_TEST_DIR, "C0000001.DAT")
path_2 = self.create_sample_data_set_dir('second.DAT', TELEM_TEST_DIR, "C0000002.DAT")
# Create and store the new driver state
state = {
DataTypeKey.DOFST_K_WFP_TELEMETERED: {
'C0000001.DAT': self.get_file_state(path_1, True, 33),
'C0000002.DAT': self.get_file_state(path_2, False, 33)
},
DataTypeKey.DOFST_K_WFP_RECOVERED: {
'C0000001.DAT': self.get_file_state(path_1, True, 33),
'C0000002.DAT': self.get_file_state(path_2, False, 33)
}
}
# only the position field in the parser state is initialized in get_file_state, need to add the other state fields
state[DataTypeKey.DOFST_K_WFP_TELEMETERED]['C0000001.DAT']['parser_state'][StateKey.RECORDS_READ] = 3
state[DataTypeKey.DOFST_K_WFP_TELEMETERED]['C0000001.DAT']['parser_state'][StateKey.METADATA_SENT] = True
state[DataTypeKey.DOFST_K_WFP_TELEMETERED]['C0000002.DAT']['parser_state'][StateKey.RECORDS_READ] = 3
state[DataTypeKey.DOFST_K_WFP_TELEMETERED]['C0000002.DAT']['parser_state'][StateKey.METADATA_SENT] = True
state[DataTypeKey.DOFST_K_WFP_RECOVERED]['C0000001.DAT']['parser_state'][StateKey.RECORDS_READ] = 3
state[DataTypeKey.DOFST_K_WFP_RECOVERED]['C0000001.DAT']['parser_state'][StateKey.METADATA_SENT] = True
state[DataTypeKey.DOFST_K_WFP_RECOVERED]['C0000002.DAT']['parser_state'][StateKey.RECORDS_READ] = 3
state[DataTypeKey.DOFST_K_WFP_RECOVERED]['C0000002.DAT']['parser_state'][StateKey.METADATA_SENT] = True
self.driver = self._get_driver_object(memento=state)
# create some data to parse
self.clear_async_data()
self.driver.start_sampling()
# verify data is produced
self.assert_data(TELEM_PARTICLES, 'tel_partial_second.result.yml', count=3, timeout=10)
# now create the recovered files
path_3 = self.create_sample_data_set_dir('first.DAT', RECOV_TEST_DIR, "C0000001.DAT")
path_4 = self.create_sample_data_set_dir('second.DAT', RECOV_TEST_DIR, "C0000002.DAT")
# the starting state for recovered is the same as telemetered, so do the same compare
# again for recovered
self.assert_data(RECOV_PARTICLES, 'rec_partial_second.result.yml', count=3, timeout=10)
def test_ingest_order(self):
"""
Test the ability to stop and restart sampling, ingesting files in the
correct order
"""
# create some data to parse
self.clear_async_data()
self.driver.start_sampling()
self.create_sample_data_set_dir('first.DAT', TELEM_TEST_DIR, "C0000001.DAT")
self.create_sample_data_set_dir('second.DAT', TELEM_TEST_DIR, "C0000002.DAT")
self.assert_data(TELEM_PARTICLES, 'tel_first.result.yml', count=4, timeout=10)
self.assert_file_ingested("C0000001.DAT", DataTypeKey.DOFST_K_WFP_TELEMETERED)
self.driver.stop_sampling()
self.driver.start_sampling()
self.assert_data(TELEM_PARTICLES, 'tel_second.result.yml', count=7, timeout=10)
self.assert_file_ingested("C0000002.DAT", DataTypeKey.DOFST_K_WFP_TELEMETERED)
# now check the same thing for recovered (all the files are not created at the
# start because there is no guarantee what order telemetered or recovered
# particles will be found because they are on the same stream)
self.create_sample_data_set_dir('first.DAT', RECOV_TEST_DIR, "C0000001.DAT")
self.create_sample_data_set_dir('second.DAT', RECOV_TEST_DIR, "C0000002.DAT")
self.assert_data(RECOV_PARTICLES, 'rec_first.result.yml', count=4, timeout=10)
self.assert_file_ingested("C0000001.DAT", DataTypeKey.DOFST_K_WFP_RECOVERED)
self.driver.stop_sampling()
self.driver.start_sampling()
self.assert_data(RECOV_PARTICLES, 'rec_second.result.yml', count=7, timeout=10)
self.assert_file_ingested("C0000002.DAT", DataTypeKey.DOFST_K_WFP_RECOVERED)
def test_sample_exception_empty_telem(self):
"""
Test a case that should produce a sample exception and confirm the
sample exception occurs. In this case an empty file will produce a sample exception.
"""
self.clear_async_data()
config = self._driver_config()['startup_config']['harvester'][DataTypeKey.DOFST_K_WFP_TELEMETERED]['pattern']
filename = config.replace("*", "foo")
self.create_sample_data_set_dir(filename, TELEM_TEST_DIR)
# Start sampling and watch for an exception
self.driver.start_sampling()
# an event catches the sample exception
self.assert_event('ResourceAgentErrorEvent')
self.assert_file_ingested(filename, DataTypeKey.DOFST_K_WFP_TELEMETERED)
def test_sample_exception_empty_recov(self):
"""
Test a case that should produce a sample exception and confirm the
sample exception occurs. In this case an empty file will produce a sample exception.
"""
self.clear_async_data()
config = self._driver_config()['startup_config']['harvester'][DataTypeKey.DOFST_K_WFP_RECOVERED]['pattern']
filename = config.replace("*", "foo")
self.create_sample_data_set_dir(filename, RECOV_TEST_DIR)
# Start sampling and watch for an exception
self.driver.start_sampling()
# an event catches the sample exception
self.assert_event('ResourceAgentErrorEvent')
self.assert_file_ingested(filename, DataTypeKey.DOFST_K_WFP_RECOVERED)
def test_sample_exception_num_samples(self):
"""
Test a case that should produce a sample exception and confirm the
sample exception occurs. In this case a file with a bad number of samples will produce a sample exception.
"""
self.clear_async_data()
self.create_sample_data_set_dir('bad_num_samples.DAT', TELEM_TEST_DIR, 'C0000001.DAT')
# Start sampling and watch for an exception
self.driver.start_sampling()
# an event catches the sample exception
self.assert_event('ResourceAgentErrorEvent')
self.assert_file_ingested('C0000001.DAT', DataTypeKey.DOFST_K_WFP_TELEMETERED)
# same test for recovered
self.clear_async_data()
self.create_sample_data_set_dir('bad_num_samples.DAT', RECOV_TEST_DIR, 'C0000001.DAT')
self.assert_event('ResourceAgentErrorEvent')
self.assert_file_ingested('C0000001.DAT', DataTypeKey.DOFST_K_WFP_RECOVERED)
def test_timestamp_only(self):
"""
Test a file containing only a timestamp record and confirm the expected
particle is produced and the file is ingested.
"""
self.clear_async_data()
self.create_sample_data_set_dir('ts_only.DAT', TELEM_TEST_DIR, 'C0000001.DAT')
# Start sampling and watch for an exception
self.driver.start_sampling()
self.assert_data(TELEM_PARTICLES, 'tel_ts_only.result.yml', count=1, timeout=10)
self.assert_file_ingested('C0000001.DAT', DataTypeKey.DOFST_K_WFP_TELEMETERED)
# same test for recovered
self.create_sample_data_set_dir('ts_only.DAT', RECOV_TEST_DIR, 'C0000001.DAT')
self.assert_data(RECOV_PARTICLES, 'rec_ts_only.result.yml', count=1, timeout=10)
self.assert_file_ingested('C0000001.DAT', DataTypeKey.DOFST_K_WFP_RECOVERED)
###############################################################################
# QUALIFICATION TESTS #
# Device specific qualification tests are for #
# testing device specific capabilities #
###############################################################################
@attr('QUAL', group='mi')
class QualificationTest(DataSetQualificationTestCase):
def assert_all_queue_empty(self):
"""
Assert the sample queues for all 4 data streams are empty
"""
self.assert_sample_queue_size(DataParticleType.TELEMETERED_METADATA, 0)
self.assert_sample_queue_size(DataParticleType.TELEMETERED_DATA, 0)
self.assert_sample_queue_size(DataParticleType.RECOVERED_METADATA, 0)
self.assert_sample_queue_size(DataParticleType.RECOVERED_DATA, 0)
def test_publish_path(self):
"""
Setup an agent/driver/harvester/parser and verify that data is
published out the agent
"""
self.create_sample_data_set_dir('first.DAT', TELEM_TEST_DIR, 'C0000001.DAT')
self.assert_initialize()
# Verify we get one sample
try:
result = self.data_subscribers.get_samples(DataParticleType.TELEMETERED_METADATA, 1)
log.debug("First RESULT: %s", result)
result_2 = self.data_subscribers.get_samples(DataParticleType.TELEMETERED_DATA, 3)
log.debug("Second RESULT: %s", result_2)
result.extend(result_2)
log.debug("Extended RESULT: %s", result)
# Verify values
self.assert_data_values(result, 'tel_first.result.yml')
except Exception as e:
log.error("Exception trapped: %s", e)
self.fail("Sample timeout.")
# again for recovered
self.create_sample_data_set_dir('first.DAT', RECOV_TEST_DIR, 'C0000001.DAT')
# Verify we get one sample
try:
result = self.data_subscribers.get_samples(DataParticleType.RECOVERED_METADATA, 1)
log.debug("First RESULT: %s", result)
result_2 = self.data_subscribers.get_samples(DataParticleType.RECOVERED_DATA, 3)
log.debug("Second RESULT: %s", result_2)
result.extend(result_2)
log.debug("Extended RESULT: %s", result)
# Verify values
self.assert_data_values(result, 'rec_first.result.yml')
except Exception as e:
log.error("Exception trapped: %s", e)
self.fail("Sample timeout.")
def test_large_import(self):
"""
Test importing a large number of samples from the file at once
"""
self.create_sample_data_set_dir('C0000038.DAT', TELEM_TEST_DIR)
self.assert_initialize()
# get results for each of the data particle streams
result1 = self.data_subscribers.get_samples(DataParticleType.TELEMETERED_METADATA,1,10)
result2 = self.data_subscribers.get_samples(DataParticleType.TELEMETERED_DATA,270,40)
# again for recovered
self.create_sample_data_set_dir('C0000038.DAT', RECOV_TEST_DIR)
# get results for each of the data particle streams
result1 = self.data_subscribers.get_samples(DataParticleType.RECOVERED_METADATA,1,10)
result2 = self.data_subscribers.get_samples(DataParticleType.RECOVERED_DATA,270,40)
def test_stop_start(self):
"""
Test the agent's ability to start data flowing, stop, then restart
at the correct spot.
"""
log.info("CONFIG: %s", self._agent_config())
self.create_sample_data_set_dir('first.DAT', TELEM_TEST_DIR, "C0000001.DAT")
self.assert_initialize(final_state=ResourceAgentState.COMMAND)
# Slow down processing to 1 per second to give us time to stop
self.dataset_agent_client.set_resource({DriverParameter.RECORDS_PER_SECOND: 1})
self.assert_start_sampling()
try:
# Read the first file and verify the data
result = self.data_subscribers.get_samples(DataParticleType.TELEMETERED_METADATA)
result2 = self.data_subscribers.get_samples(DataParticleType.TELEMETERED_DATA, 3)
result.extend(result2)
log.debug("RESULT: %s", result)
# Verify values
self.assert_data_values(result, 'tel_first.result.yml')
self.assert_all_queue_empty()
# stop sampling between telemetered
self.create_sample_data_set_dir('second.DAT', TELEM_TEST_DIR, "C0000002.DAT")
# Now read the first three records (1 metadata, 2 data) of the second file then stop
result = self.data_subscribers.get_samples(DataParticleType.TELEMETERED_METADATA)
result2 = self.data_subscribers.get_samples(DataParticleType.TELEMETERED_DATA, 2)
result.extend(result2)
log.debug("got result 1 %s", result)
self.assert_stop_sampling()
self.assert_all_queue_empty()
# Restart sampling and ensure we get the last 4 records of the file
self.assert_start_sampling()
result3 = self.data_subscribers.get_samples(DataParticleType.TELEMETERED_DATA, 4)
log.debug("got result 2 %s", result3)
result.extend(result3)
self.assert_data_values(result, 'tel_second.result.yml')
self.assert_all_queue_empty()
# stop sampling between recovered
self.create_sample_data_set_dir('second.DAT', RECOV_TEST_DIR, "C0000002.DAT")
# Now read the first three records (1 metadata, 2 data) of the second file then stop
result = self.data_subscribers.get_samples(DataParticleType.RECOVERED_METADATA)
result2 = self.data_subscribers.get_samples(DataParticleType.RECOVERED_DATA, 2)
result.extend(result2)
log.debug("got result 1 %s", result)
self.assert_stop_sampling()
self.assert_all_queue_empty()
# Restart sampling and ensure we get the last 4 records of the file
self.assert_start_sampling()
result3 = self.data_subscribers.get_samples(DataParticleType.RECOVERED_DATA, 4)
log.debug("got result 2 %s", result3)
result.extend(result3)
self.assert_data_values(result, 'rec_second.result.yml')
self.assert_all_queue_empty()
except SampleTimeout as e:
log.error("Exception trapped: %s", e, exc_info=True)
self.fail("Sample timeout.")
def test_shutdown_restart(self):
"""
Test a full stop of the dataset agent, then restart the agent
and confirm it restarts at the correct spot.
"""
log.info("CONFIG: %s", self._agent_config())
self.create_sample_data_set_dir('first.DAT', TELEM_TEST_DIR, "C0000001.DAT")
self.assert_initialize(final_state=ResourceAgentState.COMMAND)
# Slow down processing to 1 per second to give us time to stop
self.dataset_agent_client.set_resource({DriverParameter.RECORDS_PER_SECOND: 1})
self.assert_start_sampling()
try:
# Read the first file and verify the data
result = self.data_subscribers.get_samples(DataParticleType.TELEMETERED_METADATA)
result2 = self.data_subscribers.get_samples(DataParticleType.TELEMETERED_DATA, 3)
result.extend(result2)
log.debug("RESULT: %s", result)
# Verify values
self.assert_data_values(result, 'tel_first.result.yml')
self.assert_all_queue_empty()
# stop the dataset agent between telemetered
self.create_sample_data_set_dir('second.DAT', TELEM_TEST_DIR, "C0000002.DAT")
# Now read the first three records (1 metadata, 2 data) of the second file then stop
result = self.data_subscribers.get_samples(DataParticleType.TELEMETERED_METADATA)
result2 = self.data_subscribers.get_samples(DataParticleType.TELEMETERED_DATA, 2)
result.extend(result2)
log.debug("got result 1 %s", result)
self.assert_stop_sampling()
self.assert_all_queue_empty()
# stop the agent
self.stop_dataset_agent_client()
# re-start the agent
self.init_dataset_agent_client()
#re-initialize
self.assert_initialize(final_state=ResourceAgentState.COMMAND)
# Slow down processing to 1 per second to give us time to stop again
self.dataset_agent_client.set_resource({DriverParameter.RECORDS_PER_SECOND: 1})
# Restart sampling and ensure we get the last 4 records of the file
self.assert_start_sampling()
result3 = self.data_subscribers.get_samples(DataParticleType.TELEMETERED_DATA, 4)
log.debug("got result 2 %s", result3)
result.extend(result3)
self.assert_data_values(result, 'tel_second.result.yml')
self.assert_all_queue_empty()
self.create_sample_data_set_dir('second.DAT', RECOV_TEST_DIR, "C0000002.DAT")
# Now read the first three records (1 metadata, 2 data) of the second file then stop
result = self.data_subscribers.get_samples(DataParticleType.RECOVERED_METADATA)
result2 = self.data_subscribers.get_samples(DataParticleType.RECOVERED_DATA, 2)
result.extend(result2)
log.debug("got result 1 %s", result)
self.assert_stop_sampling()
self.assert_all_queue_empty()
# stop the agent
self.stop_dataset_agent_client()
# re-start the agent
self.init_dataset_agent_client()
#re-initialize
self.assert_initialize(final_state=ResourceAgentState.COMMAND)
# Restart sampling and ensure we get the last 4 records of the file
self.assert_start_sampling()
result3 = self.data_subscribers.get_samples(DataParticleType.RECOVERED_DATA, 4)
log.debug("got result 2 %s", result3)
result.extend(result3)
self.assert_data_values(result, 'rec_second.result.yml')
self.assert_all_queue_empty()
except SampleTimeout as e:
log.error("Exception trapped: %s", e, exc_info=True)
self.fail("Sample timeout.")
def test_parser_exception(self):
"""
        Test that an exception is raised during record parsing after the
        driver is started.
"""
# cause the error for telemetered
self.create_sample_data_set_dir('bad_num_samples.DAT', TELEM_TEST_DIR, 'C0000001.DAT')
self.create_sample_data_set_dir('first.DAT', TELEM_TEST_DIR, 'C0000002.DAT')
self.assert_initialize()
self.event_subscribers.clear_events()
result = self.data_subscribers.get_samples(DataParticleType.TELEMETERED_METADATA)
result1 = self.data_subscribers.get_samples(DataParticleType.TELEMETERED_DATA, 3)
result.extend(result1)
self.assert_data_values(result, 'tel_first.result.yml')
        self.assert_all_queue_empty()
# Verify an event was raised and we are in our retry state
self.assert_event_received(ResourceAgentErrorEvent, 10)
self.assert_state_change(ResourceAgentState.STREAMING, 10)
# cause the same error for recovered
self.event_subscribers.clear_events()
self.create_sample_data_set_dir('bad_num_samples.DAT', RECOV_TEST_DIR, 'C0000001.DAT')
self.create_sample_data_set_dir('first.DAT', RECOV_TEST_DIR, 'C0000002.DAT')
result = self.data_subscribers.get_samples(DataParticleType.RECOVERED_METADATA)
result1 = self.data_subscribers.get_samples(DataParticleType.RECOVERED_DATA, 3)
result.extend(result1)
self.assert_data_values(result, 'rec_first.result.yml')
        self.assert_all_queue_empty()
# Verify an event was raised and we are in our retry state
self.assert_event_received(ResourceAgentErrorEvent, 10)
self.assert_state_change(ResourceAgentState.STREAMING, 10)
|
bsd-2-clause
|
henjo/pyoa
|
oaTools/oaExUtl22.py
|
1
|
3804
|
#
#********************************************************************
#* Copyright (C) 2004 LSI Logic Corporation. *
#* All Rights Reserved. *
#********************************************************************
#
# Sample utility functions for creating and opening OpenAccess libraries and designs
import openaccess22
from openaccess22 import *
#-----------------------------------------------
def CreateLibrary(libname,libpath):
ns=oaNativeNS()
slibname=oaScalarName(ns,libname)
    # set mode to append
print "Setup lib.defs"
oas=oaString("lib.defs")
liblist=oaLibDefList.static_get(oas,'a')
print "CreateLibrary: %s"%libname
found=0
# We only check top level members for the entry
for libdefmem in liblist.getMembersIter():
if (libdefmem.getType().oaTypeEnum()!=oacLibDefType): continue
sname=oaScalarName()
libdefmem.getLibName(sname)
sname.get(ns,oas)
name=str(oas)
print " lib.defs Entry:",name
if (name==libname):
print " Found in lib.defs"
ostr=oaString()
libdefmem.getLibPath(ostr)
libpath=str(ostr)
found=1
break
if (not found):
print " Adding to lib.defs"
oaLibDef.static_create(liblist,slibname,libpath)
print " Save Lib List"
liblist.save()
liblist.destroy()
print " Using LibPath: %s"%libpath
if (oaLib.static_exists(libpath)):
print " Openned Existing Library"
lib=oaLib.static_open(slibname,libpath)
return lib
print " Creating Library"
lib=oaLib.static_create(slibname,libpath,
oaLibMode(oacSharedLibMode), # mode
"oaDMTurbo"
# "oaDMFileSys" # mode
)
return lib
#-----------------------------------------------
def OpenLibrary(libname):
ns=oaNativeNS()
slibname=oaScalarName(ns,libname)
    # set mode to read
print "Setup lib.defs"
oas=oaString("lib.defs")
liblist=oaLibDefList.static_get(oas,'r')
print "OpenLibrary: %s"%libname
found=0
# We only check top level members for the entry
for libdefmem in liblist.getMembersIter():
if (libdefmem.getType().oaTypeEnum()!=oacLibDefType): continue
sname=oaScalarName()
libdefmem.getLibName(sname)
sname.get(ns,oas)
name=str(oas)
print " lib.defs Entry:",name
if (name==libname):
print " Found in lib.defs"
ostr=oaString()
libdefmem.getLibPath(ostr)
libpath=str(ostr)
found=1
break
liblist.destroy()
if (not found):
print "Library Not found:",libname
return None
print " Using LibPath: %s"%libpath
print " Openned Existing Library"
lib=oaLib.static_open(slibname,libpath)
return lib
#-----------------------------------------------
def CreateDesign(libname,cellname,viewname):
print "Creating Design: %s/%s/%s"%(libname,cellname,viewname)
ns=oaNativeNS()
libName=oaScalarName(ns,libname)
cellName=oaScalarName(ns,cellname)
viewName=oaScalarName(ns,viewname)
vt=oaViewType.static_get(oaReservedViewType(oacMaskLayout))
design=oaDesign.static_open(libName,cellName,viewName,vt,'w')
return design
#-----------------------------------------------
def OpenDesign(libname,cellname,viewname,mode='r'):
print "Creating Design: %s/%s/%s"%(libname,cellname,viewname)
ns=oaNativeNS()
libName=oaScalarName(ns,libname)
cellName=oaScalarName(ns,cellname)
viewName=oaScalarName(ns,viewname)
vt=oaViewType.static_get(oaReservedViewType(oacMaskLayout))
design=oaDesign.static_open(libName,cellName,viewName,vt,mode)
return design
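#-----------------------------------------------
# Illustrative sketch only -- not part of the original utility. It shows how
# the helpers above might be chained; the library, cell, and view names below
# are hypothetical.
def ExampleUsage():
    lib = CreateLibrary("exampleLib", "./exampleLib")
    design = CreateDesign("exampleLib", "exampleCell", "layout")
    # ... populate the design with the usual oaDesign/oaBlock calls here ...
    return lib, design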
|
apache-2.0
|
mediapop/django-spreedly
|
tests/run_tasks.py
|
1
|
2567
|
#!/usr/bin/env python
import os
import sys
from argparse import ArgumentParser
from path import path
from django.conf import settings
from django.core.management import call_command
from site_conf import DICT_CONF
def conf():
settings.configure(**DICT_CONF)
def run_tests():
"""
    Set up the environment to run the tests standalone,
    using an in-memory sqlite3 db.
"""
argument_parser = ArgumentParser(
description="Run all tests for django-spreedly")
#TODO add some configuration here
settings.configure(**DICT_CONF)
call_command("test", 'spreedly.TestViewsExist')
sys.exit(0)
def syncdb():
call_command("syncdb")
def schema_migrate(auto=True, empty=False, update=False):
call_command("schemamigration", "spreedly", auto=auto, empty=empty,
update=update)
def data_migrate(args):
call_command("datamigration", "spreedly", *args)
def migrate():
call_command("migrate", "spreedly")
def sql():
call_command("sql", "spreedly")
if __name__ == "__main__":
sys.path.append(path(__file__).abspath().dirname().dirname())
parser = ArgumentParser(description="run tasks for django-spreedly")
parser.add_argument('--test', action='store_true', default=False,
help="Run the test for django-spreedly")
parser.add_argument('--datamigration', action='append',
help="run datamigration")
parser.add_argument('--sql', action='store_true',
help="run sql")
schemamigration = parser.add_argument_group('migrate')
schemamigration.add_argument('--schemamigration', action='store_true',
default=False,
help="preform a schema migration")
schema_migrate_opts = schemamigration.add_argument_group()
schema_migrate_opts.add_argument('--auto', action='store_true')
schema_migrate_opts.add_argument('--empty', action='store_true')
schema_migrate_opts.add_argument('--update', action='store_true')
args = parser.parse_args()
if args.test:
run_tests()
elif args.schemamigration:
conf()
syncdb()
migrate()
schema_migrate(auto=args.auto, empty=args.empty, update=args.update)
elif args.datamigration:
conf()
syncdb()
migrate()
data_migrate(args.datamigration)
elif args.sql:
conf()
syncdb()
migrate()
sql()
else:
conf()
syncdb()
migrate()
call_command("shell")
|
mit
|
bubenkoff/pytest
|
_pytest/terminal.py
|
3
|
19986
|
""" terminal reporting of the full testing process.
This is a good source for looking at the various reporting hooks.
"""
import pytest
import pluggy
import py
import sys
import time
import platform
def pytest_addoption(parser):
group = parser.getgroup("terminal reporting", "reporting", after="general")
group._addoption('-v', '--verbose', action="count",
dest="verbose", default=0, help="increase verbosity."),
group._addoption('-q', '--quiet', action="count",
dest="quiet", default=0, help="decrease verbosity."),
group._addoption('-r',
action="store", dest="reportchars", default=None, metavar="chars",
help="show extra test summary info as specified by chars (f)ailed, "
"(E)error, (s)skipped, (x)failed, (X)passed (w)warnings.")
group._addoption('-l', '--showlocals',
action="store_true", dest="showlocals", default=False,
help="show locals in tracebacks (disabled by default).")
group._addoption('--report',
action="store", dest="report", default=None, metavar="opts",
help="(deprecated, use -r)")
group._addoption('--tb', metavar="style",
action="store", dest="tbstyle", default='auto',
choices=['auto', 'long', 'short', 'no', 'line', 'native'],
help="traceback print mode (auto/long/short/line/native/no).")
group._addoption('--fulltrace', '--full-trace',
action="store_true", default=False,
help="don't cut any tracebacks (default is to cut).")
group._addoption('--color', metavar="color",
action="store", dest="color", default='auto',
choices=['yes', 'no', 'auto'],
help="color terminal output (yes/no/auto).")
def pytest_configure(config):
config.option.verbose -= config.option.quiet
reporter = TerminalReporter(config, sys.stdout)
config.pluginmanager.register(reporter, 'terminalreporter')
if config.option.debug or config.option.traceconfig:
def mywriter(tags, args):
msg = " ".join(map(str, args))
reporter.write_line("[traceconfig] " + msg)
config.trace.root.setprocessor("pytest:config", mywriter)
def getreportopt(config):
reportopts = ""
optvalue = config.option.report
if optvalue:
py.builtin.print_("DEPRECATED: use -r instead of --report option.",
file=sys.stderr)
if optvalue:
for setting in optvalue.split(","):
setting = setting.strip()
if setting == "skipped":
reportopts += "s"
elif setting == "xfailed":
reportopts += "x"
reportchars = config.option.reportchars
if reportchars:
for char in reportchars:
if char not in reportopts:
reportopts += char
return reportopts
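# Illustrative example (not in the original source): with the deprecated
# "--report skipped,xfailed" plus "-rf" on the command line, getreportopt()
# returns "sxf" -- the --report settings are translated to their one-letter
# forms first, then any -r characters not already present are appended.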
def pytest_report_teststatus(report):
if report.passed:
letter = "."
elif report.skipped:
letter = "s"
elif report.failed:
letter = "F"
if report.when != "call":
letter = "f"
return report.outcome, letter, report.outcome.upper()
class WarningReport:
def __init__(self, code, message, nodeid=None, fslocation=None):
self.code = code
self.message = message
self.nodeid = nodeid
self.fslocation = fslocation
class TerminalReporter:
def __init__(self, config, file=None):
self.config = config
self.verbosity = self.config.option.verbose
self.showheader = self.verbosity >= 0
self.showfspath = self.verbosity >= 0
self.showlongtestinfo = self.verbosity > 0
self._numcollected = 0
self.stats = {}
self.startdir = py.path.local()
if file is None:
file = sys.stdout
self._tw = self.writer = py.io.TerminalWriter(file)
if self.config.option.color == 'yes':
self._tw.hasmarkup = True
if self.config.option.color == 'no':
self._tw.hasmarkup = False
self.currentfspath = None
self.reportchars = getreportopt(config)
self.hasmarkup = self._tw.hasmarkup
def hasopt(self, char):
char = {'xfailed': 'x', 'skipped': 's'}.get(char, char)
return char in self.reportchars
def write_fspath_result(self, nodeid, res):
fspath = self.config.rootdir.join(nodeid.split("::")[0])
if fspath != self.currentfspath:
self.currentfspath = fspath
fspath = self.startdir.bestrelpath(fspath)
self._tw.line()
self._tw.write(fspath + " ")
self._tw.write(res)
def write_ensure_prefix(self, prefix, extra="", **kwargs):
if self.currentfspath != prefix:
self._tw.line()
self.currentfspath = prefix
self._tw.write(prefix)
if extra:
self._tw.write(extra, **kwargs)
self.currentfspath = -2
def ensure_newline(self):
if self.currentfspath:
self._tw.line()
self.currentfspath = None
def write(self, content, **markup):
self._tw.write(content, **markup)
def write_line(self, line, **markup):
if not py.builtin._istext(line):
line = py.builtin.text(line, errors="replace")
self.ensure_newline()
self._tw.line(line, **markup)
def rewrite(self, line, **markup):
line = str(line)
self._tw.write("\r" + line, **markup)
def write_sep(self, sep, title=None, **markup):
self.ensure_newline()
self._tw.sep(sep, title, **markup)
def section(self, title, sep="=", **kw):
self._tw.sep(sep, title, **kw)
def line(self, msg, **kw):
self._tw.line(msg, **kw)
def pytest_internalerror(self, excrepr):
for line in py.builtin.text(excrepr).split("\n"):
self.write_line("INTERNALERROR> " + line)
return 1
def pytest_logwarning(self, code, fslocation, message, nodeid):
warnings = self.stats.setdefault("warnings", [])
if isinstance(fslocation, tuple):
fslocation = "%s:%d" % fslocation
warning = WarningReport(code=code, fslocation=fslocation,
message=message, nodeid=nodeid)
warnings.append(warning)
def pytest_plugin_registered(self, plugin):
if self.config.option.traceconfig:
msg = "PLUGIN registered: %s" % (plugin,)
# XXX this event may happen during setup/teardown time
# which unfortunately captures our output here
# which garbles our output if we use self.write_line
self.write_line(msg)
def pytest_deselected(self, items):
self.stats.setdefault('deselected', []).extend(items)
def pytest_runtest_logstart(self, nodeid, location):
# ensure that the path is printed before the
# 1st test of a module starts running
if self.showlongtestinfo:
line = self._locationline(nodeid, *location)
self.write_ensure_prefix(line, "")
elif self.showfspath:
fsid = nodeid.split("::")[0]
self.write_fspath_result(fsid, "")
def pytest_runtest_logreport(self, report):
rep = report
res = self.config.hook.pytest_report_teststatus(report=rep)
cat, letter, word = res
self.stats.setdefault(cat, []).append(rep)
self._tests_ran = True
if not letter and not word:
# probably passed setup/teardown
return
if self.verbosity <= 0:
if not hasattr(rep, 'node') and self.showfspath:
self.write_fspath_result(rep.nodeid, letter)
else:
self._tw.write(letter)
else:
if isinstance(word, tuple):
word, markup = word
else:
if rep.passed:
markup = {'green':True}
elif rep.failed:
markup = {'red':True}
elif rep.skipped:
markup = {'yellow':True}
line = self._locationline(rep.nodeid, *rep.location)
if not hasattr(rep, 'node'):
self.write_ensure_prefix(line, word, **markup)
#self._tw.write(word, **markup)
else:
self.ensure_newline()
if hasattr(rep, 'node'):
self._tw.write("[%s] " % rep.node.gateway.id)
self._tw.write(word, **markup)
self._tw.write(" " + line)
self.currentfspath = -2
def pytest_collection(self):
if not self.hasmarkup and self.config.option.verbose >= 1:
self.write("collecting ... ", bold=True)
def pytest_collectreport(self, report):
if report.failed:
self.stats.setdefault("error", []).append(report)
elif report.skipped:
self.stats.setdefault("skipped", []).append(report)
items = [x for x in report.result if isinstance(x, pytest.Item)]
self._numcollected += len(items)
if self.hasmarkup:
#self.write_fspath_result(report.nodeid, 'E')
self.report_collect()
def report_collect(self, final=False):
if self.config.option.verbose < 0:
return
errors = len(self.stats.get('error', []))
skipped = len(self.stats.get('skipped', []))
if final:
line = "collected "
else:
line = "collecting "
line += str(self._numcollected) + " items"
if errors:
line += " / %d errors" % errors
if skipped:
line += " / %d skipped" % skipped
if self.hasmarkup:
if final:
line += " \n"
self.rewrite(line, bold=True)
else:
self.write_line(line)
def pytest_collection_modifyitems(self):
self.report_collect(True)
@pytest.hookimpl(trylast=True)
def pytest_sessionstart(self, session):
self._sessionstarttime = time.time()
if not self.showheader:
return
self.write_sep("=", "test session starts", bold=True)
verinfo = platform.python_version()
msg = "platform %s -- Python %s" % (sys.platform, verinfo)
if hasattr(sys, 'pypy_version_info'):
verinfo = ".".join(map(str, sys.pypy_version_info[:3]))
msg += "[pypy-%s-%s]" % (verinfo, sys.pypy_version_info[3])
msg += ", pytest-%s, py-%s, pluggy-%s" % (
pytest.__version__, py.__version__, pluggy.__version__)
if self.verbosity > 0 or self.config.option.debug or \
getattr(self.config.option, 'pastebin', None):
msg += " -- " + str(sys.executable)
self.write_line(msg)
lines = self.config.hook.pytest_report_header(
config=self.config, startdir=self.startdir)
lines.reverse()
for line in flatten(lines):
self.write_line(line)
def pytest_report_header(self, config):
inifile = ""
if config.inifile:
inifile = config.rootdir.bestrelpath(config.inifile)
lines = ["rootdir: %s, inifile: %s" %(config.rootdir, inifile)]
plugininfo = config.pluginmanager.list_plugin_distinfo()
if plugininfo:
l = []
for plugin, dist in plugininfo:
name = dist.project_name
if name.startswith("pytest-"):
name = name[7:]
l.append(name)
lines.append("plugins: %s" % ", ".join(l))
return lines
def pytest_collection_finish(self, session):
if self.config.option.collectonly:
self._printcollecteditems(session.items)
if self.stats.get('failed'):
self._tw.sep("!", "collection failures")
for rep in self.stats.get('failed'):
rep.toterminal(self._tw)
return 1
return 0
if not self.showheader:
return
#for i, testarg in enumerate(self.config.args):
# self.write_line("test path %d: %s" %(i+1, testarg))
def _printcollecteditems(self, items):
# to print out items and their parent collectors
# we take care to leave out Instances aka ()
# because later versions are going to get rid of them anyway
if self.config.option.verbose < 0:
if self.config.option.verbose < -1:
counts = {}
for item in items:
name = item.nodeid.split('::', 1)[0]
counts[name] = counts.get(name, 0) + 1
for name, count in sorted(counts.items()):
self._tw.line("%s: %d" % (name, count))
else:
for item in items:
nodeid = item.nodeid
nodeid = nodeid.replace("::()::", "::")
self._tw.line(nodeid)
return
stack = []
indent = ""
for item in items:
needed_collectors = item.listchain()[1:] # strip root node
while stack:
if stack == needed_collectors[:len(stack)]:
break
stack.pop()
for col in needed_collectors[len(stack):]:
stack.append(col)
#if col.name == "()":
# continue
indent = (len(stack) - 1) * " "
self._tw.line("%s%s" % (indent, col))
@pytest.hookimpl(hookwrapper=True)
def pytest_sessionfinish(self, exitstatus):
outcome = yield
outcome.get_result()
self._tw.line("")
if exitstatus in (0, 1, 2, 4):
self.summary_errors()
self.summary_failures()
self.summary_warnings()
self.config.hook.pytest_terminal_summary(terminalreporter=self)
if exitstatus == 2:
self._report_keyboardinterrupt()
del self._keyboardinterrupt_memo
self.summary_deselected()
self.summary_stats()
def pytest_keyboard_interrupt(self, excinfo):
self._keyboardinterrupt_memo = excinfo.getrepr(funcargs=True)
def pytest_unconfigure(self):
if hasattr(self, '_keyboardinterrupt_memo'):
self._report_keyboardinterrupt()
def _report_keyboardinterrupt(self):
excrepr = self._keyboardinterrupt_memo
msg = excrepr.reprcrash.message
self.write_sep("!", msg)
if "KeyboardInterrupt" in msg:
if self.config.option.fulltrace:
excrepr.toterminal(self._tw)
else:
excrepr.reprcrash.toterminal(self._tw)
def _locationline(self, nodeid, fspath, lineno, domain):
def mkrel(nodeid):
line = self.config.cwd_relative_nodeid(nodeid)
if domain and line.endswith(domain):
line = line[:-len(domain)]
l = domain.split("[")
l[0] = l[0].replace('.', '::') # don't replace '.' in params
line += "[".join(l)
return line
# collect_fspath comes from testid which has a "/"-normalized path
if fspath:
res = mkrel(nodeid).replace("::()", "") # parens-normalization
if nodeid.split("::")[0] != fspath.replace("\\", "/"):
res += " <- " + self.startdir.bestrelpath(fspath)
else:
res = "[location]"
return res + " "
def _getfailureheadline(self, rep):
if hasattr(rep, 'location'):
fspath, lineno, domain = rep.location
return domain
else:
return "test session" # XXX?
def _getcrashline(self, rep):
try:
return str(rep.longrepr.reprcrash)
except AttributeError:
try:
return str(rep.longrepr)[:50]
except AttributeError:
return ""
#
# summaries for sessionfinish
#
def getreports(self, name):
l = []
for x in self.stats.get(name, []):
if not hasattr(x, '_pdbshown'):
l.append(x)
return l
def summary_warnings(self):
if self.hasopt("w"):
warnings = self.stats.get("warnings")
if not warnings:
return
self.write_sep("=", "warning summary")
for w in warnings:
self._tw.line("W%s %s %s" % (w.code,
w.fslocation, w.message))
def summary_failures(self):
if self.config.option.tbstyle != "no":
reports = self.getreports('failed')
if not reports:
return
self.write_sep("=", "FAILURES")
for rep in reports:
if self.config.option.tbstyle == "line":
line = self._getcrashline(rep)
self.write_line(line)
else:
msg = self._getfailureheadline(rep)
self.write_sep("_", msg)
self._outrep_summary(rep)
def summary_errors(self):
if self.config.option.tbstyle != "no":
reports = self.getreports('error')
if not reports:
return
self.write_sep("=", "ERRORS")
for rep in self.stats['error']:
msg = self._getfailureheadline(rep)
if not hasattr(rep, 'when'):
# collect
msg = "ERROR collecting " + msg
elif rep.when == "setup":
msg = "ERROR at setup of " + msg
elif rep.when == "teardown":
msg = "ERROR at teardown of " + msg
self.write_sep("_", msg)
self._outrep_summary(rep)
def _outrep_summary(self, rep):
rep.toterminal(self._tw)
for secname, content in rep.sections:
self._tw.sep("-", secname)
if content[-1:] == "\n":
content = content[:-1]
self._tw.line(content)
def summary_stats(self):
session_duration = time.time() - self._sessionstarttime
keys = ("failed passed skipped deselected "
"xfailed xpassed warnings").split()
for key in self.stats.keys():
if key not in keys:
keys.append(key)
parts = []
for key in keys:
if key: # setup/teardown reports have an empty key, ignore them
val = self.stats.get(key, None)
if val:
parts.append("%d %s" % (len(val), key))
line = ", ".join(parts)
msg = "%s in %.2f seconds" % (line, session_duration)
markup = {'bold': True}
if 'failed' in self.stats or 'error' in self.stats:
markup = {'red': True, 'bold': True}
else:
markup = {'green': True, 'bold': True}
if self.verbosity >= 0:
self.write_sep("=", msg, **markup)
if self.verbosity == -1:
self.write_line(msg, **markup)
def summary_deselected(self):
if 'deselected' in self.stats:
l = []
k = self.config.option.keyword
if k:
l.append("-k%s" % k)
m = self.config.option.markexpr
if m:
l.append("-m %r" % m)
if l:
self.write_sep("=", "%d tests deselected by %r" % (
len(self.stats['deselected']), " ".join(l)), bold=True)
def repr_pythonversion(v=None):
if v is None:
v = sys.version_info
try:
return "%s.%s.%s-%s-%s" % v
except (TypeError, ValueError):
return str(v)
def flatten(l):
for x in l:
if isinstance(x, (list, tuple)):
for y in flatten(x):
yield y
else:
yield x
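# Small illustrative example (not in the original source): flatten() yields the
# leaf items of arbitrarily nested lists/tuples, e.g.
#   list(flatten(["a", ["b", ("c", "d")]])) == ["a", "b", "c", "d"]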
|
mit
|
rnirmal/cinder
|
cinder/tests/test_iscsi.py
|
4
|
3795
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os.path
import shutil
import string
import tempfile
from cinder import test
from cinder.volume import iscsi
class TargetAdminTestCase(object):
def setUp(self):
self.cmds = []
self.tid = 1
self.target_name = 'iqn.2011-09.org.foo.bar:blaa'
self.lun = 10
self.path = '/foo'
self.vol_id = 'blaa'
self.script_template = None
self.stubs.Set(os.path, 'isfile', lambda _: True)
self.stubs.Set(os, 'unlink', lambda _: '')
self.stubs.Set(iscsi.TgtAdm, '_get_target', self.fake_get_target)
def fake_get_target(obj, iqn):
return 1
def get_script_params(self):
return {'tid': self.tid,
'target_name': self.target_name,
'lun': self.lun,
'path': self.path}
def get_script(self):
return self.script_template % self.get_script_params()
def fake_execute(self, *cmd, **kwargs):
self.cmds.append(string.join(cmd))
return "", None
def clear_cmds(self):
        self.cmds = []
def verify_cmds(self, cmds):
self.assertEqual(len(cmds), len(self.cmds))
for a, b in zip(cmds, self.cmds):
self.assertEqual(a, b)
def verify(self):
script = self.get_script()
cmds = []
for line in script.split('\n'):
if not line.strip():
continue
cmds.append(line)
self.verify_cmds(cmds)
def run_commands(self):
tgtadm = iscsi.get_target_admin()
tgtadm.set_execute(self.fake_execute)
tgtadm.create_iscsi_target(self.target_name, self.tid,
self.lun, self.path)
tgtadm.show_target(self.tid, iqn=self.target_name)
tgtadm.remove_iscsi_target(self.tid, self.lun, self.vol_id)
def test_target_admin(self):
self.clear_cmds()
self.run_commands()
self.verify()
class TgtAdmTestCase(test.TestCase, TargetAdminTestCase):
def setUp(self):
super(TgtAdmTestCase, self).setUp()
TargetAdminTestCase.setUp(self)
self.persist_tempdir = tempfile.mkdtemp()
self.flags(iscsi_helper='tgtadm')
self.flags(volumes_dir=self.persist_tempdir)
self.script_template = "\n".join([
'tgt-admin --update iqn.2011-09.org.foo.bar:blaa',
'tgt-admin --delete iqn.2010-10.org.openstack:volume-blaa'])
def tearDown(self):
try:
shutil.rmtree(self.persist_tempdir)
except OSError:
pass
super(TgtAdmTestCase, self).tearDown()
class IetAdmTestCase(test.TestCase, TargetAdminTestCase):
def setUp(self):
super(IetAdmTestCase, self).setUp()
TargetAdminTestCase.setUp(self)
self.flags(iscsi_helper='ietadm')
self.script_template = "\n".join([
'ietadm --op new --tid=%(tid)s --params Name=%(target_name)s',
'ietadm --op new --tid=%(tid)s --lun=%(lun)s '
'--params Path=%(path)s,Type=fileio',
'ietadm --op show --tid=%(tid)s',
'ietadm --op delete --tid=%(tid)s --lun=%(lun)s',
'ietadm --op delete --tid=%(tid)s'])
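# Illustrative note (not part of the original tests): with the defaults from
# TargetAdminTestCase.setUp (tid=1, lun=10, path='/foo',
# target_name='iqn.2011-09.org.foo.bar:blaa'), the IetAdm template above
# renders to commands such as:
#   ietadm --op new --tid=1 --params Name=iqn.2011-09.org.foo.bar:blaa
#   ietadm --op new --tid=1 --lun=10 --params Path=/foo,Type=fileio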
|
apache-2.0
|
xsynergy510x/android_external_chromium_org
|
chrome/common/extensions/docs/server2/manifest_features.py
|
122
|
1683
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''
Provides a Manifest Feature abstraction, similar to but more strict than the
Feature schema (see feature_utility.py).
Each Manifest Feature has a 'level' in addition to the keys defined in a
Feature. 'level' can be 'required', 'only_one', 'recommended', or 'optional',
indicating how an app or extension should define a manifest property. If 'level'
is missing, 'optional' is assumed.
'''
def ConvertDottedKeysToNested(features):
'''Some Manifest Features are subordinate to others, such as app.background to
app. Subordinate Features can be moved inside the parent Feature under the key
'children'.
Modifies |features|, a Manifest Features dictionary, by moving subordinate
Features with names of the form 'parent.child' into the 'parent' Feature.
Child features are renamed to the 'child' section of their previous name.
Applied recursively so that children can be nested arbitrarily.
'''
def add_child(features, parent, child_name, value):
value['name'] = child_name
if not 'children' in features[parent]:
features[parent]['children'] = {}
features[parent]['children'][child_name] = value
def insert_children(features):
for name in features.keys():
if '.' in name:
value = features.pop(name)
parent, child_name = name.split('.', 1)
add_child(features, parent, child_name, value)
for value in features.values():
if 'children' in value:
insert_children(value['children'])
insert_children(features)
return features
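# Illustrative sketch (not part of the original module): a minimal input/output
# pair for ConvertDottedKeysToNested; the feature names are hypothetical.
#
#   ConvertDottedKeysToNested({
#       'app': {'level': 'required'},
#       'app.background': {'level': 'optional'},
#   })
#   # => {'app': {'level': 'required',
#   #             'children': {'background': {'name': 'background',
#   #                                         'level': 'optional'}}}}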
|
bsd-3-clause
|
DLance96/ox-dashboard
|
dashboard/migrations/0022_auto_20160622_0418.py
|
1
|
1940
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dashboard', '0021_auto_20160622_0219'),
]
operations = [
migrations.CreateModel(
name='Excuse',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('description', models.TextField(default=b'I will not be attending because', verbose_name=b'Reasoning')),
('response_message', models.TextField(default=b'Your excuse was not approved because')),
('status', models.CharField(default=b'0', max_length=1, choices=[(b'0', b'Pending'), (b'1', b'Approved'), (b'2', b'Denied')])),
('brother', models.ForeignKey(to='dashboard.Brother')),
('event', models.ForeignKey(to='dashboard.ChapterEvent')),
('semester', models.ForeignKey(blank=True, to='dashboard.Semester', null=True)),
],
),
migrations.CreateModel(
name='StudyTableEvent',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('date', models.DateField()),
('attendees', models.ManyToManyField(to='dashboard.Brother')),
('semester', models.ForeignKey(blank=True, to='dashboard.Semester', null=True)),
],
),
migrations.RemoveField(
model_name='eventexcuse',
name='brother',
),
migrations.RemoveField(
model_name='eventexcuse',
name='event',
),
migrations.RemoveField(
model_name='eventexcuse',
name='semester',
),
migrations.DeleteModel(
name='EventExcuse',
),
]
|
mit
|
ptitjano/bokeh
|
bokeh/tests/test_document.py
|
2
|
34158
|
from __future__ import absolute_import, print_function
import pytest
import unittest
from copy import copy
import bokeh.document as document
from bokeh.io import curdoc
from bokeh.model import Model
from bokeh.models import ColumnDataSource
from bokeh.core.properties import Int, Instance, String, DistanceSpec
class AnotherModelInTestDocument(Model):
bar = Int(1)
class SomeModelInTestDocument(Model):
foo = Int(2)
child = Instance(Model)
class ModelThatOverridesName(Model):
name = String()
class ModelWithSpecInTestDocument(Model):
foo = DistanceSpec(2)
class TestDocument(unittest.TestCase):
def test_empty(self):
d = document.Document()
assert not d.roots
def test_add_roots(self):
d = document.Document()
assert not d.roots
d.add_root(AnotherModelInTestDocument())
assert len(d.roots) == 1
assert next(iter(d.roots)).document == d
def test_roots_preserves_insertion_order(self):
d = document.Document()
assert not d.roots
roots = [
AnotherModelInTestDocument(),
AnotherModelInTestDocument(),
AnotherModelInTestDocument(),
]
for r in roots:
d.add_root(r)
assert len(d.roots) == 3
assert type(d.roots) is list
roots_iter = iter(d.roots)
assert next(roots_iter) is roots[0]
assert next(roots_iter) is roots[1]
assert next(roots_iter) is roots[2]
def test_set_title(self):
d = document.Document()
assert d.title == document.DEFAULT_TITLE
d.title = "Foo"
assert d.title == "Foo"
def test_all_models(self):
d = document.Document()
assert not d.roots
assert len(d._all_models) == 0
m = SomeModelInTestDocument()
m2 = AnotherModelInTestDocument()
m.child = m2
d.add_root(m)
assert len(d.roots) == 1
assert len(d._all_models) == 2
m.child = None
assert len(d._all_models) == 1
m.child = m2
assert len(d._all_models) == 2
d.remove_root(m)
assert len(d._all_models) == 0
def test_get_model_by_id(self):
d = document.Document()
assert not d.roots
assert len(d._all_models) == 0
m = SomeModelInTestDocument()
m2 = AnotherModelInTestDocument()
m.child = m2
d.add_root(m)
assert len(d.roots) == 1
assert len(d._all_models) == 2
assert d.get_model_by_id(m._id) == m
assert d.get_model_by_id(m2._id) == m2
assert d.get_model_by_id("not a valid ID") is None
def test_get_model_by_name(self):
d = document.Document()
assert not d.roots
assert len(d._all_models) == 0
m = SomeModelInTestDocument(name="foo")
m2 = AnotherModelInTestDocument(name="bar")
m.child = m2
d.add_root(m)
assert len(d.roots) == 1
assert len(d._all_models) == 2
assert len(d._all_models_by_name._dict) == 2
assert d.get_model_by_name(m.name) == m
assert d.get_model_by_name(m2.name) == m2
assert d.get_model_by_name("not a valid name") is None
def test_get_model_by_changed_name(self):
d = document.Document()
m = SomeModelInTestDocument(name="foo")
d.add_root(m)
assert d.get_model_by_name("foo") == m
m.name = "bar"
assert d.get_model_by_name("foo") == None
assert d.get_model_by_name("bar") == m
def test_get_model_by_changed_from_none_name(self):
d = document.Document()
m = SomeModelInTestDocument(name=None)
d.add_root(m)
assert d.get_model_by_name("bar") == None
m.name = "bar"
assert d.get_model_by_name("bar") == m
def test_get_model_by_changed_to_none_name(self):
d = document.Document()
m = SomeModelInTestDocument(name="bar")
d.add_root(m)
assert d.get_model_by_name("bar") == m
m.name = None
assert d.get_model_by_name("bar") == None
def test_can_get_name_overriding_model_by_name(self):
d = document.Document()
m = ModelThatOverridesName(name="foo")
d.add_root(m)
assert d.get_model_by_name("foo") == m
m.name = "bar"
assert d.get_model_by_name("bar") == m
def test_cannot_get_model_with_duplicate_name(self):
d = document.Document()
m = SomeModelInTestDocument(name="foo")
m2 = SomeModelInTestDocument(name="foo")
d.add_root(m)
d.add_root(m2)
got_error = False
try:
d.get_model_by_name("foo")
except ValueError as e:
got_error = True
assert 'Found more than one' in repr(e)
assert got_error
d.remove_root(m)
assert d.get_model_by_name("foo") == m2
def test_select(self):
# we aren't trying to replace test_query here, only test
# our wrappers around it, so no need to try every kind of
# query
d = document.Document()
root1 = SomeModelInTestDocument(foo=42, name='a')
child1 = SomeModelInTestDocument(foo=43, name='b')
root2 = SomeModelInTestDocument(foo=44, name='c')
root3 = SomeModelInTestDocument(foo=44, name='d')
child3 = SomeModelInTestDocument(foo=45, name='c')
root1.child = child1
root3.child = child3
d.add_root(root1)
d.add_root(root2)
d.add_root(root3)
# select()
assert set([root1]) == set(d.select(dict(foo=42)))
assert set([root1]) == set(d.select(dict(name='a')))
assert set([root2, child3]) == set(d.select(dict(name='c')))
assert set() == set(d.select(dict(name='nope')))
# select() on object
assert set() == set(root3.select(dict(name='a')))
assert set([child3]) == set(root3.select(dict(name='c')))
# select_one()
assert root3 == d.select_one(dict(name='d'))
assert None == d.select_one(dict(name='nope'))
got_error = False
try:
d.select_one(dict(name='c'))
except ValueError as e:
got_error = True
assert 'Found more than one' in repr(e)
assert got_error
# select_one() on object
assert None == root3.select_one(dict(name='a'))
assert child3 == root3.select_one(dict(name='c'))
# set_select()
d.set_select(dict(foo=44), dict(name='c'))
assert set([root2, child3, root3]) == set(d.select(dict(name='c')))
# set_select() on object
root3.set_select(dict(name='c'), dict(foo=57))
assert set([child3, root3]) == set(d.select(dict(foo=57)))
assert set([child3, root3]) == set(root3.select(dict(foo=57)))
def test_is_single_string_selector(self):
d = document.Document()
# this is an implementation detail but just ensuring it works
assert d._is_single_string_selector(dict(foo='c'), 'foo')
assert d._is_single_string_selector(dict(foo=u'c'), 'foo')
assert not d._is_single_string_selector(dict(foo='c', bar='d'), 'foo')
assert not d._is_single_string_selector(dict(foo=42), 'foo')
def test_all_models_with_multiple_references(self):
d = document.Document()
assert not d.roots
assert len(d._all_models) == 0
root1 = SomeModelInTestDocument()
root2 = SomeModelInTestDocument()
child1 = AnotherModelInTestDocument()
root1.child = child1
root2.child = child1
d.add_root(root1)
d.add_root(root2)
assert len(d.roots) == 2
assert len(d._all_models) == 3
root1.child = None
assert len(d._all_models) == 3
root2.child = None
assert len(d._all_models) == 2
root1.child = child1
assert len(d._all_models) == 3
root2.child = child1
assert len(d._all_models) == 3
d.remove_root(root1)
assert len(d._all_models) == 2
d.remove_root(root2)
assert len(d._all_models) == 0
def test_all_models_with_cycles(self):
d = document.Document()
assert not d.roots
assert len(d._all_models) == 0
root1 = SomeModelInTestDocument()
root2 = SomeModelInTestDocument()
child1 = SomeModelInTestDocument()
root1.child = child1
root2.child = child1
child1.child = root1
print("adding root1")
d.add_root(root1)
print("adding root2")
d.add_root(root2)
assert len(d.roots) == 2
assert len(d._all_models) == 3
print("clearing child of root1")
root1.child = None
assert len(d._all_models) == 3
print("clearing child of root2")
root2.child = None
assert len(d._all_models) == 2
print("putting child1 back in root1")
root1.child = child1
assert len(d._all_models) == 3
print("Removing root1")
d.remove_root(root1)
assert len(d._all_models) == 1
print("Removing root2")
d.remove_root(root2)
assert len(d._all_models) == 0
def test_change_notification(self):
d = document.Document()
assert not d.roots
m = AnotherModelInTestDocument()
d.add_root(m)
assert len(d.roots) == 1
assert m.bar == 1
assert curdoc() is not d
events = []
curdoc_from_listener = []
def listener(event):
curdoc_from_listener.append(curdoc())
events.append(event)
d.on_change(listener)
m.bar = 42
assert events
event = events[0]
assert isinstance(event, document.ModelChangedEvent)
assert event.document == d
assert event.model == m
assert event.attr == 'bar'
assert event.old == 1
assert event.new == 42
assert len(curdoc_from_listener) == 1
assert curdoc_from_listener[0] is d
def test_stream_notification(self):
d = document.Document()
assert not d.roots
m = ColumnDataSource(data=dict(a=[10], b=[20]))
d.add_root(m)
assert len(d.roots) == 1
assert curdoc() is not d
events = []
curdoc_from_listener = []
def listener(event):
curdoc_from_listener.append(curdoc())
events.append(event)
d.on_change(listener)
m.stream(dict(a=[11, 12], b=[21, 22]), 200)
assert events
event = events[0]
assert isinstance(event, document.ModelChangedEvent)
assert isinstance(event.hint, document.ColumnsStreamedEvent)
assert event.document == d
assert event.model == m
assert event.hint.column_source == m
assert event.hint.data == dict(a=[11, 12], b=[21, 22])
assert event.hint.rollover == 200
assert event.attr == 'data'
# old == new because stream events update in-place
assert event.old == dict(a=[10, 11, 12], b=[20, 21, 22])
assert event.new == dict(a=[10, 11, 12], b=[20, 21, 22])
assert len(curdoc_from_listener) == 1
assert curdoc_from_listener[0] is d
def test_patch_notification(self):
d = document.Document()
assert not d.roots
m = ColumnDataSource(data=dict(a=[10,11], b=[20,21]))
d.add_root(m)
assert len(d.roots) == 1
assert curdoc() is not d
events = []
curdoc_from_listener = []
def listener(event):
curdoc_from_listener.append(curdoc())
events.append(event)
d.on_change(listener)
m.patch(dict(a=[(0, 1)], b=[(0,0), (1,1)]))
assert events
event = events[0]
assert isinstance(event, document.ModelChangedEvent)
assert isinstance(event.hint, document.ColumnsPatchedEvent)
assert event.document == d
assert event.model == m
assert event.hint.column_source == m
assert event.hint.patches == dict(a=[(0, 1)], b=[(0,0), (1,1)])
assert event.attr == 'data'
        # old == new because patch events update in-place
assert event.old == dict(a=[1, 11], b=[0, 1])
assert event.new == dict(a=[1, 11], b=[0, 1])
assert len(curdoc_from_listener) == 1
assert curdoc_from_listener[0] is d
def test_change_notification_removal(self):
d = document.Document()
assert not d.roots
m = AnotherModelInTestDocument()
d.add_root(m)
assert len(d.roots) == 1
assert m.bar == 1
events = []
def listener(event):
events.append(event)
d.on_change(listener)
m.bar = 42
assert len(events) == 1
assert events[0].new == 42
d.remove_on_change(listener)
m.bar = 43
assert len(events) == 1
def test_notification_of_roots(self):
d = document.Document()
assert not d.roots
events = []
def listener(event):
events.append(event)
d.on_change(listener)
m = AnotherModelInTestDocument(bar=1)
d.add_root(m)
assert len(d.roots) == 1
assert len(events) == 1
assert isinstance(events[0], document.RootAddedEvent)
assert events[0].model == m
m2 = AnotherModelInTestDocument(bar=2)
d.add_root(m2)
assert len(d.roots) == 2
assert len(events) == 2
assert isinstance(events[1], document.RootAddedEvent)
assert events[1].model == m2
d.remove_root(m)
assert len(d.roots) == 1
assert len(events) == 3
assert isinstance(events[2], document.RootRemovedEvent)
assert events[2].model == m
d.remove_root(m2)
assert len(d.roots) == 0
assert len(events) == 4
assert isinstance(events[3], document.RootRemovedEvent)
assert events[3].model == m2
def test_notification_of_title(self):
d = document.Document()
assert not d.roots
assert d.title == document.DEFAULT_TITLE
events = []
def listener(event):
events.append(event)
d.on_change(listener)
d.title = "Foo"
assert d.title == "Foo"
assert len(events) == 1
assert isinstance(events[0], document.TitleChangedEvent)
assert events[0].document is d
assert events[0].title == "Foo"
def test_add_remove_periodic_callback(self):
d = document.Document()
events = []
def listener(event):
events.append(event)
d.on_change(listener)
assert len(d.session_callbacks) == 0
assert not events
def cb(): pass
callback = d.add_periodic_callback(cb, 1)
assert len(d.session_callbacks) == len(events) == 1
assert isinstance(events[0], document.SessionCallbackAdded)
assert callback == d.session_callbacks[0] == events[0].callback
assert callback.period == 1
callback = d.remove_periodic_callback(cb)
assert len(d.session_callbacks) == 0
assert len(events) == 2
assert isinstance(events[0], document.SessionCallbackAdded)
assert isinstance(events[1], document.SessionCallbackRemoved)
def test_add_remove_timeout_callback(self):
d = document.Document()
events = []
def listener(event):
events.append(event)
d.on_change(listener)
assert len(d.session_callbacks) == 0
assert not events
def cb(): pass
callback = d.add_timeout_callback(cb, 1)
assert len(d.session_callbacks) == len(events) == 1
assert isinstance(events[0], document.SessionCallbackAdded)
assert callback == d.session_callbacks[0] == events[0].callback
assert callback.timeout == 1
callback = d.remove_timeout_callback(cb)
assert len(d.session_callbacks) == 0
assert len(events) == 2
assert isinstance(events[0], document.SessionCallbackAdded)
assert isinstance(events[1], document.SessionCallbackRemoved)
def test_add_partial_callback(self):
from functools import partial
d = document.Document()
events = []
def listener(event):
events.append(event)
d.on_change(listener)
assert len(d.session_callbacks) == 0
assert not events
def _cb(): pass
cb = partial(_cb)
callback = d.add_timeout_callback(cb, 1)
assert len(d.session_callbacks) == len(events) == 1
assert isinstance(events[0], document.SessionCallbackAdded)
assert callback == d.session_callbacks[0] == events[0].callback
assert callback.timeout == 1
def test_add_remove_next_tick_callback(self):
d = document.Document()
events = []
def listener(event):
events.append(event)
d.on_change(listener)
assert len(d.session_callbacks) == 0
assert not events
def cb(): pass
callback = d.add_next_tick_callback(cb)
assert len(d.session_callbacks) == len(events) == 1
assert isinstance(events[0], document.SessionCallbackAdded)
assert callback == d.session_callbacks[0] == events[0].callback
callback = d.remove_next_tick_callback(cb)
assert len(d.session_callbacks) == 0
assert len(events) == 2
assert isinstance(events[0], document.SessionCallbackAdded)
assert isinstance(events[1], document.SessionCallbackRemoved)
def test_periodic_callback_gets_curdoc(self):
d = document.Document()
assert curdoc() is not d
curdoc_from_cb = []
def cb():
curdoc_from_cb.append(curdoc())
callback = d.add_periodic_callback(cb, 1)
callback.callback()
assert len(curdoc_from_cb) == 1
assert curdoc_from_cb[0] is d
def test_timeout_callback_gets_curdoc(self):
d = document.Document()
assert curdoc() is not d
curdoc_from_cb = []
def cb():
curdoc_from_cb.append(curdoc())
callback = d.add_timeout_callback(cb, 1)
callback.callback()
assert len(curdoc_from_cb) == 1
assert curdoc_from_cb[0] is d
def test_next_tick_callback_gets_curdoc(self):
d = document.Document()
assert curdoc() is not d
curdoc_from_cb = []
def cb():
curdoc_from_cb.append(curdoc())
callback = d.add_next_tick_callback(cb)
callback.callback()
assert len(curdoc_from_cb) == 1
assert curdoc_from_cb[0] is d
def test_model_callback_gets_curdoc(self):
d = document.Document()
m = AnotherModelInTestDocument(bar=42)
d.add_root(m)
assert curdoc() is not d
curdoc_from_cb = []
def cb(attr, old, new):
curdoc_from_cb.append(curdoc())
m.on_change('bar', cb)
m.bar = 43
assert len(curdoc_from_cb) == 1
assert curdoc_from_cb[0] is d
def test_clear(self):
d = document.Document()
assert not d.roots
assert d.title == document.DEFAULT_TITLE
d.add_root(AnotherModelInTestDocument())
d.add_root(AnotherModelInTestDocument())
d.title = "Foo"
assert len(d.roots) == 2
assert d.title == "Foo"
d.clear()
assert not d.roots
assert not d._all_models
assert d.title == "Foo" # do not reset title
def test_serialization_one_model(self):
d = document.Document()
assert not d.roots
assert len(d._all_models) == 0
root1 = SomeModelInTestDocument()
d.add_root(root1)
d.title = "Foo"
json = d.to_json_string()
copy = document.Document.from_json_string(json)
assert len(copy.roots) == 1
assert copy.title == "Foo"
def test_serialization_more_models(self):
d = document.Document()
assert not d.roots
assert len(d._all_models) == 0
root1 = SomeModelInTestDocument(foo=42)
root2 = SomeModelInTestDocument(foo=43)
child1 = SomeModelInTestDocument(foo=44)
root1.child = child1
root2.child = child1
d.add_root(root1)
d.add_root(root2)
assert len(d.roots) == 2
json = d.to_json_string()
copy = document.Document.from_json_string(json)
assert len(copy.roots) == 2
foos = []
for r in copy.roots:
foos.append(r.foo)
foos.sort()
assert [42,43] == foos
some_root = next(iter(copy.roots))
assert some_root.child.foo == 44
def test_serialization_has_version(self):
from bokeh import __version__
d = document.Document()
json = d.to_json()
assert json['version'] == __version__
def test_patch_integer_property(self):
d = document.Document()
assert not d.roots
assert len(d._all_models) == 0
root1 = SomeModelInTestDocument(foo=42)
root2 = SomeModelInTestDocument(foo=43)
child1 = SomeModelInTestDocument(foo=44)
root1.child = child1
root2.child = child1
d.add_root(root1)
d.add_root(root2)
assert len(d.roots) == 2
event1 = document.ModelChangedEvent(d, root1, 'foo', root1.foo, 57, 57)
patch1 = d.create_json_patch_string([event1])
d.apply_json_patch_string(patch1)
assert root1.foo == 57
event2 = document.ModelChangedEvent(d, child1, 'foo', child1.foo, 67, 67)
patch2 = d.create_json_patch_string([event2])
d.apply_json_patch_string(patch2)
assert child1.foo == 67
def test_patch_spec_property(self):
d = document.Document()
assert not d.roots
assert len(d._all_models) == 0
root1 = ModelWithSpecInTestDocument(foo=42)
d.add_root(root1)
assert len(d.roots) == 1
def patch_test(new_value):
serializable_new = root1.lookup('foo').descriptor.to_serializable(root1,
'foo',
new_value)
event1 = document.ModelChangedEvent(d, root1, 'foo', root1.foo, new_value,
serializable_new)
patch1 = d.create_json_patch_string([event1])
d.apply_json_patch_string(patch1)
if isinstance(new_value, dict):
expected = copy(new_value)
if 'units' not in expected:
expected['units'] = root1.foo_units
self.assertDictEqual(expected, root1.lookup('foo').serializable_value(root1))
else:
self.assertEqual(new_value, root1.foo)
patch_test(57)
self.assertEqual('data', root1.foo_units)
patch_test(dict(value=58))
self.assertEqual('data', root1.foo_units)
patch_test(dict(value=58, units='screen'))
self.assertEqual('screen', root1.foo_units)
patch_test(dict(value=59, units='screen'))
self.assertEqual('screen', root1.foo_units)
patch_test(dict(value=59, units='data'))
self.assertEqual('data', root1.foo_units)
patch_test(dict(value=60, units='data'))
self.assertEqual('data', root1.foo_units)
patch_test(dict(value=60, units='data'))
self.assertEqual('data', root1.foo_units)
patch_test(61)
self.assertEqual('data', root1.foo_units)
root1.foo = "a_string" # so "woot" gets set as a string
patch_test("woot")
self.assertEqual('data', root1.foo_units)
patch_test(dict(field="woot2"))
self.assertEqual('data', root1.foo_units)
patch_test(dict(field="woot2", units='screen'))
self.assertEqual('screen', root1.foo_units)
patch_test(dict(field="woot3"))
self.assertEqual('screen', root1.foo_units)
patch_test(dict(value=70))
self.assertEqual('screen', root1.foo_units)
root1.foo = 123 # so 71 gets set as a number
patch_test(71)
self.assertEqual('screen', root1.foo_units)
def test_patch_reference_property(self):
d = document.Document()
assert not d.roots
assert len(d._all_models) == 0
root1 = SomeModelInTestDocument(foo=42)
root2 = SomeModelInTestDocument(foo=43)
child1 = SomeModelInTestDocument(foo=44)
child2 = SomeModelInTestDocument(foo=45)
child3 = SomeModelInTestDocument(foo=46, child=child2)
root1.child = child1
root2.child = child1
d.add_root(root1)
d.add_root(root2)
assert len(d.roots) == 2
assert child1._id in d._all_models
assert child2._id not in d._all_models
assert child3._id not in d._all_models
event1 = document.ModelChangedEvent(d, root1, 'child', root1.child, child3, child3)
patch1 = d.create_json_patch_string([event1])
d.apply_json_patch_string(patch1)
assert root1.child._id == child3._id
assert root1.child.child._id == child2._id
assert child1._id in d._all_models
assert child2._id in d._all_models
assert child3._id in d._all_models
# put it back how it was before
event2 = document.ModelChangedEvent(d, root1, 'child', root1.child, child1, child1)
patch2 = d.create_json_patch_string([event2])
d.apply_json_patch_string(patch2)
assert root1.child._id == child1._id
assert root1.child.child is None
assert child1._id in d._all_models
assert child2._id not in d._all_models
assert child3._id not in d._all_models
def test_patch_two_properties_at_once(self):
d = document.Document()
assert not d.roots
assert len(d._all_models) == 0
root1 = SomeModelInTestDocument(foo=42)
child1 = SomeModelInTestDocument(foo=43)
root1.child = child1
d.add_root(root1)
assert len(d.roots) == 1
assert root1.child == child1
assert root1.foo == 42
assert root1.child.foo == 43
child2 = SomeModelInTestDocument(foo=44)
event1 = document.ModelChangedEvent(d, root1, 'foo', root1.foo, 57, 57)
event2 = document.ModelChangedEvent(d, root1, 'child', root1.child, child2, child2)
patch1 = d.create_json_patch_string([event1, event2])
d.apply_json_patch_string(patch1)
assert root1.foo == 57
assert root1.child.foo == 44
# a more realistic set of models instead of fake models
def test_scatter(self):
from bokeh.io import set_curdoc
from bokeh.plotting import figure
import numpy as np
d = document.Document()
set_curdoc(d)
assert not d.roots
assert len(d._all_models) == 0
p1 = figure(tools=[])
N = 10
x = np.linspace(0, 4 * np.pi, N)
y = np.sin(x)
p1.scatter(x, y, color="#FF00FF", nonselection_fill_color="#FFFF00", nonselection_fill_alpha=1)
# figure does not automatically add itself to the document
d.add_root(p1)
assert len(d.roots) == 1
# TODO test serialize/deserialize with list-and-dict-valued properties
# TODO test replace_with_json
def test_compute_one_attribute_patch(self):
from bokeh.document import Document
d = Document()
root1 = SomeModelInTestDocument(foo=42)
child1 = SomeModelInTestDocument(foo=43)
root1.child = child1
d.add_root(root1)
before = d.to_json()
root1.foo = 47
after = d.to_json()
patch = Document._compute_patch_between_json(before, after)
expected = dict(references=[],
events=[
{'attr': u'foo',
'kind': 'ModelChanged',
'model': {'id': None,
'type': 'SomeModelInTestDocument'},
'new': 47}
])
expected['events'][0]['model']['id'] = root1._id
self.assertDictEqual(expected, patch)
d2 = Document.from_json(before)
d2.apply_json_patch(patch)
self.assertEqual(root1.foo, d2.roots[0].foo)
def test_compute_two_attribute_patch(self):
from bokeh.document import Document
d = Document()
root1 = SomeModelInTestDocument(foo=42)
child1 = AnotherModelInTestDocument(bar=43)
root1.child = child1
d.add_root(root1)
before = d.to_json()
root1.foo=47
child1.bar=57
after = d.to_json()
patch = Document._compute_patch_between_json(before, after)
expected = dict(references=[],
events=[
{'attr': u'bar',
'kind': 'ModelChanged',
'model': {'id': None,
'type': 'AnotherModelInTestDocument'},
'new': 57},
{'attr': u'foo',
'kind': 'ModelChanged',
'model': {'id': None,
'type': 'SomeModelInTestDocument'},
'new': 47}
])
expected['events'][0]['model']['id'] = child1._id
expected['events'][1]['model']['id'] = root1._id
# order is undefined, so fix our expectation if needed
self.assertEqual(2, len(patch['events']))
if patch['events'][0]['model']['type'] == 'AnotherModelInTestDocument':
pass
else:
tmp = expected['events'][0]
expected['events'][0] = expected['events'][1]
expected['events'][1] = tmp
self.assertDictEqual(expected, patch)
d2 = Document.from_json(before)
d2.apply_json_patch(patch)
self.assertEqual(root1.foo, d2.roots[0].foo)
self.assertEqual(root1.child.bar, d2.roots[0].child.bar)
def test_compute_remove_root_patch(self):
from bokeh.document import Document
d = Document()
root1 = SomeModelInTestDocument(foo=42)
child1 = AnotherModelInTestDocument(bar=43)
root1.child = child1
d.add_root(root1)
before = d.to_json()
d.remove_root(root1)
after = d.to_json()
patch = Document._compute_patch_between_json(before, after)
expected = dict(references=[],
events= [
{'kind': 'RootRemoved',
'model': {'id': None,
'type': 'SomeModelInTestDocument'}}
])
expected['events'][0]['model']['id'] = root1._id
self.assertDictEqual(expected, patch)
d2 = Document.from_json(before)
d2.apply_json_patch(patch)
self.assertEqual([], d2.roots)
def test_compute_add_root_patch(self):
from bokeh.document import Document
d = Document()
root1 = SomeModelInTestDocument(foo=42)
child1 = AnotherModelInTestDocument(bar=43)
root1.child = child1
d.add_root(root1)
before = d.to_json()
root2 = SomeModelInTestDocument(foo=57)
d.add_root(root2)
after = d.to_json()
patch = Document._compute_patch_between_json(before, after)
expected = {
'references' : [
{ 'attributes': {'child': None, 'foo': 57},
'id': None,
'type': 'SomeModelInTestDocument'}
],
'events' : [
{ 'kind': 'RootAdded',
'model': {'id': None,
'type': 'SomeModelInTestDocument'}
}
]
}
expected['references'][0]['id'] = root2._id
expected['events'][0]['model']['id'] = root2._id
self.assertDictEqual(expected, patch)
d2 = Document.from_json(before)
d2.apply_json_patch(patch)
self.assertEqual(2, len(d2.roots))
self.assertEqual(42, d2.roots[0].foo)
self.assertEqual(57, d2.roots[1].foo)
class TestUnlockedDocumentProxy(unittest.TestCase):
def test_next_tick_callback_works(self):
d = document.UnlockedDocumentProxy(document.Document())
assert curdoc() is not d
curdoc_from_cb = []
def cb():
curdoc_from_cb.append(curdoc())
callback = d.add_next_tick_callback(cb)
callback.callback()
assert len(curdoc_from_cb) == 1
assert curdoc_from_cb[0] is d._doc
def cb2(): pass
callback = d.add_next_tick_callback(cb2)
d.remove_next_tick_callback(cb2)
def test_other_attrs_raise(self):
d = document.UnlockedDocumentProxy(document.Document())
assert curdoc() is not d
with pytest.raises(RuntimeError) as e:
d.foo
assert str(e) == "Only add_next_tick_callback may be used safely without taking the document lock; "
"to make other changes to the document, add a next tick callback and make your changes "
"from that callback."
for attr in dir(d._doc):
if attr in ["add_next_tick_callback", "remove_next_tick_callback"]: continue
with pytest.raises(RuntimeError) as e:
getattr(d, "foo")
def test_without_document_lock(self):
d = document.Document()
assert curdoc() is not d
curdoc_from_cb = []
@document.without_document_lock
def cb():
curdoc_from_cb.append(curdoc())
callback = d.add_next_tick_callback(cb)
callback._callback()
assert callback.callback.nolock == True
assert len(curdoc_from_cb) == 1
assert curdoc_from_cb[0]._doc is d
assert isinstance(curdoc_from_cb[0], document.UnlockedDocumentProxy)
|
bsd-3-clause
|
bdol/bdol-ml
|
neural_networks/mlp_dpp_dropout/MLP.py
|
1
|
18692
|
import ConfigParser
import ast
import cPickle as pickle
import dpp
import numpy as np
import os
import shutil
import sys
import time
import uuid
def save_state(params, paramsFilename):
pickle.dump(params, open(paramsFilename, "wb"))
return 0
def rectified_linear(X):
return np.maximum(X, 0.0)
def d_rectified_linear(X):
gZeroIdx = X>0
return gZeroIdx*1
def softmax(X):
# Use the log-sum-exp trick for numerical stability
m = np.atleast_2d(np.amax(X, axis=1)).T
y_exp = np.exp(X-m)
s = np.atleast_2d(np.sum(y_exp, axis=1)).T
return y_exp/s
def d_softmax(X):
return X
def sigmoid(X):
return 1.0/(1+np.exp(-X))
def d_sigmoid(X):
return sigmoid(X)*(1-sigmoid(X))
def linear(X):
return X
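# Illustrative note (not part of the original file): the log-sum-exp shift in
# softmax() above keeps the exponentials finite for large activations. For
# example, softmax(np.array([[1000.0, 1000.0]])) subtracts the row maximum,
# evaluates exp(0.0) for both entries and returns [[0.5, 0.5]], whereas a naive
# np.exp(1000.0) would overflow to inf.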
class Parameters:
'''
This class contains the current experiment parameters, including:
Model parameters:
layerSizes: A list of layer sizes
activations: The activation type for each layer
dropoutType
dropoutProb
dropoutInputProb
wLenLimit: The hard limit to a weight vector's L2 norm
Experiment parameters:
checkGradient: True/False, whether or not to check gradient
checkGradientEpochs: List of epochs to check the gradient at
continueFromSaved: Whether or not to continue from a saved state
currentEpoch
currentLearningRate
digits
initialLearningRate
minibatchSize
momentumCurrent
momentumFinal
momentumInitial
momentumT: Specifies the epoch number when momentum=momentumFinal
name: A descriptive name for the experiment
rateDecay
totalEpochs
Program parameters:
logToFile: True/False, whether to log experiment results to a file
logFile: The name of the log file (if applicable)
saveState: True/False, whether or not to save the state at a given time
saveStateInterval: Save the model every x epochs
saveStateUnique: T/F, Save a unique version of the model at each point
saveStateBaseName: The base of the saved state filename
To initialize an experiment, pass it a params.ini file specifying the value
for each option. An example params.ini file is given in this directory.
'''
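# Hypothetical params.ini sketch (illustrative only -- the keys mirror what
# parseParamsFile() below reads; the example file shipped with the project may
# use different values):
#
#   [model]
#   layerSizes = (784, 800, 800, 10)
#   activations = rectified_linear,rectified_linear,softmax
#   dropoutType = dpp_dropout
#   dropoutProb = 0.5
#   dropoutInputProb = 0.2
#   wLenLimit = 15.0
#
#   [experiment]
#   checkGradient = False
#   checkGradientEpochs = (0, 1)
#   digits = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9)
#   learningRate = 0.1
#   minibatchSize = 100
#   datasetPath = /path/to/mnist.pkl
#   momentumInitial = 0.5
#   momentumFinal = 0.99
#   momentumT = 500
#   rateDecay = 0.998
#   totalEpochs = 100
#   name = mlp_dpp_dropout
#
#   [program]
#   logToFile = True
#   logFileBaseName = logs/
#   saveState = False
#   saveStateInterval = 10
#   saveStateUnique = False
#   saveStateBaseName = saved/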
def __init__(self, paramsFilename=None):
self.model = None
# Model parameters
self.layerSizes = None
self.activations = None
self.dropoutType = None
self.dropoutProb = None
self.dropoutInputProb = None
self.wLenLimit = None
# Experiment parameters
self.checkGradient = None
self.checkGradientEpochs = None
self.currentEpoch = None
self.currentLearningRate = None
self.datasetPath = None
self.initialLearningRate = None
self.minibatchSize = None
self.momentumCurrent = None
self.momentumFinal = None
self.momentumInitial = None
self.momentumT = None
self.name = None
self.rateDecay = None
self.totalEpochs = None
# Program parameters
self.continueFromSaved = None
self.logToFile = None
self.logFile = None
self.saveState = None
self.saveStateInterval = None
self.saveStateUnique = None
self.saveStateBaseName = None
if paramsFilename:
self.parseParamsFile(paramsFilename)
def initLog(self):
self.logFile = open(self.logFileName, "a")
def parseParamsFile(self, paramsFilename):
'''
Initializes all members based on the values in the given file.
'''
cfg = ConfigParser.ConfigParser()
cfg.read(paramsFilename)
# Model parameters
self.layerSizes = list(ast.literal_eval(cfg.get('model', 'layerSizes')))
self.activations = cfg.get('model', 'activations').split(',')
self.dropoutType = cfg.get('model', 'dropoutType')
if self.dropoutType == 'nodropout':
self.doDropout = False
else:
self.doDropout = True
self.dropoutProb = ast.literal_eval(cfg.get('model', 'dropoutProb'))
self.dropoutInputProb = ast.literal_eval(cfg.get('model', 'dropoutInputProb'))
self.wLenLimit = ast.literal_eval(cfg.get('model', 'wLenLimit'))
# Experiment parameters
self.checkGradient = ast.literal_eval(cfg.get('experiment', 'checkGradient'))
self.checkGradientEpochs = list(ast.literal_eval(cfg.get('experiment',
'checkGradientEpochs')))
self.digits = list(ast.literal_eval(cfg.get('experiment', 'digits')))
self.learningRate = ast.literal_eval(cfg.get('experiment', 'learningRate'))
self.currentLearningRate = self.learningRate
self.minibatchSize = ast.literal_eval(cfg.get('experiment', 'minibatchSize'))
self.datasetPath = cfg.get('experiment', 'datasetPath')
self.momentumFinal = ast.literal_eval(cfg.get('experiment', 'momentumFinal'))
self.momentumInitial = ast.literal_eval(cfg.get('experiment', 'momentumInitial'))
self.momentumCurrent = self.momentumInitial
self.momentumT = ast.literal_eval(cfg.get('experiment', 'momentumT'))
self.name = cfg.get('experiment', 'name')
self.rateDecay = ast.literal_eval(cfg.get('experiment', 'rateDecay'))
self.currentEpoch = 0
self.totalEpochs = ast.literal_eval(cfg.get('experiment', 'totalEpochs'))
# Program parameters
self.logToFile = ast.literal_eval(cfg.get('program', 'logToFile'))
self.logFileBaseName = cfg.get('program', 'logFileBaseName')
if self.logToFile:
dateStr = time.strftime('%Y-%m-%d_%H-%M')
# Add a UUID so we can track this experiment
uuidStr = str(uuid.uuid1())
self.logFileName = os.path.abspath(self.logFileBaseName+self.name+"_"+dateStr+"_"+uuidStr+".txt")
self.logFile = open(self.logFileName, "w")
self.logFile.write('Num. Errors Train,Num. Errors Test,learningRate,momentum,elapsedTime\n')
# Also copy the params over for posterity
paramsCopyStr = self.logFileBaseName+"params_"+str(uuidStr)+".ini"
shutil.copyfile(paramsFilename, paramsCopyStr)
self.saveState = ast.literal_eval(cfg.get('program', 'saveState'))
self.saveStateInterval = ast.literal_eval(cfg.get('program', 'saveStateInterval'))
self.saveStateUnique = ast.literal_eval(cfg.get('program', 'saveStateUnique'))
self.saveStateBaseName = cfg.get('program', 'saveStateBaseName')
def update(self):
'''
Updates all the paramters for the next epoch. Also saves the model.
'''
self.currentEpoch += 1
self.currentLearningRate = self.currentLearningRate*self.rateDecay
if self.currentEpoch < self.momentumT:
self.momentumCurrent = \
(1.0-float(self.currentEpoch)/self.momentumT)*self.momentumInitial + \
(float(self.currentEpoch)/self.momentumT)*self.momentumFinal
else:
self.momentumCurrent = self.momentumFinal
if self.saveState and self.currentEpoch%self.saveStateInterval==0:
modelFileName = os.path.join(self.saveStateBaseName, "model")
paramsFileName = os.path.join(self.saveStateBaseName, "params")
self.logFile.close()
self.logFile = None
if self.saveStateUnique:
dateStr = '_'+time.strftime('%Y-%m-%d_%H-%M-%S')
modelFileName += dateStr
paramsFileName += dateStr
modelFileName += '.pkl'
paramsFileName += '.pkl'
print "Saving to {0}...".format(paramsFileName)
save_state(self, paramsFileName)
self.logFile = open(self.logFileName, "a")
def log(self, logStr):
self.logFile.write(logStr)
self.logFile.flush()
def cleanup(self):
self.logFile.close()
def __getstate__(self): return self.__dict__
def __setstate__(self, d): self.__dict__.update(d)
def __str__(self):
s = "{:<40}{:<10}\n".format("Name", self.name)
s += "{:<40}{:<10}\n".format("Layer sizes", str(self.layerSizes))
s += "{:<40}{:<10}\n".format("Activations", str(self.activations))
s += "{:<40}{:<10}\n".format("Dropout type", str(self.dropoutType))
s += "{:<40}{:<10}\n".format("Dropout probability", str(self.dropoutProb))
s += "{:<40}{:<10}\n".format("Input dropout probability", str(self.dropoutInputProb))
s += "{:<40}{:<10}\n".format("Check gradient?", str(self.checkGradient))
s += "{:<40}{:<10}\n".format("Check gradient epochs", str(self.checkGradientEpochs))
s += "{:<40}{:<10}\n".format("MNIST digits", str(self.digits))
s += "{:<40}{:<10}\n".format("Learning rate", str(self.learningRate))
s += "{:<40}{:<10}\n".format("Minibatch size", str(self.minibatchSize))
s += "{:<40}{:<10}\n".format("Dataset path", str(self.datasetPath))
s += "{:<40}{:<10}\n".format("Initial momentum", str(self.momentumInitial))
s += "{:<40}{:<10}\n".format("Final momentum", str(self.momentumFinal))
s += "{:<40}{:<10}\n".format("Momentum T", str(self.momentumT))
s += "{:<40}{:<10}\n".format("Rate decay", str(self.rateDecay))
s += "{:<40}{:<10}\n".format("Total epochs", str(self.totalEpochs))
s += "{:<40}{:<10}\n".format("Weight limit", str(self.wLenLimit))
return s
class Layer:
def __init__(self, size, activation, d_activation):
self.W = 0.01*np.random.randn(size[0], size[1])
self.prevZ = None
self.activation = activation
self.d_activation = d_activation
self.z = np.zeros((size[0], size[1]))
self.d = np.zeros((size[0], size[1]))
self.a = np.zeros((1, size[1]))
def random_dropout(self, dropoutProb):
if dropoutProb == 0:
return
d_idx = np.random.binomial(1, (1-dropoutProb), self.prevZ.shape[1]-1)
self.prevZ[:, 0:-1] = self.prevZ[:, 0:-1]*d_idx
def dpp_dropin_norm(self, dropoutProb):
if dropoutProb == 0:
return
W_n = self.W[0:-1, :]/(np.atleast_2d(np.linalg.norm(self.W[0:-1, :], axis=1)).T)
L = (W_n.dot(W_n.T))**2
D, V = dpp.decompose_kernel(L)
k = int(np.floor((1-dropoutProb)*self.W.shape[0]))
J = dpp.sample_k(k, D, V)
d_idx = np.zeros((self.W.shape[0]-1, 1))
d_idx[J.astype(int)] = 1
self.prevZ[:, 0:-1] = self.prevZ[:, 0:-1]*d_idx.T
def dpp_dropin(self, dropoutProb):
if dropoutProb == 0:
return
W_n = self.W[0:-1, :]
L = (W_n.dot(W_n.T))**2
D, V = dpp.decompose_kernel(L)
k = int(np.floor((1-dropoutProb)*self.W.shape[0]))
J = dpp.sample_k(k, D, V)
d_idx = np.zeros((self.W.shape[0]-1, 1))
d_idx[J.astype(int)] = 1
self.prevZ[:, 0:-1] = self.prevZ[:, 0:-1]*d_idx.T
def dpp_dropin_EY(self, dropoutProb):
if dropoutProb == 0:
return
p = (1-dropoutProb)
W_n = self.W[0:-1, :]
L = (W_n.dot(W_n.T))**2
D, V = dpp.decompose_kernel(L)
(kmax, d) = dpp.analyze_bDPP(D)
print kmax
if kmax>=(p*L.shape[0]):
J = dpp.sample_EY(D, V, p*L.shape[0])
d_idx = np.zeros((self.W.shape[0]-1, 1))
d_idx[J.astype(int)] = 1
self.prevZ[:, 0:-1] = self.prevZ[:, 0:-1]*d_idx.T
else:
self.random_dropout(dropoutProb)
def dpp_dropout_norm(self, dropoutProb):
if dropoutProb == 0:
return
W_n = self.W[0:-1, :]/(np.atleast_2d(np.linalg.norm(self.W[0:-1, :], axis=1)).T)
L = (W_n.dot(W_n.T))**2
D, V = dpp.decompose_kernel(L)
k = int(np.floor((1-dropoutProb)*self.W.shape[0]))
J = dpp.sample_k(k, D, V)
d_idx = np.ones((self.W.shape[0]-1, 1))
d_idx[J.astype(int)] = 0
self.prevZ[:, 0:-1] = self.prevZ[:, 0:-1]*d_idx.T
def dpp_dropout(self, dropoutProb):
if dropoutProb == 0:
return
W_n = self.W[0:-1, :]
L = (W_n.dot(W_n.T))**2
D, V = dpp.decompose_kernel(L)
k = int(np.floor((1-dropoutProb)*self.W.shape[0]))
J = dpp.sample_k(k, D, V)
d_idx = np.ones((self.W.shape[0]-1, 1))
d_idx[J.astype(int)] = 0
self.prevZ[:, 0:-1] = self.prevZ[:, 0:-1]*d_idx.T
def dpp_dropin_uniform(self, dropoutProb):
if dropoutProb == 0:
return
p = (1-dropoutProb)
L = (p/(1-p))*np.eye(self.W.shape[0]-1)
D, V = dpp.decompose_kernel(L)
J = dpp.sample(D, V)
d_idx = np.zeros((self.W.shape[0]-1, 1))
d_idx[J.astype(int)] = 1
self.prevZ[:, 0:-1] = self.prevZ[:, 0:-1]*d_idx.T
def compute_activation(self, X, doDropout=False, dropoutProb=0.5,
testing=False, dropoutSeed=None):
self.prevZ = np.copy(X)
# I think you should drop out columns here?
if doDropout:
# We are not testing, so do dropout
if not testing:
self.dropoutFunction(dropoutProb)
self.a = self.prevZ.dot(self.W)
# We are testing, so we don't do dropout but we do scale the weights
if testing:
self.a = self.prevZ[:, 0:-1].dot(self.W[0:-1, :]*(1-dropoutProb))
self.a += np.outer(self.prevZ[:, -1], self.W[-1, :])
else:
self.a = self.prevZ.dot(self.W)
self.z = self.activation(self.a)
self.d = self.d_activation(self.a)
return self.z
class MLP:
def __init__(self, params):
self.params = params
self.doDropout = params.doDropout
self.dropoutProb = params.dropoutProb
self.dropoutInputProb = params.dropoutInputProb
self.wLenLimit = params.wLenLimit
# Activations map - we need this to map from the strings in the *.ini file
# to actual function names
activationsMap = {'sigmoid': sigmoid,
'rectified_linear': rectified_linear,
'softmax': softmax}
d_activationsMap = {'sigmoid': d_sigmoid,
'rectified_linear': d_rectified_linear,
'softmax': d_softmax}
# Initialize each layer with the given parameters
self.layers = []
self.currentGrad = []
for i in range(0, len(params.layerSizes)-1):
size = [params.layerSizes[i]+1, params.layerSizes[i+1]]
activation = activationsMap[params.activations[i]]
d_activation = d_activationsMap[params.activations[i]]
l = Layer(size, activation, d_activation)
dropoutTypeMap = {'nodropout': None,
'dpp_dropout_norm': l.dpp_dropout_norm,
'dpp_dropout': l.dpp_dropout,
'dpp_dropin_norm': l.dpp_dropin_norm,
'dpp_dropin': l.dpp_dropin,
'dpp_dropin_uniform': l.dpp_dropin_uniform,
'dpp_dropin_EY': l.dpp_dropin_EY,
'random': l.random_dropout}
l.dropoutFunction = dropoutTypeMap[params.dropoutType]
self.layers.append(l)
self.currentGrad.append(np.zeros(size))
def __getstate__(self): return self.__dict__
def __setstate__(self, d): self.__dict__.update(d)
def forward_propagate(self, X, testing=False, dropoutSeeds=None):
x_l = np.atleast_2d(X)
for i in range(0, len(self.layers)):
x_l = np.append(x_l, np.ones((x_l.shape[0], 1)), 1)
if i==0: # We're at the input layer
if dropoutSeeds:
x_l = self.layers[i].compute_activation(x_l, self.doDropout,
self.dropoutInputProb, testing, dropoutSeeds[i])
else:
x_l = self.layers[i].compute_activation(x_l, self.doDropout,
self.dropoutInputProb, testing)
else:
if dropoutSeeds:
x_l = self.layers[i].compute_activation(x_l, self.doDropout, self.dropoutProb,
testing, dropoutSeeds[i])
else:
x_l = self.layers[i].compute_activation(x_l, self.doDropout, self.dropoutProb,
testing)
return x_l
def xent_cost(self, X, Y, Yhat):
E = np.array([0]).astype(np.float64)
for i in range(0, Y.shape[0]):
y = np.argmax(Y[i, :])
E -= np.log(Yhat[i, y]).astype(np.float64)
return E
def check_gradient(self, X, Y):
eps = 1E-4
dropoutSeeds = [232, 69, 75, 333]
output = self.forward_propagate(X, dropoutSeeds=dropoutSeeds)
W_grad = self.calculate_gradient(output, X, Y)
W_initial = []
for i in range(0, len(self.layers)):
W_initial.append(np.copy(self.layers[i].W))
for i in range(0, len(self.layers)):
W = self.layers[i].W
print " Checking layer",i
layer_err = 0
for j in range(0, W.shape[0]):
for k in range(0, W.shape[1]):
self.layers[i].W[j,k] += eps
out_p = self.forward_propagate(X, dropoutSeeds=dropoutSeeds)
E_p = self.xent_cost(X, Y, out_p)
self.layers[i].W[j,k] = W_initial[i][j,k]
self.layers[i].W[j,k] -= eps
out_m = self.forward_propagate(X, dropoutSeeds=dropoutSeeds)
E_m = self.xent_cost(X, Y, out_m)
self.layers[i].W[j,k] = W_initial[i][j,k]
g_approx = (E_p-E_m)/(2*eps)
g_calc = W_grad[i][j,k]
err = abs(g_approx-g_calc)/(abs(g_approx)+abs(g_calc)+1E-10)
layer_err += err
if err>1E-3:
#if g_approx == 0 and g_calc != 0:
print " Gradient checking failed for ",i,j,k,g_approx,W_grad[i][j,k],E_p, E_m, err
bdp.progBar(j, self.layers[i].W.shape[0]) # NOTE: 'bdp' (the project's progress-bar helper) is not imported in this file
print layer_err
def calculate_gradient(self, output, X, Y):
# First set up the gradients
W_grad = []
for i in range(0, len(self.layers)):
W_grad.append( np.zeros(self.layers[i].W.shape) )
e = output-Y
# Backpropagate for each training example separately
deltas = [e.T]
for i in range(len(self.layers)-2, -1, -1):
W = self.layers[i+1].W[0:-1, :]
deltas.insert(0, np.multiply(self.layers[i].d.T, W.dot(deltas[0])))
for i in range(0, len(self.layers)):
W_grad[i] = (deltas[i].dot(self.layers[i].prevZ)).T
return W_grad
def backpropagate(self, output, X, Y):
W_grad = self.calculate_gradient(output, X, Y)
# Update the current gradient, and step in that direction
for i in range(0, len(self.layers)):
p = self.params.momentumCurrent
eta = self.params.currentLearningRate
self.currentGrad[i] = p*self.currentGrad[i] - (1.0-p)*eta*W_grad[i]
self.layers[i].W += self.currentGrad[i]
#self.previousGrad[i] = np.copy(self.currentGrad[i])
# Constrain the weights going to the hidden units if necessary
wLens = np.linalg.norm(self.layers[i].W, axis=0)**2
wLenCorrections = np.ones([1, self.layers[i].W.shape[1]])
wLenCorrections[0, np.where(wLens>self.wLenLimit)[0]] = wLens[wLens>self.wLenLimit]/self.wLenLimit
self.layers[i].W = self.layers[i].W/(np.sqrt(wLenCorrections))
# Propagate forward through the network, record the training error, train the
# weights with backpropagation
def train(self, X, Y):
output = self.forward_propagate(X, testing=False)
self.backpropagate(output, X, Y)
# Just pass the data forward through the network and return the predictions
# for the given miniBatch
def test(self, X):
Yhat = np.zeros((X.shape[0], self.layers[-1].W.shape[1]))
Yhat = self.forward_propagate(X, testing=True)
return Yhat
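# Hypothetical usage sketch (not part of the original file): it assumes a
# params.ini matching the Parameters docstring exists alongside this script,
# and runs one random minibatch through train()/test() to exercise the API.
if __name__ == '__main__':
params = Parameters('params.ini')
mlp = MLP(params)
X = np.random.randn(params.minibatchSize, params.layerSizes[0])
Y = np.zeros((params.minibatchSize, params.layerSizes[-1]))
Y[np.arange(params.minibatchSize), np.random.randint(0, params.layerSizes[-1], params.minibatchSize)] = 1
mlp.train(X, Y)
print "Predictions shape:", mlp.test(X).shape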
|
lgpl-3.0
|
andreif/django
|
tests/template_tests/syntax_tests/test_spaceless.py
|
521
|
1766
|
from django.test import SimpleTestCase
from ..utils import setup
class SpacelessTagTests(SimpleTestCase):
@setup({'spaceless01': "{% spaceless %} <b> <i> text </i> </b> {% endspaceless %}"})
def test_spaceless01(self):
output = self.engine.render_to_string('spaceless01')
self.assertEqual(output, "<b><i> text </i></b>")
@setup({'spaceless02': "{% spaceless %} <b> \n <i> text </i> \n </b> {% endspaceless %}"})
def test_spaceless02(self):
output = self.engine.render_to_string('spaceless02')
self.assertEqual(output, "<b><i> text </i></b>")
@setup({'spaceless03': "{% spaceless %}<b><i>text</i></b>{% endspaceless %}"})
def test_spaceless03(self):
output = self.engine.render_to_string('spaceless03')
self.assertEqual(output, "<b><i>text</i></b>")
@setup({'spaceless04': "{% spaceless %}<b> <i>{{ text }}</i> </b>{% endspaceless %}"})
def test_spaceless04(self):
output = self.engine.render_to_string('spaceless04', {'text': 'This & that'})
self.assertEqual(output, "<b><i>This & that</i></b>")
@setup({'spaceless05': "{% autoescape off %}{% spaceless %}"
"<b> <i>{{ text }}</i> </b>{% endspaceless %}"
"{% endautoescape %}"})
def test_spaceless05(self):
output = self.engine.render_to_string('spaceless05', {'text': 'This & that'})
self.assertEqual(output, "<b><i>This & that</i></b>")
@setup({'spaceless06': "{% spaceless %}<b> <i>{{ text|safe }}</i> </b>{% endspaceless %}"})
def test_spaceless06(self):
output = self.engine.render_to_string('spaceless06', {'text': 'This & that'})
self.assertEqual(output, "<b><i>This & that</i></b>")
|
bsd-3-clause
|
goldsborough/.emacs
|
.emacs.d/.python-environments/default/lib/python3.5/encodings/cp1250.py
|
272
|
13686
|
""" Python Character Mapping Codec cp1250 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1250.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp1250',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\u20ac' # 0x80 -> EURO SIGN
'\ufffe' # 0x81 -> UNDEFINED
'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
'\ufffe' # 0x83 -> UNDEFINED
'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
'\u2020' # 0x86 -> DAGGER
'\u2021' # 0x87 -> DOUBLE DAGGER
'\ufffe' # 0x88 -> UNDEFINED
'\u2030' # 0x89 -> PER MILLE SIGN
'\u0160' # 0x8A -> LATIN CAPITAL LETTER S WITH CARON
'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
'\u015a' # 0x8C -> LATIN CAPITAL LETTER S WITH ACUTE
'\u0164' # 0x8D -> LATIN CAPITAL LETTER T WITH CARON
'\u017d' # 0x8E -> LATIN CAPITAL LETTER Z WITH CARON
'\u0179' # 0x8F -> LATIN CAPITAL LETTER Z WITH ACUTE
'\ufffe' # 0x90 -> UNDEFINED
'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
'\u2022' # 0x95 -> BULLET
'\u2013' # 0x96 -> EN DASH
'\u2014' # 0x97 -> EM DASH
'\ufffe' # 0x98 -> UNDEFINED
'\u2122' # 0x99 -> TRADE MARK SIGN
'\u0161' # 0x9A -> LATIN SMALL LETTER S WITH CARON
'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
'\u015b' # 0x9C -> LATIN SMALL LETTER S WITH ACUTE
'\u0165' # 0x9D -> LATIN SMALL LETTER T WITH CARON
'\u017e' # 0x9E -> LATIN SMALL LETTER Z WITH CARON
'\u017a' # 0x9F -> LATIN SMALL LETTER Z WITH ACUTE
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\u02c7' # 0xA1 -> CARON
'\u02d8' # 0xA2 -> BREVE
'\u0141' # 0xA3 -> LATIN CAPITAL LETTER L WITH STROKE
'\xa4' # 0xA4 -> CURRENCY SIGN
'\u0104' # 0xA5 -> LATIN CAPITAL LETTER A WITH OGONEK
'\xa6' # 0xA6 -> BROKEN BAR
'\xa7' # 0xA7 -> SECTION SIGN
'\xa8' # 0xA8 -> DIAERESIS
'\xa9' # 0xA9 -> COPYRIGHT SIGN
'\u015e' # 0xAA -> LATIN CAPITAL LETTER S WITH CEDILLA
'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xac' # 0xAC -> NOT SIGN
'\xad' # 0xAD -> SOFT HYPHEN
'\xae' # 0xAE -> REGISTERED SIGN
'\u017b' # 0xAF -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
'\xb0' # 0xB0 -> DEGREE SIGN
'\xb1' # 0xB1 -> PLUS-MINUS SIGN
'\u02db' # 0xB2 -> OGONEK
'\u0142' # 0xB3 -> LATIN SMALL LETTER L WITH STROKE
'\xb4' # 0xB4 -> ACUTE ACCENT
'\xb5' # 0xB5 -> MICRO SIGN
'\xb6' # 0xB6 -> PILCROW SIGN
'\xb7' # 0xB7 -> MIDDLE DOT
'\xb8' # 0xB8 -> CEDILLA
'\u0105' # 0xB9 -> LATIN SMALL LETTER A WITH OGONEK
'\u015f' # 0xBA -> LATIN SMALL LETTER S WITH CEDILLA
'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u013d' # 0xBC -> LATIN CAPITAL LETTER L WITH CARON
'\u02dd' # 0xBD -> DOUBLE ACUTE ACCENT
'\u013e' # 0xBE -> LATIN SMALL LETTER L WITH CARON
'\u017c' # 0xBF -> LATIN SMALL LETTER Z WITH DOT ABOVE
'\u0154' # 0xC0 -> LATIN CAPITAL LETTER R WITH ACUTE
'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\u0102' # 0xC3 -> LATIN CAPITAL LETTER A WITH BREVE
'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\u0139' # 0xC5 -> LATIN CAPITAL LETTER L WITH ACUTE
'\u0106' # 0xC6 -> LATIN CAPITAL LETTER C WITH ACUTE
'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
'\u0118' # 0xCA -> LATIN CAPITAL LETTER E WITH OGONEK
'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\u011a' # 0xCC -> LATIN CAPITAL LETTER E WITH CARON
'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\u010e' # 0xCF -> LATIN CAPITAL LETTER D WITH CARON
'\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE
'\u0143' # 0xD1 -> LATIN CAPITAL LETTER N WITH ACUTE
'\u0147' # 0xD2 -> LATIN CAPITAL LETTER N WITH CARON
'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\u0150' # 0xD5 -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xd7' # 0xD7 -> MULTIPLICATION SIGN
'\u0158' # 0xD8 -> LATIN CAPITAL LETTER R WITH CARON
'\u016e' # 0xD9 -> LATIN CAPITAL LETTER U WITH RING ABOVE
'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
'\u0170' # 0xDB -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
'\u0162' # 0xDE -> LATIN CAPITAL LETTER T WITH CEDILLA
'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
'\u0155' # 0xE0 -> LATIN SMALL LETTER R WITH ACUTE
'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\u0103' # 0xE3 -> LATIN SMALL LETTER A WITH BREVE
'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
'\u013a' # 0xE5 -> LATIN SMALL LETTER L WITH ACUTE
'\u0107' # 0xE6 -> LATIN SMALL LETTER C WITH ACUTE
'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
'\u0119' # 0xEA -> LATIN SMALL LETTER E WITH OGONEK
'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
'\u011b' # 0xEC -> LATIN SMALL LETTER E WITH CARON
'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\u010f' # 0xEF -> LATIN SMALL LETTER D WITH CARON
'\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE
'\u0144' # 0xF1 -> LATIN SMALL LETTER N WITH ACUTE
'\u0148' # 0xF2 -> LATIN SMALL LETTER N WITH CARON
'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\u0151' # 0xF5 -> LATIN SMALL LETTER O WITH DOUBLE ACUTE
'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf7' # 0xF7 -> DIVISION SIGN
'\u0159' # 0xF8 -> LATIN SMALL LETTER R WITH CARON
'\u016f' # 0xF9 -> LATIN SMALL LETTER U WITH RING ABOVE
'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
'\u0171' # 0xFB -> LATIN SMALL LETTER U WITH DOUBLE ACUTE
'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
'\u0163' # 0xFE -> LATIN SMALL LETTER T WITH CEDILLA
'\u02d9' # 0xFF -> DOT ABOVE
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
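### Usage sketch (illustrative, not part of the generated codec file)
if __name__ == '__main__':
# Round-trip a string with CP1250-specific characters ("Łódź") through
# the tables defined above.
info = getregentry()
text = '\u0141\xf3d\u017a'
raw, _ = info.encode(text)
assert raw == b'\xa3\xf3d\x9f'
assert info.decode(raw)[0] == text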
|
mit
|
TuinfeesT/TuinWolk
|
tuinwolk/tests/algorithm_test.py
|
1
|
5015
|
import random, string, sys
GROWTH_RATE = 2 # we only place repos in locations that support at least GROWTH_RATE times the size of the repo
CHIP_RATE = 0.9 # if a repo has no locations that support GROWTH_RATE * repo.local_size, we chip a bit off the desired size to see if we can still find a fit
MAX_SERVERS = 10
MAX_LOCATIONS = 10
MAX_REPOS = 10
__test__ = False
class GeoLoc:
def __init__(self, coords=("", "")):
self.coords = coords
class Server:
def __init__(self, ip, port, user, geoloc=GeoLoc()): #, locations=[]):
self.geoloc = geoloc
self.ip = ip
self.port = port
self.user = user
self.geoloc = geoloc
self.locations = []
def __str__(self):
return repr(self)
def __repr__(self):
s = '{ip}:{port:5}\n'.format(ip=self.ip, port=self.port)
for loc in self.locations:
s += '\t{loc}\n'.format(loc=loc)
for repo in loc.repos:
s += '\t\t{repo}\n'.format(repo=repo)
return s
class Repo:
def __init__(self, name, safe_mode, local_size, min_locations, locations, base_location=None):
self.name = name
self.safe_mode = safe_mode
self.local_size = local_size
self.min_locations = min_locations
self.locations = locations
self.base_location = base_location
def __str__(self):
return repr(self) #'%7s (%3dMB : %s)' %(self.name, self.local_size, self.min_locations)
def __repr__(self):
return '{0} ({1:5d}MB : {2} sites : based in {3})'.format(self.name, self.commit_size(), self.min_locations, self.base_location)
def commit_size(self):
return GROWTH_RATE * self.local_size
class Location:
def __init__(self, max_up, max_down, max_size, path, server):
self.max_up = max_up
self.max_down = max_down
self.max_size = max_size
self.path = path
self.server = server
self.server.locations.append(self)
self.repos = []
def __str__(self):
return repr(self) #'%s@%s:%d(%dMB)' % (self.user, self.ip, self.port, self.max_size)
def __repr__(self):
return '{path} @ {ip}({size_left:5}MB of {max_size:5}MB left)'.format(path=self.path, ip=self.server.ip, size_left=self.max_size - self.committed_size(), max_size=self.max_size)
def committed_size(self):
return sum([r.commit_size() for r in self.repos])
def main():
(servers, repos, locations) = init_test_data()
run = 0
while distribute(servers, repos, locations, run):
print_state(servers)
run += 1
def distribute(servers, repos, locations, run=0):
#below is version 0.2 of the distribution protocol.
placed = False
#set the base locations
for repo in repos:
if repo not in repo.base_location.repos:
repo.base_location.repos.append(repo)
if repo.base_location not in repo.locations:
repo.locations.append(repo.base_location)
repos = sorted(repos, key=lambda r : r.local_size, reverse=True)
locations = sorted(locations, key=lambda l : l.max_size - l.committed_size(), reverse=True)
for repo in repos:
possible_locations = find_possible_locations(repo, locations)
print("Will try to place {repo} in {min_loc} of {pos}".format(repo=repo, min_loc=repo.min_locations - len(repo.locations) if run == 0 else 1, pos=possible_locations))
for placing in (range(repo.min_locations - 1) if run == 0 else [0]):
if len(possible_locations) > 0:
print("Placing {repo} in {loc}".format(repo=repo, loc=possible_locations[-1]))
placed = True
repo.locations.append(possible_locations[-1])
possible_locations[-1].repos.append(repo)
possible_locations = possible_locations[:-1]
elif run == 0:
print('wine!')
break
return placed
def init_test_data():
servers = []
for i in range(MAX_SERVERS):
s = Server(ip='ip_' + str(i), port=random.randint(1024,65535), user=random_str(5))
servers.append(s)
locations = []
for i in range(MAX_LOCATIONS):
locations.append(Location(max_up=random.randint(0,500), max_down=random.randint(0,500), max_size=random.randint(1000,100000), path="/" + random_str(4), server=random.choice(servers)))
repos = []
for i in range(MAX_REPOS):
repos.append(Repo(name='repo_' + str(i), safe_mode=i % 3 == 0, local_size=int(1.02 ** random.randint(100,550)), min_locations=i % 3 + 1, locations=[]))
for repo in repos:
locs = find_possible_locations(repo, locations)
if len(locs) > 0:
loc = random.choice(locs)
repo.base_location=loc
repo.locations.append(repo.base_location)
loc.repos.append(repo)
else:
print("testset broken")
sys.exit(-1)
print_state(servers)
return (servers, repos, locations)
def random_str(number):
return ''.join(random.choice(string.ascii_letters + string.digits) for x in range(number))
def find_possible_locations(repo, locations):
result = []
for loc in locations:
serv_repos = []
for l in loc.server.locations:
serv_repos += l.repos
if repo not in serv_repos and (loc.max_size - loc.committed_size()) > repo.commit_size():
result.append(loc)
return result
def print_state(s):
print('Servers:')
if type(s) == list:
for serv in s:
print(serv)
else:
print(s)
print("*"*200)
if __name__ == '__main__':
main()
|
mit
|
doganov/edx-platform
|
openedx/core/djangoapps/user_api/preferences/views.py
|
60
|
10869
|
"""
NOTE: this API is WIP and has not yet been approved. Do not use this API
without talking to Christina or Andy.
For more information, see:
https://openedx.atlassian.net/wiki/display/TNL/User+API
"""
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework import permissions
from django.db import transaction
from django.utils.translation import ugettext as _
from openedx.core.lib.api.authentication import (
SessionAuthenticationAllowInactiveUser,
OAuth2AuthenticationAllowInactiveUser,
)
from openedx.core.lib.api.parsers import MergePatchParser
from openedx.core.lib.api.permissions import IsUserInUrlOrStaff
from ..errors import UserNotFound, UserNotAuthorized, PreferenceValidationError, PreferenceUpdateError
from .api import (
get_user_preference, get_user_preferences, set_user_preference, update_user_preferences, delete_user_preference
)
class PreferencesView(APIView):
"""
**Use Cases**
Get or update the user's preference information. Updates are only
supported through merge patch. Preference values of null in a
patch request are treated as requests to remove the preference.
**Example Requests**
GET /api/user/v1/preferences/{username}/
PATCH /api/user/v1/preferences/{username}/ with content_type "application/merge-patch+json"
**Response Values for GET**
If no user exists with the specified username, an HTTP 404 "Not
Found" response is returned.
If a user without "is_staff" access requests preferences for a
different user, an HTTP 404 "Not Found" message is returned.
If the user makes the request for her own account, or makes a
request for another account and has "is_staff" access, an HTTP 200
"OK" response is returned. The response contains a JSON dictionary
with a key/value pair (of type String) for each preference.
The list of preferences depends on your implementation. By default,
the list includes the following preferences.
* account_privacy: The user's setting for sharing her personal
profile. Possible values are "all_users" or "private".
* pref-lang: The user's preferred language, as set in account
settings.
**Response Values for PATCH**
Users can only modify their own preferences. If the
requesting user does not have the specified username and has staff
access, the request returns an HTTP 403 "Forbidden" response. If
the requesting user does not have staff access, the request
returns an HTTP 404 "Not Found" response to avoid revealing the
existence of the account.
If no user exists with the specified username, an HTTP 404 "Not
Found" response is returned.
If "application/merge-patch+json" is not the specified content
type, a 415 "Unsupported Media Type" response is returned.
If validation errors prevent the update, this method returns a 400
"Bad Request" response that includes a "field_errors" field that
lists all error messages.
If a failure at the time of the update prevents the update, a 400
"Bad Request" error is returned. The JSON collection contains
specific errors.
If the update is successful, an HTTP 204 "No Content" response is
returned with no additional content.
"""
authentication_classes = (OAuth2AuthenticationAllowInactiveUser, SessionAuthenticationAllowInactiveUser)
permission_classes = (permissions.IsAuthenticated, IsUserInUrlOrStaff)
parser_classes = (MergePatchParser,)
def get(self, request, username):
"""
GET /api/user/v1/preferences/{username}/
"""
try:
user_preferences = get_user_preferences(request.user, username=username)
except UserNotAuthorized:
return Response(status=status.HTTP_403_FORBIDDEN)
except UserNotFound:
return Response(status=status.HTTP_404_NOT_FOUND)
return Response(user_preferences)
def patch(self, request, username):
"""
PATCH /api/user/v1/preferences/{username}/
"""
if not request.data or not getattr(request.data, "keys", None):
error_message = _("No data provided for user preference update")
return Response(
{
"developer_message": error_message,
"user_message": error_message
},
status=status.HTTP_400_BAD_REQUEST
)
try:
with transaction.atomic():
update_user_preferences(request.user, request.data, user=username)
except UserNotAuthorized:
return Response(status=status.HTTP_403_FORBIDDEN)
except UserNotFound:
return Response(status=status.HTTP_404_NOT_FOUND)
except PreferenceValidationError as error:
return Response(
{"field_errors": error.preference_errors},
status=status.HTTP_400_BAD_REQUEST
)
except PreferenceUpdateError as error:
return Response(
{
"developer_message": error.developer_message,
"user_message": error.user_message
},
status=status.HTTP_400_BAD_REQUEST
)
return Response(status=status.HTTP_204_NO_CONTENT)
class PreferencesDetailView(APIView):
"""
**Use Cases**
Get, create, update, or delete a specific user preference.
**Example Requests**
GET /api/user/v1/preferences/{username}/{preference_key}
PUT /api/user/v1/preferences/{username}/{preference_key}
DELETE /api/user/v1/preferences/{username}/{preference_key}
**Response Values for GET**
If the specified username or preference does not exist, an HTTP
404 "Not Found" response is returned.
If a user without "is_staff" access requests preferences for a
different user, a 404 error is returned.
If the user makes the request for her own account, or makes a
request for another account and has "is_staff" access, an HTTP 200
"OK" response is returned that contains a JSON string.
**Response Values for PUT**
Users can only modify their own preferences. If the
requesting user does not have the specified username and has staff
access, the request returns an HTTP 403 "Forbidden" response. If
the requesting user does not have staff access, the request
returns an HTTP 404 "Not Found" response to avoid revealing the
existence of the account.
If the specified preference does not exist, an HTTP 404 "Not
Found" response is returned.
If the request is successful, a 204 "No Content" status is returned
with no additional content.
**Response Values for DELETE**
Users can only delete their own preferences. If the
requesting user does not have the specified username and has staff
access, the request returns an HTTP 403 "Forbidden" response. If
the requesting user does not have staff access, the request
returns an HTTP 404 "Not Found" response to avoid revealing the
existence of the account.
If the specified preference does not exist, an HTTP 404 "Not
Found" response is returned.
If the update is successful, an HTTP 204 "No Content" response is
returned with no additional content.
"""
authentication_classes = (OAuth2AuthenticationAllowInactiveUser, SessionAuthenticationAllowInactiveUser)
permission_classes = (permissions.IsAuthenticated, IsUserInUrlOrStaff)
def get(self, request, username, preference_key):
"""
GET /api/user/v1/preferences/{username}/{preference_key}
"""
try:
value = get_user_preference(request.user, preference_key, username=username)
# There was no preference with that key, raise a 404.
if value is None:
return Response(status=status.HTTP_404_NOT_FOUND)
except UserNotAuthorized:
return Response(status=status.HTTP_403_FORBIDDEN)
except UserNotFound:
return Response(status=status.HTTP_404_NOT_FOUND)
return Response(value)
def put(self, request, username, preference_key):
"""
PUT /api/user/v1/preferences/{username}/{preference_key}
"""
try:
set_user_preference(request.user, preference_key, request.data, username=username)
except UserNotAuthorized:
return Response(status=status.HTTP_403_FORBIDDEN)
except UserNotFound:
return Response(status=status.HTTP_404_NOT_FOUND)
except PreferenceValidationError as error:
return Response(
{
"developer_message": error.preference_errors[preference_key]["developer_message"],
"user_message": error.preference_errors[preference_key]["user_message"]
},
status=status.HTTP_400_BAD_REQUEST
)
except PreferenceUpdateError as error:
return Response(
{
"developer_message": error.developer_message,
"user_message": error.user_message
},
status=status.HTTP_400_BAD_REQUEST
)
return Response(status=status.HTTP_204_NO_CONTENT)
def delete(self, request, username, preference_key):
"""
DELETE /api/user/v1/preferences/{username}/{preference_key}
"""
try:
preference_existed = delete_user_preference(request.user, preference_key, username=username)
except UserNotAuthorized:
return Response(status=status.HTTP_403_FORBIDDEN)
except UserNotFound:
return Response(status=status.HTTP_404_NOT_FOUND)
except PreferenceUpdateError as error:
return Response(
{
"developer_message": error.developer_message,
"user_message": error.user_message
},
status=status.HTTP_400_BAD_REQUEST
)
if not preference_existed:
return Response(status=status.HTTP_404_NOT_FOUND)
return Response(status=status.HTTP_204_NO_CONTENT)
|
agpl-3.0
|
christophlsa/odoo
|
openerp/cli/server.py
|
187
|
5869
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
OpenERP - Server
OpenERP is an ERP+CRM program for small and medium businesses.
The whole source code is distributed under the terms of the
GNU Public Licence.
(c) 2003-TODAY, Fabien Pinckaers - OpenERP SA
"""
import atexit
import csv
import logging
import os
import signal
import sys
import threading
import traceback
import time
import openerp
from . import Command
__author__ = openerp.release.author
__version__ = openerp.release.version
# Also use the `openerp` logger for the main script.
_logger = logging.getLogger('openerp')
def check_root_user():
""" Exit if the process's user is 'root' (on POSIX system)."""
if os.name == 'posix':
import pwd
if pwd.getpwuid(os.getuid())[0] == 'root' :
sys.stderr.write("Running as user 'root' is a security risk, aborting.\n")
sys.exit(1)
def check_postgres_user():
""" Exit if the configured database user is 'postgres'.
This function assumes the configuration has been initialized.
"""
config = openerp.tools.config
if config['db_user'] == 'postgres':
sys.stderr.write("Using the database user 'postgres' is a security risk, aborting.")
sys.exit(1)
def report_configuration():
""" Log the server version and some configuration values.
This function assumes the configuration has been initialized.
"""
config = openerp.tools.config
_logger.info("OpenERP version %s", __version__)
for name, value in [('addons paths', openerp.modules.module.ad_paths),
('database hostname', config['db_host'] or 'localhost'),
('database port', config['db_port'] or '5432'),
('database user', config['db_user'])]:
_logger.info("%s: %s", name, value)
def rm_pid_file():
config = openerp.tools.config
if not openerp.evented and config['pidfile']:
try:
os.unlink(config['pidfile'])
except OSError:
pass
def setup_pid_file():
""" Create a file with the process id written in it.
This function assumes the configuration has been initialized.
"""
config = openerp.tools.config
if not openerp.evented and config['pidfile']:
with open(config['pidfile'], 'w') as fd:
pidtext = "%d" % (os.getpid())
fd.write(pidtext)
atexit.register(rm_pid_file)
def export_translation():
config = openerp.tools.config
dbname = config['db_name']
if config["language"]:
msg = "language %s" % (config["language"],)
else:
msg = "new language"
_logger.info('writing translation file for %s to %s', msg,
config["translate_out"])
fileformat = os.path.splitext(config["translate_out"])[-1][1:].lower()
with open(config["translate_out"], "w") as buf:
registry = openerp.modules.registry.RegistryManager.new(dbname)
with openerp.api.Environment.manage():
with registry.cursor() as cr:
openerp.tools.trans_export(config["language"],
config["translate_modules"] or ["all"], buf, fileformat, cr)
_logger.info('translation file written successfully')
def import_translation():
config = openerp.tools.config
context = {'overwrite': config["overwrite_existing_translations"]}
dbname = config['db_name']
registry = openerp.modules.registry.RegistryManager.new(dbname)
with openerp.api.Environment.manage():
with registry.cursor() as cr:
openerp.tools.trans_load(
cr, config["translate_in"], config["language"], context=context,
)
def main(args):
check_root_user()
openerp.tools.config.parse_config(args)
check_postgres_user()
report_configuration()
config = openerp.tools.config
# the default limit for CSV fields in the module is 128KiB, which is not
# quite sufficient to import images to store in attachment. 500MiB is a
# bit overkill, but better safe than sorry I guess
csv.field_size_limit(500 * 1024 * 1024)
if config["test_file"]:
config["test_enable"] = True
if config["translate_out"]:
export_translation()
sys.exit(0)
if config["translate_in"]:
import_translation()
sys.exit(0)
# This needs to be done now to ensure the use of the multiprocessing
# signaling mechanism for registries loaded with -d
if config['workers']:
openerp.multi_process = True
preload = []
if config['db_name']:
preload = config['db_name'].split(',')
stop = config["stop_after_init"]
setup_pid_file()
rc = openerp.service.server.start(preload=preload, stop=stop)
sys.exit(rc)
class Server(Command):
"""Start the odoo server (default command)"""
def run(self, args):
main(args)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
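# Typical invocations (illustrative; the exact launcher script name depends on
# how the distribution is packaged):
#   openerp-server -c /etc/openerp/openerp-server.conf
#   openerp-server -d mydb --workers=4 --stop-after-init
# Both end up in main() above, which parses the config, sets up the pid file
# and starts openerp.service.server.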
|
agpl-3.0
|
kuriositeetti/wamp-tikki
|
venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/universaldetector.py
|
1776
|
6840
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import sys
import codecs
from .latin1prober import Latin1Prober # windows-1252
from .mbcsgroupprober import MBCSGroupProber # multi-byte character sets
from .sbcsgroupprober import SBCSGroupProber # single-byte character sets
from .escprober import EscCharSetProber # ISO-2022, etc.
import re
MINIMUM_THRESHOLD = 0.20
ePureAscii = 0
eEscAscii = 1
eHighbyte = 2
class UniversalDetector:
def __init__(self):
self._highBitDetector = re.compile(b'[\x80-\xFF]')
self._escDetector = re.compile(b'(\033|~{)')
self._mEscCharSetProber = None
self._mCharSetProbers = []
self.reset()
def reset(self):
self.result = {'encoding': None, 'confidence': 0.0}
self.done = False
self._mStart = True
self._mGotData = False
self._mInputState = ePureAscii
self._mLastChar = b''
if self._mEscCharSetProber:
self._mEscCharSetProber.reset()
for prober in self._mCharSetProbers:
prober.reset()
def feed(self, aBuf):
if self.done:
return
aLen = len(aBuf)
if not aLen:
return
if not self._mGotData:
# If the data starts with BOM, we know it is UTF
if aBuf[:3] == codecs.BOM_UTF8:
# EF BB BF UTF-8 with BOM
self.result = {'encoding': "UTF-8-SIG", 'confidence': 1.0}
elif aBuf[:4] == codecs.BOM_UTF32_LE:
# FF FE 00 00 UTF-32, little-endian BOM
self.result = {'encoding': "UTF-32LE", 'confidence': 1.0}
elif aBuf[:4] == codecs.BOM_UTF32_BE:
# 00 00 FE FF UTF-32, big-endian BOM
self.result = {'encoding': "UTF-32BE", 'confidence': 1.0}
elif aBuf[:4] == b'\xFE\xFF\x00\x00':
# FE FF 00 00 UCS-4, unusual octet order BOM (3412)
self.result = {
'encoding': "X-ISO-10646-UCS-4-3412",
'confidence': 1.0
}
elif aBuf[:4] == b'\x00\x00\xFF\xFE':
# 00 00 FF FE UCS-4, unusual octet order BOM (2143)
self.result = {
'encoding': "X-ISO-10646-UCS-4-2143",
'confidence': 1.0
}
elif aBuf[:2] == codecs.BOM_LE:
# FF FE UTF-16, little endian BOM
self.result = {'encoding': "UTF-16LE", 'confidence': 1.0}
elif aBuf[:2] == codecs.BOM_BE:
# FE FF UTF-16, big endian BOM
self.result = {'encoding': "UTF-16BE", 'confidence': 1.0}
self._mGotData = True
if self.result['encoding'] and (self.result['confidence'] > 0.0):
self.done = True
return
if self._mInputState == ePureAscii:
if self._highBitDetector.search(aBuf):
self._mInputState = eHighbyte
elif ((self._mInputState == ePureAscii) and
self._escDetector.search(self._mLastChar + aBuf)):
self._mInputState = eEscAscii
self._mLastChar = aBuf[-1:]
if self._mInputState == eEscAscii:
if not self._mEscCharSetProber:
self._mEscCharSetProber = EscCharSetProber()
if self._mEscCharSetProber.feed(aBuf) == constants.eFoundIt:
self.result = {'encoding': self._mEscCharSetProber.get_charset_name(),
'confidence': self._mEscCharSetProber.get_confidence()}
self.done = True
elif self._mInputState == eHighbyte:
if not self._mCharSetProbers:
self._mCharSetProbers = [MBCSGroupProber(), SBCSGroupProber(),
Latin1Prober()]
for prober in self._mCharSetProbers:
if prober.feed(aBuf) == constants.eFoundIt:
self.result = {'encoding': prober.get_charset_name(),
'confidence': prober.get_confidence()}
self.done = True
break
def close(self):
if self.done:
return
if not self._mGotData:
if constants._debug:
sys.stderr.write('no data received!\n')
return
self.done = True
if self._mInputState == ePureAscii:
self.result = {'encoding': 'ascii', 'confidence': 1.0}
return self.result
if self._mInputState == eHighbyte:
proberConfidence = None
maxProberConfidence = 0.0
maxProber = None
for prober in self._mCharSetProbers:
if not prober:
continue
proberConfidence = prober.get_confidence()
if proberConfidence > maxProberConfidence:
maxProberConfidence = proberConfidence
maxProber = prober
if maxProber and (maxProberConfidence > MINIMUM_THRESHOLD):
self.result = {'encoding': maxProber.get_charset_name(),
'confidence': maxProber.get_confidence()}
return self.result
if constants._debug:
sys.stderr.write('no probers hit minimum threshold\n')
for prober in self._mCharSetProbers[0].mProbers:
if not prober:
continue
sys.stderr.write('%s confidence = %s\n' %
(prober.get_charset_name(),
prober.get_confidence()))
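if __name__ == '__main__':
# Minimal self-test sketch (not part of the original module): detect the
# encoding of a file passed on the command line, feeding it chunk by chunk.
detector = UniversalDetector()
with open(sys.argv[1], 'rb') as handle:
for chunk in handle:
detector.feed(chunk)
if detector.done:
break
detector.close()
sys.stdout.write('%s\n' % detector.result)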
|
mit
|
Brazelton-Lab/bio_utils
|
bio_utils/blast_tools/filter_b6_evalue.py
|
1
|
3410
|
#! /usr/bin/env python3
from __future__ import print_function
"""Writes lines from the B6/M8 file under the given E-value to output
Usage:
b6_evalue_filter.py --b6 <b6 file> --e_value <max e_value>
--output <output file>
Copyright:
filter_b6_evalue.py filter lines of B6/M8 file by e-value
Copyright (C) 2015 William Brazelton, Alex Hyer
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import argparse
from bio_utils.iterators import B6Reader
import sys
__author__ = 'William Brazelton, Alex Hyer'
__email__ = '[email protected]'
__license__ = 'GPLv3'
__maintainer__ = 'Alex Hyer'
__status__ = 'Production'
__version__ = '2.0.1'
def b6_evalue_filter(handle, e_value, *args, **kwargs):
"""Yields lines from handle with E-value less than or equal to e_value
Args:
handle (file): B6/M8 file handle, can be any iterator so long as it
it returns subsequent "lines" of a B6/M8 entry
e_value (float): max E-value to return
*args: Variable length argument list for b6_iter
**kwargs: Arbitrary keyword arguments for b6_iter
Yields:
B6Entry: class containing all B6/M8 data
Example:
Note: These doctests will not pass, examples are only in doctest
format as per convention. bio_utils uses pytests for testing.
>>> b6_handle = open('test.b6')
>>> for entry in b6_evalue_filter(b6_handle, 1e5)
... print(entry.evalue) # Print E-value of filtered entry
"""
b6_reader = B6Reader(handle)
for entry in b6_reader.iterate(*args, **kwargs):
if entry.evalue <= e_value:
yield entry
def main():
"""Open B6/M8 file, filter entries by E-Value, and write said entries"""
for entry in b6_evalue_filter(args.b6, args.e_value):
args.output.write(entry.write())
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.
RawDescriptionHelpFormatter)
parser.add_argument('-b', '--b6',
nargs='?',
type=argparse.FileType('rU'),
default=sys.stdin,
help='M8 (B6 in BLAST+) file with alignment data '
'[Default: STDIN]')
parser.add_argument('-e', '--e_value',
type=float,
help='upper E-Value cutoff')
parser.add_argument('-o', '--output',
nargs='?',
type=argparse.FileType('w'),
default=sys.stdout,
help='optional output file [Default: STDOUT]')
args = parser.parse_args()
main()
sys.exit(0)
|
gpl-3.0
|