# ==== mpenning/exscript :: tests/Exscript/FileLoggerTest.py (gpl-2.0) ====
import sys, unittest, re, os.path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', 'src'))

from tempfile import mkdtemp
from shutil import rmtree
from Exscript import Host
from Exscript.FileLogger import FileLogger
from LoggerTest import LoggerTest, FakeJob


class FakeError(Exception):
    pass


class FileLoggerTest(LoggerTest):
    CORRELATE = FileLogger

    def setUp(self):
        self.tempdir = mkdtemp()
        self.logdir = os.path.join(self.tempdir, 'non-existent')
        self.logger = FileLogger(self.logdir, clearmem = False)
        self.job = FakeJob('fake')
        self.logfile = os.path.join(self.logdir, 'fake.log')
        self.errfile = self.logfile + '.error'

    def tearDown(self):
        LoggerTest.tearDown(self)
        rmtree(self.tempdir)

    def testConstructor(self):
        self.assert_(os.path.isdir(self.tempdir))
        self.failIf(os.path.exists(self.logfile))
        self.failIf(os.path.exists(self.errfile))

    def testAddLog(self):
        log = LoggerTest.testAddLog(self)
        self.assert_(os.path.isfile(self.logfile), 'No such file: ' + self.logfile)
        self.failIf(os.path.exists(self.errfile))
        return log

    def testLog(self):
        log = LoggerTest.testLog(self)
        self.assert_(os.path.isfile(self.logfile))
        self.failIf(os.path.exists(self.errfile))
        return log

    def testLogAborted(self):
        log = LoggerTest.testLogAborted(self)
        self.assert_(os.path.isfile(self.logfile))
        self.assert_(os.path.isfile(self.errfile))
        return log

    def testLogSucceeded(self):
        log = LoggerTest.testLogSucceeded(self)
        self.assert_(os.path.isfile(self.logfile))
        self.failIf(os.path.isfile(self.errfile))
        return log

    def testAddLog2(self):
        # Like testAddLog(), but with attempt = 2.
        self.logfile = os.path.join(self.logdir, self.job.name + '_retry1.log')
        self.errfile = self.logfile + '.error'
        self.failIf(os.path.exists(self.logfile))
        self.failIf(os.path.exists(self.errfile))
        self.logger.add_log(id(self.job), self.job.name, 2)
        self.assert_(os.path.isfile(self.logfile))
        self.failIf(os.path.exists(self.errfile))
        content = open(self.logfile).read()
        self.assertEqual(content, '')

    def testLog2(self):
        # Like testLog(), but with attempt = 2.
        self.testAddLog2()
        self.logger.log(id(self.job), 'hello world')
        self.assert_(os.path.isfile(self.logfile))
        self.failIf(os.path.exists(self.errfile))
        content = open(self.logfile).read()
        self.assertEqual(content, 'hello world')

    def testLogSucceeded2(self):
        # With attempt = 2.
        self.testLog2()
        self.logger.log_succeeded(id(self.job))
        self.assert_(os.path.isfile(self.logfile))
        self.failIf(os.path.exists(self.errfile))


def suite():
    return unittest.TestLoader().loadTestsFromTestCase(FileLoggerTest)

if __name__ == '__main__':
    unittest.TextTestRunner(verbosity = 2).run(suite())
# ==== Suninus/NewsBlur :: apps/profile/migrations/0019_dashboard_date.py (mit) ====
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):

    def forwards(self, orm):
        # Adding field 'Profile.dashboard_date'
        db.add_column('profile_profile', 'dashboard_date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now), keep_default=False)

    def backwards(self, orm):
        # Deleting field 'Profile.dashboard_date'
        db.delete_column('profile_profile', 'dashboard_date')

    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'profile.profile': {
            'Meta': {'object_name': 'Profile'},
            'collapsed_folders': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'dashboard_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'feed_pane_size': ('django.db.models.fields.IntegerField', [], {'default': '240'}),
            'has_found_friends': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
            'has_setup_feeds': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
            'has_trained_intelligence': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
            'hide_getting_started': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
            'hide_mobile': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_premium': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_seen_ip': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'last_seen_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'preferences': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
            'secret_token': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True', 'blank': 'True'}),
            'send_emails': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'stripe_4_digits': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}),
            'stripe_id': ('django.db.models.fields.CharField', [], {'max_length': '24', 'null': 'True', 'blank': 'True'}),
            'timezone': ('vendor.timezones.fields.TimeZoneField', [], {'default': "'America/New_York'", 'max_length': '100'}),
            'tutorial_finished': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"}),
            'view_settings': ('django.db.models.fields.TextField', [], {'default': "'{}'"})
        }
    }

    complete_apps = ['profile']
# ==== gianina-ingenuity/titanium-branch-deep-linking :: testbed/x/mobilesdk/osx/5.5.1.GA/common/markdown/preprocessors.py (mit) ====

"""
PRE-PROCESSORS
=============================================================================

Preprocessors work on source text before we start doing anything too
complicated.
"""

import re
import markdown

HTML_PLACEHOLDER_PREFIX = markdown.STX+"wzxhzdk:"
HTML_PLACEHOLDER = HTML_PLACEHOLDER_PREFIX + "%d" + markdown.ETX


class Processor:
    def __init__(self, markdown_instance=None):
        if markdown_instance:
            self.markdown = markdown_instance


class Preprocessor (Processor):
    """
    Preprocessors are run after the text is broken into lines.

    Each preprocessor implements a "run" method that takes a pointer to a
    list of lines of the document, modifies it as necessary and returns
    either the same pointer or a pointer to a new list.

    Preprocessors must extend markdown.Preprocessor.
    """
    def run(self, lines):
        """
        Each subclass of Preprocessor should override the `run` method, which
        takes the document as a list of strings split by newlines and returns
        the (possibly modified) list of lines.
        """
        pass


class HtmlStash:
    """
    This class is used for stashing HTML objects that we extract
    in the beginning and replace with place-holders.
    """

    def __init__ (self):
        """ Create a HtmlStash. """
        self.html_counter = 0 # for counting inline html segments
        self.rawHtmlBlocks=[]

    def store(self, html, safe=False):
        """
        Saves an HTML segment for later reinsertion.  Returns a
        placeholder string that needs to be inserted into the
        document.

        Keyword arguments:

        * html: an html segment
        * safe: label an html segment as safe for safemode

        Returns : a placeholder string
        """
        self.rawHtmlBlocks.append((html, safe))
        placeholder = HTML_PLACEHOLDER % self.html_counter
        self.html_counter += 1
        return placeholder

    def reset(self):
        self.html_counter = 0
        self.rawHtmlBlocks = []


class HtmlBlockPreprocessor(Preprocessor):
    """Remove html blocks from the text and store them for later retrieval."""

    right_tag_patterns = ["</%s>", "%s>"]

    def _get_left_tag(self, block):
        return block[1:].replace(">", " ", 1).split()[0].lower()

    def _get_right_tag(self, left_tag, block):
        for p in self.right_tag_patterns:
            tag = p % left_tag
            i = block.rfind(tag)
            if i > 2:
                return tag.lstrip("<").rstrip(">"), i + len(p)-2 + len(left_tag)
        return block.rstrip()[-len(left_tag)-2:-1].lower(), len(block)

    def _equal_tags(self, left_tag, right_tag):
        if left_tag == 'div' or left_tag[0] in ['?', '@', '%']: # handle PHP, etc.
            return True
        if ("/" + left_tag) == right_tag:
            return True
        if (right_tag == "--" and left_tag == "--"):
            return True
        elif left_tag == right_tag[1:] \
            and right_tag[0] != "<":
            return True
        else:
            return False

    def _is_oneliner(self, tag):
        return (tag in ['hr', 'hr/'])

    def run(self, lines):
        text = "\n".join(lines)
        new_blocks = []
        text = text.split("\n\n")
        items = []
        left_tag = ''
        right_tag = ''
        in_tag = False # flag

        while text:
            block = text[0]
            if block.startswith("\n"):
                block = block[1:]
            text = text[1:]

            if block.startswith("\n"):
                block = block[1:]

            if not in_tag:
                if block.startswith("<"):
                    left_tag = self._get_left_tag(block)
                    right_tag, data_index = self._get_right_tag(left_tag, block)

                    if block[1] == "!":
                        # is a comment block
                        left_tag = "--"
                        right_tag, data_index = self._get_right_tag(left_tag, block)
                        # keep checking conditions below and maybe just append

                    if data_index < len(block) \
                        and markdown.isBlockLevel(left_tag):
                        text.insert(0, block[data_index:])
                        block = block[:data_index]

                    if not (markdown.isBlockLevel(left_tag) \
                        or block[1] in ["!", "?", "@", "%"]):
                        new_blocks.append(block)
                        continue

                    if self._is_oneliner(left_tag):
                        new_blocks.append(block.strip())
                        continue

                    if block.rstrip().endswith(">") \
                        and self._equal_tags(left_tag, right_tag):
                        new_blocks.append(
                            self.markdown.htmlStash.store(block.strip()))
                        continue
                    else: #if not block[1] == "!":
                        # if is block level tag and is not complete
                        if markdown.isBlockLevel(left_tag) or left_tag == "--" \
                            and not block.rstrip().endswith(">"):
                            items.append(block.strip())
                            in_tag = True
                        else:
                            new_blocks.append(
                                self.markdown.htmlStash.store(block.strip()))
                        continue

                new_blocks.append(block)

            else:
                items.append(block.strip())

                right_tag, data_index = self._get_right_tag(left_tag, block)

                if self._equal_tags(left_tag, right_tag):
                    # if find closing tag
                    in_tag = False
                    new_blocks.append(
                        self.markdown.htmlStash.store('\n\n'.join(items)))
                    items = []

        if items:
            new_blocks.append(self.markdown.htmlStash.store('\n\n'.join(items)))
            new_blocks.append('\n')

        new_text = "\n\n".join(new_blocks)
        return new_text.split("\n")


class ReferencePreprocessor(Preprocessor):
    """ Remove reference definitions from text and store for later use. """

    RE = re.compile(r'^(\ ?\ ?\ ?)\[([^\]]*)\]:\s*([^ ]*)(.*)$', re.DOTALL)

    def run (self, lines):
        new_text = [];
        for line in lines:
            m = self.RE.match(line)
            if m:
                id = m.group(2).strip().lower()
                t = m.group(4).strip()  # potential title
                if not t:
                    self.markdown.references[id] = (m.group(3), t)
                elif (len(t) >= 2
                      and (t[0] == t[-1] == "\""
                           or t[0] == t[-1] == "\'"
                           or (t[0] == "(" and t[-1] == ")") ) ):
                    self.markdown.references[id] = (m.group(3), t[1:-1])
                else:
                    new_text.append(line)
            else:
                new_text.append(line)

        return new_text #+ "\n"
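
# Illustrative usage sketch (not part of the original module).  HtmlStash is
# the piece that is easiest to exercise in isolation: storing a block returns
# a placeholder token (built from markdown.STX/ETX) that postprocessors later
# swap back for the raw HTML.  Assumes the legacy `markdown` package imported
# above is available.
#
#     stash = HtmlStash()
#     token = stash.store("<div>raw</div>")          # e.g. u"\x02wzxhzdk:0\x03"
#     assert stash.rawHtmlBlocks[0] == ("<div>raw</div>", False)
#     stash.reset()                                  # counter and blocks cleared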
# ==== r-mart/scikit-learn :: examples/tree/plot_tree_regression.py (bsd-3-clause) ====
"""
===================================================================
Decision Tree Regression
===================================================================

A 1D regression with a decision tree.

A :ref:`decision tree <tree>` is used to fit a sine curve with additional
noisy observations. As a result, it learns local linear regressions
approximating the sine curve.

We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn too fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)

# Import the necessary modules and libraries
import numpy as np
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt

# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(16))

# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_1.fit(X, y)
regr_2.fit(X, y)

# Predict
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)

# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="data")
plt.plot(X_test, y_1, c="g", label="max_depth=2", linewidth=2)
plt.plot(X_test, y_2, c="r", label="max_depth=5", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
# ==== arista-eosplus/ansible :: test/units/module_utils/basic/test_exit_json.py (gpl-3.0) ====
# -*- coding: utf-8 -*-
# (c) 2015, Toshio Kuratomi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type

import copy
import json
import sys

from ansible.compat.tests import unittest
from ansible.module_utils import basic
from units.mock.procenv import swap_stdin_and_argv, swap_stdout

empty_invocation = {u'module_args': {}}


class TestAnsibleModuleExitJson(unittest.TestCase):
    def setUp(self):
        args = json.dumps(dict(ANSIBLE_MODULE_ARGS={}))
        self.stdin_swap_ctx = swap_stdin_and_argv(stdin_data=args)
        self.stdin_swap_ctx.__enter__()

        # since we can't use context managers and "with" without overriding run(), call them directly
        self.stdout_swap_ctx = swap_stdout()
        self.fake_stream = self.stdout_swap_ctx.__enter__()

        basic._ANSIBLE_ARGS = None
        self.module = basic.AnsibleModule(argument_spec=dict())

    def tearDown(self):
        # since we can't use context managers and "with" without overriding run(), call them directly to clean up
        self.stdin_swap_ctx.__exit__(None, None, None)
        self.stdout_swap_ctx.__exit__(None, None, None)

    def test_exit_json_no_args_exits(self):
        with self.assertRaises(SystemExit) as ctx:
            self.module.exit_json()
        if isinstance(ctx.exception, int):
            # Python2.6... why does sys.exit behave this way?
            self.assertEquals(ctx.exception, 0)
        else:
            self.assertEquals(ctx.exception.code, 0)
        return_val = json.loads(self.fake_stream.getvalue())
        self.assertEquals(return_val, dict(changed=False, invocation=empty_invocation))

    def test_exit_json_args_exits(self):
        with self.assertRaises(SystemExit) as ctx:
            self.module.exit_json(msg='message')
        if isinstance(ctx.exception, int):
            # Python2.6... why does sys.exit behave this way?
            self.assertEquals(ctx.exception, 0)
        else:
            self.assertEquals(ctx.exception.code, 0)
        return_val = json.loads(self.fake_stream.getvalue())
        self.assertEquals(return_val, dict(msg="message", changed=False, invocation=empty_invocation))

    def test_fail_json_exits(self):
        with self.assertRaises(SystemExit) as ctx:
            self.module.fail_json(msg='message')
        if isinstance(ctx.exception, int):
            # Python2.6... why does sys.exit behave this way?
            self.assertEquals(ctx.exception, 1)
        else:
            self.assertEquals(ctx.exception.code, 1)
        return_val = json.loads(self.fake_stream.getvalue())
        self.assertEquals(return_val, dict(msg="message", changed=False, failed=True, invocation=empty_invocation))

    def test_exit_json_proper_changed(self):
        with self.assertRaises(SystemExit) as ctx:
            self.module.exit_json(changed=True, msg='success')
        return_val = json.loads(self.fake_stream.getvalue())
        self.assertEquals(return_val, dict(changed=True, msg='success', invocation=empty_invocation))


class TestAnsibleModuleExitValuesRemoved(unittest.TestCase):
    OMIT = 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
    # Note: the password-bearing URLs below were mangled by e-mail obfuscation
    # in the dump ("[email protected]"); they are reconstructed here from the
    # values the masking assertions themselves require.
    dataset = (
        (
            dict(username='person', password='$ecret k3y'),
            dict(one=1, pwd='$ecret k3y', url='https://username:password12345@foo.com/login/',
                 not_secret='following the leader', msg='here'),
            dict(one=1, pwd=OMIT, url='https://username:password12345@foo.com/login/',
                 not_secret='following the leader', changed=False, msg='here',
                 invocation=dict(module_args=dict(password=OMIT, token=None, username='person'))),
        ),
        (
            dict(username='person', password='password12345'),
            dict(one=1, pwd='$ecret k3y', url='https://username:password12345@foo.com/login/',
                 not_secret='following the leader', msg='here'),
            dict(one=1, pwd='$ecret k3y', url='https://username:********@foo.com/login/',
                 not_secret='following the leader', changed=False, msg='here',
                 invocation=dict(module_args=dict(password=OMIT, token=None, username='person'))),
        ),
        (
            dict(username='person', password='$ecret k3y'),
            dict(one=1, pwd='$ecret k3y', url='https://username:$ecret k3y@foo.com/login/',
                 not_secret='following the leader', msg='here'),
            dict(one=1, pwd=OMIT, url='https://username:********@foo.com/login/',
                 not_secret='following the leader', changed=False, msg='here',
                 invocation=dict(module_args=dict(password=OMIT, token=None, username='person'))),
        ),
    )

    def test_exit_json_removes_values(self):
        self.maxDiff = None
        for args, return_val, expected in self.dataset:
            params = dict(ANSIBLE_MODULE_ARGS=args)
            params = json.dumps(params)
            with swap_stdin_and_argv(stdin_data=params):
                with swap_stdout():
                    basic._ANSIBLE_ARGS = None
                    module = basic.AnsibleModule(
                        argument_spec=dict(
                            username=dict(),
                            password=dict(no_log=True),
                            token=dict(no_log=True),
                        ),
                    )
                    with self.assertRaises(SystemExit) as ctx:
                        self.assertEquals(module.exit_json(**return_val), expected)
                    self.assertEquals(json.loads(sys.stdout.getvalue()), expected)

    def test_fail_json_removes_values(self):
        self.maxDiff = None
        for args, return_val, expected in self.dataset:
            expected = copy.deepcopy(expected)
            expected['failed'] = True
            params = dict(ANSIBLE_MODULE_ARGS=args)
            params = json.dumps(params)
            with swap_stdin_and_argv(stdin_data=params):
                with swap_stdout():
                    basic._ANSIBLE_ARGS = None
                    module = basic.AnsibleModule(
                        argument_spec=dict(
                            username=dict(),
                            password=dict(no_log=True),
                            token=dict(no_log=True),
                        ),
                    )
                    with self.assertRaises(SystemExit) as ctx:
                        self.assertEquals(module.fail_json(**return_val), expected)
                    self.assertEquals(json.loads(sys.stdout.getvalue()), expected)
# ==== jendap/tensorflow :: tensorflow/python/util/dispatch.py (apache-2.0) ====
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Type-based dispatch for TensorFlow ops.

"Operation dispatchers" can be used to override the behavior for TensorFlow ops
when they are called with otherwise unsupported argument types.  In particular,
when an operation is called with arguments that would cause it to raise a
TypeError, it falls back on its registered operation dispatchers.  If any
registered dispatchers can handle the arguments, then its result is returned.
Otherwise, the original TypeError is raised.

By default, dispatch support is added to the generated op wrappers for any
visible ops by default.  Ops that are implemented in Python can opt in to
dispatch support using the `add_dispatch_support` decorator.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import itertools

from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect

# Private function attribute used to store a list of dispatchers.
DISPATCH_ATTR = "_tf_dispatchers"


class OpDispatcher(object):
  """Abstract base class for TensorFlow operator dispatchers.

  Each operation dispatcher acts as an override handler for a single
  TensorFlow operation, and its results are used when the handler indicates
  that it can handle the operation's arguments (by returning any value other
  than `OpDispatcher.NOT_SUPPORTED`).
  """

  # Sentinel value that can be returned to indicate that an operation
  # dispatcher does not support a given set of arguments.
  NOT_SUPPORTED = object()

  def handle(self, args, kwargs):  # pylint: disable=unused-argument
    """Handle this dispatcher's operation with the specified arguments.

    If this operation dispatcher can handle the given arguments, then
    return an appropriate value (or raise an appropriate exception).

    Args:
      args: The arguments to the operation.
      kwargs: They keyword arguments to the operation.

    Returns:
      The result of the operation, or `OpDispatcher.NOT_SUPPORTED` if this
      dispatcher can not handle the given arguments.
    """
    return self.NOT_SUPPORTED

  def register(self, op):
    """Register this dispatcher as a handler for `op`.

    Args:
      op: Python function: the TensorFlow operation that should be handled.
        Must have a dispatch list (which is added automatically for generated
        ops, and can be added to Python ops using the `add_dispatch_support`
        decorator).
    """
    if not hasattr(op, DISPATCH_ATTR):
      raise AssertionError("Dispatching not enabled for %s" % op)
    getattr(op, DISPATCH_ATTR).append(self)


def dispatch(op, *args, **kwargs):
  """Returns the result from the first successful dispatcher for a given op.

  Calls the `handle` method of each `OpDispatcher` that has been registered
  to handle `op`, and returns the value from the first successful handler.

  Args:
    op: Python function: the operation to dispatch for.
    *args: The arguments to the operation.
    **kwargs: They keyword arguments to the operation.

  Returns:
    The result of the operation, or `NOT_SUPPORTED` if no registered
    dispatcher can handle the given arguments.
  """
  for dispatcher in getattr(op, DISPATCH_ATTR):
    result = dispatcher.handle(args, kwargs)
    if result is not OpDispatcher.NOT_SUPPORTED:
      return result
  return OpDispatcher.NOT_SUPPORTED


class _TypeBasedDispatcher(OpDispatcher):
  """Dispatcher that handles op if any arguments have a specified type.

  Checks the types of the arguments and keyword arguments (including elements
  of lists or tuples), and if any argument values have the indicated type(s),
  then delegates to an override function.
  """

  def __init__(self, override_func, types):
    self._types = types
    self._override_func = override_func

  def _handles(self, args, kwargs):
    for arg in itertools.chain(args, kwargs.values()):
      if (isinstance(arg, self._types) or
          (isinstance(arg, (list, tuple)) and
           any(isinstance(elt, self._types) for elt in arg))):
        return True
    return False

  def handle(self, args, kwargs):
    if self._handles(args, kwargs):
      return self._override_func(*args, **kwargs)
    else:
      return self.NOT_SUPPORTED


# pylint: disable=g-doc-return-or-yield
def dispatch_for_types(op, *types):
  """Decorator to declare that a Python function overrides an op for a type.

  The decorated function is used to override `op` if any of the arguments or
  keyword arguments (including elements of lists or tuples) have one of the
  specified types.

  Example:

  ```python
  @dispatch_for_types(math_ops.add, RaggedTensor, RaggedTensorValue)
  def ragged_add(x, y, name=None): ...
  ```

  Args:
    op: Python function: the operation that should be overridden.
    *types: The argument types for which this function should be used.
  """
  def decorator(func):
    if tf_inspect.getargspec(func) != tf_inspect.getargspec(op):
      raise AssertionError("The decorated function's signature must exactly "
                           "match the signature of the overridden op.")
    _TypeBasedDispatcher(func, types).register(op)
    return func

  return decorator
# pylint: enable=g-doc-return-or-yield


def add_dispatch_list(target):
  """Decorator that adds a dispatch_list attribute to an op."""
  if hasattr(target, DISPATCH_ATTR):
    raise AssertionError("%s already has a dispatch list" % target)
  setattr(target, DISPATCH_ATTR, [])
  return target


def add_dispatch_support(target):
  """Decorator that adds a dispatch handling wrapper to an op."""
  def wrapper(*args, **kwargs):
    """Call target, and fall back on dispatchers if there is a TypeError."""
    try:
      return target(*args, **kwargs)
    except (TypeError, ValueError):
      # Note: convert_to_eager_tensor currently raises a ValueError, not a
      # TypeError, when given unexpected types.  So we need to catch both.
      result = dispatch(wrapper, *args, **kwargs)
      if result is not OpDispatcher.NOT_SUPPORTED:
        return result
      else:
        raise
  add_dispatch_list(wrapper)
  return tf_decorator.make_decorator(target, wrapper)
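
# Illustrative sketch (not part of the original module): the same mechanism on
# a plain Python function, since nothing above is specific to TensorFlow ops.
# `add_dispatch_support` wraps `divide`; when string arguments make the body
# raise TypeError, the registered dispatcher handles the call instead.
#
#     @add_dispatch_support
#     def divide(x, y):
#       return x / y
#
#     @dispatch_for_types(divide, str)
#     def divide_strings(x, y):
#       return "%s/%s" % (x, y)
#
#     divide(6, 3)      # -> 2, the normal path
#     divide("a", "b")  # -> "a/b", via the fallback dispatcher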
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==== ClydeSpace-GroundStation/GroundStation :: Utilities/Supporting_Libraries/gr-bruninga-master/python/fsk_demod.py (mit) ====
#
# Copyright 2014 <+YOU OR YOUR COMPANY+>.
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING.  If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#

from gnuradio import gr
from gnuradio import blocks
from gnuradio import digital
from gnuradio import filter
from gnuradio.filter import firdes

from scipy import signal
import math

import bruninga


class fsk_demod(gr.hier_block2):
    """
    Tidied up version of the demodulator found in examples/aprs-rx.grc

    samp_rate should be the incoming audio sample rate.
    """
    def __init__(self, inc_samp_rate):
        gr.hier_block2.__init__(self,
            "fsk_demod",
            gr.io_signature(1, 1, gr.sizeof_float),  # Input signature
            gr.io_signature(1, 1, gr.sizeof_char)    # Output signature
        )

        self.inc_samp_rate = inc_samp_rate

        self.sps = sps = 4
        self.baud_rate = baud_rate = 1200
        self.samp_rate = samp_rate = sps * baud_rate * 4
        self.mark = mark = 2200
        self.space = space = 1200
        self.center = center = int((mark + space) / 2)

        ##################################################
        # Blocks
        ##################################################

        # Stage 1: Force resampling to 19.2ksps
        self.rational_resampler_xxx_0 = filter.rational_resampler_fff(
            interpolation=samp_rate,
            decimation=self.inc_samp_rate,
            taps=None,
            fractional_bw=None,
        )

        # Stage 2: Bandpass Filter
        self.bpf_width = bpf_width = 800
        self.bpf_trans = bpf_trans = 200

        self.band_pass_filter_0 = filter.fir_filter_fff(1, firdes.band_pass(
            1, samp_rate, 1700-bpf_width, 1700+bpf_width, bpf_trans, firdes.WIN_RECTANGULAR, 6.76))

        # Stage 3: Tone Detection
        self.window_len = window_len = self.samp_rate/self.baud_rate*2
        self.window = window = signal.windows.cosine(window_len)

        self.freq_xlating_fir_filter_xxx_0_0 = filter.freq_xlating_fir_filter_fcf(4, (window), mark, samp_rate)
        self.freq_xlating_fir_filter_xxx_0 = filter.freq_xlating_fir_filter_fcf(4, (window), space, samp_rate)

        self.blocks_complex_to_mag_0_0 = blocks.complex_to_mag(1)
        self.blocks_complex_to_mag_0 = blocks.complex_to_mag(1)

        # Stage 4: AGC
        self.decay = decay = 0.00022
        self.attack = attack = 0.8

        self.bruninga_direwolf_agc_0_0 = bruninga.direwolf_agc(attack, decay)
        self.bruninga_direwolf_agc_0 = bruninga.direwolf_agc(attack, decay)
        self.blocks_sub_xx_1 = blocks.sub_ff(1)

        # Stage 5: Clock Recovery
        self.gain_mu = gain_mu = 0.45

        self.digital_clock_recovery_mm_xx_0 = digital.clock_recovery_mm_ff(self.sps*(1+0.0), 0.25*gain_mu*gain_mu, 0.5, gain_mu, 0.05)

        # Stage 6: Differential Decoding
        self.digital_diff_decoder_bb_0 = digital.diff_decoder_bb(2)
        self.blocks_not_xx_0 = blocks.not_bb()
        self.blocks_and_const_xx_0 = blocks.and_const_bb(1)

        # Stage 7: Output
        self.digital_binary_slicer_fb_0 = digital.binary_slicer_fb()

        ##################################################
        # Connections
        ##################################################
        self.connect((self, 0), (self.rational_resampler_xxx_0, 0))
        self.connect((self.rational_resampler_xxx_0, 0), (self.band_pass_filter_0, 0))

        self.connect((self.band_pass_filter_0, 0), (self.freq_xlating_fir_filter_xxx_0_0, 0))
        self.connect((self.band_pass_filter_0, 0), (self.freq_xlating_fir_filter_xxx_0, 0))

        self.connect((self.freq_xlating_fir_filter_xxx_0, 0), (self.blocks_complex_to_mag_0, 0))
        self.connect((self.freq_xlating_fir_filter_xxx_0_0, 0), (self.blocks_complex_to_mag_0_0, 0))

        self.connect((self.blocks_complex_to_mag_0, 0), (self.bruninga_direwolf_agc_0, 0))
        self.connect((self.blocks_complex_to_mag_0_0, 0), (self.bruninga_direwolf_agc_0_0, 0))

        self.connect((self.bruninga_direwolf_agc_0_0, 0), (self.blocks_sub_xx_1, 1))
        self.connect((self.bruninga_direwolf_agc_0, 0), (self.blocks_sub_xx_1, 0))

        self.connect((self.blocks_sub_xx_1, 0), (self.digital_clock_recovery_mm_xx_0, 0))
        self.connect((self.digital_clock_recovery_mm_xx_0, 0), (self.digital_binary_slicer_fb_0, 0))

        self.connect((self.digital_diff_decoder_bb_0, 0), (self.blocks_not_xx_0, 0))
        self.connect((self.blocks_not_xx_0, 0), (self.blocks_and_const_xx_0, 0))
        self.connect((self.blocks_and_const_xx_0, 0), (self, 0))
        self.connect((self.digital_binary_slicer_fb_0, 0), (self.digital_diff_decoder_bb_0, 0))
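
# Illustrative usage sketch (not part of the original file): dropping the
# demodulator into a flowgraph.  Assumes GNU Radio 3.7-era APIs and that the
# bruninga OOT module is installed; the file names are made up.
#
#     tb = gr.top_block()
#     src = blocks.wavfile_source("aprs_capture.wav", False)   # 48 kHz mono
#     sink = blocks.file_sink(gr.sizeof_char, "bits.raw")
#     tb.connect(src, fsk_demod(48000), sink)
#     tb.run()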
# ==== ideaworld/BioDesigner :: design/views.py (apache-2.0) ====
"""
@author: Bowen
"""
from django.shortcuts import render
from django.db.models import Q
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render, get_object_or_404, render_to_response
from django.template import RequestContext, loader
from django.core.mail import send_mail
from django.utils import timezone
from django.views.decorators.csrf import csrf_exempt

import hashlib
import json
import datetime
import random
import traceback

from search_part import ambiguousSearch, getPart
from accounts.models import User
from design.models import project, functions, tracks, user_project, tracks, chain, track_functions
from design.project import searchProject, getUserProject, getChain, getChainList
from design.recommend import getApriorRecommend, getMarkovRecommend, getBetweenMarkovRecommend
from design.file import getSequenceResultImage
from design.simulation import reaction_simulator


@csrf_exempt
def searchParts(request):
    keyword = request.GET.get('keyword')
    try:
        funcs = request.GET.get('funcs')
    except:
        funcs = ''
    results = ambiguousSearch(keyword, funcs)
    return HttpResponse(json.dumps(results), content_type="text/json")


@csrf_exempt
def getParts(request):
    partName = request.GET.get('partname')
    results = getPart(partName)
    return HttpResponse(json.dumps(results), content_type="text/json")


@csrf_exempt
def dashboardView(request):
    try:
        isLoggedIn = request.session['isLoggedIn']
        if isLoggedIn:
            chainId = int(request.GET.get('id'))
            template = loader.get_template('home/dashboard.html')
            context = RequestContext(request, {
                'username': str(request.session['username']),
                'id': chainId
            })
            return HttpResponse(template.render(context))
        else:
            return HttpResponseRedirect('/')
    except KeyError:
        return HttpResponseRedirect('/')


@csrf_exempt
def projectView(request):
    try:
        isLoggedIn = request.session['isLoggedIn']
        if isLoggedIn:
            template = loader.get_template('home/project_new.html')
            context = RequestContext(request, {
                'username': request.session['username']
            })
            return HttpResponse(template.render(context))
        else:
            return HttpResponseRedirect('/')
    except KeyError:
        return HttpResponseRedirect('/')


@csrf_exempt
def oldprojectView(request):
    try:
        isLoggedIn = request.session['isLoggedIn']
        if isLoggedIn:
            template = loader.get_template('home/project.html')
            context = RequestContext(request, {
                'username': request.session['username']
            })
            return HttpResponse(template.render(context))
        else:
            return HttpResponseRedirect('/')
    except KeyError:
        return HttpResponseRedirect('/')


def isAccountActive(request):
    try:
        username = request.session['username']
        return User.objects.get(username=username).is_confirmed
    except KeyError:
        return False


def isLoggedIn(request):
    try:
        isLoggedIn = request.session['isLoggedIn']
        return isLoggedIn
    except KeyError:
        return False


def getCurrentUserObj(request):
    try:
        username = request.session['username']
        userObj = User.objects.get(username=username)
        return userObj
    except:
        return None


def newProject(name, user, track):
    try:
        projectObj = project(project_name=name, creator=user, track_id=track)
        projectObj.save()
        userPjctObj = user_project(user=user, project=projectObj)
        userPjctObj.save()
        return True, projectObj
    except:
        return False, None


@csrf_exempt
def createProject(request):
    result = {
        'isSuccessful': False,
    }
    if not isLoggedIn(request):
        return HttpResponse(json.dumps(result), content_type="application/json")
    received_json_data = json.loads(request.body)
    name = received_json_data['name']
    userObj = getCurrentUserObj(request)
    #function_id = int(request.POST.get('function', ''))
    # track_id = int(request.POST.get('track', ''))
    track_id = int(received_json_data['track'])
    createResult = newProject(name, userObj, track_id)
    result['isSuccessful'] = createResult[0]
    result['project_name'] = name
    result['id'] = createResult[1].id
    result['track'] = createResult[1].track.track
    result['creator'] = userObj.username
    return HttpResponse(json.dumps(result), content_type="application/json")


@csrf_exempt
def getProjectChain(request):
    result = {
        'isSuccessful': False,
    }
    if not isLoggedIn(request):
        return HttpResponse(json.dumps(result), content_type="application/json")
    project_id = request.GET.get('id', '')
    chainResult = getChain(project_id)
    result['isSuccessful'] = chainResult[0]
    result['chain'] = chainResult[1]
    return HttpResponse(json.dumps(result), content_type="application/json")


@csrf_exempt
def getProject(request):
    keyword = request.GET.get('keyword')
    userObj = getCurrentUserObj(request)
    if not userObj:
        result = {'isSuccessful': False}
        return HttpResponse(json.dumps(result), content_type="application/json")
    result = searchProject(keyword, userObj)
    return HttpResponse(json.dumps(result), content_type="application/json")


@csrf_exempt
def getUserProjects(request):
    userObj = getCurrentUserObj(request)
    if not userObj:
        result = {'isSuccessful': False}
        return HttpResponse(json.dumps(result), content_type="application/json")
    result = getUserProject(userObj)
    return HttpResponse(json.dumps(result), content_type="application/json")


@csrf_exempt
def getProjectChains(request):
    projectId = request.GET.get('id', '')
    result = getChainList(int(projectId))
    return HttpResponse(json.dumps(result), content_type="application/json")


@csrf_exempt
def createNewDevice(request):
    result = {
        'isSuccessful': False,
    }
    if not isLoggedIn(request):
        return HttpResponse(json.dumps(result), content_type="application/json")
    received_json_data = json.loads(request.body)
    name = received_json_data['name']
    projectId = received_json_data['id']
    newChain = chain(name=name, project_id=int(projectId))
    try:
        newChain.save()
        result['isSuccessful'] = True
        result['name'] = name
        result['id'] = projectId
    except:
        pass
    return HttpResponse(json.dumps(result), content_type="application/json")


@csrf_exempt
def saveChain(request):
    result = {'isSuccessful': True,}
    chainContent = request.POST.get('chain', '')
    chainId = int(request.POST.get('id', ''))
    try:
        chainObj = chain.objects.get(id=chainId)
        chainObj.sequence = chainContent
        chainObj.isModified = True
        chainObj.save()
    except:
        result['isSuccessful'] = False
    return HttpResponse(json.dumps(result), content_type="application/json")


@csrf_exempt
def getARecommend(request):
    chainStr = request.GET.get('seq', '')
    funcStr = request.GET.get('funcs', '')
    if chainStr.startswith('_'):
        chainStr = chainStr[1:]
    if chainStr.endswith('_'):
        chainStr = chainStr[:-1]
    return HttpResponse(json.dumps(getApriorRecommend(chainStr, funcStr)), content_type="application/json")


@csrf_exempt
def getMRecommend(request):
    part_id = request.GET.get('part')
    return HttpResponse(json.dumps(getMarkovRecommend(part_id)), content_type="application/json")


@csrf_exempt
def getTracks(request):
    trackList = tracks.objects.all().order_by('id')
    result = {
        'isSuccessful': False,
    }
    trackInfos = list()
    for t in trackList:
        tmp = {
            'id': t.id,
            'track': t.track
        }
        trackInfos.append(tmp)
    result['isSuccessful'] = True
    result['tracks'] = trackInfos
    return HttpResponse(json.dumps(result), content_type="application/json")


@csrf_exempt
def getChainLength(request):
    chainId = request.GET.get('id')
    result = {
        'isSuccessful': True,
    }
    try:
        chainObj = chain.objects.get(id=chainId)
        se = chainObj.sequence
        if se.startswith('_'):
            se = se[1:]
        chainLength = len(se.split('_'))
        result['length'] = chainLength
    except:
        result['isSuccessful'] = False
    return HttpResponse(json.dumps(result), content_type="application/json")


@csrf_exempt
def changeProjectName(request):
    projectId = request.POST.get('id', '')
    newName = request.POST.get('name', '')
    result = {
        'isSuccessful': True,
    }
    try:
        projectObj = project.objects.get(id=projectId)
        projectObj.project_name = newName
        projectObj.save()
    except:
        result['isSuccessful'] = False
    return HttpResponse(json.dumps(result), content_type="application/json")


@csrf_exempt
def changeProjectTrack(request):
    projectId = request.POST.get('id', '')
    newTrackId = request.POST.get('track_id', '')
    result = {
        'isSuccessful': True,
    }
    try:
        projectObj = project.objects.get(id=projectId)
        trackObj = tracks.objects.get(id=newTrackId)
        projectObj.track = trackObj
        projectObj.save()
    except:
        result['isSuccessful'] = False
    return HttpResponse(json.dumps(result), content_type="application/json")


@csrf_exempt
def get_between_recommend(request):
    part_twin = request.GET.get('pairs', '').split('_')
    first = part_twin[0]
    second = part_twin[1]
    return HttpResponse(json.dumps(getBetweenMarkovRecommend(first)), content_type="application/json")


@csrf_exempt
def deleteProject(request):
    request_json = json.loads(request.body)
    # projectId = request.POST.get('id', '')
    projectId = request_json['id']
    result = {
        'isSuccessful': True,
    }
    try:
        projectObj = project.objects.get(id=projectId)
        projectObj.is_deleted = 1
        projectObj.save()
    except:
        result['isSuccessful'] = False
    return HttpResponse(json.dumps(result), content_type="application/json")


@csrf_exempt
def getTrackFunctions(request):
    track_id = request.GET.get('track_id')
    result = {
        'isSuccessful': True
    }
    try:
        tf_list = track_functions.objects.filter(track_id=track_id)
        function_list = list()
        for tf_obj in tf_list:
            function_info = {
                'id': tf_obj.function.id,
                'name': tf_obj.function.function,
            }
            function_list.append(function_info)
        result['functions'] = function_list
    except:
        result['isSuccessful'] = False
        traceback.print_exc()
    return HttpResponse(json.dumps(result), content_type='application/json')


@csrf_exempt
def getResultImage(request):
    result = {
        'isSuccessful': True,
    }
    chainId = request.GET.get('id')
    try:
        chainObj = chain.objects.get(id=chainId)
        if not chainObj.isModified:
            result['filepath'] = chainObj.image_file_path
        else:
            chainStr = chainObj.sequence
            if chainStr.startswith('_'):
                chainStr = chainStr[1:]
            if chainStr == "" or chainStr == None:
                result['isSuccessful'] = False
            else:
                chainName = chainObj.name
                width = 80 * len(chainStr.split('_'))
                height = 100
                if width > 800:
                    width = 800
                    height = 100 * (len(chainStr.split('_')) / 10)
                result['filepath'] = getSequenceResultImage(chainStr, width, height, chainName)
                chainObj.isModified = False
                chainObj.image_file_path = result['filepath']
                chainObj.save()
    except:
        traceback.print_exc()
        result['isSuccessful'] = False
    return HttpResponse(json.dumps(result), content_type="application/json")


@csrf_exempt
def simulationView(request):
    try:
        isLoggedIn = request.session['isLoggedIn']
        if isLoggedIn:
            template = loader.get_template('home/simulation.html')
            context = RequestContext(request, {
            })
            return HttpResponse(template.render(context))
        else:
            return HttpResponseRedirect('/')
    except KeyError:
        return HttpResponseRedirect('/')


@csrf_exempt
def simulate(request):
    #received_json_data = request.POST
    received_json_data = json.loads(request.body)
    reactions = received_json_data['reactions']
    martials = received_json_data['martials']
    reation_time = received_json_data['reaction_time']
    try:
        rs = reaction_simulator(reactions, martials, reation_time)
        rs.doSimulation()
        result = rs.getProcess()
    except:
        result = {}
    return HttpResponse(json.dumps(result), content_type="application/json")
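
# Illustrative request/response sketch (not part of the original file): the
# JSON contract implied by createProject above.  The URL route lives in a
# urls.py that is not part of this dump, so the path and all concrete values
# here are made up.
#
#     POST /design/project/create       {"name": "my circuit", "track": 3}
#     ->  {"isSuccessful": true, "project_name": "my circuit", "id": 42,
#          "track": "Environment", "creator": "alice"}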
# ==== smesdaghi/geonode :: geonode/geoserver/context_processors.py (gpl-3.0) ====
#########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################

from django.conf import settings
from django.core.urlresolvers import reverse
from geonode.geoserver.helpers import ogc_server_settings


def geoserver_urls(request):
    """Global values to pass to templates"""
    defaults = dict(
        GEOSERVER_BASE_URL=ogc_server_settings.public_url,
        UPLOADER_URL=reverse('data_upload') if getattr(
            settings,
            'UPLOADER',
            dict()).get(
            'BACKEND',
            'geonode.rest') == 'geonode.importer' else reverse('layer_upload'),
        MAPFISH_PRINT_ENABLED=ogc_server_settings.MAPFISH_PRINT_ENABLED,
        PRINT_NG_ENABLED=ogc_server_settings.PRINT_NG_ENABLED,
        GEONODE_SECURITY_ENABLED=ogc_server_settings.GEONODE_SECURITY_ENABLED,
        GEOGIG_ENABLED=ogc_server_settings.GEOGIG_ENABLED,
        TIME_ENABLED=getattr(
            settings,
            'UPLOADER',
            dict()).get(
            'OPTIONS',
            dict()).get(
            'TIME_ENABLED',
            False),
    )

    return defaults
# ==== wallnerryan/quantum_migrate :: quantum/openstack/common/context.py (apache-2.0) ====
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""
Simple class that stores security context information in the web request.

Projects should subclass this class if they wish to enhance the request
context or provide additional information in their specific WSGI pipeline.
"""

import itertools
import uuid


def generate_request_id():
    return 'req-' + str(uuid.uuid4())


class RequestContext(object):

    """
    Stores information about the security context under which the user
    accesses the system, as well as additional request information.
    """

    def __init__(self, auth_token=None, user=None, tenant=None, is_admin=False,
                 read_only=False, show_deleted=False, request_id=None):
        self.auth_token = auth_token
        self.user = user
        self.tenant = tenant
        self.is_admin = is_admin
        self.read_only = read_only
        self.show_deleted = show_deleted
        if not request_id:
            request_id = generate_request_id()
        self.request_id = request_id

    def to_dict(self):
        return {'user': self.user,
                'tenant': self.tenant,
                'is_admin': self.is_admin,
                'read_only': self.read_only,
                'show_deleted': self.show_deleted,
                'auth_token': self.auth_token,
                'request_id': self.request_id}


def get_admin_context(show_deleted="no"):
    context = RequestContext(None,
                             tenant=None,
                             is_admin=True,
                             show_deleted=show_deleted)
    return context


def get_context_from_function_and_args(function, args, kwargs):
    """Find an arg of type RequestContext and return it.

       This is useful in a couple of decorators where we don't
       know much about the function we're wrapping.
    """

    for arg in itertools.chain(kwargs.values(), args):
        if isinstance(arg, RequestContext):
            return arg

    return None
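
# Illustrative usage sketch (not part of the original module).  The module
# only needs the standard library, so this runs as-is; the values are made up.
#
#     ctx = RequestContext(auth_token='tok', user='alice', tenant='demo')
#     assert ctx.request_id.startswith('req-')
#     assert get_context_from_function_and_args(None, [1, ctx], {}) is ctx
#     admin = get_admin_context()
#     assert admin.is_admin and admin.to_dict()['user'] is None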
# ==== vrenaville/OCB :: addons/association/__init__.py (agpl-3.0) ====
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
# ==== peterb12/nand2tetris :: src/JackCompiler/VMWriter.py (bsd-3-clause) ====
from jc_types import Scope


class VMWriter:
    arithmeticOperations = ["add", "sub", "and", "or", "not", "neg", "eq", "gt", "lt"]
    scopeTable = { Scope.STATIC : "static",
                   Scope.FIELD  : "dunno?",
                   Scope.ARG    : "argument",
                   Scope.VAR    : "local" }

    def __init__(self, dirName, fname):
        self.f = open(dirName + "/" + fname, "w")

    # For convenience in debugging.
    def _emit(self, string):
        # print(string)
        self.f.write(string + "\n")

    def writePush(self, segment, index):
        self._emit("push " + segment + " " + str(index))

    def writePop(self, segment, index):
        self._emit("pop " + segment + " " + str(index))

    def writeArithmetic(self, command):
        if (command in self.arithmeticOperations):
            self._emit(command)
        else:
            assert False, "Internal compiler error."

    def writeLabel(self, label):
        self._emit("label " + label)

    def writeGoto(self, label):
        self._emit("goto " + label)

    def writeIf(self, label):
        self._emit("if-goto " + label)

    def writeCall(self, name, nArgs):
        self._emit("call " + name + " " + str(nArgs))

    def writeFunction(self, name, nLocals):
        self._emit("function " + name + " " + str(nLocals))

    def writeReturn(self):
        self._emit("return")

    def close(self):
        self.f.close()
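
# Illustrative usage sketch (not part of the original file): emitting the VM
# translation of a trivial Jack subroutine.  Assumes `jc_types.Scope` from
# this project is importable; the output below lands in ./Main.vm.
#
#     w = VMWriter(".", "Main.vm")
#     w.writeFunction("Main.double", 0)   # function Main.double 0
#     w.writePush("argument", 0)          # push argument 0
#     w.writePush("argument", 0)          # push argument 0
#     w.writeArithmetic("add")            # add
#     w.writeReturn()                     # return
#     w.close()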
madgik/exareme | Exareme-Docker/src/exareme/exareme-tools/madis/src/lib/pyreadline/modes/notemacs.py | 1 | 24681 | # -*- coding: utf-8 -*-
# *****************************************************************************
# Copyright (C) 2003-2006 Gary Bishop.
# Copyright (C) 2006 Jorgen Stenarson. <[email protected]>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
# *****************************************************************************
import pyreadline.lineeditor.lineobj as lineobj
from pyreadline.logger import log
import basemode
class NotEmacsMode(basemode.BaseMode):
mode = "notemacs"
def __init__(self, rlobj):
super(NotEmacsMode, self).__init__(rlobj)
def __repr__(self):
return "<NotEmacsMode>"
def _readline_from_keyboard(self):
c = self.console
while 1:
self._update_line()
event = c.getkeypress()
if self.next_meta:
self.next_meta = False
control, meta, shift, code = event.keyinfo
event.keyinfo = (control, True, shift, code)
# Process exit keys. Only exit on empty line
if event.keyinfo in self.exit_dispatch:
if lineobj.EndOfLine(self.l_buffer) == 0:
raise EOFError
dispatch_func = self.key_dispatch.get(event.keyinfo, self.self_insert)
log("readline from keyboard:%s" % (event.keyinfo,))
r = None
if dispatch_func:
r = dispatch_func(event)
self.l_buffer.push_undo()
self.previous_func = dispatch_func
if r:
self._update_line()
break
def readline(self, prompt=''):
'''Try to act like GNU readline.'''
# handle startup_hook
if self.first_prompt:
self.first_prompt = False
if self.startup_hook:
try:
self.startup_hook()
except:
print 'startup hook failed'
traceback.print_exc()
c = self.console
self.l_buffer.reset_line()
self.prompt = prompt
self._print_prompt()
if self.pre_input_hook:
try:
self.pre_input_hook()
except:
print 'pre_input_hook failed'
traceback.print_exc()
self.pre_input_hook = None
log("in readline: %s" % self.paste_line_buffer)
if len(self.paste_line_buffer) > 0:
self.l_buffer = lineobj.ReadlineTextBuffer(self.paste_line_buffer[0])
self._update_line()
self.paste_line_buffer = self.paste_line_buffer[1:]
c.write('\r\n')
else:
self._readline_from_keyboard()
c.write('\r\n')
self.add_history(self.l_buffer.copy())
log('returning(%s)' % self.l_buffer.get_line_text())
return self.l_buffer.get_line_text() + '\n'
### Methods below here are bindable emacs functions
def beginning_of_line(self, e): # (C-a)
'''Move to the start of the current line. '''
self.l_buffer.beginning_of_line()
def end_of_line(self, e): # (C-e)
'''Move to the end of the line. '''
self.l_buffer.end_of_line()
def forward_char(self, e): # (C-f)
'''Move forward a character. '''
self.l_buffer.forward_char()
def backward_char(self, e): # (C-b)
'''Move back a character. '''
self.l_buffer.backward_char()
def forward_word(self, e): # (M-f)
'''Move forward to the end of the next word. Words are composed of
letters and digits.'''
self.l_buffer.forward_word()
def backward_word(self, e): # (M-b)
'''Move back to the start of the current or previous word. Words are
composed of letters and digits.'''
self.l_buffer.backward_word()
def clear_screen(self, e): # (C-l)
'''Clear the screen and redraw the current line, leaving the current
line at the top of the screen.'''
self.console.page()
def redraw_current_line(self, e): # ()
'''Refresh the current line. By default, this is unbound.'''
pass
def accept_line(self, e): # (Newline or Return)
'''Accept the line regardless of where the cursor is. If this line
is non-empty, it may be added to the history list for future recall
with add_history(). If this line is a modified history line, the
history line is restored to its original state.'''
return True
######### History commands
def previous_history(self, e): # (C-p)
'''Move back through the history list, fetching the previous command. '''
self._history.previous_history(self.l_buffer)
def next_history(self, e): # (C-n)
'''Move forward through the history list, fetching the next command. '''
self._history.next_history(self.l_buffer)
def beginning_of_history(self, e): # (M-<)
'''Move to the first line in the history.'''
self._history.beginning_of_history()
def end_of_history(self, e): # (M->)
'''Move to the end of the input history, i.e., the line currently
being entered.'''
self._history.end_of_history(self.l_buffer)
def _i_search(self, searchfun, direction, init_event):
c = self.console
line = self.get_line_buffer()
query = ''
hc_start = self._history.history_cursor # + direction
while 1:
x, y = self.prompt_end_pos
c.pos(0, y)
if direction < 0:
prompt = 'reverse-i-search'
else:
prompt = 'forward-i-search'
scroll = c.write_scrolling("%s`%s': %s" % (prompt, query, line))
self._update_prompt_pos(scroll)
self._clear_after()
event = c.getkeypress()
if event.keysym == 'BackSpace':
if len(query) > 0:
query = query[:-1]
self._history.history_cursor = hc_start
else:
self._bell()
elif event.char in string.letters + string.digits + string.punctuation + ' ':
self._history.history_cursor = hc_start
query += event.char
elif event.keyinfo == init_event.keyinfo:
self._history.history_cursor += direction
line = searchfun(query)
pass
else:
if event.keysym != 'Return':
self._bell()
break
line = searchfun(query)
px, py = self.prompt_begin_pos
c.pos(0, py)
self.l_buffer.set_line(line)
self._print_prompt()
self._history.history_cursor = len(self._history.history)
def reverse_search_history(self, e): # (C-r)
'''Search backward starting at the current line and moving up
through the history as necessary. This is an incremental search.'''
# print "HEJ"
# self.console.bell()
self._i_search(self._history.reverse_search_history, -1, e)
def forward_search_history(self, e): # (C-s)
'''Search forward starting at the current line and moving down
through the the history as necessary. This is an incremental search.'''
# print "HEJ"
# self.console.bell()
self._i_search(self._history.forward_search_history, 1, e)
def non_incremental_reverse_search_history(self, e): # (M-p)
'''Search backward starting at the current line and moving up
through the history as necessary using a non-incremental search for
a string supplied by the user.'''
self._history.non_incremental_reverse_search_history(self.l_buffer)
def non_incremental_forward_search_history(self, e): # (M-n)
'''Search forward starting at the current line and moving down
        through the history as necessary using a non-incremental search
for a string supplied by the user.'''
        self._history.non_incremental_forward_search_history(self.l_buffer)
def history_search_forward(self, e): # ()
'''Search forward through the history for the string of characters
between the start of the current line and the point. This is a
non-incremental search. By default, this command is unbound.'''
self.l_buffer = self._history.history_search_forward(self.l_buffer)
def history_search_backward(self, e): # ()
'''Search backward through the history for the string of characters
between the start of the current line and the point. This is a
non-incremental search. By default, this command is unbound.'''
self.l_buffer = self._history.history_search_backward(self.l_buffer)
def yank_nth_arg(self, e): # (M-C-y)
'''Insert the first argument to the previous command (usually the
second word on the previous line) at point. With an argument n,
insert the nth word from the previous command (the words in the
previous command begin with word 0). A negative argument inserts the
nth word from the end of the previous command.'''
pass
def yank_last_arg(self, e): # (M-. or M-_)
'''Insert last argument to the previous command (the last word of
the previous history entry). With an argument, behave exactly like
yank-nth-arg. Successive calls to yank-last-arg move back through
the history list, inserting the last argument of each line in turn.'''
pass
def delete_char(self, e): # (C-d)
'''Delete the character at point. If point is at the beginning of
the line, there are no characters in the line, and the last
character typed was not bound to delete-char, then return EOF.'''
self.l_buffer.delete_char()
def backward_delete_char(self, e): # (Rubout)
'''Delete the character behind the cursor. A numeric argument means
to kill the characters instead of deleting them.'''
self.l_buffer.backward_delete_char()
def forward_backward_delete_char(self, e): # ()
'''Delete the character under the cursor, unless the cursor is at
the end of the line, in which case the character behind the cursor
is deleted. By default, this is not bound to a key.'''
pass
def quoted_insert(self, e): # (C-q or C-v)
'''Add the next character typed to the line verbatim. This is how to
insert key sequences like C-q, for example.'''
e = self.console.getkeypress()
self.insert_text(e.char)
def tab_insert(self, e): # (M-TAB)
'''Insert a tab character. '''
ws = ' ' * (self.tabstop - (self.line_cursor % self.tabstop))
self.insert_text(ws)
def self_insert(self, e): # (a, b, A, 1, !, ...)
'''Insert yourself. '''
if ord(e.char) != 0: # don't insert null character in buffer, can happen with dead keys.
self.insert_text(e.char)
def transpose_chars(self, e): # (C-t)
'''Drag the character before the cursor forward over the character
at the cursor, moving the cursor forward as well. If the insertion
point is at the end of the line, then this transposes the last two
characters of the line. Negative arguments have no effect.'''
self.l_buffer.transpose_chars()
def transpose_words(self, e): # (M-t)
'''Drag the word before point past the word after point, moving
point past that word as well. If the insertion point is at the end
of the line, this transposes the last two words on the line.'''
self.l_buffer.transpose_words()
def upcase_word(self, e): # (M-u)
'''Uppercase the current (or following) word. With a negative
argument, uppercase the previous word, but do not move the cursor.'''
self.l_buffer.upcase_word()
def downcase_word(self, e): # (M-l)
'''Lowercase the current (or following) word. With a negative
argument, lowercase the previous word, but do not move the cursor.'''
self.l_buffer.downcase_word()
def capitalize_word(self, e): # (M-c)
'''Capitalize the current (or following) word. With a negative
argument, capitalize the previous word, but do not move the cursor.'''
self.l_buffer.capitalize_word()
def overwrite_mode(self, e): # ()
'''Toggle overwrite mode. With an explicit positive numeric
argument, switches to overwrite mode. With an explicit non-positive
numeric argument, switches to insert mode. This command affects only
emacs mode; vi mode does overwrite differently. Each call to
readline() starts in insert mode. In overwrite mode, characters
bound to self-insert replace the text at point rather than pushing
the text to the right. Characters bound to backward-delete-char
replace the character before point with a space.'''
pass
def kill_line(self, e): # (C-k)
'''Kill the text from point to the end of the line. '''
self.l_buffer.kill_line()
def backward_kill_line(self, e): # (C-x Rubout)
'''Kill backward to the beginning of the line. '''
self.l_buffer.backward_kill_line()
def unix_line_discard(self, e): # (C-u)
'''Kill backward from the cursor to the beginning of the current line. '''
# how is this different from backward_kill_line?
self.l_buffer.unix_line_discard()
def kill_whole_line(self, e): # ()
'''Kill all characters on the current line, no matter where point
is. By default, this is unbound.'''
self.l_buffer.kill_whole_line()
def kill_word(self, e): # (M-d)
'''Kill from point to the end of the current word, or if between
words, to the end of the next word. Word boundaries are the same as
forward-word.'''
self.l_buffer.kill_word()
def backward_kill_word(self, e): # (M-DEL)
'''Kill the word behind point. Word boundaries are the same as
backward-word. '''
self.l_buffer.backward_kill_word()
def unix_word_rubout(self, e): # (C-w)
'''Kill the word behind point, using white space as a word
boundary. The killed text is saved on the kill-ring.'''
self.l_buffer.unix_word_rubout()
def delete_horizontal_space(self, e): # ()
'''Delete all spaces and tabs around point. By default, this is unbound. '''
pass
def kill_region(self, e): # ()
'''Kill the text in the current region. By default, this command is unbound. '''
pass
def copy_region_as_kill(self, e): # ()
'''Copy the text in the region to the kill buffer, so it can be
yanked right away. By default, this command is unbound.'''
pass
def copy_region_to_clipboard(self, e): # ()
'''Copy the text in the region to the windows clipboard.'''
if self.enable_win32_clipboard:
mark = min(self.l_buffer.mark, len(self.l_buffer.line_buffer))
cursor = min(self.l_buffer.point, len(self.l_buffer.line_buffer))
if self.l_buffer.mark == -1:
return
begin = min(cursor, mark)
end = max(cursor, mark)
toclipboard = "".join(self.l_buffer.line_buffer[begin:end])
clipboard.SetClipboardText(str(toclipboard))
def copy_backward_word(self, e): # ()
'''Copy the word before point to the kill buffer. The word
boundaries are the same as backward-word. By default, this command
is unbound.'''
pass
def copy_forward_word(self, e): # ()
'''Copy the word following point to the kill buffer. The word
boundaries are the same as forward-word. By default, this command is
unbound.'''
pass
def paste(self, e):
'''Paste windows clipboard'''
if self.enable_win32_clipboard:
txt = clipboard.get_clipboard_text_and_convert(False)
self.insert_text(txt)
def paste_mulitline_code(self, e):
        '''Paste windows clipboard as multiline code: insert the first line
        and queue the remaining lines for subsequent reads.'''
reg = re.compile("\r?\n")
if self.enable_win32_clipboard:
txt = clipboard.get_clipboard_text_and_convert(False)
t = reg.split(txt)
t = [row for row in t if row.strip() != ""] # remove empty lines
if t != [""]:
self.insert_text(t[0])
self.add_history(self.l_buffer.copy())
self.paste_line_buffer = t[1:]
log("multi: %s" % self.paste_line_buffer)
return True
else:
return False
def ipython_paste(self, e):
'''Paste windows clipboard. If enable_ipython_paste_list_of_lists is
True then try to convert tabseparated data to repr of list of lists or
repr of array'''
if self.enable_win32_clipboard:
txt = clipboard.get_clipboard_text_and_convert(
self.enable_ipython_paste_list_of_lists)
if self.enable_ipython_paste_for_paths:
if len(txt) < 300 and ("\t" not in txt) and ("\n" not in txt):
txt = txt.replace("\\", "/").replace(" ", r"\ ")
self.insert_text(txt)
def yank(self, e): # (C-y)
'''Yank the top of the kill ring into the buffer at point. '''
pass
def yank_pop(self, e): # (M-y)
'''Rotate the kill-ring, and yank the new top. You can only do this
if the prior command is yank or yank-pop.'''
pass
def digit_argument(self, e): # (M-0, M-1, ... M--)
'''Add this digit to the argument already accumulating, or start a
new argument. M-- starts a negative argument.'''
pass
def universal_argument(self, e): # ()
'''This is another way to specify an argument. If this command is
followed by one or more digits, optionally with a leading minus
sign, those digits define the argument. If the command is followed
by digits, executing universal-argument again ends the numeric
argument, but is otherwise ignored. As a special case, if this
command is immediately followed by a character that is neither a
digit or minus sign, the argument count for the next command is
multiplied by four. The argument count is initially one, so
executing this function the first time makes the argument count
four, a second time makes the argument count sixteen, and so on. By
default, this is not bound to a key.'''
pass
def delete_char_or_list(self, e): # ()
'''Deletes the character under the cursor if not at the beginning or
end of the line (like delete-char). If at the end of the line,
behaves identically to possible-completions. This command is unbound
by default.'''
pass
def start_kbd_macro(self, e): # (C-x ()
'''Begin saving the characters typed into the current keyboard macro. '''
pass
def end_kbd_macro(self, e): # (C-x ))
'''Stop saving the characters typed into the current keyboard macro
and save the definition.'''
pass
def call_last_kbd_macro(self, e): # (C-x e)
'''Re-execute the last keyboard macro defined, by making the
characters in the macro appear as if typed at the keyboard.'''
pass
def re_read_init_file(self, e): # (C-x C-r)
'''Read in the contents of the inputrc file, and incorporate any
bindings or variable assignments found there.'''
pass
def abort(self, e): # (C-g)
'''Abort the current editing command and ring the terminals bell
(subject to the setting of bell-style).'''
self._bell()
def do_uppercase_version(self, e): # (M-a, M-b, M-x, ...)
'''If the metafied character x is lowercase, run the command that is
bound to the corresponding uppercase character.'''
pass
def prefix_meta(self, e): # (ESC)
'''Metafy the next character typed. This is for keyboards without a
meta key. Typing ESC f is equivalent to typing M-f. '''
self.next_meta = True
def undo(self, e): # (C-_ or C-x C-u)
'''Incremental undo, separately remembered for each line.'''
self.l_buffer.pop_undo()
def revert_line(self, e): # (M-r)
'''Undo all changes made to this line. This is like executing the
undo command enough times to get back to the beginning.'''
pass
def tilde_expand(self, e): # (M-~)
'''Perform tilde expansion on the current word.'''
pass
def set_mark(self, e): # (C-@)
'''Set the mark to the point. If a numeric argument is supplied, the
mark is set to that position.'''
self.l_buffer.set_mark()
def exchange_point_and_mark(self, e): # (C-x C-x)
'''Swap the point with the mark. The current cursor position is set
to the saved position, and the old cursor position is saved as the
mark.'''
pass
def character_search(self, e): # (C-])
'''A character is read and point is moved to the next occurrence of
that character. A negative count searches for previous occurrences.'''
pass
def character_search_backward(self, e): # (M-C-])
'''A character is read and point is moved to the previous occurrence
of that character. A negative count searches for subsequent
occurrences.'''
pass
def insert_comment(self, e): # (M-#)
'''Without a numeric argument, the value of the comment-begin
variable is inserted at the beginning of the current line. If a
numeric argument is supplied, this command acts as a toggle: if the
characters at the beginning of the line do not match the value of
comment-begin, the value is inserted, otherwise the characters in
comment-begin are deleted from the beginning of the line. In either
case, the line is accepted as if a newline had been typed.'''
pass
def dump_functions(self, e): # ()
'''Print all of the functions and their key bindings to the Readline
output stream. If a numeric argument is supplied, the output is
formatted in such a way that it can be made part of an inputrc
file. This command is unbound by default.'''
pass
def dump_variables(self, e): # ()
'''Print all of the settable variables and their values to the
Readline output stream. If a numeric argument is supplied, the
output is formatted in such a way that it can be made part of an
inputrc file. This command is unbound by default.'''
pass
def dump_macros(self, e): # ()
'''Print all of the Readline key sequences bound to macros and the
strings they output. If a numeric argument is supplied, the output
is formatted in such a way that it can be made part of an inputrc
file. This command is unbound by default.'''
pass
# Create key bindings:
def init_editing_mode(self, e): # (C-e)
        '''Initialize the key bindings used in emacs editing mode.'''
self._bind_exit_key('Control-d')
self._bind_exit_key('Control-z')
# I often accidentally hold the shift or control while typing space
self._bind_key('Shift-space', self.self_insert)
self._bind_key('Control-space', self.self_insert)
self._bind_key('Return', self.accept_line)
self._bind_key('Left', self.backward_char)
self._bind_key('Control-b', self.backward_char)
self._bind_key('Right', self.forward_char)
self._bind_key('Control-f', self.forward_char)
self._bind_key('BackSpace', self.backward_delete_char)
self._bind_key('Home', self.beginning_of_line)
self._bind_key('End', self.end_of_line)
self._bind_key('Delete', self.delete_char)
self._bind_key('Control-d', self.delete_char)
self._bind_key('Clear', self.clear_screen)
# make it case insensitive
def commonprefix(m):
"Given a list of pathnames, returns the longest common leading component"
if not m: return ''
prefix = m[0]
for item in m:
for i in range(len(prefix)):
if prefix[:i + 1].lower() != item[:i + 1].lower():
prefix = prefix[:i]
if i == 0: return ''
break
return prefix
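        # A hypothetical illustration (not from the original source):
        # commonprefix(['FooBar', 'fooBaz']) returns 'FooBa' -- the match is
        # case-insensitive, but the result keeps the first item's casing.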
| mit | -3,124,943,255,285,124,000 | 40.341709 | 97 | 0.59641 | false |
tangyiyong/odoo | addons/document/std_index.py | 430 | 7457 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from content_index import indexer, cntIndex
from subprocess import Popen, PIPE
import StringIO
import odt2txt
import sys, zipfile, xml.dom.minidom
import logging
_logger = logging.getLogger(__name__)
def _to_unicode(s):
try:
return s.decode('utf-8')
except UnicodeError:
try:
return s.decode('latin')
except UnicodeError:
try:
return s.encode('ascii')
except UnicodeError:
return s
def textToString(element):
buffer = u""
for node in element.childNodes :
if node.nodeType == xml.dom.Node.TEXT_NODE :
buffer += node.nodeValue
elif node.nodeType == xml.dom.Node.ELEMENT_NODE :
buffer += textToString(node)
return buffer
class TxtIndex(indexer):
def _getMimeTypes(self):
return ['text/plain','text/html','text/diff','text/xml', 'text/*',
'application/xml']
def _getExtensions(self):
return ['.txt', '.py']
def _doIndexContent(self, content):
return content
cntIndex.register(TxtIndex())
class PptxIndex(indexer):
def _getMimeTypes(self):
return [ 'application/vnd.openxmlformats-officedocument.presentationml.presentation']
def _getExtensions(self):
return ['.pptx']
def _doIndexFile(self, fname):
def toString () :
""" Converts the document to a string. """
buffer = u""
for val in ["a:t"]:
for paragraph in content.getElementsByTagName(val) :
buffer += textToString(paragraph) + "\n"
return buffer
data = []
zip = zipfile.ZipFile(fname)
files = filter(lambda x: x.startswith('ppt/slides/slide'), zip.namelist())
for i in range(1, len(files) + 1):
content = xml.dom.minidom.parseString(zip.read('ppt/slides/slide%s.xml' % str(i)))
res = toString().encode('ascii','replace')
data.append(res)
return _to_unicode('\n'.join(data))
cntIndex.register(PptxIndex())
class DocIndex(indexer):
def _getMimeTypes(self):
return [ 'application/ms-word']
def _getExtensions(self):
return ['.doc']
def _doIndexFile(self, fname):
try:
pop = Popen(['antiword', fname], shell=False, stdout=PIPE)
(data, _) = pop.communicate()
return _to_unicode(data)
except OSError:
_logger.warning("Failed attempt to execute antiword (MS Word reader). Antiword is necessary to index the file %s of MIME type %s. Detailed error available at DEBUG level.", fname, self._getMimeTypes()[0])
_logger.debug("Trace of the failed file indexing attempt.", exc_info=True)
return u''
cntIndex.register(DocIndex())
class DocxIndex(indexer):
def _getMimeTypes(self):
return [ 'application/vnd.openxmlformats-officedocument.wordprocessingml.document']
def _getExtensions(self):
return ['.docx']
def _doIndexFile(self, fname):
zip = zipfile.ZipFile(fname)
content = xml.dom.minidom.parseString(zip.read("word/document.xml"))
def toString () :
""" Converts the document to a string. """
buffer = u""
for val in ["w:p", "w:h", "text:list"]:
for paragraph in content.getElementsByTagName(val) :
buffer += textToString(paragraph) + "\n"
return buffer
res = toString().encode('ascii','replace')
return _to_unicode(res)
cntIndex.register(DocxIndex())
class XlsxIndex(indexer):
def _getMimeTypes(self):
return [ 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet']
def _getExtensions(self):
return ['.xlsx']
def _doIndexFile(self, fname):
zip = zipfile.ZipFile(fname)
content = xml.dom.minidom.parseString(zip.read("xl/sharedStrings.xml"))
def toString () :
""" Converts the document to a string. """
buffer = u""
for val in ["t"]:
for paragraph in content.getElementsByTagName(val) :
buffer += textToString(paragraph) + "\n"
return buffer
res = toString().encode('ascii','replace')
return _to_unicode(res)
cntIndex.register(XlsxIndex())
class PdfIndex(indexer):
def _getMimeTypes(self):
return [ 'application/pdf']
def _getExtensions(self):
return ['.pdf']
def _doIndexFile(self, fname):
try:
pop = Popen(['pdftotext', '-enc', 'UTF-8', '-nopgbrk', fname, '-'], shell=False, stdout=PIPE)
(data, _) = pop.communicate()
return _to_unicode(data)
except OSError:
_logger.warning("Failed attempt to execute pdftotext. This program is necessary to index the file %s of MIME type %s. Detailed error available at DEBUG level.", fname, self._getMimeTypes()[0])
_logger.debug("Trace of the failed file indexing attempt.", exc_info=True)
return u''
cntIndex.register(PdfIndex())
class ImageNoIndex(indexer):
def _getMimeTypes(self):
return [ 'image/*']
def _getExtensions(self):
#better return no extension, and let 'file' do its magic
return []
#return ['.png','.jpg','.gif','.jpeg','.bmp','.tiff']
def _doIndexContent(self, content):
return 'image'
cntIndex.register(ImageNoIndex())
# other opendocument formats:
# chart-template chart database
# formula-template formula graphics-template graphics
# image
# presentation-template presentation spreadsheet-template spreadsheet
class OpenDoc(indexer):
""" Index OpenDocument files.
Q: is it really worth it to index spreadsheets, or do we only get a
meaningless list of numbers (cell contents) ?
"""
def _getMimeTypes(self):
otypes = [ 'text', 'text-web', 'text-template', 'text-master' ]
return map(lambda a: 'application/vnd.oasis.opendocument.'+a, otypes)
def _getExtensions(self):
return ['.odt', '.ott', ] # '.ods'
def _doIndexContent(self, content):
s = StringIO.StringIO(content)
o = odt2txt.OpenDocumentTextFile(s)
result = _to_unicode(o.toString())
s.close()
return result
cntIndex.register(OpenDoc())
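# A minimal usage sketch (not part of the original module); the sample
# content and file path below are hypothetical examples.
#
#     TxtIndex()._doIndexContent('hello world')    # -> 'hello world'
#     PdfIndex()._doIndexFile('/tmp/example.pdf')  # shells out to pdftotext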
#eof
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -1,737,127,724,816,838,400 | 31.995575 | 216 | 0.601314 | false |
c0defreak/python-for-android | python3-alpha/extra_modules/gdata/Crypto/Util/RFC1751.py | 43 | 20130 | #!/usr/bin/python
# rfc1751.py : Converts between 128-bit strings and a human-readable
# sequence of words, as defined in RFC1751: "A Convention for
# Human-Readable 128-bit Keys", by Daniel L. McDonald.
__revision__ = "$Id: RFC1751.py,v 1.6 2003/04/04 15:15:10 akuchling Exp $"
import binascii
from functools import reduce
binary={0:'0000', 1:'0001', 2:'0010', 3:'0011', 4:'0100', 5:'0101',
6:'0110', 7:'0111', 8:'1000', 9:'1001', 10:'1010', 11:'1011',
12:'1100', 13:'1101', 14:'1110', 15:'1111'}
def _key2bin(s):
    "Convert a key into a string of binary digits"
    kl=[x if isinstance(x, int) else ord(x) for x in s]  # accept bytes or str
    kl=[binary[x//16]+binary[x&15] for x in kl]
    return ''.join(kl)
def _extract(key, start, length):
"""Extract a bitstring from a string of binary digits, and return its
numeric value."""
k=key[start:start+length]
return reduce(lambda x,y: x*2+ord(y)-48, k, 0)
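# For illustration (not part of the original module): _extract('0110', 0, 4)
# interprets the four binary digits as the integer 6.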
def key_to_english (key):
    """key_to_english(key:bytes or string) : string
    Transform an arbitrary key into a string containing English words.
    The key length must be a multiple of 8.
    """
    if isinstance(key, str):
        key = key.encode('latin-1')     # work on raw byte values
    english=''
    for index in range(0, len(key), 8): # Loop over 8-byte subkeys
        subkey=key[index:index+8]
        # Compute the parity of the key
        skbin=_key2bin(subkey) ; p=0
        for i in range(0, 64, 2): p=p+_extract(skbin, i, 2)
        # Append parity bits to the subkey
        skbin=_key2bin(subkey+bytes([(p<<6) & 255]))
        for i in range(0, 64, 11):
            english=english+wordlist[_extract(skbin, i, 11)]+' '
    return english[:-1]                 # Remove the trailing space
def english_to_key (str):
    """english_to_key(string):bytes
    Transform a string into the corresponding key, returned as bytes.
    The string must contain words separated by whitespace; the number
    of words must be a multiple of 6.
    """
    L=str.upper().split() ; key=b''
    for index in range(0, len(L), 6):
        sublist=L[index:index+6] ; char=9*[0] ; bits=0
        for i in sublist:
            index = wordlist.index(i)
            shift = (8-(bits+11)%8) %8
            y = index << shift
            cl, cc, cr = (y>>16), (y>>8)&0xff, y & 0xff
            if (shift>5):
                char[bits//8] = char[bits//8] | cl
                char[bits//8+1] = char[bits//8+1] | cc
                char[bits//8+2] = char[bits//8+2] | cr
            elif shift>-3:
                char[bits//8] = char[bits//8] | cc
                char[bits//8+1] = char[bits//8+1] | cr
            else: char[bits//8] = char[bits//8] | cr

            bits=bits+11
        subkey=bytes(char)

        # Check the parity of the resulting key
        skbin=_key2bin(subkey)
        p=0
        for i in range(0, 64, 2): p=p+_extract(skbin, i, 2)
        if (p&3) != _extract(skbin, 64, 2):
            raise ValueError("Parity error in resulting key")
        key=key+subkey[0:8]
    return key
wordlist=[ "A", "ABE", "ACE", "ACT", "AD", "ADA", "ADD",
"AGO", "AID", "AIM", "AIR", "ALL", "ALP", "AM", "AMY", "AN", "ANA",
"AND", "ANN", "ANT", "ANY", "APE", "APS", "APT", "ARC", "ARE", "ARK",
"ARM", "ART", "AS", "ASH", "ASK", "AT", "ATE", "AUG", "AUK", "AVE",
"AWE", "AWK", "AWL", "AWN", "AX", "AYE", "BAD", "BAG", "BAH", "BAM",
"BAN", "BAR", "BAT", "BAY", "BE", "BED", "BEE", "BEG", "BEN", "BET",
"BEY", "BIB", "BID", "BIG", "BIN", "BIT", "BOB", "BOG", "BON", "BOO",
"BOP", "BOW", "BOY", "BUB", "BUD", "BUG", "BUM", "BUN", "BUS", "BUT",
"BUY", "BY", "BYE", "CAB", "CAL", "CAM", "CAN", "CAP", "CAR", "CAT",
"CAW", "COD", "COG", "COL", "CON", "COO", "COP", "COT", "COW", "COY",
"CRY", "CUB", "CUE", "CUP", "CUR", "CUT", "DAB", "DAD", "DAM", "DAN",
"DAR", "DAY", "DEE", "DEL", "DEN", "DES", "DEW", "DID", "DIE", "DIG",
"DIN", "DIP", "DO", "DOE", "DOG", "DON", "DOT", "DOW", "DRY", "DUB",
"DUD", "DUE", "DUG", "DUN", "EAR", "EAT", "ED", "EEL", "EGG", "EGO",
"ELI", "ELK", "ELM", "ELY", "EM", "END", "EST", "ETC", "EVA", "EVE",
"EWE", "EYE", "FAD", "FAN", "FAR", "FAT", "FAY", "FED", "FEE", "FEW",
"FIB", "FIG", "FIN", "FIR", "FIT", "FLO", "FLY", "FOE", "FOG", "FOR",
"FRY", "FUM", "FUN", "FUR", "GAB", "GAD", "GAG", "GAL", "GAM", "GAP",
"GAS", "GAY", "GEE", "GEL", "GEM", "GET", "GIG", "GIL", "GIN", "GO",
"GOT", "GUM", "GUN", "GUS", "GUT", "GUY", "GYM", "GYP", "HA", "HAD",
"HAL", "HAM", "HAN", "HAP", "HAS", "HAT", "HAW", "HAY", "HE", "HEM",
"HEN", "HER", "HEW", "HEY", "HI", "HID", "HIM", "HIP", "HIS", "HIT",
"HO", "HOB", "HOC", "HOE", "HOG", "HOP", "HOT", "HOW", "HUB", "HUE",
"HUG", "HUH", "HUM", "HUT", "I", "ICY", "IDA", "IF", "IKE", "ILL",
"INK", "INN", "IO", "ION", "IQ", "IRA", "IRE", "IRK", "IS", "IT",
"ITS", "IVY", "JAB", "JAG", "JAM", "JAN", "JAR", "JAW", "JAY", "JET",
"JIG", "JIM", "JO", "JOB", "JOE", "JOG", "JOT", "JOY", "JUG", "JUT",
"KAY", "KEG", "KEN", "KEY", "KID", "KIM", "KIN", "KIT", "LA", "LAB",
"LAC", "LAD", "LAG", "LAM", "LAP", "LAW", "LAY", "LEA", "LED", "LEE",
"LEG", "LEN", "LEO", "LET", "LEW", "LID", "LIE", "LIN", "LIP", "LIT",
"LO", "LOB", "LOG", "LOP", "LOS", "LOT", "LOU", "LOW", "LOY", "LUG",
"LYE", "MA", "MAC", "MAD", "MAE", "MAN", "MAO", "MAP", "MAT", "MAW",
"MAY", "ME", "MEG", "MEL", "MEN", "MET", "MEW", "MID", "MIN", "MIT",
"MOB", "MOD", "MOE", "MOO", "MOP", "MOS", "MOT", "MOW", "MUD", "MUG",
"MUM", "MY", "NAB", "NAG", "NAN", "NAP", "NAT", "NAY", "NE", "NED",
"NEE", "NET", "NEW", "NIB", "NIL", "NIP", "NIT", "NO", "NOB", "NOD",
"NON", "NOR", "NOT", "NOV", "NOW", "NU", "NUN", "NUT", "O", "OAF",
"OAK", "OAR", "OAT", "ODD", "ODE", "OF", "OFF", "OFT", "OH", "OIL",
"OK", "OLD", "ON", "ONE", "OR", "ORB", "ORE", "ORR", "OS", "OTT",
"OUR", "OUT", "OVA", "OW", "OWE", "OWL", "OWN", "OX", "PA", "PAD",
"PAL", "PAM", "PAN", "PAP", "PAR", "PAT", "PAW", "PAY", "PEA", "PEG",
"PEN", "PEP", "PER", "PET", "PEW", "PHI", "PI", "PIE", "PIN", "PIT",
"PLY", "PO", "POD", "POE", "POP", "POT", "POW", "PRO", "PRY", "PUB",
"PUG", "PUN", "PUP", "PUT", "QUO", "RAG", "RAM", "RAN", "RAP", "RAT",
"RAW", "RAY", "REB", "RED", "REP", "RET", "RIB", "RID", "RIG", "RIM",
"RIO", "RIP", "ROB", "ROD", "ROE", "RON", "ROT", "ROW", "ROY", "RUB",
"RUE", "RUG", "RUM", "RUN", "RYE", "SAC", "SAD", "SAG", "SAL", "SAM",
"SAN", "SAP", "SAT", "SAW", "SAY", "SEA", "SEC", "SEE", "SEN", "SET",
"SEW", "SHE", "SHY", "SIN", "SIP", "SIR", "SIS", "SIT", "SKI", "SKY",
"SLY", "SO", "SOB", "SOD", "SON", "SOP", "SOW", "SOY", "SPA", "SPY",
"SUB", "SUD", "SUE", "SUM", "SUN", "SUP", "TAB", "TAD", "TAG", "TAN",
"TAP", "TAR", "TEA", "TED", "TEE", "TEN", "THE", "THY", "TIC", "TIE",
"TIM", "TIN", "TIP", "TO", "TOE", "TOG", "TOM", "TON", "TOO", "TOP",
"TOW", "TOY", "TRY", "TUB", "TUG", "TUM", "TUN", "TWO", "UN", "UP",
"US", "USE", "VAN", "VAT", "VET", "VIE", "WAD", "WAG", "WAR", "WAS",
"WAY", "WE", "WEB", "WED", "WEE", "WET", "WHO", "WHY", "WIN", "WIT",
"WOK", "WON", "WOO", "WOW", "WRY", "WU", "YAM", "YAP", "YAW", "YE",
"YEA", "YES", "YET", "YOU", "ABED", "ABEL", "ABET", "ABLE", "ABUT",
"ACHE", "ACID", "ACME", "ACRE", "ACTA", "ACTS", "ADAM", "ADDS",
"ADEN", "AFAR", "AFRO", "AGEE", "AHEM", "AHOY", "AIDA", "AIDE",
"AIDS", "AIRY", "AJAR", "AKIN", "ALAN", "ALEC", "ALGA", "ALIA",
"ALLY", "ALMA", "ALOE", "ALSO", "ALTO", "ALUM", "ALVA", "AMEN",
"AMES", "AMID", "AMMO", "AMOK", "AMOS", "AMRA", "ANDY", "ANEW",
"ANNA", "ANNE", "ANTE", "ANTI", "AQUA", "ARAB", "ARCH", "AREA",
"ARGO", "ARID", "ARMY", "ARTS", "ARTY", "ASIA", "ASKS", "ATOM",
"AUNT", "AURA", "AUTO", "AVER", "AVID", "AVIS", "AVON", "AVOW",
"AWAY", "AWRY", "BABE", "BABY", "BACH", "BACK", "BADE", "BAIL",
"BAIT", "BAKE", "BALD", "BALE", "BALI", "BALK", "BALL", "BALM",
"BAND", "BANE", "BANG", "BANK", "BARB", "BARD", "BARE", "BARK",
"BARN", "BARR", "BASE", "BASH", "BASK", "BASS", "BATE", "BATH",
"BAWD", "BAWL", "BEAD", "BEAK", "BEAM", "BEAN", "BEAR", "BEAT",
"BEAU", "BECK", "BEEF", "BEEN", "BEER",
"BEET", "BELA", "BELL", "BELT", "BEND", "BENT", "BERG", "BERN",
"BERT", "BESS", "BEST", "BETA", "BETH", "BHOY", "BIAS", "BIDE",
"BIEN", "BILE", "BILK", "BILL", "BIND", "BING", "BIRD", "BITE",
"BITS", "BLAB", "BLAT", "BLED", "BLEW", "BLOB", "BLOC", "BLOT",
"BLOW", "BLUE", "BLUM", "BLUR", "BOAR", "BOAT", "BOCA", "BOCK",
"BODE", "BODY", "BOGY", "BOHR", "BOIL", "BOLD", "BOLO", "BOLT",
"BOMB", "BONA", "BOND", "BONE", "BONG", "BONN", "BONY", "BOOK",
"BOOM", "BOON", "BOOT", "BORE", "BORG", "BORN", "BOSE", "BOSS",
"BOTH", "BOUT", "BOWL", "BOYD", "BRAD", "BRAE", "BRAG", "BRAN",
"BRAY", "BRED", "BREW", "BRIG", "BRIM", "BROW", "BUCK", "BUDD",
"BUFF", "BULB", "BULK", "BULL", "BUNK", "BUNT", "BUOY", "BURG",
"BURL", "BURN", "BURR", "BURT", "BURY", "BUSH", "BUSS", "BUST",
"BUSY", "BYTE", "CADY", "CAFE", "CAGE", "CAIN", "CAKE", "CALF",
"CALL", "CALM", "CAME", "CANE", "CANT", "CARD", "CARE", "CARL",
"CARR", "CART", "CASE", "CASH", "CASK", "CAST", "CAVE", "CEIL",
"CELL", "CENT", "CERN", "CHAD", "CHAR", "CHAT", "CHAW", "CHEF",
"CHEN", "CHEW", "CHIC", "CHIN", "CHOU", "CHOW", "CHUB", "CHUG",
"CHUM", "CITE", "CITY", "CLAD", "CLAM", "CLAN", "CLAW", "CLAY",
"CLOD", "CLOG", "CLOT", "CLUB", "CLUE", "COAL", "COAT", "COCA",
"COCK", "COCO", "CODA", "CODE", "CODY", "COED", "COIL", "COIN",
"COKE", "COLA", "COLD", "COLT", "COMA", "COMB", "COME", "COOK",
"COOL", "COON", "COOT", "CORD", "CORE", "CORK", "CORN", "COST",
"COVE", "COWL", "CRAB", "CRAG", "CRAM", "CRAY", "CREW", "CRIB",
"CROW", "CRUD", "CUBA", "CUBE", "CUFF", "CULL", "CULT", "CUNY",
"CURB", "CURD", "CURE", "CURL", "CURT", "CUTS", "DADE", "DALE",
"DAME", "DANA", "DANE", "DANG", "DANK", "DARE", "DARK", "DARN",
"DART", "DASH", "DATA", "DATE", "DAVE", "DAVY", "DAWN", "DAYS",
"DEAD", "DEAF", "DEAL", "DEAN", "DEAR", "DEBT", "DECK", "DEED",
"DEEM", "DEER", "DEFT", "DEFY", "DELL", "DENT", "DENY", "DESK",
"DIAL", "DICE", "DIED", "DIET", "DIME", "DINE", "DING", "DINT",
"DIRE", "DIRT", "DISC", "DISH", "DISK", "DIVE", "DOCK", "DOES",
"DOLE", "DOLL", "DOLT", "DOME", "DONE", "DOOM", "DOOR", "DORA",
"DOSE", "DOTE", "DOUG", "DOUR", "DOVE", "DOWN", "DRAB", "DRAG",
"DRAM", "DRAW", "DREW", "DRUB", "DRUG", "DRUM", "DUAL", "DUCK",
"DUCT", "DUEL", "DUET", "DUKE", "DULL", "DUMB", "DUNE", "DUNK",
"DUSK", "DUST", "DUTY", "EACH", "EARL", "EARN", "EASE", "EAST",
"EASY", "EBEN", "ECHO", "EDDY", "EDEN", "EDGE", "EDGY", "EDIT",
"EDNA", "EGAN", "ELAN", "ELBA", "ELLA", "ELSE", "EMIL", "EMIT",
"EMMA", "ENDS", "ERIC", "EROS", "EVEN", "EVER", "EVIL", "EYED",
"FACE", "FACT", "FADE", "FAIL", "FAIN", "FAIR", "FAKE", "FALL",
"FAME", "FANG", "FARM", "FAST", "FATE", "FAWN", "FEAR", "FEAT",
"FEED", "FEEL", "FEET", "FELL", "FELT", "FEND", "FERN", "FEST",
"FEUD", "FIEF", "FIGS", "FILE", "FILL", "FILM", "FIND", "FINE",
"FINK", "FIRE", "FIRM", "FISH", "FISK", "FIST", "FITS", "FIVE",
"FLAG", "FLAK", "FLAM", "FLAT", "FLAW", "FLEA", "FLED", "FLEW",
"FLIT", "FLOC", "FLOG", "FLOW", "FLUB", "FLUE", "FOAL", "FOAM",
"FOGY", "FOIL", "FOLD", "FOLK", "FOND", "FONT", "FOOD", "FOOL",
"FOOT", "FORD", "FORE", "FORK", "FORM", "FORT", "FOSS", "FOUL",
"FOUR", "FOWL", "FRAU", "FRAY", "FRED", "FREE", "FRET", "FREY",
"FROG", "FROM", "FUEL", "FULL", "FUME", "FUND", "FUNK", "FURY",
"FUSE", "FUSS", "GAFF", "GAGE", "GAIL", "GAIN", "GAIT", "GALA",
"GALE", "GALL", "GALT", "GAME", "GANG", "GARB", "GARY", "GASH",
"GATE", "GAUL", "GAUR", "GAVE", "GAWK", "GEAR", "GELD", "GENE",
"GENT", "GERM", "GETS", "GIBE", "GIFT", "GILD", "GILL", "GILT",
"GINA", "GIRD", "GIRL", "GIST", "GIVE", "GLAD", "GLEE", "GLEN",
"GLIB", "GLOB", "GLOM", "GLOW", "GLUE", "GLUM", "GLUT", "GOAD",
"GOAL", "GOAT", "GOER", "GOES", "GOLD", "GOLF", "GONE", "GONG",
"GOOD", "GOOF", "GORE", "GORY", "GOSH", "GOUT", "GOWN", "GRAB",
"GRAD", "GRAY", "GREG", "GREW", "GREY", "GRID", "GRIM", "GRIN",
"GRIT", "GROW", "GRUB", "GULF", "GULL", "GUNK", "GURU", "GUSH",
"GUST", "GWEN", "GWYN", "HAAG", "HAAS", "HACK", "HAIL", "HAIR",
"HALE", "HALF", "HALL", "HALO", "HALT", "HAND", "HANG", "HANK",
"HANS", "HARD", "HARK", "HARM", "HART", "HASH", "HAST", "HATE",
"HATH", "HAUL", "HAVE", "HAWK", "HAYS", "HEAD", "HEAL", "HEAR",
"HEAT", "HEBE", "HECK", "HEED", "HEEL", "HEFT", "HELD", "HELL",
"HELM", "HERB", "HERD", "HERE", "HERO", "HERS", "HESS", "HEWN",
"HICK", "HIDE", "HIGH", "HIKE", "HILL", "HILT", "HIND", "HINT",
"HIRE", "HISS", "HIVE", "HOBO", "HOCK", "HOFF", "HOLD", "HOLE",
"HOLM", "HOLT", "HOME", "HONE", "HONK", "HOOD", "HOOF", "HOOK",
"HOOT", "HORN", "HOSE", "HOST", "HOUR", "HOVE", "HOWE", "HOWL",
"HOYT", "HUCK", "HUED", "HUFF", "HUGE", "HUGH", "HUGO", "HULK",
"HULL", "HUNK", "HUNT", "HURD", "HURL", "HURT", "HUSH", "HYDE",
"HYMN", "IBIS", "ICON", "IDEA", "IDLE", "IFFY", "INCA", "INCH",
"INTO", "IONS", "IOTA", "IOWA", "IRIS", "IRMA", "IRON", "ISLE",
"ITCH", "ITEM", "IVAN", "JACK", "JADE", "JAIL", "JAKE", "JANE",
"JAVA", "JEAN", "JEFF", "JERK", "JESS", "JEST", "JIBE", "JILL",
"JILT", "JIVE", "JOAN", "JOBS", "JOCK", "JOEL", "JOEY", "JOHN",
"JOIN", "JOKE", "JOLT", "JOVE", "JUDD", "JUDE", "JUDO", "JUDY",
"JUJU", "JUKE", "JULY", "JUNE", "JUNK", "JUNO", "JURY", "JUST",
"JUTE", "KAHN", "KALE", "KANE", "KANT", "KARL", "KATE", "KEEL",
"KEEN", "KENO", "KENT", "KERN", "KERR", "KEYS", "KICK", "KILL",
"KIND", "KING", "KIRK", "KISS", "KITE", "KLAN", "KNEE", "KNEW",
"KNIT", "KNOB", "KNOT", "KNOW", "KOCH", "KONG", "KUDO", "KURD",
"KURT", "KYLE", "LACE", "LACK", "LACY", "LADY", "LAID", "LAIN",
"LAIR", "LAKE", "LAMB", "LAME", "LAND", "LANE", "LANG", "LARD",
"LARK", "LASS", "LAST", "LATE", "LAUD", "LAVA", "LAWN", "LAWS",
"LAYS", "LEAD", "LEAF", "LEAK", "LEAN", "LEAR", "LEEK", "LEER",
"LEFT", "LEND", "LENS", "LENT", "LEON", "LESK", "LESS", "LEST",
"LETS", "LIAR", "LICE", "LICK", "LIED", "LIEN", "LIES", "LIEU",
"LIFE", "LIFT", "LIKE", "LILA", "LILT", "LILY", "LIMA", "LIMB",
"LIME", "LIND", "LINE", "LINK", "LINT", "LION", "LISA", "LIST",
"LIVE", "LOAD", "LOAF", "LOAM", "LOAN", "LOCK", "LOFT", "LOGE",
"LOIS", "LOLA", "LONE", "LONG", "LOOK", "LOON", "LOOT", "LORD",
"LORE", "LOSE", "LOSS", "LOST", "LOUD", "LOVE", "LOWE", "LUCK",
"LUCY", "LUGE", "LUKE", "LULU", "LUND", "LUNG", "LURA", "LURE",
"LURK", "LUSH", "LUST", "LYLE", "LYNN", "LYON", "LYRA", "MACE",
"MADE", "MAGI", "MAID", "MAIL", "MAIN", "MAKE", "MALE", "MALI",
"MALL", "MALT", "MANA", "MANN", "MANY", "MARC", "MARE", "MARK",
"MARS", "MART", "MARY", "MASH", "MASK", "MASS", "MAST", "MATE",
"MATH", "MAUL", "MAYO", "MEAD", "MEAL", "MEAN", "MEAT", "MEEK",
"MEET", "MELD", "MELT", "MEMO", "MEND", "MENU", "MERT", "MESH",
"MESS", "MICE", "MIKE", "MILD", "MILE", "MILK", "MILL", "MILT",
"MIMI", "MIND", "MINE", "MINI", "MINK", "MINT", "MIRE", "MISS",
"MIST", "MITE", "MITT", "MOAN", "MOAT", "MOCK", "MODE", "MOLD",
"MOLE", "MOLL", "MOLT", "MONA", "MONK", "MONT", "MOOD", "MOON",
"MOOR", "MOOT", "MORE", "MORN", "MORT", "MOSS", "MOST", "MOTH",
"MOVE", "MUCH", "MUCK", "MUDD", "MUFF", "MULE", "MULL", "MURK",
"MUSH", "MUST", "MUTE", "MUTT", "MYRA", "MYTH", "NAGY", "NAIL",
"NAIR", "NAME", "NARY", "NASH", "NAVE", "NAVY", "NEAL", "NEAR",
"NEAT", "NECK", "NEED", "NEIL", "NELL", "NEON", "NERO", "NESS",
"NEST", "NEWS", "NEWT", "NIBS", "NICE", "NICK", "NILE", "NINA",
"NINE", "NOAH", "NODE", "NOEL", "NOLL", "NONE", "NOOK", "NOON",
"NORM", "NOSE", "NOTE", "NOUN", "NOVA", "NUDE", "NULL", "NUMB",
"OATH", "OBEY", "OBOE", "ODIN", "OHIO", "OILY", "OINT", "OKAY",
"OLAF", "OLDY", "OLGA", "OLIN", "OMAN", "OMEN", "OMIT", "ONCE",
"ONES", "ONLY", "ONTO", "ONUS", "ORAL", "ORGY", "OSLO", "OTIS",
"OTTO", "OUCH", "OUST", "OUTS", "OVAL", "OVEN", "OVER", "OWLY",
"OWNS", "QUAD", "QUIT", "QUOD", "RACE", "RACK", "RACY", "RAFT",
"RAGE", "RAID", "RAIL", "RAIN", "RAKE", "RANK", "RANT", "RARE",
"RASH", "RATE", "RAVE", "RAYS", "READ", "REAL", "REAM", "REAR",
"RECK", "REED", "REEF", "REEK", "REEL", "REID", "REIN", "RENA",
"REND", "RENT", "REST", "RICE", "RICH", "RICK", "RIDE", "RIFT",
"RILL", "RIME", "RING", "RINK", "RISE", "RISK", "RITE", "ROAD",
"ROAM", "ROAR", "ROBE", "ROCK", "RODE", "ROIL", "ROLL", "ROME",
"ROOD", "ROOF", "ROOK", "ROOM", "ROOT", "ROSA", "ROSE", "ROSS",
"ROSY", "ROTH", "ROUT", "ROVE", "ROWE", "ROWS", "RUBE", "RUBY",
"RUDE", "RUDY", "RUIN", "RULE", "RUNG", "RUNS", "RUNT", "RUSE",
"RUSH", "RUSK", "RUSS", "RUST", "RUTH", "SACK", "SAFE", "SAGE",
"SAID", "SAIL", "SALE", "SALK", "SALT", "SAME", "SAND", "SANE",
"SANG", "SANK", "SARA", "SAUL", "SAVE", "SAYS", "SCAN", "SCAR",
"SCAT", "SCOT", "SEAL", "SEAM", "SEAR", "SEAT", "SEED", "SEEK",
"SEEM", "SEEN", "SEES", "SELF", "SELL", "SEND", "SENT", "SETS",
"SEWN", "SHAG", "SHAM", "SHAW", "SHAY", "SHED", "SHIM", "SHIN",
"SHOD", "SHOE", "SHOT", "SHOW", "SHUN", "SHUT", "SICK", "SIDE",
"SIFT", "SIGH", "SIGN", "SILK", "SILL", "SILO", "SILT", "SINE",
"SING", "SINK", "SIRE", "SITE", "SITS", "SITU", "SKAT", "SKEW",
"SKID", "SKIM", "SKIN", "SKIT", "SLAB", "SLAM", "SLAT", "SLAY",
"SLED", "SLEW", "SLID", "SLIM", "SLIT", "SLOB", "SLOG", "SLOT",
"SLOW", "SLUG", "SLUM", "SLUR", "SMOG", "SMUG", "SNAG", "SNOB",
"SNOW", "SNUB", "SNUG", "SOAK", "SOAR", "SOCK", "SODA", "SOFA",
"SOFT", "SOIL", "SOLD", "SOME", "SONG", "SOON", "SOOT", "SORE",
"SORT", "SOUL", "SOUR", "SOWN", "STAB", "STAG", "STAN", "STAR",
"STAY", "STEM", "STEW", "STIR", "STOW", "STUB", "STUN", "SUCH",
"SUDS", "SUIT", "SULK", "SUMS", "SUNG", "SUNK", "SURE", "SURF",
"SWAB", "SWAG", "SWAM", "SWAN", "SWAT", "SWAY", "SWIM", "SWUM",
"TACK", "TACT", "TAIL", "TAKE", "TALE", "TALK", "TALL", "TANK",
"TASK", "TATE", "TAUT", "TEAL", "TEAM", "TEAR", "TECH", "TEEM",
"TEEN", "TEET", "TELL", "TEND", "TENT", "TERM", "TERN", "TESS",
"TEST", "THAN", "THAT", "THEE", "THEM", "THEN", "THEY", "THIN",
"THIS", "THUD", "THUG", "TICK", "TIDE", "TIDY", "TIED", "TIER",
"TILE", "TILL", "TILT", "TIME", "TINA", "TINE", "TINT", "TINY",
"TIRE", "TOAD", "TOGO", "TOIL", "TOLD", "TOLL", "TONE", "TONG",
"TONY", "TOOK", "TOOL", "TOOT", "TORE", "TORN", "TOTE", "TOUR",
"TOUT", "TOWN", "TRAG", "TRAM", "TRAY", "TREE", "TREK", "TRIG",
"TRIM", "TRIO", "TROD", "TROT", "TROY", "TRUE", "TUBA", "TUBE",
"TUCK", "TUFT", "TUNA", "TUNE", "TUNG", "TURF", "TURN", "TUSK",
"TWIG", "TWIN", "TWIT", "ULAN", "UNIT", "URGE", "USED", "USER",
"USES", "UTAH", "VAIL", "VAIN", "VALE", "VARY", "VASE", "VAST",
"VEAL", "VEDA", "VEIL", "VEIN", "VEND", "VENT", "VERB", "VERY",
"VETO", "VICE", "VIEW", "VINE", "VISE", "VOID", "VOLT", "VOTE",
"WACK", "WADE", "WAGE", "WAIL", "WAIT", "WAKE", "WALE", "WALK",
"WALL", "WALT", "WAND", "WANE", "WANG", "WANT", "WARD", "WARM",
"WARN", "WART", "WASH", "WAST", "WATS", "WATT", "WAVE", "WAVY",
"WAYS", "WEAK", "WEAL", "WEAN", "WEAR", "WEED", "WEEK", "WEIR",
"WELD", "WELL", "WELT", "WENT", "WERE", "WERT", "WEST", "WHAM",
"WHAT", "WHEE", "WHEN", "WHET", "WHOA", "WHOM", "WICK", "WIFE",
"WILD", "WILL", "WIND", "WINE", "WING", "WINK", "WINO", "WIRE",
"WISE", "WISH", "WITH", "WOLF", "WONT", "WOOD", "WOOL", "WORD",
"WORE", "WORK", "WORM", "WORN", "WOVE", "WRIT", "WYNN", "YALE",
"YANG", "YANK", "YARD", "YARN", "YAWL", "YAWN", "YEAH", "YEAR",
"YELL", "YOGA", "YOKE" ]
if __name__=='__main__':
data = [('EB33F77EE73D4053', 'TIDE ITCH SLOW REIN RULE MOT'),
('CCAC2AED591056BE4F90FD441C534766',
'RASH BUSH MILK LOOK BAD BRIM AVID GAFF BAIT ROT POD LOVE'),
('EFF81F9BFBC65350920CDD7416DE8009',
'TROD MUTE TAIL WARM CHAR KONG HAAG CITY BORE O TEAL AWL')
]
for key, words in data:
print('Trying key', key)
key=binascii.a2b_hex(key)
w2=key_to_english(key)
if w2!=words:
print('key_to_english fails on key', repr(key), ', producing', str(w2))
k2=english_to_key(words)
if k2!=key:
print('english_to_key fails on key', repr(key), ', producing', repr(k2))
| apache-2.0 | -2,186,938,277,706,024,700 | 57.688047 | 84 | 0.479185 | false |
tp-openstack-m2dl/TP-MapReduce | python/mapreduce.py | 1 | 1232 | from multiprocessing import Pool
"""
A generator function for chopping up a given list into chunks of
length n.
"""
def chunks(l, n):
for i in xrange(0, len(l), n):
yield l[i:i+n]
def MapReduce(Map, Reduce, text):
l = min(len(text),8)
    # Fragment the text into up to 8 chunks (one per worker)
partitioned_text = list(chunks(text, len(text) / l))
# Build a pool of l processes
pool = Pool(processes=l,)
    # Map each chunk to a list of (token, 1) count tuples
single_count_tuples = pool.map(Map, partitioned_text)
# Organize the count tuples; lists of tuples by token key
token_to_tuples = Partition(single_count_tuples)
# Collapse the lists of tuples into total term frequencies
term_frequencies = pool.map(Reduce, token_to_tuples.items())
return term_frequencies
"""
Group the sublists of (token, 1) pairs into a term-frequency-list
map, so that the Reduce operation later can work on sorted
term counts. The returned result is a dictionary with the structure
{token : [(token, 1), ...] .. }
"""
def Partition(L):
tf = {}
for sublist in L:
for p in sublist:
# Append the tuple to the list in the map
try:
tf[p[0]].append (p)
except KeyError:
tf[p[0]] = [p]
return tf
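# A minimal usage sketch (not part of the original module): the Map and
# Reduce functions and the sample text below are hypothetical examples.
def ExampleMap(chunk):
    # Emit a (token, 1) pair for every whitespace-separated token.
    return [(token, 1) for token in chunk.split()]

def ExampleReduce(item):
    # item is (token, [(token, 1), ...]); collapse it to (token, count).
    token, tuples = item
    return (token, len(tuples))

if __name__ == "__main__":
    sample = "the quick brown fox jumps over the lazy dog " * 8
    print MapReduce(ExampleMap, ExampleReduce, sample)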
| apache-2.0 | -7,623,530,765,930,327,000 | 24.142857 | 67 | 0.673701 | false |
pitch-sands/i-MPI | flask/Lib/site-packages/openid/message.py | 146 | 21562 | """Extension argument processing code
"""
__all__ = ['Message', 'NamespaceMap', 'no_default', 'registerNamespaceAlias',
'OPENID_NS', 'BARE_NS', 'OPENID1_NS', 'OPENID2_NS', 'SREG_URI',
'IDENTIFIER_SELECT']
import copy
import warnings
import urllib
from openid import oidutil
from openid import kvform
try:
ElementTree = oidutil.importElementTree()
except ImportError:
# No elementtree found, so give up, but don't fail to import,
# since we have fallbacks.
ElementTree = None
# This doesn't REALLY belong here, but where is better?
IDENTIFIER_SELECT = 'http://specs.openid.net/auth/2.0/identifier_select'
# URI for Simple Registration extension, the only commonly deployed
# OpenID 1.x extension, and so a special case
SREG_URI = 'http://openid.net/sreg/1.0'
# The OpenID 1.X namespace URI
OPENID1_NS = 'http://openid.net/signon/1.0'
THE_OTHER_OPENID1_NS = 'http://openid.net/signon/1.1'
OPENID1_NAMESPACES = OPENID1_NS, THE_OTHER_OPENID1_NS
# The OpenID 2.0 namespace URI
OPENID2_NS = 'http://specs.openid.net/auth/2.0'
# The namespace consisting of pairs with keys that are prefixed with
# "openid." but not in another namespace.
NULL_NAMESPACE = oidutil.Symbol('Null namespace')
# The null namespace, when it is an allowed OpenID namespace
OPENID_NS = oidutil.Symbol('OpenID namespace')
# The top-level namespace, excluding all pairs with keys that start
# with "openid."
BARE_NS = oidutil.Symbol('Bare namespace')
# Limit, in bytes, of identity provider and return_to URLs, including
# response payload. See OpenID 1.1 specification, Appendix D.
OPENID1_URL_LIMIT = 2047
# All OpenID protocol fields. Used to check namespace aliases.
OPENID_PROTOCOL_FIELDS = [
'ns', 'mode', 'error', 'return_to', 'contact', 'reference',
'signed', 'assoc_type', 'session_type', 'dh_modulus', 'dh_gen',
'dh_consumer_public', 'claimed_id', 'identity', 'realm',
'invalidate_handle', 'op_endpoint', 'response_nonce', 'sig',
'assoc_handle', 'trust_root', 'openid',
]
class UndefinedOpenIDNamespace(ValueError):
"""Raised if the generic OpenID namespace is accessed when there
is no OpenID namespace set for this message."""
class InvalidOpenIDNamespace(ValueError):
"""Raised if openid.ns is not a recognized value.
For recognized values, see L{Message.allowed_openid_namespaces}
"""
def __str__(self):
s = "Invalid OpenID Namespace"
if self.args:
s += " %r" % (self.args[0],)
return s
# Sentinel used for Message implementation to indicate that getArg
# should raise an exception instead of returning a default.
no_default = object()
# Global namespace / alias registration map. See
# registerNamespaceAlias.
registered_aliases = {}
class NamespaceAliasRegistrationError(Exception):
"""
Raised when an alias or namespace URI has already been registered.
"""
pass
def registerNamespaceAlias(namespace_uri, alias):
"""
Registers a (namespace URI, alias) mapping in a global namespace
alias map. Raises NamespaceAliasRegistrationError if either the
namespace URI or alias has already been registered with a
different value. This function is required if you want to use a
namespace with an OpenID 1 message.
"""
global registered_aliases
if registered_aliases.get(alias) == namespace_uri:
return
if namespace_uri in registered_aliases.values():
raise NamespaceAliasRegistrationError, \
'Namespace uri %r already registered' % (namespace_uri,)
if alias in registered_aliases:
raise NamespaceAliasRegistrationError, \
'Alias %r already registered' % (alias,)
registered_aliases[alias] = namespace_uri
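# For illustration (not part of the original module; the URI and alias are
# hypothetical), registering a namespace so it can be used in OpenID 1
# messages:
#
#     registerNamespaceAlias('http://example.com/ext/1.0', 'exampleext')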
class Message(object):
"""
In the implementation of this object, None represents the global
namespace as well as a namespace with no key.
@cvar namespaces: A dictionary specifying specific
namespace-URI to alias mappings that should be used when
generating namespace aliases.
@ivar ns_args: two-level dictionary of the values in this message,
grouped by namespace URI. The first level is the namespace
URI.
"""
allowed_openid_namespaces = [OPENID1_NS, THE_OTHER_OPENID1_NS, OPENID2_NS]
def __init__(self, openid_namespace=None):
"""Create an empty Message.
@raises InvalidOpenIDNamespace: if openid_namespace is not in
L{Message.allowed_openid_namespaces}
"""
self.args = {}
self.namespaces = NamespaceMap()
if openid_namespace is None:
self._openid_ns_uri = None
else:
implicit = openid_namespace in OPENID1_NAMESPACES
self.setOpenIDNamespace(openid_namespace, implicit)
def fromPostArgs(cls, args):
"""Construct a Message containing a set of POST arguments.
"""
self = cls()
# Partition into "openid." args and bare args
openid_args = {}
for key, value in args.items():
if isinstance(value, list):
raise TypeError("query dict must have one value for each key, "
"not lists of values. Query is %r" % (args,))
try:
prefix, rest = key.split('.', 1)
except ValueError:
prefix = None
if prefix != 'openid':
self.args[(BARE_NS, key)] = value
else:
openid_args[rest] = value
self._fromOpenIDArgs(openid_args)
return self
fromPostArgs = classmethod(fromPostArgs)
def fromOpenIDArgs(cls, openid_args):
"""Construct a Message from a parsed KVForm message.
@raises InvalidOpenIDNamespace: if openid.ns is not in
L{Message.allowed_openid_namespaces}
"""
self = cls()
self._fromOpenIDArgs(openid_args)
return self
fromOpenIDArgs = classmethod(fromOpenIDArgs)
def _fromOpenIDArgs(self, openid_args):
ns_args = []
# Resolve namespaces
for rest, value in openid_args.iteritems():
try:
ns_alias, ns_key = rest.split('.', 1)
except ValueError:
ns_alias = NULL_NAMESPACE
ns_key = rest
if ns_alias == 'ns':
self.namespaces.addAlias(value, ns_key)
elif ns_alias == NULL_NAMESPACE and ns_key == 'ns':
# null namespace
self.setOpenIDNamespace(value, False)
else:
ns_args.append((ns_alias, ns_key, value))
# Implicitly set an OpenID namespace definition (OpenID 1)
if not self.getOpenIDNamespace():
self.setOpenIDNamespace(OPENID1_NS, True)
# Actually put the pairs into the appropriate namespaces
for (ns_alias, ns_key, value) in ns_args:
ns_uri = self.namespaces.getNamespaceURI(ns_alias)
if ns_uri is None:
# we found a namespaced arg without a namespace URI defined
ns_uri = self._getDefaultNamespace(ns_alias)
if ns_uri is None:
ns_uri = self.getOpenIDNamespace()
ns_key = '%s.%s' % (ns_alias, ns_key)
else:
self.namespaces.addAlias(ns_uri, ns_alias, implicit=True)
self.setArg(ns_uri, ns_key, value)
def _getDefaultNamespace(self, mystery_alias):
"""OpenID 1 compatibility: look for a default namespace URI to
use for this alias."""
global registered_aliases
# Only try to map an alias to a default if it's an
# OpenID 1.x message.
if self.isOpenID1():
return registered_aliases.get(mystery_alias)
else:
return None
def setOpenIDNamespace(self, openid_ns_uri, implicit):
"""Set the OpenID namespace URI used in this message.
@raises InvalidOpenIDNamespace: if the namespace is not in
L{Message.allowed_openid_namespaces}
"""
if openid_ns_uri not in self.allowed_openid_namespaces:
raise InvalidOpenIDNamespace(openid_ns_uri)
self.namespaces.addAlias(openid_ns_uri, NULL_NAMESPACE, implicit)
self._openid_ns_uri = openid_ns_uri
def getOpenIDNamespace(self):
return self._openid_ns_uri
def isOpenID1(self):
return self.getOpenIDNamespace() in OPENID1_NAMESPACES
def isOpenID2(self):
return self.getOpenIDNamespace() == OPENID2_NS
def fromKVForm(cls, kvform_string):
"""Create a Message from a KVForm string"""
return cls.fromOpenIDArgs(kvform.kvToDict(kvform_string))
fromKVForm = classmethod(fromKVForm)
def copy(self):
return copy.deepcopy(self)
def toPostArgs(self):
"""Return all arguments with openid. in front of namespaced arguments.
"""
args = {}
# Add namespace definitions to the output
for ns_uri, alias in self.namespaces.iteritems():
if self.namespaces.isImplicit(ns_uri):
continue
if alias == NULL_NAMESPACE:
ns_key = 'openid.ns'
else:
ns_key = 'openid.ns.' + alias
args[ns_key] = ns_uri
for (ns_uri, ns_key), value in self.args.iteritems():
key = self.getKey(ns_uri, ns_key)
args[key] = value.encode('UTF-8')
return args
def toArgs(self):
"""Return all namespaced arguments, failing if any
non-namespaced arguments exist."""
# FIXME - undocumented exception
post_args = self.toPostArgs()
kvargs = {}
for k, v in post_args.iteritems():
if not k.startswith('openid.'):
raise ValueError(
'This message can only be encoded as a POST, because it '
'contains arguments that are not prefixed with "openid."')
else:
kvargs[k[7:]] = v
return kvargs
def toFormMarkup(self, action_url, form_tag_attrs=None,
submit_text="Continue"):
"""Generate HTML form markup that contains the values in this
message, to be HTTP POSTed as x-www-form-urlencoded UTF-8.
@param action_url: The URL to which the form will be POSTed
@type action_url: str
@param form_tag_attrs: Dictionary of attributes to be added to
the form tag. 'accept-charset' and 'enctype' have defaults
that can be overridden. If a value is supplied for
'action' or 'method', it will be replaced.
@type form_tag_attrs: {unicode: unicode}
@param submit_text: The text that will appear on the submit
button for this form.
@type submit_text: unicode
@returns: A string containing (X)HTML markup for a form that
encodes the values in this Message object.
@rtype: str or unicode
"""
if ElementTree is None:
raise RuntimeError('This function requires ElementTree.')
assert action_url is not None
form = ElementTree.Element('form')
if form_tag_attrs:
for name, attr in form_tag_attrs.iteritems():
form.attrib[name] = attr
form.attrib['action'] = action_url
form.attrib['method'] = 'post'
form.attrib['accept-charset'] = 'UTF-8'
form.attrib['enctype'] = 'application/x-www-form-urlencoded'
for name, value in self.toPostArgs().iteritems():
attrs = {'type': 'hidden',
'name': name,
'value': value}
form.append(ElementTree.Element('input', attrs))
submit = ElementTree.Element(
'input', {'type':'submit', 'value':submit_text})
form.append(submit)
return ElementTree.tostring(form)
def toURL(self, base_url):
"""Generate a GET URL with the parameters in this message
attached as query parameters."""
return oidutil.appendArgs(base_url, self.toPostArgs())
def toKVForm(self):
"""Generate a KVForm string that contains the parameters in
this message. This will fail if the message contains arguments
outside of the 'openid.' prefix.
"""
return kvform.dictToKV(self.toArgs())
def toURLEncoded(self):
"""Generate an x-www-urlencoded string"""
args = self.toPostArgs().items()
args.sort()
return urllib.urlencode(args)
def _fixNS(self, namespace):
"""Convert an input value into the internally used values of
this object
@param namespace: The string or constant to convert
@type namespace: str or unicode or BARE_NS or OPENID_NS
"""
if namespace == OPENID_NS:
if self._openid_ns_uri is None:
raise UndefinedOpenIDNamespace('OpenID namespace not set')
else:
namespace = self._openid_ns_uri
if namespace != BARE_NS and type(namespace) not in [str, unicode]:
raise TypeError(
"Namespace must be BARE_NS, OPENID_NS or a string. got %r"
% (namespace,))
if namespace != BARE_NS and ':' not in namespace:
fmt = 'OpenID 2.0 namespace identifiers SHOULD be URIs. Got %r'
warnings.warn(fmt % (namespace,), DeprecationWarning)
if namespace == 'sreg':
fmt = 'Using %r instead of "sreg" as namespace'
warnings.warn(fmt % (SREG_URI,), DeprecationWarning,)
return SREG_URI
return namespace
def hasKey(self, namespace, ns_key):
namespace = self._fixNS(namespace)
return (namespace, ns_key) in self.args
def getKey(self, namespace, ns_key):
"""Get the key for a particular namespaced argument"""
namespace = self._fixNS(namespace)
if namespace == BARE_NS:
return ns_key
ns_alias = self.namespaces.getAlias(namespace)
# No alias is defined, so no key can exist
if ns_alias is None:
return None
if ns_alias == NULL_NAMESPACE:
tail = ns_key
else:
tail = '%s.%s' % (ns_alias, ns_key)
return 'openid.' + tail
def getArg(self, namespace, key, default=None):
"""Get a value for a namespaced key.
@param namespace: The namespace in the message for this key
@type namespace: str
@param key: The key to get within this namespace
@type key: str
@param default: The value to use if this key is absent from
this message. Using the special value
openid.message.no_default will result in this method
raising a KeyError instead of returning the default.
@rtype: str or the type of default
@raises KeyError: if default is no_default
@raises UndefinedOpenIDNamespace: if the message has not yet
had an OpenID namespace set
"""
namespace = self._fixNS(namespace)
args_key = (namespace, key)
try:
return self.args[args_key]
except KeyError:
if default is no_default:
raise KeyError((namespace, key))
else:
return default
def getArgs(self, namespace):
"""Get the arguments that are defined for this namespace URI
@returns: mapping from namespaced keys to values
@returntype: dict
"""
namespace = self._fixNS(namespace)
return dict([
(ns_key, value)
for ((pair_ns, ns_key), value)
in self.args.iteritems()
if pair_ns == namespace
])
def updateArgs(self, namespace, updates):
"""Set multiple key/value pairs in one call
@param updates: The values to set
@type updates: {unicode:unicode}
"""
namespace = self._fixNS(namespace)
for k, v in updates.iteritems():
self.setArg(namespace, k, v)
def setArg(self, namespace, key, value):
"""Set a single argument in this namespace"""
assert key is not None
assert value is not None
namespace = self._fixNS(namespace)
self.args[(namespace, key)] = value
if not (namespace is BARE_NS):
self.namespaces.add(namespace)
def delArg(self, namespace, key):
namespace = self._fixNS(namespace)
del self.args[(namespace, key)]
def __repr__(self):
return "<%s.%s %r>" % (self.__class__.__module__,
self.__class__.__name__,
self.args)
def __eq__(self, other):
return self.args == other.args
def __ne__(self, other):
return not (self == other)
def getAliasedArg(self, aliased_key, default=None):
if aliased_key == 'ns':
return self.getOpenIDNamespace()
if aliased_key.startswith('ns.'):
uri = self.namespaces.getNamespaceURI(aliased_key[3:])
if uri is None:
if default == no_default:
raise KeyError
else:
return default
else:
return uri
try:
alias, key = aliased_key.split('.', 1)
except ValueError:
            # aliased_key contains no '.', so it carries no namespace alias
ns = None
else:
ns = self.namespaces.getNamespaceURI(alias)
if ns is None:
key = aliased_key
ns = self.getOpenIDNamespace()
return self.getArg(ns, key, default)
class NamespaceMap(object):
"""Maintains a bijective map between namespace uris and aliases.
"""
def __init__(self):
self.alias_to_namespace = {}
self.namespace_to_alias = {}
self.implicit_namespaces = []
def getAlias(self, namespace_uri):
return self.namespace_to_alias.get(namespace_uri)
def getNamespaceURI(self, alias):
return self.alias_to_namespace.get(alias)
def iterNamespaceURIs(self):
"""Return an iterator over the namespace URIs"""
return iter(self.namespace_to_alias)
def iterAliases(self):
"""Return an iterator over the aliases"""
return iter(self.alias_to_namespace)
def iteritems(self):
"""Iterate over the mapping
@returns: iterator of (namespace_uri, alias)
"""
return self.namespace_to_alias.iteritems()
def addAlias(self, namespace_uri, desired_alias, implicit=False):
"""Add an alias from this namespace URI to the desired alias
"""
# Check that desired_alias is not an openid protocol field as
# per the spec.
assert desired_alias not in OPENID_PROTOCOL_FIELDS, \
"%r is not an allowed namespace alias" % (desired_alias,)
# Check that desired_alias does not contain a period as per
# the spec.
if type(desired_alias) in [str, unicode]:
assert '.' not in desired_alias, \
"%r must not contain a dot" % (desired_alias,)
# Check that there is not a namespace already defined for
# the desired alias
current_namespace_uri = self.alias_to_namespace.get(desired_alias)
if (current_namespace_uri is not None
and current_namespace_uri != namespace_uri):
fmt = ('Cannot map %r to alias %r. '
'%r is already mapped to alias %r')
msg = fmt % (
namespace_uri,
desired_alias,
current_namespace_uri,
desired_alias)
raise KeyError(msg)
# Check that there is not already a (different) alias for
# this namespace URI
alias = self.namespace_to_alias.get(namespace_uri)
if alias is not None and alias != desired_alias:
fmt = ('Cannot map %r to alias %r. '
'It is already mapped to alias %r')
raise KeyError(fmt % (namespace_uri, desired_alias, alias))
assert (desired_alias == NULL_NAMESPACE or
type(desired_alias) in [str, unicode]), repr(desired_alias)
assert namespace_uri not in self.implicit_namespaces
self.alias_to_namespace[desired_alias] = namespace_uri
self.namespace_to_alias[namespace_uri] = desired_alias
if implicit:
self.implicit_namespaces.append(namespace_uri)
return desired_alias
def add(self, namespace_uri):
"""Add this namespace URI to the mapping, without caring what
alias it ends up with"""
# See if this namespace is already mapped to an alias
alias = self.namespace_to_alias.get(namespace_uri)
if alias is not None:
return alias
# Fall back to generating a numerical alias
i = 0
while True:
alias = 'ext' + str(i)
try:
self.addAlias(namespace_uri, alias)
except KeyError:
i += 1
else:
return alias
assert False, "Not reached"
def isDefined(self, namespace_uri):
return namespace_uri in self.namespace_to_alias
def __contains__(self, namespace_uri):
return self.isDefined(namespace_uri)
def isImplicit(self, namespace_uri):
return namespace_uri in self.implicit_namespaces
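# A minimal usage sketch of NamespaceMap (not part of the original module);
# the URIs below are illustrative placeholders.
if __name__ == '__main__':
    nm = NamespaceMap()
    # An explicitly requested alias is used as-is.
    assert nm.addAlias('http://example.com/ext/one', 'one') == 'one'
    # Without a requested alias, numeric 'ext<N>' aliases are generated.
    assert nm.add('http://example.com/ext/two') == 'ext0'
    # Re-adding an already-mapped URI returns its existing alias.
    assert nm.add('http://example.com/ext/one') == 'one'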
| bsd-3-clause | 7,786,178,539,857,533,000 | 33.171157 | 79 | 0.60013 | false |
edisonlz/fruit | web_project/app/content/views/city.py | 1 | 3136 | # coding=utf-8
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse
import json
from app.content.models import City,ShoppingAddress
from django.db import transaction
from app.content.models import Status
@login_required
def cms_city(request):
if request.method == 'GET':
citys = City.all()
return render(request, 'city/city.html', {
'citys': citys,
})
@login_required
def cms_city_create(request):
if request.method == 'POST':
name = request.POST.get("name")
city_code = request.POST.get("city_code")
phone = request.POST.get("phone")
manager = request.POST.get("manager")
city = City()
city.name = name
city.phone = phone
city.city_code = city_code
city.manager = manager
city.status = Status.StatusOpen
city.save()
response = {'status': 'success'}
return HttpResponse(json.dumps(response), content_type="application/json")
else:
response = {'status': 'fail'}
return HttpResponse(json.dumps(response), content_type="application/json")
@login_required
def cms_city_update(request):
if request.method == 'POST':
pk = request.POST.get("pk")
name = request.POST.get("name")
city_code = request.POST.get("city_code")
phone = request.POST.get("phone")
manager = request.POST.get("manager")
city = City.objects.get(id=pk)
city.name = name
city.phone = phone
city.city_code = city_code
city.manager = manager
city.save()
response = {'status': 'success'}
return HttpResponse(json.dumps(response), content_type="application/json")
else:
pk = request.GET.get("pk")
city = City.objects.get(id=pk)
return render(request, 'city/edit_city.html', {
'city': city,
})
@login_required
def delete(request):
if request.method == 'POST':
pk = request.POST.get("id")
city = City.objects.get(id=pk)
        count = ShoppingAddress.objects.filter(city=city).count()
        if count > 0:
            response = {'status': 'fail', "message": "The city has related records and cannot be deleted!"}
            return HttpResponse(json.dumps(response), content_type="application/json")
city.is_delete = True
city.save()
response = {'status': 'success'}
return HttpResponse(json.dumps(response), content_type="application/json")
else:
response = {'status': 'fail'}
return HttpResponse(json.dumps(response), content_type="application/json")
@login_required
def update_status(request):
pk = request.POST.get("pk")
value = int(request.POST.get("value")[0])
city = City.objects.get(id=pk)
city.state = value
city.save()
response = {'status': 'success'}
return HttpResponse(json.dumps(response), content_type="application/json")
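# Hypothetical URL wiring for the views above (illustrative only; the
# project's actual urls.py is not shown in this file):
#
#     url(r'^city/$', 'app.content.views.city.cms_city'),
#     url(r'^city/create/$', 'app.content.views.city.cms_city_create'),
#     url(r'^city/update/$', 'app.content.views.city.cms_city_update'),
#     url(r'^city/delete/$', 'app.content.views.city.delete'),
#     url(r'^city/status/$', 'app.content.views.city.update_status'),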
| apache-2.0 | -2,391,039,635,734,429,700 | 26.078261 | 86 | 0.62203 | false |
cbanta/pjproject | tests/pjsua/scripts-recvfrom/208_reg_good_retry_nonce_ok.py | 42 | 1029 | # $Id$
import inc_sip as sip
import inc_sdp as sdp
pjsua = "--null-audio --id=sip:CLIENT --registrar sip:127.0.0.1:$PORT " + \
"--realm=python --user=username --password=password " + \
"--auto-update-nat=0"
req1 = sip.RecvfromTransaction("Initial request", 401,
include=["REGISTER sip"],
exclude=["Authorization"],
resp_hdr=["WWW-Authenticate: Digest realm=\"python\", nonce=\"1\""]
)
req2 = sip.RecvfromTransaction("REGISTER first retry", 401,
include=["REGISTER sip", "Authorization", "nonce=\"1\""],
exclude=["Authorization:[\\s\\S]+Authorization:"],
resp_hdr=["WWW-Authenticate: Digest realm=\"python\", nonce=\"2\", stale=true"]
)
req3 = sip.RecvfromTransaction("REGISTER retry with new nonce", 200,
include=["REGISTER sip", "Authorization", "nonce=\"2\""],
exclude=["Authorization:[\\s\\S]+Authorization:"],
expect="registration success"
)
recvfrom_cfg = sip.RecvfromCfg("Authentication okay after retry with new nonce",
pjsua, [req1, req2, req3])
| gpl-2.0 | -4,093,929,208,641,515 | 34.482759 | 83 | 0.649174 | false |
acshan/odoo | addons/sale_layout/models/sale_layout.py | 180 | 5037 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import openerp
from openerp.osv import osv, fields
from itertools import groupby
def grouplines(self, ordered_lines, sortkey):
"""Return lines from a specified invoice or sale order grouped by category"""
grouped_lines = []
for key, valuesiter in groupby(ordered_lines, sortkey):
group = {}
group['category'] = key
group['lines'] = list(v for v in valuesiter)
if 'subtotal' in key and key.subtotal is True:
group['subtotal'] = sum(line.price_subtotal for line in group['lines'])
grouped_lines.append(group)
return grouped_lines
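# grouplines() relies on itertools.groupby, which only merges *adjacent*
# items sharing a key, so the input must already be ordered by the grouping
# key (the _order attributes on the line models below guarantee this).
# A minimal standalone illustration of that caveat (hypothetical data, not
# part of the original module):
if __name__ == '__main__':
    keys = ['a', 'b', 'a']
    print([k for k, _ in groupby(keys)])          # ['a', 'b', 'a']
    print([k for k, _ in groupby(sorted(keys))])  # ['a', 'b']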
class SaleLayoutCategory(osv.Model):
_name = 'sale_layout.category'
_order = 'sequence, id'
_columns = {
'name': fields.char('Name', required=True),
'sequence': fields.integer('Sequence', required=True),
'subtotal': fields.boolean('Add subtotal'),
'separator': fields.boolean('Add separator'),
'pagebreak': fields.boolean('Add pagebreak')
}
_defaults = {
'subtotal': True,
'separator': True,
'pagebreak': False,
'sequence': 10
}
class AccountInvoice(osv.Model):
_inherit = 'account.invoice'
def sale_layout_lines(self, cr, uid, ids, invoice_id=None, context=None):
"""
Returns invoice lines from a specified invoice ordered by
sale_layout_category sequence. Used in sale_layout module.
:Parameters:
-'invoice_id' (int): specify the concerned invoice.
"""
ordered_lines = self.browse(cr, uid, invoice_id, context=context).invoice_line
        # Group first by the layout category; lines without a category fall back to an empty key
sortkey = lambda x: x.sale_layout_cat_id if x.sale_layout_cat_id else ''
return grouplines(self, ordered_lines, sortkey)
class AccountInvoiceLine(osv.Model):
_inherit = 'account.invoice.line'
_order = 'invoice_id, categ_sequence, sequence, id'
sale_layout_cat_id = openerp.fields.Many2one('sale_layout.category', string='Section')
categ_sequence = openerp.fields.Integer(related='sale_layout_cat_id.sequence',
string='Layout Sequence', store=True)
_defaults = {
'categ_sequence': 0
}
class SaleOrder(osv.Model):
_inherit = 'sale.order'
def sale_layout_lines(self, cr, uid, ids, order_id=None, context=None):
"""
Returns order lines from a specified sale ordered by
sale_layout_category sequence. Used in sale_layout module.
:Parameters:
-'order_id' (int): specify the concerned sale order.
"""
ordered_lines = self.browse(cr, uid, order_id, context=context).order_line
sortkey = lambda x: x.sale_layout_cat_id if x.sale_layout_cat_id else ''
return grouplines(self, ordered_lines, sortkey)
class SaleOrderLine(osv.Model):
_inherit = 'sale.order.line'
_columns = {
'sale_layout_cat_id': fields.many2one('sale_layout.category',
string='Section'),
'categ_sequence': fields.related('sale_layout_cat_id',
'sequence', type='integer',
string='Layout Sequence', store=True)
# Store is intentionally set in order to keep the "historic" order.
}
_defaults = {
'categ_sequence': 0
}
_order = 'order_id, categ_sequence, sale_layout_cat_id, sequence, id'
def _prepare_order_line_invoice_line(self, cr, uid, line, account_id=False, context=None):
"""Save the layout when converting to an invoice line."""
invoice_vals = super(SaleOrderLine, self)._prepare_order_line_invoice_line(cr, uid, line, account_id=account_id, context=context)
if line.sale_layout_cat_id:
invoice_vals['sale_layout_cat_id'] = line.sale_layout_cat_id.id
if line.categ_sequence:
invoice_vals['categ_sequence'] = line.categ_sequence
return invoice_vals
| agpl-3.0 | 3,736,219,365,102,525,000 | 36.87218 | 137 | 0.613857 | false |
blossomica/airmozilla | airmozilla/main/migrations/0002_auto_20150903_1343.py | 9 | 9951 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('auth', '0001_initial'),
('uploads', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('main', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='suggestedevent',
name='upload',
field=models.ForeignKey(related_name='upload', to='uploads.Upload', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='suggestedevent',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
migrations.AddField(
model_name='recruitmentmessage',
name='modified_user',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, null=True),
preserve_default=True,
),
migrations.AddField(
model_name='picture',
name='event',
field=models.ForeignKey(related_name='picture_event', to='main.Event', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='picture',
name='modified_user',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, null=True),
preserve_default=True,
),
migrations.AddField(
model_name='locationdefaultenvironment',
name='location',
field=models.ForeignKey(to='main.Location'),
preserve_default=True,
),
migrations.AddField(
model_name='locationdefaultenvironment',
name='template',
field=models.ForeignKey(to='main.Template'),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='locationdefaultenvironment',
unique_together=set([('location', 'privacy', 'template')]),
),
migrations.AddField(
model_name='location',
name='regions',
field=models.ManyToManyField(to='main.Region', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='eventtweet',
name='creator',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, blank=True, to=settings.AUTH_USER_MODEL, null=True),
preserve_default=True,
),
migrations.AddField(
model_name='eventtweet',
name='event',
field=models.ForeignKey(to='main.Event'),
preserve_default=True,
),
migrations.AddField(
model_name='eventrevision',
name='channels',
field=models.ManyToManyField(to='main.Channel'),
preserve_default=True,
),
migrations.AddField(
model_name='eventrevision',
name='event',
field=models.ForeignKey(to='main.Event'),
preserve_default=True,
),
migrations.AddField(
model_name='eventrevision',
name='picture',
field=models.ForeignKey(blank=True, to='main.Picture', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='eventrevision',
name='recruitmentmessage',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, to='main.RecruitmentMessage', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='eventrevision',
name='tags',
field=models.ManyToManyField(to='main.Tag', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='eventrevision',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True),
preserve_default=True,
),
migrations.AddField(
model_name='eventoldslug',
name='event',
field=models.ForeignKey(to='main.Event'),
preserve_default=True,
),
migrations.AddField(
model_name='eventlivehits',
name='event',
field=models.ForeignKey(to='main.Event', unique=True),
preserve_default=True,
),
migrations.AddField(
model_name='eventhitstats',
name='event',
field=models.ForeignKey(to='main.Event', unique=True),
preserve_default=True,
),
migrations.AddField(
model_name='eventemail',
name='event',
field=models.ForeignKey(to='main.Event'),
preserve_default=True,
),
migrations.AddField(
model_name='eventemail',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
migrations.AddField(
model_name='eventassignment',
name='event',
field=models.ForeignKey(to='main.Event', unique=True),
preserve_default=True,
),
migrations.AddField(
model_name='eventassignment',
name='locations',
field=models.ManyToManyField(to='main.Location'),
preserve_default=True,
),
migrations.AddField(
model_name='eventassignment',
name='users',
field=models.ManyToManyField(to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
migrations.AddField(
model_name='event',
name='channels',
field=models.ManyToManyField(to='main.Channel'),
preserve_default=True,
),
migrations.AddField(
model_name='event',
name='creator',
field=models.ForeignKey(related_name='creator', on_delete=django.db.models.deletion.SET_NULL, blank=True, to=settings.AUTH_USER_MODEL, null=True),
preserve_default=True,
),
migrations.AddField(
model_name='event',
name='location',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, blank=True, to='main.Location', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='event',
name='modified_user',
field=models.ForeignKey(related_name='modified_user', on_delete=django.db.models.deletion.SET_NULL, blank=True, to=settings.AUTH_USER_MODEL, null=True),
preserve_default=True,
),
migrations.AddField(
model_name='event',
name='picture',
field=models.ForeignKey(related_name='event_picture', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='main.Picture', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='event',
name='recruitmentmessage',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, to='main.RecruitmentMessage', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='event',
name='tags',
field=models.ManyToManyField(to='main.Tag', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='event',
name='template',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, blank=True, to='main.Template', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='event',
name='topics',
field=models.ManyToManyField(to='main.Topic'),
preserve_default=True,
),
migrations.AddField(
model_name='event',
name='upload',
field=models.ForeignKey(related_name='event_upload', on_delete=django.db.models.deletion.SET_NULL, to='uploads.Upload', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='curatedgroup',
name='event',
field=models.ForeignKey(to='main.Event'),
preserve_default=True,
),
migrations.AddField(
model_name='chapter',
name='event',
field=models.ForeignKey(to='main.Event'),
preserve_default=True,
),
migrations.AddField(
model_name='chapter',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
migrations.AddField(
model_name='channel',
name='parent',
field=models.ForeignKey(to='main.Channel', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='approval',
name='event',
field=models.ForeignKey(to='main.Event'),
preserve_default=True,
),
migrations.AddField(
model_name='approval',
name='group',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, blank=True, to='auth.Group', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='approval',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, blank=True, to=settings.AUTH_USER_MODEL, null=True),
preserve_default=True,
),
]
| bsd-3-clause | 2,147,964,182,195,912,000 | 35.992565 | 164 | 0.559843 | false |
veltzer/demos-linux | config/project.py | 1 | 4910 | import datetime
import config.general
project_github_username='veltzer'
project_name='demos-linux'
project_website='https://{project_github_username}.github.io/{project_name}'.format(**locals())
project_website_source='https://github.com/{project_github_username}/{project_name}'.format(**locals())
project_website_git='git://github.com/{project_github_username}/{project_name}.git'.format(**locals())
project_long_description='{project_name} is a project to demo and explore the Linux user space C/C++ API'.format(**locals())
project_year_started='2011'
project_description='''This project is a source code repository for instructors or expert programmers
who want to explore the Linux C/C++ API.
It has about 1000 examples (as of 1/2020) that I have found useful in explaining the Linux API.
The idea is to provide a good coverage of all major features and to resolve disputes
about exactly how a certain API works.
You can find the project at {project_website}
Topics covered by the examples
------------------------------
* Multi-threading
* Multi-processing
* Locking
* Calling system calls without C
* Performance
* Coding in assembly
* Various compiler directives
* Many more...
Platforms supported
-------------------
Only ia64 is supported. I used to support i386 but I don't have a 32 bit
machine anymore.
Other platforms will be supported if someone is willing to do the work and submit
the patches.
Contributing
------------
This project needs help. fork, commit and request me to pull.
Just open a github account, modify and add examples, commit and ask me to pull...
A lot of the code is documented but some of it is not. More documentation would be welcome.
I would give attribution to whoever contributes.
License
-------
Code is licensed GPL3 and I hold the copyright unless explicitly stolen as attributed in the source code.
I encourage people to use this source code as aid for instructing courses.
Please give me some credit if you do wind up using this package and consider dropping
me a note about the fact that you did use the package so I could feel good...
Similar projects
----------------
Michael Kerrisk, the maintainer and author of many of the Linux manual pages has a similar project he calls
TLPI (The Linux Programming Interface) of programs he used in his book of the same title. You can find
it here http://man7.org/tlpi/code/.
Using it
--------
* you need python on your machine:
try:
`python --version`
if python is missing then:
for deb based distributions (debian, ubuntu, ...) do:
`sudo apt-get install python`
for rpm based distributions (fedora, redhat, centos,...) do:
`sudo yum install python`
* clone the examples: `git clone [email protected]:veltzer/demos-linux.git`
* cd into it: `cd demos-linux`
* install the missing packages and headers needed to compile and run this project `./scripts/ubuntu_install.py`
Mind you this only works for 15.10 and will install a ton of stuff.
If you don't want this ton of installations and only want to checkout specific examples
compile the individual examples as described below.
if you are on a different Linux distribution try to get as much of these for your platform
as you can. If you really into contributing I would love a `redhat_install.py` or some such...
then use `make`
* compiling a specific example
`make src/examples/performance/cache_misser.elf`
the name of the elf binary is the same as the example source code with .elf instead of
.[c|cc].
You must be at the root of the project to issue the `make` command.
* the most important part: tweak the examples, try to prove me (my comments) wrong, have fun!
'''.format(**locals())
project_keywords=[
'linux',
'API',
'C',
'C++',
'kernel',
'userspace',
'examples',
'samples',
'demos',
]
# deb section
deb_package=False
if str(config.general.general_current_year) == project_year_started:
project_copyright_years = config.general.general_current_year
else:
project_copyright_years = '{0}-{1}'.format(project_year_started, config.general.general_current_year)
# project_data_files.append(templar.utils.hlp_files_under('/usr/bin', 'src/*'))
project_google_analytics_tracking_id='UA-80940105-1'
project_google_analytics_snipplet = '''<script type="text/javascript">
(function(i,s,o,g,r,a,m){{i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){{
(i[r].q=i[r].q||[]).push(arguments)}},i[r].l=1*new Date();a=s.createElement(o),
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
}})(window,document,'script','https://www.google-analytics.com/analytics.js','ga');
ga('create', '{0}', 'auto');
ga('send', 'pageview');
</script>'''.format(project_google_analytics_tracking_id)
| gpl-3.0 | -7,562,981,995,973,148,000 | 41.327586 | 124 | 0.718126 | false |
mrquim/repository.mrquim | repo/script.module.covenant/lib/resources/lib/sources/en/mzmovies.py | 7 | 6851 | # -*- coding: utf-8 -*-
'''
Covenant Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import urllib, urlparse, re
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import source_utils
from resources.lib.modules import dom_parser
from resources.lib.modules import directstream
from resources.lib.modules import cfscrape
class source:
def __init__(self):
self.priority = 0
self.language = ['en']
self.domains = ['mehlizmovies.com']
self.base_link = 'https://www.mehlizmovies.com/'
self.search_link = '?s=%s'
self.search_link2 = '/search/%s/feed/rss2/'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = self.__search([localtitle] + source_utils.aliases_to_array(aliases), year)
if not url and title != localtitle: url = self.__search([title] + source_utils.aliases_to_array(
aliases),year)
return url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
url = self.__search([localtvshowtitle] + source_utils.aliases_to_array(aliases), year)
if not url and tvshowtitle != localtvshowtitle: url = self.__search(
[tvshowtitle] + source_utils.aliases_to_array(aliases), year)
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if not url:
return
url = urlparse.urljoin(self.base_link, url)
scraper = cfscrape.create_scraper()
data = scraper.get(url).content
data = client.parseDOM(data, 'ul', attrs={'class': 'episodios'})
links = client.parseDOM(data, 'div', attrs={'class': 'episodiotitle'})
sp = zip(client.parseDOM(data, 'div', attrs={'class': 'numerando'}), client.parseDOM(links, 'a', ret='href'))
Sea_Epi = '%dx%d'% (int(season), int(episode))
for i in sp:
sep = i[0]
if sep == Sea_Epi:
url = source_utils.strip_domain(i[1])
return url
except:
return
def __search(self, titles, year):
try:
query = self.search_link % (urllib.quote_plus(cleantitle.getsearch(titles[0])))
query = urlparse.urljoin(self.base_link, query)
t = cleantitle.get(titles[0])
scraper = cfscrape.create_scraper()
data = scraper.get(query).content
#data = client.request(query, referer=self.base_link)
data = client.parseDOM(data, 'div', attrs={'class': 'result-item'})
r = dom_parser.parse_dom(data, 'div', attrs={'class': 'title'})
r = zip(dom_parser.parse_dom(r, 'a'), dom_parser.parse_dom(data, 'span', attrs={'class': 'year'}))
url = []
for i in range(len(r)):
title = cleantitle.get(r[i][0][1])
title = re.sub('(\d+p|4k|3d|hd|season\d+)','',title)
y = r[i][1][1]
link = r[i][0][0]['href']
if 'season' in title: continue
if t == title and y == year:
                    if 'season' in link:
                        url.append(source_utils.strip_domain(link))
                        return url[0]
                    else:
                        url.append(source_utils.strip_domain(link))
            return url
except:
return
def sources(self, url, hostDict, hostprDict):
sources = []
try:
if not url:
return sources
links = self.links_found(url)
hostdict = hostDict + hostprDict
for url in links:
try:
valid, host = source_utils.is_host_valid(url, hostdict)
if 'mehliz' in url:
host = 'MZ'; direct = True; urls = (self.mz_server(url))
elif 'ok.ru' in url:
host = 'vk'; direct = True; urls = (directstream.odnoklassniki(url))
else:
direct = False; urls = [{'quality': 'SD', 'url': url}]
for x in urls:
sources.append({'source': host, 'quality': x['quality'], 'language': 'en',
'url': x['url'], 'direct': direct, 'debridonly': False})
except:
pass
return sources
except:
return sources
def links_found(self,urls):
try:
scraper = cfscrape.create_scraper()
links = []
if type(urls) is list:
for item in urls:
query = urlparse.urljoin(self.base_link, item)
r = scraper.get(query).content
data = client.parseDOM(r, 'div', attrs={'id': 'playex'})
data = client.parseDOM(data, 'div', attrs={'id': 'option-\d+'})
links += client.parseDOM(data, 'iframe', ret='src')
else:
query = urlparse.urljoin(self.base_link, urls)
r = scraper.get(query).content
data = client.parseDOM(r, 'div', attrs={'id': 'playex'})
data = client.parseDOM(data, 'div', attrs={'id': 'option-\d+'})
links += client.parseDOM(data, 'iframe', ret='src')
return links
except:
return urls
def mz_server(self,url):
try:
scraper = cfscrape.create_scraper()
urls = []
data = scraper.get(url).content
data = re.findall('''file:\s*["']([^"']+)",label:\s*"(\d{3,}p)"''', data, re.DOTALL)
for url, label in data:
label = source_utils.label_to_quality(label)
if label == 'SD': continue
urls.append({'url': url, 'quality': label})
return urls
except:
return url
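    # mz_server's regex expects player markup shaped roughly like this
    # hypothetical sample: file: "http://host/video.mp4",label:"720p";
    # the captured label is then normalized via source_utils.label_to_quality.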
def resolve(self, url):
return url
| gpl-2.0 | -5,184,790,510,927,738,000 | 36.233696 | 121 | 0.525763 | false |
rven/odoo | addons/account_edi_extended/models/account_move.py | 2 | 4061 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, models, fields, _
class AccountMove(models.Model):
_inherit = 'account.move'
edi_show_abandon_cancel_button = fields.Boolean(
compute='_compute_edi_show_abandon_cancel_button')
edi_error_message = fields.Html(compute='_compute_edi_error_message')
edi_blocking_level = fields.Selection(selection=[('info', 'Info'), ('warning', 'Warning'), ('error', 'Error')], compute='_compute_edi_error_message')
@api.depends(
'edi_document_ids',
'edi_document_ids.state',
'edi_document_ids.blocking_level',
'edi_document_ids.edi_format_id',
'edi_document_ids.edi_format_id.name')
def _compute_edi_web_services_to_process(self):
# OVERRIDE to take blocking_level into account
for move in self:
to_process = move.edi_document_ids.filtered(lambda d: d.state in ['to_send', 'to_cancel'] and d.blocking_level != 'error')
format_web_services = to_process.edi_format_id.filtered(lambda f: f._needs_web_services())
move.edi_web_services_to_process = ', '.join(f.name for f in format_web_services)
@api.depends(
'state',
'edi_document_ids.state',
'edi_document_ids.attachment_id')
def _compute_edi_show_abandon_cancel_button(self):
for move in self:
move.edi_show_abandon_cancel_button = any(doc.edi_format_id._needs_web_services()
and doc.state == 'to_cancel'
and move.is_invoice(include_receipts=True)
and doc.edi_format_id._is_required_for_invoice(move)
for doc in move.edi_document_ids)
@api.depends('edi_error_count', 'edi_document_ids.error', 'edi_document_ids.blocking_level')
def _compute_edi_error_message(self):
for move in self:
if move.edi_error_count == 0:
move.edi_error_message = None
move.edi_blocking_level = None
elif move.edi_error_count == 1:
error_doc = move.edi_document_ids.filtered(lambda d: d.error)
move.edi_error_message = error_doc.error
move.edi_blocking_level = error_doc.blocking_level
else:
error_levels = set([doc.blocking_level for doc in move.edi_document_ids])
if 'error' in error_levels:
move.edi_error_message = str(move.edi_error_count) + _(" Electronic invoicing error(s)")
move.edi_blocking_level = 'error'
elif 'warning' in error_levels:
move.edi_error_message = str(move.edi_error_count) + _(" Electronic invoicing warning(s)")
move.edi_blocking_level = 'warning'
else:
move.edi_error_message = str(move.edi_error_count) + _(" Electronic invoicing info(s)")
move.edi_blocking_level = 'info'
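    # Illustrative aggregation (not part of the original code): with two
    # documents at blocking levels 'info' and 'error', edi_error_count == 2
    # yields "2 Electronic invoicing error(s)", since 'error' takes
    # precedence over 'warning', which takes precedence over 'info'.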
def action_retry_edi_documents_error(self):
self.edi_document_ids.write({'error': False, 'blocking_level': False})
self.action_process_edi_web_services()
def button_abandon_cancel_posted_posted_moves(self):
'''Cancel the request for cancellation of the EDI.
'''
documents = self.env['account.edi.document']
for move in self:
is_move_marked = False
for doc in move.edi_document_ids:
if doc.state == 'to_cancel' \
and move.is_invoice(include_receipts=True) \
and doc.edi_format_id._is_required_for_invoice(move):
documents |= doc
is_move_marked = True
if is_move_marked:
move.message_post(body=_("A request for cancellation of the EDI has been called off."))
documents.write({'state': 'sent'})
| agpl-3.0 | 4,525,222,045,283,930,000 | 49.135802 | 153 | 0.569072 | false |
chemelnucfin/tensorflow | tensorflow/python/autograph/core/converter.py | 5 | 13637 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converter construction support.
This module contains a base class for all converters, as well as supporting
structures. These structures are referred to as contexts.
The class hierarchy is as follows:
<your converter>
[extends] converter.Base
[extends] transformer.Base
[extends] gast.nodeTransformer
[uses] transfomer.SourceInfo
[uses] converter.EntityContext
[uses] converter.ProgramContext
[uses] transfomer.SourceInfo
converter.Base is a specialization of transformer.Base for AutoGraph. It's a
very lightweight subclass that adds a `ctx` attribute holding the corresponding
EntityContext object (see below). Note that converters are not reusable, and
`visit` will raise an error if called more than once.
converter.EntityContext contains mutable state associated with an entity that
the converter processes.
converter.ProgramContext contains mutable state across related entities. For
example, when converting several functions that call one another, the
ProgramContext should be shared across these entities.
Below is the overall flow at conversion:
program_ctx = ProgramContext(<entities to convert>, <global settings>, ...)
while <program_ctx has more entities to convert>:
entity, source_info = <get next entity from program_ctx>
entity_ctx = EntityContext(program_ctx, source_info)
for <each ConverterClass>:
converter = ConverterClass(entity_ctx)
# May update entity_ctx and program_ctx
entity = converter.visit(entity)
<add entity's dependencies to program_ctx>
Note that pyct contains a small number of transformers used for static analysis.
These implement transformer.Base, rather than converter.Base, to avoid a
dependency on AutoGraph.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import enum
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import ast_util
from tensorflow.python.autograph.pyct import cfg
from tensorflow.python.autograph.pyct import compiler
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import templates
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.pyct.static_analysis import activity
from tensorflow.python.autograph.pyct.static_analysis import liveness
from tensorflow.python.autograph.pyct.static_analysis import reaching_definitions
from tensorflow.python.util.tf_export import tf_export
# TODO(mdan): These contexts can be refactored into first class objects.
# For example, we could define Program and Entity abstractions that hold on
# to the actual entity and have conversion methods.
# TODO(mdan): Add a test specific to this converter.
@tf_export('autograph.experimental.Feature')
class Feature(enum.Enum):
"""This enumeration represents optional conversion options.
These conversion options are experimental. They are subject to change without
notice and offer no guarantees.
_Example Usage_
```python
  optionals = tf.autograph.experimental.Feature.EQUALITY_OPERATORS
@tf.function(experimental_autograph_options=optionals)
def f(i):
if i == 0: # EQUALITY_OPERATORS allows the use of == here.
tf.print('i is zero')
```
Attributes:
ALL: Enable all features.
AUTO_CONTROL_DEPS: Insert of control dependencies in the generated code.
ASSERT_STATEMENTS: Convert Tensor-dependent assert statements to tf.Assert.
BUILTIN_FUNCTIONS: Convert builtin functions applied to Tensors to
their TF counterparts.
EQUALITY_OPERATORS: Whether to convert the comparison operators, like
equality. This is soon to be deprecated as support is being added to the
Tensor class.
LISTS: Convert list idioms, like initializers, slices, append, etc.
NAME_SCOPES: Insert name scopes that name ops according to context, like the
function they were defined in.
"""
ALL = 'ALL'
AUTO_CONTROL_DEPS = 'AUTO_CONTROL_DEPS'
ASSERT_STATEMENTS = 'ASSERT_STATEMENTS'
BUILTIN_FUNCTIONS = 'BUILTIN_FUNCTIONS'
EQUALITY_OPERATORS = 'EQUALITY_OPERATORS'
LISTS = 'LISTS'
NAME_SCOPES = 'NAME_SCOPES'
@classmethod
def all(cls):
"""Returns a tuple that enables all options."""
return tuple(cls.__members__.values())
@classmethod
def all_but(cls, exclude):
"""Returns a tuple that enables all but the excluded options."""
if not isinstance(exclude, (list, tuple, set)):
exclude = (exclude,)
return tuple(set(cls.all()) - set(exclude) - {cls.ALL})
STANDARD_OPTIONS = None # Forward definition.
class ConversionOptions(object):
"""Immutable container for global conversion flags.
Attributes:
recursive: bool, whether to recursively convert any user functions or
classes that the converted function may use.
    user_requested: bool, whether the conversion was explicitly requested by
      the user, as opposed to being performed as a result of other logic. This
      value always auto-resets to False in child conversions.
optional_features: Union[Feature, Set[Feature]], controls the use of
optional features in the conversion process. See Feature for available
options.
"""
def __init__(self,
recursive=False,
user_requested=False,
internal_convert_user_code=True,
optional_features=Feature.ALL):
self.recursive = recursive
self.user_requested = user_requested
# TODO(mdan): Rename to conversion_recursion_depth?
self.internal_convert_user_code = internal_convert_user_code
if optional_features is None:
optional_features = ()
elif isinstance(optional_features, Feature):
optional_features = (optional_features,)
optional_features = frozenset(optional_features)
self.optional_features = optional_features
def as_tuple(self):
return (self.recursive, self.user_requested,
self.internal_convert_user_code, self.optional_features)
def __hash__(self):
return hash(self.as_tuple())
def __eq__(self, other):
assert isinstance(other, ConversionOptions)
return self.as_tuple() == other.as_tuple()
  def __str__(self):
    return 'ConversionOptions[{}]'.format(self.as_tuple())
def uses(self, feature):
return (Feature.ALL in self.optional_features or
feature in self.optional_features)
def call_options(self):
"""Returns the corresponding options to be used for recursive conversion."""
return ConversionOptions(
recursive=self.recursive,
user_requested=False,
internal_convert_user_code=self.recursive,
optional_features=self.optional_features)
def to_ast(self):
"""Returns a representation of this object as an AST node.
The AST node encodes a constructor that would create an object with the
same contents.
Returns:
ast.Node
"""
if self == STANDARD_OPTIONS:
return parser.parse_expression('ag__.STD')
template = """
ag__.ConversionOptions(
recursive=recursive_val,
user_requested=user_requested_val,
optional_features=optional_features_val,
internal_convert_user_code=internal_convert_user_code_val)
"""
def list_of_features(values):
return parser.parse_expression('({})'.format(', '.join(
'ag__.{}'.format(str(v)) for v in values)))
expr_ast = templates.replace(
template,
recursive_val=parser.parse_expression(str(self.recursive)),
user_requested_val=parser.parse_expression(str(self.user_requested)),
internal_convert_user_code_val=parser.parse_expression(
str(self.internal_convert_user_code)),
optional_features_val=list_of_features(self.optional_features))
return expr_ast[0].value
STANDARD_OPTIONS = ConversionOptions(
recursive=True,
user_requested=False,
internal_convert_user_code=True,
optional_features=None)
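# A minimal usage sketch (not part of the original module) showing how
# optional features interact with ConversionOptions.uses(); Feature.ALL
# implies every individual feature.
if __name__ == '__main__':
  opts = ConversionOptions(optional_features=Feature.EQUALITY_OPERATORS)
  assert opts.uses(Feature.EQUALITY_OPERATORS)
  assert not opts.uses(Feature.LISTS)
  assert ConversionOptions().uses(Feature.LISTS)  # default is Feature.ALL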
class ProgramContext(
collections.namedtuple('ProgramContext', ('options', 'autograph_module'))):
"""ProgramContext keeps track of converting function hierarchies.
This object is mutable, and is updated during conversion. Not thread safe.
Attributes:
options: ConversionOptions
autograph_module: Module, a reference to the autograph module. This needs to
be specified by the caller to avoid circular dependencies.
"""
pass
class EntityContext(transformer.Context):
"""Tracks the conversion of a single entity.
This object is mutable, and is updated during conversion. Not thread safe.
Attributes:
namer: Namer
info: transformer.EntityInfo
program: ProgramContext,
    target_name: Text
"""
def __init__(self, namer, entity_info, program_ctx, target_name=None):
super(EntityContext, self).__init__(entity_info)
self.namer = namer
self.program = program_ctx
self.target_name = target_name
class Base(transformer.Base):
"""All converters should inherit from this class.
Attributes:
ctx: EntityContext
"""
def __init__(self, ctx):
super(Base, self).__init__(ctx)
self._used = False
self._ast_depth = 0
def get_definition_directive(self, node, directive, arg, default):
"""Returns the unique directive argument for a symbol.
See lang/directives.py for details on directives.
Example:
# Given a directive in the code:
ag.foo_directive(bar, baz=1)
# One can write for an AST node Name(id='bar'):
      get_definition_directive(node, ag.foo_directive, 'baz', default=None)
Args:
node: ast.AST, the node representing the symbol for which the directive
argument is needed.
directive: Callable[..., Any], the directive to search.
arg: str, the directive argument to return.
default: Any
Raises:
ValueError: if conflicting annotations have been found
"""
defs = anno.getanno(node, anno.Static.ORIG_DEFINITIONS, ())
if not defs:
return default
arg_values_found = []
for def_ in defs:
if (directive in def_.directives and arg in def_.directives[directive]):
arg_values_found.append(def_.directives[directive][arg])
if not arg_values_found:
return default
if len(arg_values_found) == 1:
return arg_values_found[0]
# If multiple annotations reach the symbol, they must all match. If they do,
# return any of them.
first_value = arg_values_found[0]
for other_value in arg_values_found[1:]:
if not ast_util.matches(first_value, other_value):
qn = anno.getanno(node, anno.Basic.QN)
raise ValueError('%s has ambiguous annotations for %s(%s): %s, %s' %
(qn, directive.__name__, arg,
compiler.ast_to_source(other_value).strip(),
compiler.ast_to_source(first_value).strip()))
return first_value
def visit(self, node):
if not self._ast_depth:
if self._used:
raise ValueError('converter objects cannot be reused')
self._used = True
self._ast_depth += 1
try:
return super(Base, self).visit(node)
finally:
self._ast_depth -= 1
class AnnotatedDef(reaching_definitions.Definition):
def __init__(self):
super(AnnotatedDef, self).__init__()
self.directives = {}
class AgAnno(enum.Enum):
"""Annotation labels specific to AutoGraph. See anno.py."""
DIRECTIVES = 'User directives associated with the annotated statement.'
def __repr__(self):
return self.name
def standard_analysis(node, context, is_initial=False):
"""Performs a complete static analysis of the given code.
Args:
node: ast.AST
context: converter.EntityContext
is_initial: bool, whether this is the initial analysis done on the input
source code
Returns:
ast.AST, same as node, with the static analysis annotations added
"""
# TODO(mdan): Clear static analysis here.
# TODO(mdan): Consider not running all analyses every time.
# TODO(mdan): Don't return a node because it's modified by reference.
graphs = cfg.build(node)
node = qual_names.resolve(node)
node = activity.resolve(node, context, None)
node = reaching_definitions.resolve(node, context, graphs, AnnotatedDef)
node = liveness.resolve(node, context, graphs)
if is_initial:
anno.dup(
node,
{
anno.Static.DEFINITIONS: anno.Static.ORIG_DEFINITIONS,
},
)
return node
def apply_(node, context, converter_module):
"""Applies a converter to an AST.
Args:
node: ast.AST
context: converter.EntityContext
converter_module: converter.Base
Returns:
ast.AST, the result of applying converter to node
"""
node = standard_analysis(node, context)
node = converter_module.transform(node, context)
return node
| apache-2.0 | 4,377,951,582,014,473,000 | 32.260976 | 81 | 0.700154 | false |
thomasem/nova | nova/api/openstack/compute/plugins/v3/admin_actions.py | 33 | 3773 | # Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import exc
from nova.api.openstack import common
from nova.api.openstack.compute.schemas.v3 import reset_server_state
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova import compute
from nova.compute import vm_states
from nova import exception
ALIAS = "os-admin-actions"
# States usable in resetState action
# NOTE: It is necessary to update the schema of nova/api/openstack/compute/
# schemas/v3/reset_server_state.py, when updating this state_map.
state_map = dict(active=vm_states.ACTIVE, error=vm_states.ERROR)
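# Illustrative request body for the os-resetState action below (a sketch,
# not from the original source); "active" and "error" are the only states
# accepted via state_map:
#
#     {"os-resetState": {"state": "error"}}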
authorize = extensions.os_compute_authorizer(ALIAS)
class AdminActionsController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(AdminActionsController, self).__init__(*args, **kwargs)
self.compute_api = compute.API(skip_policy_check=True)
@wsgi.response(202)
@extensions.expected_errors((404, 409))
@wsgi.action('resetNetwork')
def _reset_network(self, req, id, body):
"""Permit admins to reset networking on a server."""
context = req.environ['nova.context']
authorize(context, action='reset_network')
try:
instance = common.get_instance(self.compute_api, context, id)
self.compute_api.reset_network(context, instance)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
@wsgi.response(202)
@extensions.expected_errors((404, 409))
@wsgi.action('injectNetworkInfo')
def _inject_network_info(self, req, id, body):
"""Permit admins to inject network info into a server."""
context = req.environ['nova.context']
authorize(context, action='inject_network_info')
try:
instance = common.get_instance(self.compute_api, context, id)
self.compute_api.inject_network_info(context, instance)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
@wsgi.response(202)
@extensions.expected_errors((400, 404))
@wsgi.action('os-resetState')
@validation.schema(reset_server_state.reset_state)
def _reset_state(self, req, id, body):
"""Permit admins to reset the state of a server."""
context = req.environ["nova.context"]
authorize(context, action='reset_state')
# Identify the desired state from the body
state = state_map[body["os-resetState"]["state"]]
instance = common.get_instance(self.compute_api, context, id)
instance.vm_state = state
instance.task_state = None
instance.save(admin_state_reset=True)
class AdminActions(extensions.V3APIExtensionBase):
"""Enable admin-only server actions
Actions include: resetNetwork, injectNetworkInfo, os-resetState
"""
name = "AdminActions"
alias = ALIAS
version = 1
def get_controller_extensions(self):
controller = AdminActionsController()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
def get_resources(self):
return []
| apache-2.0 | 543,577,671,087,976,960 | 36.356436 | 79 | 0.694938 | false |
shreks7/crazyflie-android-no-otg | CrazyFliePythonServer/crazyflie/main.py | 1 | 18432 | """
The MIT License (MIT)

Copyright (c) 2013 Shrey Malhotra

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import sys
from PyQt4 import QtGui, QtCore, uic
from PyQt4.QtCore import pyqtSignal, Qt, pyqtSlot
import ast
import urllib2
import httplib
import logging,threading,time
from cflib.crazyflie import Crazyflie
import cflib.crtp
from cfclient.utils.logconfigreader import LogConfig
from cfclient.utils.logconfigreader import LogVariable
from connectiondialogue import ConnectDialogue
html =""
ipaddress = "http://192.168.43.1"
stream_url = ':8080/stream/json'
setup_url = ':8080/cgi/setup'
ex = False
threads = None
_signalThread = None
MAX_THRUST = 65365.0
MIN_THRUST = 10000.0
_CrazyFlieThread = None
values = None
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
class UIState:
DISCONNECTED = 0
CONNECTING = 1
CONNECTED = 2
class App(QtGui.QMainWindow):
global _CrazyFlieThread,threads
connectionLostSignal = pyqtSignal(str, str)
connectionInitiatedSignal = pyqtSignal(str)
batteryUpdatedSignal = pyqtSignal(object)
connectionDoneSignal = pyqtSignal(str)
connectionFailedSignal = pyqtSignal(str, str)
disconnectedSignal = pyqtSignal(str)
linkQualitySignal = pyqtSignal(int)
_motor_data_signal = pyqtSignal(object)
_imu_data_signal = pyqtSignal(object)
def __init__(self):
global stream_url,setup_url, ipaddress
super(App, self).__init__()
stream_url = ipaddress+stream_url
setup_url = ipaddress+setup_url
uic.loadUi('mainUI.ui', self)
self.initUI()
cflib.crtp.init_drivers(enable_debug_driver=False)
self.cf = Crazyflie()
#oldValues
self.oldThrust = 0
self.maxAngleV = self.maxAngle.value()
self.maxYawRateV = self.maxYawRate.value()
self.maxThrustV = self.maxThrust.value()
self.minThrustV = self.minThrust.value()
self.slewEnableLimitV = self.slewEnableLimit.value()
self.thrustLoweringSlewRateLimitV = self.thrustLoweringSlewRateLimit.value()
#Connection Dialogue
self.connectDialogue = ConnectDialogue()
#Status Bar Update
self._statusbar_label = QtGui.QLabel("Loading device and configuration.")
self.statusBar().addWidget(self._statusbar_label)
#Connect to the URI
self.connectDialogue.requestConnectionSignal.connect(self.cf.open_link)
        # Set UI state to disconnected by default
self.setUIState(UIState.DISCONNECTED)
# Connection callbacks and signal wrappers for UI protection
self.connectionDoneSignal.connect(self.connectionDone)
self.connectionFailedSignal.connect(self.connectionFailed)
self.batteryUpdatedSignal.connect(self.updateBatteryVoltage)
self.connectionLostSignal.connect(self.connectionLost)
self.disconnectedSignal.connect(
lambda linkURI: self.setUIState(UIState.DISCONNECTED,
linkURI))
self.connectionInitiatedSignal.connect(
lambda linkURI: self.setUIState(UIState.CONNECTING,
linkURI))
self.cf.connectionFailed.add_callback(self.connectionFailedSignal.emit)
self.cf.connectSetupFinished.add_callback(self.connectionDoneSignal.emit)
self.cf.disconnected.add_callback(self.disconnectedSignal.emit)
self.cf.connectionLost.add_callback(self.connectionLostSignal.emit)
self.cf.connectionInitiated.add_callback(self.connectionInitiatedSignal.emit)
# Connect link quality feedback
self.cf.linkQuality.add_callback(self.linkQualitySignal.emit)
self.linkQualitySignal.connect(
lambda percentage: self.linkQualityBar.setValue(percentage))
QtCore.QObject.connect(self.connectFlie, QtCore.SIGNAL('clicked()'), self.onConnectButtonClicked)
# Flight Data Signal Connection
self._imu_data_signal.connect(self._imu_data_received)
self._motor_data_signal.connect(self._motor_data_received)
# menu items
self.actionSet_Server_Ip.triggered.connect(self.setIpAddress)
# --------- Initialize UI & Define Close Event -------------
def setIpAddress(self):
global ipaddress,stream_url,setup_url
text, ok = QtGui.QInputDialog.getText(self, 'Input IpAddress (X.X.X.X)',
'Enter Ip Address:')
if ok:
ipaddress = "http://"+text
stream_url = ':8080/stream/json'
setup_url = ':8080/cgi/setup'
stream_url = str(ipaddress+stream_url)
setup_url = str(ipaddress+setup_url)
def initUI(self):
self.setWindowTitle('CrazyFlie')
self.show()
self.statusBar()
QtCore.QObject.connect(self.serverConnect, QtCore.SIGNAL('clicked()'), self.onServerButtonClicked)
QtCore.QObject.connect(self.updateServer, QtCore.SIGNAL('clicked()'), self.onSendServerAttr)
    def closeEvent(self, event):
        global ex
        reply = QtGui.QMessageBox.question(self, 'Message',
            "Are you sure to quit?", QtGui.QMessageBox.Yes |
            QtGui.QMessageBox.No, QtGui.QMessageBox.No)
        if reply == QtGui.QMessageBox.Yes:
            # Signal the streaming thread to stop before closing the link.
            ex = True
            self.cf.close_link()
            event.accept()
        else:
            event.ignore()
# ---- Button State Handlers -----------------
def onSendServerAttr(self):
self.maxAngleV = self.maxAngle.value()
self.maxYawRateV = self.maxYawRate.value()
self.maxThrustV = self.maxThrust.value()
self.minThrustV = self.minThrust.value()
self.slewEnableLimitV = self.slewEnableLimit.value()
self.thrustLoweringSlewRateLimitV = self.thrustLoweringSlewRateLimit.value()
self.console.append("Sending Server: %d, %d ,%f,%f,%f" %(self.maxAngleV,self.maxYawRateV,self.maxThrustV,
self.minThrustV,self.slewEnableLimitV))
threading.Thread(target=self.updateSer).start()
def updateSer(self):
update_url = setup_url+"?maxRollPitchAngle=%d&maxYawAngle=%d&maxThrust=%f&minThrust=%f&xmode=False" %(self.maxAngleV,self.maxYawRateV,self.maxThrustV,
self.minThrustV)
try:
response = urllib2.urlopen(update_url)
except urllib2.HTTPError, e:
self.console.append(str(e))
return
except urllib2.URLError, e:
self.console.append(str(e))
return
except httplib.HTTPException, e:
self.console.append(str(e))
return
if(response.read=="OK"):
self.console.append("Server Update Status: OK")
    def onServerButtonClicked(self):
        global stream_url, ex
        # Reset the stop flag so the DownloadThread streaming loop can run.
        ex = False
        self.serverConnect.setEnabled(False)
        downloader = DownloadThread(stream_url, self.console)
        self.threads = downloader
        downloader.data_downloaded.connect(self.on_data_ready,QtCore.Qt.QueuedConnection)
        downloader.start()
        self.updateServer.setEnabled(False)
def onConnectButtonClicked(self):
if (self.uiState == UIState.CONNECTED):
self.cf.close_link()
elif (self.uiState == UIState.CONNECTING):
self.cf.close_link()
self.setUIState(UIState.DISCONNECTED)
else:
self.connectDialogue.show()
# ------- Connection Callback Handlers -------------------------
def connectionFailed(self, linkURI, error):
msg = "Failed to connect on %s: %s" % (linkURI, error)
warningCaption = "Communication failure"
QtGui.QMessageBox.critical(self, warningCaption, msg)
self.setUIState(UIState.DISCONNECTED, linkURI)
self.disconnectedFlightData(linkURI)
def connectionLost(self, linkURI, msg):
warningCaption = "Communication failure"
error = "Connection lost to %s: %s" % (linkURI, msg)
QtGui.QMessageBox.critical(self, warningCaption, error)
self.setUIState(UIState.DISCONNECTED, linkURI)
self.disconnectedFlightData(linkURI)
def connectionDone(self, linkURI):
global _signalThread
self.setUIState(UIState.CONNECTED, linkURI)
        # Run the log setup on a background thread (pass args, don't call).
        dataThread = threading.Thread(target=self.connectedFlightData, args=(linkURI,))
dataThread.start()
threading.Thread(target=self.pulse_command).start()
def on_data_ready(self,value):
global values
if(value=="error"):
self.console.setText("Error in connection")
self.serverConnect.setEnabled(True)
self.updateServer.setEnabled(True)
values = None
else:
self.targetRoll.setText("%f" % (value[0]))
self.targetPitch.setText("%f" % (value[1]))
self.targetThrust.setText("%f" % (value[2]))
self.targetYaw.setText("%f" % (value[3]))
self.readInput(value)
    def readInput(self,value):
        global values
        roll, pitch, thrust, yaw = value
        # Slew-rate limit the thrust once it drops below the enable limit so
        # the copter ramps down gradually instead of cutting the motors.
        if (self.slewEnableLimitV > thrust):
            if self.oldThrust > self.slewEnableLimitV:
                self.oldThrust = self.slewEnableLimitV
            if thrust < (self.oldThrust - (self.thrustLoweringSlewRateLimitV / 100)):
                thrust = self.oldThrust - self.thrustLoweringSlewRateLimitV / 100
            if thrust < self.minThrustV:
                thrust = 0
        self.oldThrust = thrust
        # Invert the pitch axis before forwarding the setpoint.
        pitch = -pitch
        values = roll, pitch, yaw, thrust
    def pulse_command(self):
        global values
        # Re-send the latest setpoint at ~10 Hz; the copter needs a steady
        # command stream, and the loop exits once values is cleared.
        while(True):
if(values!=None):
roll,pitch,yaw,thrust = values
#print "%f %f %f %f" %(roll, pitch, yaw, thrust)
self.cf.commander.send_setpoint(roll, pitch, yaw, thrust)
time.sleep(0.1)
else:
break
# ------- UI State Handling -------------------
def setUIState(self, newState, linkURI=""):
self.uiState = newState
if (newState == UIState.DISCONNECTED):
self.setWindowTitle("Not connected")
self.connectFlie.setText("Connect")
self.batteryBar.setValue(3000)
self.disconnectedFlightData(linkURI)
self.linkQualityBar.setValue(0)
if (newState == UIState.CONNECTED):
s = "Connected on %s" % linkURI
self.menuItemConnect.setText("Disconnect")
self.connectFlie.setText("Disconnect")
if (newState == UIState.CONNECTING):
s = "Connecting to %s ..." % linkURI
self.setWindowTitle(s)
self.connectFlie.setText("Connecting")
# ------------------ Flight Data Receiver-----------------------
def connectedFlightData(self, linkURI):
lg = LogConfig("Battery", 1000)
lg.addVariable(LogVariable("pm.vbat", "float"))
self.log = self.cf.log.create_log_packet(lg)
if (self.log != None):
self.log.data_received.add_callback(self.batteryUpdatedSignal.emit)
self.log.error.add_callback(self.loggingError)
self.log.start()
else:
print("Could not setup loggingblock!")
lg = LogConfig("Stabalizer", 100)
lg.addVariable(LogVariable("stabilizer.roll", "float"))
lg.addVariable(LogVariable("stabilizer.pitch", "float"))
lg.addVariable(LogVariable("stabilizer.yaw", "float"))
lg.addVariable(LogVariable("stabilizer.thrust", "uint16_t"))
self.log = self.cf.log.create_log_packet(lg)
if (self.log is not None):
self.log.data_received.add_callback(self._imu_data_signal.emit)
self.log.error.add_callback(self.loggingError)
self.log.start()
else:
print("Could not setup logconfiguration after "
"connection!")
lg = LogConfig("Motors", 100)
lg.addVariable(LogVariable("motor.m1", "uint32_t"))
lg.addVariable(LogVariable("motor.m2", "uint32_t"))
lg.addVariable(LogVariable("motor.m3", "uint32_t"))
lg.addVariable(LogVariable("motor.m4", "uint32_t"))
self.log = self.cf.log.create_log_packet(lg)
if (self.log is not None):
self.log.data_received.add_callback(self._motor_data_signal.emit)
self.log.error.add_callback(self.loggingError)
self.log.start()
else:
print("Could not setup logconfiguration after "
"connection!")
def loggingError(self, error):
logger.warn("logging error %s", error)
def disconnectedFlightData(self, linkURI):
self.actualM1.setValue(0)
self.actualM2.setValue(0)
self.actualM3.setValue(0)
self.actualM4.setValue(0)
self.actualRoll.setText("")
self.actualPitch.setText("")
self.actualYaw.setText("")
self.actualThrust.setText("")
def _motor_data_received(self, data):
self.actualM1.setValue(data["motor.m1"])
self.actualM2.setValue(data["motor.m2"])
self.actualM3.setValue(data["motor.m3"])
self.actualM4.setValue(data["motor.m4"])
def _imu_data_received(self, data):
self.actualRoll.setText(("%.2f" % data["stabilizer.roll"]))
self.actualPitch.setText(("%.2f" % data["stabilizer.pitch"]))
self.actualYaw.setText(("%.2f" % data["stabilizer.yaw"]))
self.actualThrust.setText("%.2f%%" %
self.thrustToPercentage(
data["stabilizer.thrust"]))
def updateBatteryVoltage(self, data):
self.batteryBar.setValue(int(data["pm.vbat"] * 1000))
def thrustToPercentage(self, thrust):
return ((thrust / MAX_THRUST) * 100.0)
def percentageToThrust(self, percentage):
return int(MAX_THRUST * (percentage / 100.0))
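    # Illustrative round trip (MAX_THRUST is defined elsewhere in this
    # module; the numbers are examples only):
    #
    #   percentageToThrust(50.0)                     -> int(0.5 * MAX_THRUST)
    #   thrustToPercentage(percentageToThrust(50.0)) -> ~50.0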
# ------------------- Android Server Thread -----------------------------------
class DownloadThread(QtCore.QThread):
data_downloaded = QtCore.pyqtSignal(object)
def __init__(self, url, console):
QtCore.QThread.__init__(self)
self.url = url
self.console = console
def run(self):
global ex
try:
response = urllib2.urlopen(self.url)
except urllib2.HTTPError, e:
self.data_downloaded.emit("error")
ex=True
except urllib2.URLError, e:
self.data_downloaded.emit("error")
ex=True
except httplib.HTTPException, e:
self.data_downloaded.emit("error")
ex=True
while(ex!=True):
try:
                html = response.read(27)
if(len(html)>0):
index = html.find("]")
html = html[:index+1]
values = ast.literal_eval(html)
consoleText= "Roll: %f | Pitch: %f | Thrust: %f | Yaw: %f" % (values[0],values[1],values[2],values[3])
# print consoleText
self.data_downloaded.emit(values)
else:
self.data_downloaded.emit("error")
break
except:
continue
pass
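# Sketch of the payload the loop above expects (the Android server is not
# part of this file, so the exact wire format is an assumption): each
# read(27) chunk should carry a Python-literal list terminated by ']',
# e.g. "[12.5, -3.0, 38000.0, 0.0]", which ast.literal_eval() turns into
# (roll, pitch, thrust, yaw).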
# ----------------------- CrazyFlie Thread ---------------------
class CrazyFlieThread(QtCore.QThread):
global values
def __init__(self,cf):
QtCore.QThread.__init__(self)
self.values = values
self.cf = cf
def run(self):
        if self.values is not None and self.values != "error":
            roll, pitch, yaw, thrust = self.values
            self.pulse_command(roll, pitch, yaw, thrust)
    def pulse_command(self, roll, pitch, yaw, thrust):
        # Stream the setpoint to the Crazyflie at roughly 10 Hz.
        while True:
            print "%f %f %f %f" % (roll, pitch, yaw, thrust)
            self.cf.commander.send_setpoint(roll, pitch, yaw, thrust)
            time.sleep(0.1)
# ----------------------------- Main ------------------------------
def main():
app = QtGui.QApplication(sys.argv)
ex = App()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| mit | -5,991,249,323,020,450,000 | 37.240664 | 158 | 0.578885 | false |
thopiekar/Cura | plugins/UM3NetworkPrinting/src/Models/Http/CloudClusterStatus.py | 1 | 1234 | # Copyright (c) 2019 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
from datetime import datetime
from typing import List, Dict, Union, Any
from ..BaseModel import BaseModel
from .ClusterPrinterStatus import ClusterPrinterStatus
from .ClusterPrintJobStatus import ClusterPrintJobStatus
# Model that represents the status of the cluster for the cloud
class CloudClusterStatus(BaseModel):
## Creates a new cluster status model object.
# \param printers: The latest status of each printer in the cluster.
# \param print_jobs: The latest status of each print job in the cluster.
# \param generated_time: The datetime when the object was generated on the server-side.
def __init__(self,
printers: List[Union[ClusterPrinterStatus, Dict[str, Any]]],
print_jobs: List[Union[ClusterPrintJobStatus, Dict[str, Any]]],
generated_time: Union[str, datetime],
**kwargs) -> None:
self.generated_time = self.parseDate(generated_time)
self.printers = self.parseModels(ClusterPrinterStatus, printers)
self.print_jobs = self.parseModels(ClusterPrintJobStatus, print_jobs)
super().__init__(**kwargs)
| lgpl-3.0 | 3,862,809,847,897,066,000 | 46.461538 | 92 | 0.705835 | false |
yoghadj/or-tools | examples/python/steel.py | 32 | 6171 | # Copyright 2010 Pierre Schaus [email protected], [email protected]
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.apputils import app
import gflags
from ortools.constraint_solver import pywrapcp
FLAGS = gflags.FLAGS
gflags.DEFINE_string('data', 'data/steel_mill/steel_mill_slab.txt',
'path to data file')
gflags.DEFINE_integer('time_limit', 20000, 'global time limit')
#----------------helper for binpacking posting----------------
def BinPacking(solver, binvars, weights, loadvars):
  '''Post the load constraint on bins.
  constraint forall j: loadvars[j] == sum_i ((binvars[i] == j) * weights[i])
  '''
pack = solver.Pack(binvars, len(binvars))
pack.AddWeightedSumEqualVarDimension(weights, loadvars)
solver.Add(pack)
solver.Add(solver.SumEquality(loadvars, sum(weights)))
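# Minimal usage sketch for BinPacking (hypothetical data, shown only to
# illustrate the constraint that gets posted):
#
#   solver = pywrapcp.Solver('demo')
#   bins = [solver.IntVar(0, 1, 'b%i' % i) for i in range(3)]
#   loads = [solver.IntVar(0, 10, 'l%i' % j) for j in range(2)]
#   BinPacking(solver, bins, [2, 3, 5], loads)
#
# which enforces loads[j] == sum of weights[i] over items placed in bin j,
# plus sum(loads) == sum(weights).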
#------------------------------data reading-------------------
def ReadData(filename):
"""Read data from <filename>."""
f = open(filename)
capacity = [int(nb) for nb in f.readline().split()]
capacity.pop(0)
capacity = [0] + capacity
max_capacity = max(capacity)
nb_colors = int(f.readline())
nb_slabs = int(f.readline())
wc = [[int(j) for j in f.readline().split()] for i in range(nb_slabs)]
weights = [x[0] for x in wc]
colors = [x[1] for x in wc]
loss = [min(filter(lambda x: x >= c, capacity)) - c
for c in range(max_capacity + 1)]
color_orders = [filter(lambda o: colors[o] == c, range(nb_slabs))
for c in range(1, nb_colors + 1)]
print 'Solving steel mill with', nb_slabs, 'slabs'
return (nb_slabs, capacity, max_capacity, weights, colors, loss, color_orders)
#------------------dedicated search for this problem-----------
class SteelDecisionBuilder(pywrapcp.PyDecisionBuilder):
'''Dedicated Decision Builder for steel mill slab.
Search for the steel mill slab problem with Dynamic Symmetry
Breaking during search is an adaptation (for binary tree) from the
paper of Pascal Van Hentenryck and Laurent Michel CPAIOR-2008.
The value heuristic comes from the paper
Solving Steel Mill Slab Problems with Constraint-Based Techniques:
CP, LNS, and CBLS,
Schaus et. al. to appear in Constraints 2010
'''
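  # In short: Next() picks the unbound order with the smallest domain (ties
  # broken towards heavier orders), then either pins it to the first fresh
  # slab (symmetry breaking) or assigns the feasible slab whose current
  # load yields the smallest loss increase.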
def __init__(self, x, nb_slabs, weights, losstab, loads):
self.__x = x
self.__nb_slabs = nb_slabs
self.__weights = weights
self.__losstab = losstab
self.__loads = loads
self.__maxcapa = len(losstab) - 1
def Next(self, solver):
var, weight = self.NextVar()
if var:
v = self.MaxBound()
if v + 1 == var.Min():
# Symmetry breaking. If you need to assign to a new bin,
# select the first one.
solver.Add(var == v + 1)
return self.Next(solver)
else:
# value heuristic (important for difficult problem):
# try first to place the order in the slab that will induce
# the least increase of the loss
loads = self.getLoads()
l, v = min((self.__losstab[loads[i] + weight], i)
for i in range(var.Min(), var.Max() + 1)
if var.Contains(i) and loads[i] + weight <= self.__maxcapa)
decision = solver.AssignVariableValue(var, v)
return decision
else:
return None
def getLoads(self):
load = [0] * len(self.__loads)
for w, x in zip(self.__weights, self.__x):
if x.Bound():
load[x.Min()] += w
return load
def MaxBound(self):
""" returns the max value bound to a variable, -1 if no variables bound"""
return max([-1] + [self.__x[o].Min()
for o in range(self.__nb_slabs)
if self.__x[o].Bound()])
def NextVar(self):
""" mindom size heuristic with tie break on the weights of orders """
res = [(self.__x[o].Size(), -self.__weights[o], self.__x[o])
for o in range(self.__nb_slabs)
if self.__x[o].Size() > 1]
if res:
res.sort()
return res[0][2], -res[0][1] # returns the order var and its weight
else:
return None, None
def DebugString(self):
return 'SteelMillDecisionBuilder(' + str(self.__x) + ')'
def main(unused_argv):
#------------------solver and variable declaration-------------
(nb_slabs, capacity, max_capacity, weights, colors, loss, color_orders) =\
ReadData(FLAGS.data)
nb_colors = len(color_orders)
solver = pywrapcp.Solver('Steel Mill Slab')
x = [solver.IntVar(0, nb_slabs - 1, 'x' + str(i))
for i in range(nb_slabs)]
load_vars = [solver.IntVar(0, max_capacity - 1, 'load_vars' + str(i))
for i in range(nb_slabs)]
#-------------------post of the constraints--------------
# Bin Packing.
BinPacking(solver, x, weights, load_vars)
# At most two colors per slab.
for s in range(nb_slabs):
solver.Add(solver.SumLessOrEqual(
[solver.Max([solver.IsEqualCstVar(x[c], s) for c in o])
for o in color_orders], 2))
#----------------Objective-------------------------------
objective_var = \
solver.Sum([load_vars[s].IndexOf(loss) for s in range(nb_slabs)]).Var()
objective = solver.Minimize(objective_var, 1)
#------------start the search and optimization-----------
db = SteelDecisionBuilder(x, nb_slabs, weights, loss, load_vars)
search_log = solver.SearchLog(100000, objective_var)
global_limit = solver.TimeLimit(FLAGS.time_limit)
solver.NewSearch(db, [objective, search_log, global_limit])
while solver.NextSolution():
print 'Objective:', objective_var.Value(),\
'check:', sum(loss[load_vars[s].Min()] for s in range(nb_slabs))
solver.EndSearch()
if __name__ == '__main__':
app.run()
| apache-2.0 | -5,511,251,610,893,911,000 | 34.262857 | 80 | 0.618214 | false |
Lujeni/ansible | lib/ansible/modules/network/fortios/fortios_system_resource_limits.py | 13 | 12995 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_system_resource_limits
short_description: Configure resource limits in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
      user to set and modify the system feature, resource_limits category.
      The examples include all parameters, and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
system_resource_limits:
description:
- Configure resource limits.
default: null
type: dict
suboptions:
custom_service:
description:
- Maximum number of firewall custom services.
type: int
dialup_tunnel:
description:
- Maximum number of dial-up tunnels.
type: int
firewall_address:
description:
- Maximum number of firewall addresses (IPv4, IPv6, multicast).
type: int
firewall_addrgrp:
description:
- Maximum number of firewall address groups (IPv4, IPv6).
type: int
firewall_policy:
description:
- Maximum number of firewall policies (IPv4, IPv6, policy46, policy64, DoS-policy4, DoS-policy6, multicast).
type: int
ipsec_phase1:
description:
- Maximum number of VPN IPsec phase1 tunnels.
type: int
ipsec_phase1_interface:
description:
- Maximum number of VPN IPsec phase1 interface tunnels.
type: int
ipsec_phase2:
description:
- Maximum number of VPN IPsec phase2 tunnels.
type: int
ipsec_phase2_interface:
description:
- Maximum number of VPN IPsec phase2 interface tunnels.
type: int
log_disk_quota:
description:
- Log disk quota in MB.
type: int
onetime_schedule:
description:
- Maximum number of firewall one-time schedules.
type: int
proxy:
description:
- Maximum number of concurrent proxy users.
type: int
recurring_schedule:
description:
- Maximum number of firewall recurring schedules.
type: int
service_group:
description:
- Maximum number of firewall service groups.
type: int
session:
description:
- Maximum number of sessions.
type: int
sslvpn:
description:
- Maximum number of SSL-VPN.
type: int
user:
description:
- Maximum number of local users.
type: int
user_group:
description:
- Maximum number of user groups.
type: int
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure resource limits.
fortios_system_resource_limits:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
system_resource_limits:
custom_service: "3"
dialup_tunnel: "4"
firewall_address: "5"
firewall_addrgrp: "6"
firewall_policy: "7"
ipsec_phase1: "8"
ipsec_phase1_interface: "9"
ipsec_phase2: "10"
ipsec_phase2_interface: "11"
log_disk_quota: "12"
onetime_schedule: "13"
proxy: "14"
recurring_schedule: "15"
service_group: "16"
session: "17"
sslvpn: "18"
user: "19"
user_group: "20"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_system_resource_limits_data(json):
option_list = ['custom_service', 'dialup_tunnel', 'firewall_address',
'firewall_addrgrp', 'firewall_policy', 'ipsec_phase1',
'ipsec_phase1_interface', 'ipsec_phase2', 'ipsec_phase2_interface',
'log_disk_quota', 'onetime_schedule', 'proxy',
'recurring_schedule', 'service_group', 'session',
'sslvpn', 'user', 'user_group']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
        for i, elem in enumerate(data):
            data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
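# Example of the normalisation above (illustrative input):
#
#   underscore_to_hyphen({'log_disk_quota': 12, 'user_group': 20})
#   # -> {'log-disk-quota': 12, 'user-group': 20}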
def system_resource_limits(data, fos):
vdom = data['vdom']
system_resource_limits_data = data['system_resource_limits']
filtered_data = underscore_to_hyphen(filter_system_resource_limits_data(system_resource_limits_data))
return fos.set('system',
'resource-limits',
data=filtered_data,
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_system(data, fos):
if data['system_resource_limits']:
resp = system_resource_limits(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"system_resource_limits": {
"required": False, "type": "dict", "default": None,
"options": {
"custom_service": {"required": False, "type": "int"},
"dialup_tunnel": {"required": False, "type": "int"},
"firewall_address": {"required": False, "type": "int"},
"firewall_addrgrp": {"required": False, "type": "int"},
"firewall_policy": {"required": False, "type": "int"},
"ipsec_phase1": {"required": False, "type": "int"},
"ipsec_phase1_interface": {"required": False, "type": "int"},
"ipsec_phase2": {"required": False, "type": "int"},
"ipsec_phase2_interface": {"required": False, "type": "int"},
"log_disk_quota": {"required": False, "type": "int"},
"onetime_schedule": {"required": False, "type": "int"},
"proxy": {"required": False, "type": "int"},
"recurring_schedule": {"required": False, "type": "int"},
"service_group": {"required": False, "type": "int"},
"session": {"required": False, "type": "int"},
"sslvpn": {"required": False, "type": "int"},
"user": {"required": False, "type": "int"},
"user_group": {"required": False, "type": "int"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_system(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_system(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 | -5,213,551,673,024,830,000 | 31.815657 | 128 | 0.572528 | false |
stvstnfrd/edx-platform | lms/djangoapps/program_enrollments/management/commands/reset_enrollment_data.py | 5 | 2105 | """
Management command to remove enrollments and any related models created as
a side effect of enrolling students.
Intended for use in integration sandbox environments
"""
import logging
from textwrap import dedent
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from common.djangoapps.student.models import CourseEnrollment
from lms.djangoapps.program_enrollments.models import ProgramEnrollment
log = logging.getLogger(__name__)
class Command(BaseCommand):
"""
Deletes all enrollments and related data
Example usage:
$ ./manage.py lms reset_enrollment_data ca73b4af-676a-4bb3-a9a5-f6b5a3dedd,1c5f61b9-0be5-4a90-9ea5-582d5e066c
"""
help = dedent(__doc__).strip()
confirmation_prompt = "Type 'confirm' to continue with deletion\n"
def add_arguments(self, parser):
parser.add_argument(
'programs',
help='Comma separated list of programs to delete enrollments for'
)
parser.add_argument(
'--force',
action='store_true',
help='Skip manual confirmation step before deleting objects',
)
@transaction.atomic
def handle(self, *args, **options):
programs = options['programs'].split(',')
q1_count, deleted_course_enrollment_models = CourseEnrollment.objects.filter(
programcourseenrollment__program_enrollment__program_uuid__in=programs
).delete()
q2_count, deleted_program_enrollment_models = ProgramEnrollment.objects.filter(
program_uuid__in=programs
).delete()
log.info(
'The following records will be deleted:\n%s\n%s\n',
deleted_course_enrollment_models,
deleted_program_enrollment_models,
)
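        # The deletes above have already run, but handle() is wrapped in
        # @transaction.atomic, so raising CommandError below rolls them back.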
if not options['force']:
confirmation = input(self.confirmation_prompt)
if confirmation != 'confirm':
raise CommandError('User confirmation required. No records have been modified')
log.info('Deleting %s records...', q1_count + q2_count)
| agpl-3.0 | -2,217,810,590,852,880,600 | 31.890625 | 117 | 0.667458 | false |
shams169/pythonProject | ContactsDir/env/lib/python3.6/site-packages/setuptools/sandbox.py | 21 | 14543 | import os
import sys
import tempfile
import operator
import functools
import itertools
import re
import contextlib
import pickle
import textwrap
from setuptools.extern import six
from setuptools.extern.six.moves import builtins, map
import pkg_resources
if sys.platform.startswith('java'):
import org.python.modules.posix.PosixModule as _os
else:
_os = sys.modules[os.name]
try:
_file = file
except NameError:
_file = None
_open = open
from distutils.errors import DistutilsError
from pkg_resources import working_set
__all__ = [
"AbstractSandbox", "DirectorySandbox", "SandboxViolation", "run_setup",
]
def _execfile(filename, globals, locals=None):
"""
Python 3 implementation of execfile.
"""
mode = 'rb'
with open(filename, mode) as stream:
script = stream.read()
# compile() function in Python 2.6 and 3.1 requires LF line endings.
if sys.version_info[:2] < (2, 7) or sys.version_info[:2] >= (3, 0) and sys.version_info[:2] < (3, 2):
script = script.replace(b'\r\n', b'\n')
script = script.replace(b'\r', b'\n')
if locals is None:
locals = globals
code = compile(script, filename, 'exec')
exec(code, globals, locals)
@contextlib.contextmanager
def save_argv(repl=None):
saved = sys.argv[:]
if repl is not None:
sys.argv[:] = repl
try:
yield saved
finally:
sys.argv[:] = saved
@contextlib.contextmanager
def save_path():
saved = sys.path[:]
try:
yield saved
finally:
sys.path[:] = saved
@contextlib.contextmanager
def override_temp(replacement):
"""
Monkey-patch tempfile.tempdir with replacement, ensuring it exists
"""
if not os.path.isdir(replacement):
os.makedirs(replacement)
saved = tempfile.tempdir
tempfile.tempdir = replacement
try:
yield
finally:
tempfile.tempdir = saved
@contextlib.contextmanager
def pushd(target):
saved = os.getcwd()
os.chdir(target)
try:
yield saved
finally:
os.chdir(saved)
class UnpickleableException(Exception):
"""
An exception representing another Exception that could not be pickled.
"""
@staticmethod
def dump(type, exc):
"""
Always return a dumped (pickled) type and exc. If exc can't be pickled,
wrap it in UnpickleableException first.
"""
try:
return pickle.dumps(type), pickle.dumps(exc)
except Exception:
# get UnpickleableException inside the sandbox
from setuptools.sandbox import UnpickleableException as cls
return cls.dump(cls, cls(repr(exc)))
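# Taken together with ExceptionSaver below: exceptions raised inside the
# sandboxed context are pickled via UnpickleableException.dump(), stashed,
# and re-raised outside the context by ExceptionSaver.resume().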
class ExceptionSaver:
"""
A Context Manager that will save an exception, serialized, and restore it
later.
"""
def __enter__(self):
return self
def __exit__(self, type, exc, tb):
if not exc:
return
# dump the exception
self._saved = UnpickleableException.dump(type, exc)
self._tb = tb
# suppress the exception
return True
def resume(self):
"restore and re-raise any exception"
if '_saved' not in vars(self):
return
type, exc = map(pickle.loads, self._saved)
six.reraise(type, exc, self._tb)
@contextlib.contextmanager
def save_modules():
"""
Context in which imported modules are saved.
Translates exceptions internal to the context into the equivalent exception
outside the context.
"""
saved = sys.modules.copy()
with ExceptionSaver() as saved_exc:
yield saved
sys.modules.update(saved)
# remove any modules imported since
del_modules = (
mod_name for mod_name in sys.modules
if mod_name not in saved
# exclude any encodings modules. See #285
and not mod_name.startswith('encodings.')
)
_clear_modules(del_modules)
saved_exc.resume()
def _clear_modules(module_names):
for mod_name in list(module_names):
del sys.modules[mod_name]
@contextlib.contextmanager
def save_pkg_resources_state():
saved = pkg_resources.__getstate__()
try:
yield saved
finally:
pkg_resources.__setstate__(saved)
@contextlib.contextmanager
def setup_context(setup_dir):
temp_dir = os.path.join(setup_dir, 'temp')
with save_pkg_resources_state():
with save_modules():
hide_setuptools()
with save_path():
with save_argv():
with override_temp(temp_dir):
with pushd(setup_dir):
# ensure setuptools commands are available
__import__('setuptools')
yield
def _needs_hiding(mod_name):
"""
>>> _needs_hiding('setuptools')
True
>>> _needs_hiding('pkg_resources')
True
>>> _needs_hiding('setuptools_plugin')
False
>>> _needs_hiding('setuptools.__init__')
True
>>> _needs_hiding('distutils')
True
>>> _needs_hiding('os')
False
>>> _needs_hiding('Cython')
True
"""
pattern = re.compile(r'(setuptools|pkg_resources|distutils|Cython)(\.|$)')
return bool(pattern.match(mod_name))
def hide_setuptools():
"""
Remove references to setuptools' modules from sys.modules to allow the
invocation to import the most appropriate setuptools. This technique is
necessary to avoid issues such as #315 where setuptools upgrading itself
would fail to find a function declared in the metadata.
"""
modules = filter(_needs_hiding, sys.modules)
_clear_modules(modules)
def run_setup(setup_script, args):
"""Run a distutils setup script, sandboxed in its directory"""
setup_dir = os.path.abspath(os.path.dirname(setup_script))
with setup_context(setup_dir):
try:
sys.argv[:] = [setup_script] + list(args)
sys.path.insert(0, setup_dir)
# reset to include setup dir, w/clean callback list
working_set.__init__()
working_set.callbacks.append(lambda dist: dist.activate())
# __file__ should be a byte string on Python 2 (#712)
dunder_file = (
setup_script
if isinstance(setup_script, str) else
setup_script.encode(sys.getfilesystemencoding())
)
with DirectorySandbox(setup_dir):
ns = dict(__file__=dunder_file, __name__='__main__')
_execfile(setup_script, ns)
except SystemExit as v:
if v.args and v.args[0]:
raise
# Normal exit, just return
class AbstractSandbox:
"""Wrap 'os' module and 'open()' builtin for virtualizing setup scripts"""
_active = False
def __init__(self):
self._attrs = [
name for name in dir(_os)
if not name.startswith('_') and hasattr(self, name)
]
def _copy(self, source):
for name in self._attrs:
setattr(os, name, getattr(source, name))
def __enter__(self):
self._copy(self)
if _file:
builtins.file = self._file
builtins.open = self._open
self._active = True
def __exit__(self, exc_type, exc_value, traceback):
self._active = False
if _file:
builtins.file = _file
builtins.open = _open
self._copy(_os)
def run(self, func):
"""Run 'func' under os sandboxing"""
with self:
return func()
def _mk_dual_path_wrapper(name):
original = getattr(_os, name)
def wrap(self, src, dst, *args, **kw):
if self._active:
src, dst = self._remap_pair(name, src, dst, *args, **kw)
return original(src, dst, *args, **kw)
return wrap
for name in ["rename", "link", "symlink"]:
if hasattr(_os, name):
locals()[name] = _mk_dual_path_wrapper(name)
def _mk_single_path_wrapper(name, original=None):
original = original or getattr(_os, name)
def wrap(self, path, *args, **kw):
if self._active:
path = self._remap_input(name, path, *args, **kw)
return original(path, *args, **kw)
return wrap
if _file:
_file = _mk_single_path_wrapper('file', _file)
_open = _mk_single_path_wrapper('open', _open)
for name in [
"stat", "listdir", "chdir", "open", "chmod", "chown", "mkdir",
"remove", "unlink", "rmdir", "utime", "lchown", "chroot", "lstat",
"startfile", "mkfifo", "mknod", "pathconf", "access"
]:
if hasattr(_os, name):
locals()[name] = _mk_single_path_wrapper(name)
def _mk_single_with_return(name):
original = getattr(_os, name)
def wrap(self, path, *args, **kw):
if self._active:
path = self._remap_input(name, path, *args, **kw)
return self._remap_output(name, original(path, *args, **kw))
return original(path, *args, **kw)
return wrap
for name in ['readlink', 'tempnam']:
if hasattr(_os, name):
locals()[name] = _mk_single_with_return(name)
def _mk_query(name):
original = getattr(_os, name)
def wrap(self, *args, **kw):
retval = original(*args, **kw)
if self._active:
return self._remap_output(name, retval)
return retval
return wrap
for name in ['getcwd', 'tmpnam']:
if hasattr(_os, name):
locals()[name] = _mk_query(name)
def _validate_path(self, path):
"""Called to remap or validate any path, whether input or output"""
return path
def _remap_input(self, operation, path, *args, **kw):
"""Called for path inputs"""
return self._validate_path(path)
def _remap_output(self, operation, path):
"""Called for path outputs"""
return self._validate_path(path)
def _remap_pair(self, operation, src, dst, *args, **kw):
"""Called for path pairs like rename, link, and symlink operations"""
return (
self._remap_input(operation + '-from', src, *args, **kw),
self._remap_input(operation + '-to', dst, *args, **kw)
)
if hasattr(os, 'devnull'):
_EXCEPTIONS = [os.devnull,]
else:
_EXCEPTIONS = []
class DirectorySandbox(AbstractSandbox):
"""Restrict operations to a single subdirectory - pseudo-chroot"""
write_ops = dict.fromkeys([
"open", "chmod", "chown", "mkdir", "remove", "unlink", "rmdir",
"utime", "lchown", "chroot", "mkfifo", "mknod", "tempnam",
])
_exception_patterns = [
# Allow lib2to3 to attempt to save a pickled grammar object (#121)
r'.*lib2to3.*\.pickle$',
]
"exempt writing to paths that match the pattern"
def __init__(self, sandbox, exceptions=_EXCEPTIONS):
self._sandbox = os.path.normcase(os.path.realpath(sandbox))
self._prefix = os.path.join(self._sandbox, '')
self._exceptions = [
os.path.normcase(os.path.realpath(path))
for path in exceptions
]
AbstractSandbox.__init__(self)
def _violation(self, operation, *args, **kw):
from setuptools.sandbox import SandboxViolation
raise SandboxViolation(operation, args, kw)
if _file:
def _file(self, path, mode='r', *args, **kw):
if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path):
self._violation("file", path, mode, *args, **kw)
return _file(path, mode, *args, **kw)
def _open(self, path, mode='r', *args, **kw):
if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path):
self._violation("open", path, mode, *args, **kw)
return _open(path, mode, *args, **kw)
def tmpnam(self):
self._violation("tmpnam")
def _ok(self, path):
active = self._active
try:
self._active = False
realpath = os.path.normcase(os.path.realpath(path))
return (
self._exempted(realpath)
or realpath == self._sandbox
or realpath.startswith(self._prefix)
)
finally:
self._active = active
def _exempted(self, filepath):
start_matches = (
filepath.startswith(exception)
for exception in self._exceptions
)
pattern_matches = (
re.match(pattern, filepath)
for pattern in self._exception_patterns
)
candidates = itertools.chain(start_matches, pattern_matches)
return any(candidates)
def _remap_input(self, operation, path, *args, **kw):
"""Called for path inputs"""
if operation in self.write_ops and not self._ok(path):
self._violation(operation, os.path.realpath(path), *args, **kw)
return path
def _remap_pair(self, operation, src, dst, *args, **kw):
"""Called for path pairs like rename, link, and symlink operations"""
if not self._ok(src) or not self._ok(dst):
self._violation(operation, src, dst, *args, **kw)
return (src, dst)
def open(self, file, flags, mode=0o777, *args, **kw):
"""Called for low-level os.open()"""
if flags & WRITE_FLAGS and not self._ok(file):
self._violation("os.open", file, flags, mode, *args, **kw)
return _os.open(file, flags, mode, *args, **kw)
WRITE_FLAGS = functools.reduce(
operator.or_, [getattr(_os, a, 0) for a in
"O_WRONLY O_RDWR O_APPEND O_CREAT O_TRUNC O_TEMPORARY".split()]
)
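# Any os.open() whose flags intersect WRITE_FLAGS counts as a write and must
# pass the DirectorySandbox.open() check above.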
class SandboxViolation(DistutilsError):
"""A setup script attempted to modify the filesystem outside the sandbox"""
tmpl = textwrap.dedent("""
SandboxViolation: {cmd}{args!r} {kwargs}
The package setup script has attempted to modify files on your system
that are not within the EasyInstall build area, and has been aborted.
This package cannot be safely installed by EasyInstall, and may not
support alternate installation locations even if you run its setup
script by hand. Please inform the package's author and the EasyInstall
maintainers to find out if a fix or workaround is available.
""").lstrip()
def __str__(self):
cmd, args, kwargs = self.args
return self.tmpl.format(**locals())
| mit | 1,284,975,422,915,150,600 | 28.379798 | 105 | 0.584061 | false |
mbachry/exxo | exxo/venv.py | 1 | 2558 | ACTIVATE_SCRIPT = """# This file must be used with "source bin/activate" *from bash*
# you cannot run it directly
deactivate () {
# reset old environment variables
if [ -n "$_OLD_VIRTUAL_PATH" ] ; then
PATH="$_OLD_VIRTUAL_PATH"
export PATH
unset _OLD_VIRTUAL_PATH
fi
# This should detect bash and zsh, which have a hash command that must
# be called to get it to forget past commands. Without forgetting
# past commands the $PATH changes we made may not be respected
if [ -n "$BASH" -o -n "$ZSH_VERSION" ] ; then
hash -r
fi
PYTHONPATH="$_OLD_PYTHONPATH"
export PYTHONPATH
unset _OLD_PYTHONPATH
if [ -n "$_OLD_VIRTUAL_PS1" ] ; then
PS1="$_OLD_VIRTUAL_PS1"
export PS1
unset _OLD_VIRTUAL_PS1
fi
unset VIRTUAL_ENV
if [ ! "$1" = "nondestructive" ] ; then
# Self destruct!
unset -f deactivate
fi
}
# unset irrelevant variables
deactivate nondestructive
VIRTUAL_ENV="{{ venv_path }}"
export VIRTUAL_ENV
VIRTUAL_ENV_PYRUN_VERSION="{{ pyrun_version }}"
export VIRTUAL_ENV_PYRUN_VERSION
_OLD_PYTHONPATH="$PYTHONPATH"
PYTHONPATH="$VIRTUAL_ENV/pip:$PYTHONPATH"
export PYTHONPATH
_OLD_VIRTUAL_PATH="$PATH"
PATH="$VIRTUAL_ENV/bin:$PATH"
export PATH
if [ -z "$VIRTUAL_ENV_DISABLE_PROMPT" ] ; then
_OLD_VIRTUAL_PS1="$PS1"
if [ "x({{ venv_name }}) " != x ] ; then
PS1="({{ venv_name }}) $PS1"
else
if [ "`basename \"$VIRTUAL_ENV\"`" = "__" ] ; then
# special case for Aspen magic directories
# see http://www.zetadev.com/software/aspen/
PS1="[`basename \`dirname \"$VIRTUAL_ENV\"\``] $PS1"
else
PS1="(`basename \"$VIRTUAL_ENV\"`)$PS1"
fi
fi
export PS1
fi
# This should detect bash and zsh, which have a hash command that must
# be called to get it to forget past commands. Without forgetting
# past commands the $PATH changes we made may not be respected
if [ -n "$BASH" -o -n "$ZSH_VERSION" ] ; then
hash -r
fi
"""
PIP_SCRIPT = """#!/usr/bin/env python
import os
import sys
import pkg_resources
import pip
import pip._vendor.pkg_resources
eggdir = os.path.join(os.environ['VIRTUAL_ENV'], 'pip')
try:
pkg_resources.working_set.entries.remove(eggdir)
except ValueError:
pass
try:
pip._vendor.pkg_resources.working_set.entries.remove(eggdir)
except ValueError:
pass
for p in ('setuptools', 'pip'):
pkg_resources.working_set.by_key.pop(p, None)
pip._vendor.pkg_resources.working_set.by_key.pop(p, None)
sys.exit(pip.main())
"""
| isc | 6,727,654,097,634,589,000 | 24.838384 | 84 | 0.64269 | false |
mdworks2016/work_development | Python/20_Third_Certification/venv/lib/python3.7/site-packages/django/utils/translation/__init__.py | 5 | 10790 | """
Internationalization support.
"""
import re
import warnings
from contextlib import ContextDecorator
from decimal import ROUND_UP, Decimal
from django.utils.autoreload import autoreload_started, file_changed
from django.utils.deprecation import RemovedInDjango40Warning
from django.utils.functional import lazy
__all__ = [
'activate', 'deactivate', 'override', 'deactivate_all',
'get_language', 'get_language_from_request',
'get_language_info', 'get_language_bidi',
'check_for_language', 'to_language', 'to_locale', 'templatize',
'gettext', 'gettext_lazy', 'gettext_noop',
'ugettext', 'ugettext_lazy', 'ugettext_noop',
'ngettext', 'ngettext_lazy',
'ungettext', 'ungettext_lazy',
'pgettext', 'pgettext_lazy',
'npgettext', 'npgettext_lazy',
'LANGUAGE_SESSION_KEY',
]
LANGUAGE_SESSION_KEY = '_language'
class TranslatorCommentWarning(SyntaxWarning):
pass
# Here be dragons, so a short explanation of the logic won't hurt:
# We are trying to solve two problems: (1) access settings, in particular
# settings.USE_I18N, as late as possible, so that modules can be imported
# without having to first configure Django, and (2) if some other code creates
# a reference to one of these functions, don't break that reference when we
# replace the functions with their real counterparts (once we do access the
# settings).
class Trans:
"""
The purpose of this class is to store the actual translation function upon
receiving the first call to that function. After this is done, changes to
USE_I18N will have no effect to which function is served upon request. If
your tests rely on changing USE_I18N, you can delete all the functions
from _trans.__dict__.
Note that storing the function with setattr will have a noticeable
performance effect, as access to the function goes the normal path,
instead of using __getattr__.
"""
def __getattr__(self, real_name):
from django.conf import settings
if settings.USE_I18N:
from django.utils.translation import trans_real as trans
from django.utils.translation.reloader import watch_for_translation_changes, translation_file_changed
autoreload_started.connect(watch_for_translation_changes, dispatch_uid='translation_file_changed')
file_changed.connect(translation_file_changed, dispatch_uid='translation_file_changed')
else:
from django.utils.translation import trans_null as trans
setattr(self, real_name, getattr(trans, real_name))
return getattr(trans, real_name)
_trans = Trans()
# The Trans class is no more needed, so remove it from the namespace.
del Trans
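# Illustration of the lazy binding above (assumes a configured settings
# module; the names are standard Django usage):
#
#   from django.utils.translation import gettext
#   gettext('Hello')   # first call hits Trans.__getattr__, which installs
#                      # trans_real or trans_null based on settings.USE_I18N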
def gettext_noop(message):
return _trans.gettext_noop(message)
def ugettext_noop(message):
"""
A legacy compatibility wrapper for Unicode handling on Python 2.
Alias of gettext_noop() since Django 2.0.
"""
warnings.warn(
'django.utils.translation.ugettext_noop() is deprecated in favor of '
'django.utils.translation.gettext_noop().',
RemovedInDjango40Warning, stacklevel=2,
)
return gettext_noop(message)
def gettext(message):
return _trans.gettext(message)
def ugettext(message):
"""
A legacy compatibility wrapper for Unicode handling on Python 2.
Alias of gettext() since Django 2.0.
"""
warnings.warn(
'django.utils.translation.ugettext() is deprecated in favor of '
'django.utils.translation.gettext().',
RemovedInDjango40Warning, stacklevel=2,
)
return gettext(message)
def ngettext(singular, plural, number):
return _trans.ngettext(singular, plural, number)
def ungettext(singular, plural, number):
"""
A legacy compatibility wrapper for Unicode handling on Python 2.
Alias of ngettext() since Django 2.0.
"""
warnings.warn(
'django.utils.translation.ungettext() is deprecated in favor of '
'django.utils.translation.ngettext().',
RemovedInDjango40Warning, stacklevel=2,
)
return ngettext(singular, plural, number)
def pgettext(context, message):
return _trans.pgettext(context, message)
def npgettext(context, singular, plural, number):
return _trans.npgettext(context, singular, plural, number)
gettext_lazy = lazy(gettext, str)
pgettext_lazy = lazy(pgettext, str)
def ugettext_lazy(message):
"""
    A legacy compatibility wrapper for Unicode handling on Python 2.
    An alias of gettext_lazy() since Django 2.0.
"""
warnings.warn(
'django.utils.translation.ugettext_lazy() is deprecated in favor of '
'django.utils.translation.gettext_lazy().',
RemovedInDjango40Warning, stacklevel=2,
)
return gettext_lazy(message)
def lazy_number(func, resultclass, number=None, **kwargs):
if isinstance(number, int):
kwargs['number'] = number
proxy = lazy(func, resultclass)(**kwargs)
else:
original_kwargs = kwargs.copy()
class NumberAwareString(resultclass):
def __bool__(self):
return bool(kwargs['singular'])
def _get_number_value(self, values):
try:
return values[number]
except KeyError:
raise KeyError(
"Your dictionary lacks key '%s\'. Please provide "
"it, because it is required to determine whether "
"string is singular or plural." % number
)
def _translate(self, number_value):
kwargs['number'] = number_value
return func(**kwargs)
def format(self, *args, **kwargs):
number_value = self._get_number_value(kwargs) if kwargs and number else args[0]
return self._translate(number_value).format(*args, **kwargs)
def __mod__(self, rhs):
if isinstance(rhs, dict) and number:
number_value = self._get_number_value(rhs)
else:
number_value = rhs
translated = self._translate(number_value)
try:
translated = translated % rhs
except TypeError:
# String doesn't contain a placeholder for the number.
pass
return translated
proxy = lazy(lambda **kwargs: NumberAwareString(), NumberAwareString)(**kwargs)
proxy.__reduce__ = lambda: (_lazy_number_unpickle, (func, resultclass, number, original_kwargs))
return proxy
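# Example of the number-as-key form handled above (illustrative values):
#
#   msg = ngettext_lazy('%(num)d apple', '%(num)d apples', 'num')
#   msg % {'num': 3}   # -> '3 apples'; a missing 'num' key raises KeyError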
def _lazy_number_unpickle(func, resultclass, number, kwargs):
return lazy_number(func, resultclass, number=number, **kwargs)
def ngettext_lazy(singular, plural, number=None):
return lazy_number(ngettext, str, singular=singular, plural=plural, number=number)
def ungettext_lazy(singular, plural, number=None):
"""
A legacy compatibility wrapper for Unicode handling on Python 2.
    An alias of ngettext_lazy() since Django 2.0.
"""
warnings.warn(
'django.utils.translation.ungettext_lazy() is deprecated in favor of '
'django.utils.translation.ngettext_lazy().',
RemovedInDjango40Warning, stacklevel=2,
)
return ngettext_lazy(singular, plural, number)
def npgettext_lazy(context, singular, plural, number=None):
return lazy_number(npgettext, str, context=context, singular=singular, plural=plural, number=number)
def activate(language):
return _trans.activate(language)
def deactivate():
return _trans.deactivate()
class override(ContextDecorator):
def __init__(self, language, deactivate=False):
self.language = language
self.deactivate = deactivate
def __enter__(self):
self.old_language = get_language()
if self.language is not None:
activate(self.language)
else:
deactivate_all()
def __exit__(self, exc_type, exc_value, traceback):
if self.old_language is None:
deactivate_all()
elif self.deactivate:
deactivate()
else:
activate(self.old_language)
def get_language():
return _trans.get_language()
def get_language_bidi():
return _trans.get_language_bidi()
def check_for_language(lang_code):
return _trans.check_for_language(lang_code)
def to_language(locale):
"""Turn a locale name (en_US) into a language name (en-us)."""
p = locale.find('_')
if p >= 0:
return locale[:p].lower() + '-' + locale[p + 1:].lower()
else:
return locale.lower()
def to_locale(language):
"""Turn a language name (en-us) into a locale name (en_US)."""
language, _, country = language.lower().partition('-')
if not country:
return language
# A language with > 2 characters after the dash only has its first
# character after the dash capitalized; e.g. sr-latn becomes sr_Latn.
# A language with 2 characters after the dash has both characters
# capitalized; e.g. en-us becomes en_US.
country, _, tail = country.partition('-')
country = country.title() if len(country) > 2 else country.upper()
if tail:
country += '-' + tail
return language + '_' + country
def get_language_from_request(request, check_path=False):
return _trans.get_language_from_request(request, check_path)
def get_language_from_path(path):
return _trans.get_language_from_path(path)
def get_supported_language_variant(lang_code, *, strict=False):
return _trans.get_supported_language_variant(lang_code, strict)
def templatize(src, **kwargs):
from .template import templatize
return templatize(src, **kwargs)
def deactivate_all():
return _trans.deactivate_all()
def get_language_info(lang_code):
from django.conf.locale import LANG_INFO
try:
lang_info = LANG_INFO[lang_code]
if 'fallback' in lang_info and 'name' not in lang_info:
info = get_language_info(lang_info['fallback'][0])
else:
info = lang_info
except KeyError:
if '-' not in lang_code:
raise KeyError("Unknown language code %s." % lang_code)
generic_lang_code = lang_code.split('-')[0]
try:
info = LANG_INFO[generic_lang_code]
except KeyError:
raise KeyError("Unknown language code %s and %s." % (lang_code, generic_lang_code))
if info:
info['name_translated'] = gettext_lazy(info['name'])
return info
trim_whitespace_re = re.compile(r'\s*\n\s*')
def trim_whitespace(s):
return trim_whitespace_re.sub(' ', s.strip())
def round_away_from_one(value):
return int(Decimal(value - 1).quantize(Decimal('0'), rounding=ROUND_UP)) + 1
| apache-2.0 | -4,081,953,658,181,526,000 | 30.828909 | 113 | 0.650046 | false |
trdean/grEME | gr-uhd/apps/hf_radio/ssbdemod.py | 58 | 4082 | # Copyright 2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
# This tries to push the hilbert transform for ssb demod back into the
# freq. xlating filter.
#
# The starting point for this was weaver_isb_am1_usrp3.py.
#
# The tap coefficients for freq_xlating_fir_filter_ccf were generated
# externally and are read from a file because I didn't want to learn how
# to make fir filters with arbitrary phase response using python numeric
# facilities.
#
# They were generated using Scilab which I am already familiar with.
# M. Revnell Jan 06
from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
class ssb_demod( gr.hier_block2 ):
def __init__( self, if_rate, af_rate ):
gr.hier_block2.__init__(self, "ssb_demod",
gr.io_signature(1,1,gr.sizeof_gr_complex),
gr.io_signature(1,1,gr.sizeof_float))
self.if_rate = int(if_rate)
self.af_rate = int(af_rate)
self.if_decim = int(if_rate / af_rate)
self.sideband = 1
self.xlate_taps = ([complex(v) for v in file('ssb_taps').readlines()])
self.audio_taps = filter.firdes.low_pass(
1.0,
self.af_rate,
3e3,
600,
filter.firdes.WIN_HAMMING )
self.xlate = filter.freq_xlating_fir_filter_ccc(
self.if_decim,
self.xlate_taps,
0,
self.if_rate )
self.split = blocks.complex_to_float()
self.lpf = filter.fir_filter_fff(
1, self.audio_taps )
self.sum = blocks.add_ff( )
self.am_sel = blocks.multiply_const_ff( 0 )
self.sb_sel = blocks.multiply_const_ff( 1 )
self.mixer = blocks.add_ff()
self.am_det = blocks.complex_to_mag()
self.connect(self, self.xlate)
self.connect(self.xlate, self.split)
self.connect((self.split, 0), (self.sum, 0))
self.connect((self.split, 1), (self.sum, 1))
self.connect(self.sum, self.sb_sel)
self.connect(self.xlate, self.am_det)
self.connect(self.sb_sel, (self.mixer, 0))
self.connect(self.am_det, self.am_sel)
self.connect(self.am_sel, (self.mixer, 1))
self.connect(self.mixer, self.lpf)
self.connect(self.lpf, self)
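    # Sideband selection below: upper_sb() conjugates the complex taps to
    # mirror the passband, lower_sb() restores them, and set_am() swaps in a
    # plain low-pass and routes audio through the magnitude (AM) detector.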
def upper_sb( self ):
self.xlate.set_taps([v.conjugate() for v in self.xlate_taps])
self.sb_sel.set_k( 1.0 )
self.am_sel.set_k( 0.0 )
def lower_sb( self ):
self.xlate.set_taps(self.xlate_taps)
self.sb_sel.set_k( 1.0 )
self.am_sel.set_k( 0.0 )
def set_am( self ):
taps = filter.firdes.low_pass( 1.0,
self.if_rate,
5e3,
2e3,
filter.firdes.WIN_HAMMING )
self.xlate.set_taps( taps )
self.sb_sel.set_k( 0.0 )
self.am_sel.set_k( 1.0 )
def set_bw( self, bw ):
self.audio_taps = filter.firdes.low_pass(
1.0,
self.af_rate,
bw,
600,
filter.firdes.WIN_HAMMING )
self.lpf.set_taps( self.audio_taps )
def tune( self, freq ):
self.xlate.set_center_freq( freq )
| gpl-3.0 | -1,878,122,498,137,796,600 | 33.59322 | 78 | 0.581333 | false |
miipl-naveen/optibizz | addons/account/project/report/inverted_analytic_balance.py | 358 | 5760 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.report import report_sxw
class account_inverted_analytic_balance(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(account_inverted_analytic_balance, self).__init__(cr, uid, name, context=context)
self.localcontext.update( {
'time': time,
'lines_g': self._lines_g,
'lines_a': self._lines_a,
'sum_debit': self._sum_debit,
'sum_credit': self._sum_credit,
'sum_balance': self._sum_balance,
'sum_quantity': self._sum_quantity,
})
def _lines_g(self, accounts, date1, date2):
ids = map(lambda x: x.id, accounts)
self.cr.execute("SELECT aa.name AS name, aa.code AS code, "
"sum(aal.amount) AS balance, "
"sum(aal.unit_amount) AS quantity, aa.id AS id \
FROM account_analytic_line AS aal, account_account AS aa \
WHERE (aal.general_account_id=aa.id) "
"AND (aal.account_id IN %s) "
"AND (date>=%s) AND (date<=%s) AND aa.active \
GROUP BY aal.general_account_id, aa.name, aa.code, aal.code, aa.id "
"ORDER BY aal.code",
(tuple(ids), date1, date2))
res = self.cr.dictfetchall()
for r in res:
if r['balance'] > 0:
r['debit'] = r['balance']
r['credit'] = 0.0
elif r['balance'] < 0:
r['debit'] = 0.0
r['credit'] = -r['balance']
else:
r['debit'] = 0.0
r['credit'] = 0.0
return res
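    # Convention used by _lines_g and _lines_a: a positive analytic balance
    # is reported in the debit column, a negative one in the credit column.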
def _lines_a(self, accounts, general_account_id, date1, date2):
ids = map(lambda x: x.id, accounts)
self.cr.execute("SELECT sum(aal.amount) AS balance, "
"sum(aal.unit_amount) AS quantity, "
"aaa.code AS code, aaa.name AS name, account_id \
FROM account_analytic_line AS aal, "
"account_analytic_account AS aaa \
WHERE aal.account_id=aaa.id AND aal.account_id IN %s "
"AND aal.general_account_id=%s AND aal.date>=%s "
"AND aal.date<=%s \
GROUP BY aal.account_id, general_account_id, aaa.code, aaa.name "
"ORDER BY aal.account_id",
(tuple(ids), general_account_id, date1, date2))
res = self.cr.dictfetchall()
aaa_obj = self.pool.get('account.analytic.account')
res2 = aaa_obj.read(self.cr, self.uid, ids, ['complete_name'])
complete_name = {}
for r in res2:
complete_name[r['id']] = r['complete_name']
for r in res:
r['complete_name'] = complete_name[r['account_id']]
if r['balance'] > 0:
r['debit'] = r['balance']
r['credit'] = 0.0
elif r['balance'] < 0:
r['debit'] = 0.0
r['credit'] = -r['balance']
else:
r['debit'] = 0.0
r['credit'] = 0.0
return res
def _sum_debit(self, accounts, date1, date2):
ids = map(lambda x: x.id, accounts)
self.cr.execute("SELECT sum(amount) \
FROM account_analytic_line \
WHERE account_id IN %s AND date>=%s AND date<=%s AND amount>0", (tuple(ids),date1, date2,))
return self.cr.fetchone()[0] or 0.0
def _sum_credit(self, accounts, date1, date2):
ids = map(lambda x: x.id, accounts)
self.cr.execute("SELECT -sum(amount) \
FROM account_analytic_line \
WHERE account_id IN %s AND date>=%s AND date<=%s AND amount<0", (tuple(ids),date1, date2,))
return self.cr.fetchone()[0] or 0.0
def _sum_balance(self, accounts, date1, date2):
debit = self._sum_debit(accounts, date1, date2)
credit = self._sum_credit(accounts, date1, date2)
return (debit-credit)
def _sum_quantity(self, accounts, date1, date2):
ids = map(lambda x: x.id, accounts)
self.cr.execute("SELECT sum(unit_amount) \
FROM account_analytic_line \
WHERE account_id IN %s AND date>=%s AND date<=%s", (tuple(ids),date1, date2,))
return self.cr.fetchone()[0] or 0.0
class report_invertedanalyticbalance(osv.AbstractModel):
_name = 'report.account.report_invertedanalyticbalance'
_inherit = 'report.abstract_report'
_template = 'account.report_invertedanalyticbalance'
_wrapped_report_class = account_inverted_analytic_balance
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 4,345,690,284,339,548,000 | 43.307692 | 107 | 0.540799 | false |
lucasmachadorj/pypln.web | pypln/web/core/tests/views/test_document.py | 2 | 11044 | # -*- coding:utf-8 -*-
#
# Copyright 2012 NAMD-EMAP-FGV
#
# This file is part of PyPLN. You can get more information at: http://pypln.org/.
#
# PyPLN is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyPLN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyPLN. If not, see <http://www.gnu.org/licenses/>.
from mock import patch
from StringIO import StringIO
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test.client import encode_multipart, BOUNDARY, MULTIPART_CONTENT
from rest_framework.reverse import reverse as rest_framework_reverse
from pypln.web.core.models import Corpus, Document
from pypln.web.core.tests.utils import TestWithMongo
__all__ = ["DocumentListViewTest", "DocumentDetailViewTest"]
class DocumentListViewTest(TestWithMongo):
fixtures = ['users', 'corpora', 'documents']
def setUp(self):
self.user = User.objects.get(username="user")
self.fp = StringIO("Content")
self.fp.name = "document.txt"
def test_requires_login(self):
response = self.client.get(reverse('document-list'))
self.assertEqual(response.status_code, 403)
def test_only_lists_documents_that_belong_to_the_authenticated_user(self):
self.client.login(username="user", password="user")
response = self.client.get(reverse('document-list'))
self.assertEqual(response.status_code, 200)
expected_data = Document.objects.filter(
owner=User.objects.get(username="user"))
object_list = response.renderer_context['view'].get_queryset()
self.assertEqual(list(expected_data), list(object_list))
@patch('pypln.web.core.views.create_pipeline_from_document')
def test_create_new_document(self, create_pipelines):
self.assertEqual(len(self.user.document_set.all()), 1)
self.client.login(username="user", password="user")
corpus = self.user.corpus_set.all()[0]
data = {"corpus": rest_framework_reverse('corpus-detail',
kwargs={'pk': corpus.id}), "blob": self.fp}
response = self.client.post(reverse('document-list'), data)
self.assertEqual(response.status_code, 201)
self.assertEqual(len(self.user.document_set.all()), 2)
@patch('pypln.web.core.views.create_pipeline_from_document')
def test_cant_create_document_for_another_user(self, create_pipeline):
self.client.login(username="user", password="user")
corpus = self.user.corpus_set.all()[0]
corpus_url = rest_framework_reverse('corpus-detail', kwargs={'pk': corpus.id})
data = {"corpus": corpus_url, "blob": self.fp, "owner": 1}
response = self.client.post(reverse('document-list'), data)
self.assertEqual(response.status_code, 201)
document = self.user.document_set.all()[1]
self.assertEqual(document.owner, self.user)
def test_cant_create_document_for_inexistent_corpus(self):
self.client.login(username="user", password="user")
corpus_url = rest_framework_reverse('corpus-detail', kwargs={'pk': 9999})
data = {"corpus": corpus_url, "blob": self.fp}
response = self.client.post(reverse('document-list'), data)
self.assertEqual(response.status_code, 400)
@patch('pypln.web.core.views.create_pipeline_from_document')
def test_cant_create_document_in_another_users_corpus(self, create_pipelines):
self.client.login(username="user", password="user")
# We'll try to associate this document to a corpus that belongs to
# 'admin'
corpus = Corpus.objects.filter(owner__username="admin")[0]
corpus_url = rest_framework_reverse('corpus-detail', kwargs={'pk': corpus.id})
data = {"corpus": corpus_url, "blob": self.fp}
response = self.client.post(reverse('document-list'), data)
self.assertEqual(response.status_code, 400)
@patch('pypln.web.backend_adapter.pipelines.create_pipeline')
def test_creating_a_document_should_create_a_pipeline_for_it(self, create_pipeline):
self.assertEqual(len(self.user.document_set.all()), 1)
self.client.login(username="user", password="user")
corpus = self.user.corpus_set.all()[0]
data = {"corpus": rest_framework_reverse('corpus-detail',
kwargs={'pk': corpus.id}), "blob": self.fp}
response = self.client.post(reverse('document-list'), data)
self.assertEqual(response.status_code, 201)
self.assertTrue(create_pipeline.called)
doc_id = int(response.data['url'].split('/')[-2])
document = Document.objects.get(pk=doc_id)
pipeline_data = {"_id": str(document.blob.file._id), "id": document.id}
create_pipeline.assert_called_with(pipeline_data)
class DocumentDetailViewTest(TestWithMongo):
fixtures = ['users', 'corpora', 'documents']
def setUp(self):
self.user = User.objects.get(username="user")
self.fp = StringIO("Content")
self.fp.name = "document.txt"
def _get_corpus_url(self, corpus_id):
return rest_framework_reverse('corpus-detail',
kwargs={'pk': corpus_id})
def test_requires_login(self):
document = Document.objects.filter(owner__username='user')[0]
response = self.client.get(reverse('document-detail',
kwargs={'pk': document.id}))
self.assertEqual(response.status_code, 403)
def test_shows_document_correctly(self):
self.client.login(username="user", password="user")
document = Document.objects.filter(owner__username="user")[0]
response = self.client.get(reverse('document-detail',
kwargs={'pk': document.id}))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.renderer_context['view'].get_object(), document)
def test_returns_404_for_inexistent_document(self):
self.client.login(username="user", password="user")
response = self.client.get(reverse('document-detail',
kwargs={'pk': 9999}))
self.assertEqual(response.status_code, 404)
def test_returns_404_if_user_is_not_the_owner_of_the_document(self):
self.client.login(username="user", password="user")
document = Document.objects.filter(owner__username="admin")[0]
response = self.client.get(reverse('document-detail',
kwargs={'pk': document.id}))
self.assertEqual(response.status_code, 404)
@patch('pypln.web.backend_adapter.pipelines.create_pipeline')
def test_edit_document(self, create_pipeline):
self.client.login(username="user", password="user")
document = self.user.document_set.all()[0]
new_corpus = Corpus.objects.create(name="New corpus",
description="", owner=self.user)
data = encode_multipart(BOUNDARY, {"corpus": self._get_corpus_url(
new_corpus.id), "blob": self.fp})
response = self.client.put(reverse('document-detail',
kwargs={'pk': document.id}), data, content_type=MULTIPART_CONTENT)
self.assertEqual(response.status_code, 200)
updated_document = Document.objects.get(id=document.id)
self.assertEqual(updated_document.corpus, new_corpus)
def test_cant_edit_other_peoples_documents(self):
self.client.login(username="user", password="user")
document = Document.objects.filter(owner__username="admin")[0]
data = encode_multipart(BOUNDARY,
{"corpus": self._get_corpus_url(document.corpus.id),
"blob": self.fp})
response = self.client.put(reverse('document-detail',
kwargs={'pk': document.id}), data, content_type=MULTIPART_CONTENT)
# Since this document belongs to another user, it's getting
# filtered out of the queryset in `view.get_queryset()` and it
# appears not to exist.
self.assertEqual(response.status_code, 404)
@patch('pypln.web.backend_adapter.pipelines.create_pipeline')
def test_cant_change_the_owner_of_a_document(self, create_pipeline):
self.client.login(username="user", password="user")
document = self.user.document_set.all()[0]
# We try to set 'admin' as the owner (id=1)
data = encode_multipart(BOUNDARY, {"blob": self.fp,
"corpus": self._get_corpus_url(document.corpus.id), "owner": 1})
response = self.client.put(reverse('document-detail',
kwargs={'pk': document.id}), data, content_type=MULTIPART_CONTENT)
self.assertEqual(response.status_code, 200)
# but the view sets the request user as the owner anyway
self.assertEqual(response.data["owner"], "user")
def test_delete_a_document(self):
self.client.login(username="user", password="user")
self.assertEqual(len(self.user.document_set.all()), 1)
document = self.user.document_set.all()[0]
response = self.client.delete(reverse('document-detail',
kwargs={'pk': document.id}))
self.assertEqual(response.status_code, 204)
self.assertEqual(len(self.user.document_set.all()), 0)
def test_cant_delete_other_peoples_documents(self):
self.client.login(username="user", password="user")
self.assertEqual(len(Corpus.objects.filter(owner__username="admin")), 1)
document = Document.objects.filter(owner__username="admin")[0]
response = self.client.delete(reverse('document-detail',
kwargs={'pk': document.id}))
self.assertEqual(response.status_code, 404)
self.assertEqual(len(Corpus.objects.filter(owner__username="admin")), 1)
@patch('pypln.web.backend_adapter.pipelines.create_pipeline')
def test_updating_a_document_should_create_a_pipeline_for_it(self, create_pipeline):
self.client.login(username="user", password="user")
document = self.user.document_set.all()[0]
corpus = self.user.corpus_set.all()[0]
        # We pass an explicit owner id here; the view should ignore it
data = encode_multipart(BOUNDARY, {"blob": self.fp,
"corpus": self._get_corpus_url(document.corpus.id), "owner": 2})
response = self.client.put(reverse('document-detail',
kwargs={'pk': document.id}), data, content_type=MULTIPART_CONTENT)
self.assertEqual(response.status_code, 200)
self.assertTrue(create_pipeline.called)
document = response.renderer_context['view'].get_object()
pipeline_data = {"_id": str(document.blob.file._id), "id": document.id}
create_pipeline.assert_called_with(pipeline_data)
| gpl-3.0 | 377,421,825,397,096,600 | 44.077551 | 88 | 0.662079 | false |
jotes/ansible | v2/ansible/plugins/connections/winrm.py | 29 | 11717 | # (c) 2014, Chris Church <[email protected]>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
import base64
import hashlib
import imp
import os
import re
import shlex
import traceback
import urlparse
from ansible import errors
from ansible import utils
from ansible.callbacks import vvv, vvvv, verbose
from ansible.runner.shell_plugins import powershell
try:
from winrm import Response
from winrm.exceptions import WinRMTransportError
from winrm.protocol import Protocol
except ImportError:
raise errors.AnsibleError("winrm is not installed")
_winrm_cache = {
# 'user:pwhash@host:port': <protocol instance>
}
def vvvvv(msg, host=None):
verbose(msg, host=host, caplevel=4)
class Connection(object):
'''WinRM connections over HTTP/HTTPS.'''
def __init__(self, runner, host, port, user, password, *args, **kwargs):
self.runner = runner
self.host = host
self.port = port
self.user = user
self.password = password
self.has_pipelining = False
self.default_shell = 'powershell'
self.default_suffixes = ['.ps1', '']
self.protocol = None
self.shell_id = None
self.delegate = None
def _winrm_connect(self):
'''
Establish a WinRM connection over HTTP/HTTPS.
'''
port = self.port or 5986
vvv("ESTABLISH WINRM CONNECTION FOR USER: %s on PORT %s TO %s" % \
(self.user, port, self.host), host=self.host)
netloc = '%s:%d' % (self.host, port)
cache_key = '%s:%s@%s:%d' % (self.user, hashlib.md5(self.password).hexdigest(), self.host, port)
if cache_key in _winrm_cache:
vvvv('WINRM REUSE EXISTING CONNECTION: %s' % cache_key, host=self.host)
return _winrm_cache[cache_key]
transport_schemes = [('plaintext', 'https'), ('plaintext', 'http')] # FIXME: ssl/kerberos
if port == 5985:
transport_schemes = reversed(transport_schemes)
exc = None
for transport, scheme in transport_schemes:
endpoint = urlparse.urlunsplit((scheme, netloc, '/wsman', '', ''))
vvvv('WINRM CONNECT: transport=%s endpoint=%s' % (transport, endpoint),
host=self.host)
protocol = Protocol(endpoint, transport=transport,
username=self.user, password=self.password)
try:
protocol.send_message('')
_winrm_cache[cache_key] = protocol
return protocol
except WinRMTransportError, exc:
err_msg = str(exc)
if re.search(r'Operation\s+?timed\s+?out', err_msg, re.I):
raise errors.AnsibleError("the connection attempt timed out")
m = re.search(r'Code\s+?(\d{3})', err_msg)
if m:
code = int(m.groups()[0])
if code == 401:
raise errors.AnsibleError("the username/password specified for this server was incorrect")
elif code == 411:
_winrm_cache[cache_key] = protocol
return protocol
vvvv('WINRM CONNECTION ERROR: %s' % err_msg, host=self.host)
continue
if exc:
raise errors.AnsibleError(str(exc))
def _winrm_exec(self, command, args=(), from_exec=False):
if from_exec:
vvvv("WINRM EXEC %r %r" % (command, args), host=self.host)
else:
vvvvv("WINRM EXEC %r %r" % (command, args), host=self.host)
if not self.protocol:
self.protocol = self._winrm_connect()
if not self.shell_id:
self.shell_id = self.protocol.open_shell()
command_id = None
try:
command_id = self.protocol.run_command(self.shell_id, command, args)
response = Response(self.protocol.get_command_output(self.shell_id, command_id))
if from_exec:
vvvv('WINRM RESULT %r' % response, host=self.host)
else:
vvvvv('WINRM RESULT %r' % response, host=self.host)
vvvvv('WINRM STDOUT %s' % response.std_out, host=self.host)
vvvvv('WINRM STDERR %s' % response.std_err, host=self.host)
return response
finally:
if command_id:
self.protocol.cleanup_command(self.shell_id, command_id)
def connect(self):
if not self.protocol:
self.protocol = self._winrm_connect()
return self
def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable=None, in_data=None, su=None, su_user=None):
cmd = cmd.encode('utf-8')
cmd_parts = shlex.split(cmd, posix=False)
if '-EncodedCommand' in cmd_parts:
encoded_cmd = cmd_parts[cmd_parts.index('-EncodedCommand') + 1]
decoded_cmd = base64.b64decode(encoded_cmd)
vvv("EXEC %s" % decoded_cmd, host=self.host)
else:
vvv("EXEC %s" % cmd, host=self.host)
# For script/raw support.
if cmd_parts and cmd_parts[0].lower().endswith('.ps1'):
script = powershell._build_file_cmd(cmd_parts)
cmd_parts = powershell._encode_script(script, as_list=True)
try:
result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], from_exec=True)
except Exception, e:
traceback.print_exc()
raise errors.AnsibleError("failed to exec cmd %s" % cmd)
return (result.status_code, '', result.std_out.encode('utf-8'), result.std_err.encode('utf-8'))
def put_file(self, in_path, out_path):
vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
if not os.path.exists(in_path):
raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
with open(in_path) as in_file:
in_size = os.path.getsize(in_path)
script_template = '''
$s = [System.IO.File]::OpenWrite("%s");
[void]$s.Seek(%d, [System.IO.SeekOrigin]::Begin);
$b = [System.Convert]::FromBase64String("%s");
[void]$s.Write($b, 0, $b.length);
[void]$s.SetLength(%d);
[void]$s.Close();
'''
# Determine max size of data we can pass per command.
script = script_template % (powershell._escape(out_path), in_size, '', in_size)
cmd = powershell._encode_script(script)
# Encode script with no data, subtract its length from 8190 (max
# windows command length), divide by 2.67 (UTF16LE base64 command
# encoding), then by 1.35 again (data base64 encoding).
buffer_size = int(((8190 - len(cmd)) / 2.67) / 1.35)
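            # Worked example with a hypothetical empty encoded command of 390
            # characters: int(((8190 - 390) / 2.67) / 1.35) == 2163 raw bytes
            # per chunk.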
for offset in xrange(0, in_size, buffer_size):
try:
out_data = in_file.read(buffer_size)
if offset == 0:
if out_data.lower().startswith('#!powershell') and not out_path.lower().endswith('.ps1'):
out_path = out_path + '.ps1'
b64_data = base64.b64encode(out_data)
script = script_template % (powershell._escape(out_path), offset, b64_data, in_size)
vvvv("WINRM PUT %s to %s (offset=%d size=%d)" % (in_path, out_path, offset, len(out_data)), host=self.host)
cmd_parts = powershell._encode_script(script, as_list=True)
result = self._winrm_exec(cmd_parts[0], cmd_parts[1:])
if result.status_code != 0:
raise IOError(result.std_err.encode('utf-8'))
except Exception:
traceback.print_exc()
raise errors.AnsibleError("failed to transfer file to %s" % out_path)
def fetch_file(self, in_path, out_path):
out_path = out_path.replace('\\', '/')
vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
buffer_size = 2**20 # 1MB chunks
if not os.path.exists(os.path.dirname(out_path)):
os.makedirs(os.path.dirname(out_path))
out_file = None
try:
offset = 0
while True:
try:
script = '''
If (Test-Path -PathType Leaf "%(path)s")
{
$stream = [System.IO.File]::OpenRead("%(path)s");
$stream.Seek(%(offset)d, [System.IO.SeekOrigin]::Begin) | Out-Null;
$buffer = New-Object Byte[] %(buffer_size)d;
$bytesRead = $stream.Read($buffer, 0, %(buffer_size)d);
$bytes = $buffer[0..($bytesRead-1)];
[System.Convert]::ToBase64String($bytes);
$stream.Close() | Out-Null;
}
ElseIf (Test-Path -PathType Container "%(path)s")
{
Write-Host "[DIR]";
}
Else
{
Write-Error "%(path)s does not exist";
Exit 1;
}
''' % dict(buffer_size=buffer_size, path=powershell._escape(in_path), offset=offset)
vvvv("WINRM FETCH %s to %s (offset=%d)" % (in_path, out_path, offset), host=self.host)
cmd_parts = powershell._encode_script(script, as_list=True)
result = self._winrm_exec(cmd_parts[0], cmd_parts[1:])
if result.status_code != 0:
raise IOError(result.std_err.encode('utf-8'))
if result.std_out.strip() == '[DIR]':
data = None
else:
data = base64.b64decode(result.std_out.strip())
if data is None:
if not os.path.exists(out_path):
os.makedirs(out_path)
break
else:
if not out_file:
# If out_path is a directory and we're expecting a file, bail out now.
if os.path.isdir(out_path):
break
out_file = open(out_path, 'wb')
out_file.write(data)
if len(data) < buffer_size:
break
offset += len(data)
except Exception:
traceback.print_exc()
raise errors.AnsibleError("failed to transfer file to %s" % out_path)
finally:
if out_file:
out_file.close()
def close(self):
if self.protocol and self.shell_id:
self.protocol.close_shell(self.shell_id)
self.shell_id = None
| gpl-3.0 | 1,070,559,228,475,564,800 | 44.414729 | 128 | 0.530938 | false |
ahb0327/intellij-community | python/lib/Lib/readline.py | 82 | 5885 | from __future__ import with_statement
import os.path
import sys
from warnings import warn
import java.lang.reflect.Array
__all__ = ['add_history', 'clear_history', 'get_begidx', 'get_completer',
'get_completer_delims', 'get_current_history_length',
'get_endidx', 'get_history_item', 'get_history_length',
'get_line_buffer', 'insert_text', 'parse_and_bind',
'read_history_file', 'read_init_file', 'redisplay',
'remove_history_item', 'set_completer', 'set_completer_delims',
'set_history_length', 'set_pre_input_hook', 'set_startup_hook',
'write_history_file']
try:
_reader = sys._jy_interpreter.reader
except AttributeError:
raise ImportError("Cannot access JLineConsole")
_history_list = None
# The need for the following warnings should go away once we update
# JLine. Choosing ImportWarning as the closest warning to what is
# going on here, namely this is functionality not yet available on
# Jython.
class NotImplementedWarning(ImportWarning):
"""Not yet implemented by Jython"""
class SecurityWarning(ImportWarning):
"""Security manager prevents access to private field"""
def _setup_history():
# This is obviously not desirable, but avoids O(n) workarounds to
# modify the history (ipython uses the function
# remove_history_item to mutate the history relatively frequently)
global _history_list
history = _reader.history
try:
        history_list_field = history.getClass().getDeclaredField("history")
history_list_field.setAccessible(True)
_history_list = history_list_field.get(history)
except:
pass
_setup_history()
def parse_and_bind(string):
if string == "tab: complete":
try:
            keybindings_field = _reader.getClass().getDeclaredField("keybindings")
keybindings_field.setAccessible(True)
keybindings = keybindings_field.get(_reader)
COMPLETE = _reader.KEYMAP_NAMES.get('COMPLETE')
if java.lang.reflect.Array.getShort(keybindings, 9) != COMPLETE:
java.lang.reflect.Array.setShort(keybindings, 9, COMPLETE)
except:
warn("Cannot bind tab key to complete. You need to do this in a .jlinebindings.properties file instead", SecurityWarning, stacklevel=2)
else:
warn("Cannot bind key %s. You need to do this in a .jlinebindings.properties file instead" % (string,), NotImplementedWarning, stacklevel=2)
def get_line_buffer():
return str(_reader.cursorBuffer.buffer)
def insert_text(string):
_reader.putString(string)
def read_init_file(filename=None):
warn("read_init_file: %s" % (filename,), NotImplementedWarning, "module", 2)
def read_history_file(filename="~/.history"):
print "Reading history:", filename
expanded = os.path.expanduser(filename)
new_history = _reader.getHistory().getClass()()
# new_history.clear()
with open(expanded) as f:
for line in f:
new_history.addToHistory(line.rstrip())
_reader.history = new_history
_setup_history()
def write_history_file(filename="~/.history"):
expanded = os.path.expanduser(filename)
with open(expanded, 'w') as f:
for line in _reader.history.historyList:
f.write(line)
f.write("\n")
def clear_history():
_reader.history.clear()
def add_history(line):
_reader.addToHistory(line)
def get_history_length():
return _reader.history.maxSize
def set_history_length(length):
_reader.history.maxSize = length
def get_current_history_length():
return len(_reader.history.historyList)
def get_history_item(index):
return _reader.history.historyList[index]
def remove_history_item(pos):
if _history_list:
_history_list.remove(pos)
else:
warn("Cannot remove history item at position: %s" % (pos,), SecurityWarning, stacklevel=2)
def redisplay():
_reader.redrawLine()
def set_startup_hook(function=None):
sys._jy_interpreter.startupHook = function
def set_pre_input_hook(function=None):
warn("set_pre_input_hook %s" % (function,), NotImplementedWarning, stacklevel=2)
_completer_function = None
def set_completer(function=None):
"""set_completer([function]) -> None
Set or remove the completer function.
The function is called as function(text, state),
for state in 0, 1, 2, ..., until it returns a non-string.
It should return the next possible completion starting with 'text'."""
global _completer_function
_completer_function = function
def complete_handler(buffer, cursor, candidates):
start = _get_delimited(buffer, cursor)[0]
delimited = buffer[start:cursor]
for state in xrange(100): # TODO arbitrary, what's the number used by gnu readline?
completion = None
try:
completion = function(delimited, state)
except:
pass
if completion:
candidates.add(completion)
else:
break
return start
_reader.addCompletor(complete_handler)
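# A minimal completer usable with set_completer, as a sketch (the word list
# below is made up for illustration):
#   def _demo_completer(text, state):
#       matches = [w for w in ('print', 'property', 'pass') if w.startswith(text)]
#       return matches[state] if state < len(matches) else None
#   set_completer(_demo_completer)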
def get_completer():
return _completer_function
def _get_delimited(buffer, cursor):
start = cursor
for i in xrange(cursor-1, -1, -1):
if buffer[i] in _completer_delims:
break
start = i
return start, cursor
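# Example: with the default delimiters (which include ' '), a buffer of
# "print foo" and the cursor at the end of it, _get_delimited returns (6, 9),
# i.e. the span of the word "foo".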
def get_begidx():
return _get_delimited(str(_reader.cursorBuffer.buffer), _reader.cursorBuffer.cursor)[0]
def get_endidx():
return _get_delimited(str(_reader.cursorBuffer.buffer), _reader.cursorBuffer.cursor)[1]
def set_completer_delims(string):
global _completer_delims, _completer_delims_set
_completer_delims = string
_completer_delims_set = set(string)
def get_completer_delims():
return _completer_delims
set_completer_delims(' \t\n`~!@#$%^&*()-=+[{]}\\|;:\'",<>/?')
| apache-2.0 | 5,994,438,386,267,186,000 | 31.513812 | 148 | 0.662022 | false |
agrista/odoo-saas | openerp/report/render/rml2pdf/trml2pdf.py | 256 | 46679 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sys
import copy
import reportlab
import re
from reportlab.pdfgen import canvas
from reportlab import platypus
import utils
import color
import os
import logging
from lxml import etree
import base64
from distutils.version import LooseVersion
from reportlab.platypus.doctemplate import ActionFlowable
from openerp.tools.safe_eval import safe_eval as eval
from reportlab.lib.units import inch,cm,mm
from openerp.tools.misc import file_open
from reportlab.pdfbase import pdfmetrics
from reportlab.lib.pagesizes import A4, letter
try:
from cStringIO import StringIO
_hush_pyflakes = [ StringIO ]
except ImportError:
from StringIO import StringIO
_logger = logging.getLogger(__name__)
encoding = 'utf-8'
def select_fontname(fontname, default_fontname):
if fontname not in pdfmetrics.getRegisteredFontNames()\
or fontname not in pdfmetrics.standardFonts:
# let reportlab attempt to find it
try:
pdfmetrics.getFont(fontname)
except Exception:
addition = ""
if " " in fontname:
addition = ". Your font contains spaces which is not valid in RML."
_logger.warning('Could not locate font %s, substituting default: %s%s',
fontname, default_fontname, addition)
fontname = default_fontname
return fontname
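# For example, select_fontname('NoSuchFont', 'Helvetica') logs a warning and
# falls back to 'Helvetica', assuming no font named 'NoSuchFont' is registered.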
def _open_image(filename, path=None):
"""Attempt to open a binary file and return the descriptor
"""
if os.path.isfile(filename):
return open(filename, 'rb')
for p in (path or []):
if p and os.path.isabs(p):
fullpath = os.path.join(p, filename)
if os.path.isfile(fullpath):
return open(fullpath, 'rb')
try:
if p:
fullpath = os.path.join(p, filename)
else:
fullpath = filename
return file_open(fullpath)
except IOError:
pass
raise IOError("File %s cannot be found in image path" % filename)
class NumberedCanvas(canvas.Canvas):
def __init__(self, *args, **kwargs):
canvas.Canvas.__init__(self, *args, **kwargs)
self._saved_page_states = []
def showPage(self):
self._startPage()
def save(self):
"""add page info to each page (page x of y)"""
for state in self._saved_page_states:
self.__dict__.update(state)
self.draw_page_number()
canvas.Canvas.showPage(self)
canvas.Canvas.save(self)
def draw_page_number(self):
page_count = len(self._saved_page_states)
self.setFont("Helvetica", 8)
self.drawRightString((self._pagesize[0]-30), (self._pagesize[1]-40),
" %(this)i / %(total)i" % {
'this': self._pageNumber,
'total': page_count,
}
)
class PageCount(platypus.Flowable):
def __init__(self, story_count=0):
platypus.Flowable.__init__(self)
self.story_count = story_count
def draw(self):
self.canv.beginForm("pageCount%d" % self.story_count)
self.canv.setFont("Helvetica", utils.unit_get(str(8)))
self.canv.drawString(0, 0, str(self.canv.getPageNumber()))
self.canv.endForm()
class PageReset(platypus.Flowable):
def draw(self):
"""Flag to close current story page numbering and prepare for the next
should be executed after the rendering of the full story"""
self.canv._doPageReset = True
class _rml_styles(object,):
def __init__(self, nodes, localcontext):
self.localcontext = localcontext
self.styles = {}
self.styles_obj = {}
self.names = {}
self.table_styles = {}
self.default_style = reportlab.lib.styles.getSampleStyleSheet()
for node in nodes:
for style in node.findall('blockTableStyle'):
self.table_styles[style.get('id')] = self._table_style_get(style)
for style in node.findall('paraStyle'):
sname = style.get('name')
self.styles[sname] = self._para_style_update(style)
if self.default_style.has_key(sname):
for key, value in self.styles[sname].items():
setattr(self.default_style[sname], key, value)
else:
self.styles_obj[sname] = reportlab.lib.styles.ParagraphStyle(sname, self.default_style["Normal"], **self.styles[sname])
for variable in node.findall('initialize'):
for name in variable.findall('name'):
self.names[ name.get('id')] = name.get('value')
def _para_style_update(self, node):
data = {}
for attr in ['textColor', 'backColor', 'bulletColor', 'borderColor']:
if node.get(attr):
data[attr] = color.get(node.get(attr))
for attr in ['bulletFontName', 'fontName']:
if node.get(attr):
fontname= select_fontname(node.get(attr), None)
if fontname is not None:
data['fontName'] = fontname
for attr in ['bulletText']:
if node.get(attr):
data[attr] = node.get(attr)
for attr in ['fontSize', 'leftIndent', 'rightIndent', 'spaceBefore', 'spaceAfter',
'firstLineIndent', 'bulletIndent', 'bulletFontSize', 'leading',
'borderWidth','borderPadding','borderRadius']:
if node.get(attr):
data[attr] = utils.unit_get(node.get(attr))
if node.get('alignment'):
align = {
'right':reportlab.lib.enums.TA_RIGHT,
'center':reportlab.lib.enums.TA_CENTER,
'justify':reportlab.lib.enums.TA_JUSTIFY
}
data['alignment'] = align.get(node.get('alignment').lower(), reportlab.lib.enums.TA_LEFT)
data['splitLongWords'] = 0
return data
def _table_style_get(self, style_node):
styles = []
for node in style_node:
start = utils.tuple_int_get(node, 'start', (0,0) )
stop = utils.tuple_int_get(node, 'stop', (-1,-1) )
if node.tag=='blockValign':
styles.append(('VALIGN', start, stop, str(node.get('value'))))
elif node.tag=='blockFont':
styles.append(('FONT', start, stop, str(node.get('name'))))
elif node.tag=='blockTextColor':
styles.append(('TEXTCOLOR', start, stop, color.get(str(node.get('colorName')))))
elif node.tag=='blockLeading':
styles.append(('LEADING', start, stop, utils.unit_get(node.get('length'))))
elif node.tag=='blockAlignment':
styles.append(('ALIGNMENT', start, stop, str(node.get('value'))))
elif node.tag=='blockSpan':
styles.append(('SPAN', start, stop))
elif node.tag=='blockLeftPadding':
styles.append(('LEFTPADDING', start, stop, utils.unit_get(node.get('length'))))
elif node.tag=='blockRightPadding':
styles.append(('RIGHTPADDING', start, stop, utils.unit_get(node.get('length'))))
elif node.tag=='blockTopPadding':
styles.append(('TOPPADDING', start, stop, utils.unit_get(node.get('length'))))
elif node.tag=='blockBottomPadding':
styles.append(('BOTTOMPADDING', start, stop, utils.unit_get(node.get('length'))))
elif node.tag=='blockBackground':
styles.append(('BACKGROUND', start, stop, color.get(node.get('colorName'))))
if node.get('size'):
styles.append(('FONTSIZE', start, stop, utils.unit_get(node.get('size'))))
elif node.tag=='lineStyle':
kind = node.get('kind')
kind_list = [ 'GRID', 'BOX', 'OUTLINE', 'INNERGRID', 'LINEBELOW', 'LINEABOVE','LINEBEFORE', 'LINEAFTER' ]
assert kind in kind_list
thick = 1
if node.get('thickness'):
thick = float(node.get('thickness'))
styles.append((kind, start, stop, thick, color.get(node.get('colorName'))))
return platypus.tables.TableStyle(styles)
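    # Illustrative RML accepted by _table_style_get (tag and attribute names
    # come from the parser above; the id and the values are made up):
    #   <blockTableStyle id="demo">
    #     <blockValign value="TOP"/>
    #     <blockAlignment value="LEFT"/>
    #     <lineStyle kind="GRID" colorName="black" thickness="0.5"/>
    #   </blockTableStyle>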
def para_style_get(self, node):
style = False
sname = node.get('style')
if sname:
if sname in self.styles_obj:
style = self.styles_obj[sname]
else:
_logger.debug('Warning: style not found, %s - setting default!', node.get('style'))
if not style:
style = self.default_style['Normal']
para_update = self._para_style_update(node)
if para_update:
# update style only is necessary
style = copy.deepcopy(style)
style.__dict__.update(para_update)
return style
class _rml_doc(object):
def __init__(self, node, localcontext=None, images=None, path='.', title=None):
if images is None:
images = {}
if localcontext is None:
localcontext = {}
self.localcontext = localcontext
self.etree = node
self.filename = self.etree.get('filename')
self.images = images
self.path = path
self.title = title
def docinit(self, els):
from reportlab.lib.fonts import addMapping
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
for node in els:
for font in node.findall('registerFont'):
name = font.get('fontName').encode('ascii')
fname = font.get('fontFile').encode('ascii')
if name not in pdfmetrics._fonts:
pdfmetrics.registerFont(TTFont(name, fname))
#by default, we map the fontName to each style (bold, italic, bold and italic), so that
#if there isn't any font defined for one of these style (via a font family), the system
#will fallback on the normal font.
addMapping(name, 0, 0, name) #normal
addMapping(name, 0, 1, name) #italic
addMapping(name, 1, 0, name) #bold
addMapping(name, 1, 1, name) #italic and bold
#if registerFontFamily is defined, we register the mapping of the fontName to use for each style.
for font_family in node.findall('registerFontFamily'):
family_name = font_family.get('normal').encode('ascii')
if font_family.get('italic'):
addMapping(family_name, 0, 1, font_family.get('italic').encode('ascii'))
if font_family.get('bold'):
addMapping(family_name, 1, 0, font_family.get('bold').encode('ascii'))
if font_family.get('boldItalic'):
addMapping(family_name, 1, 1, font_family.get('boldItalic').encode('ascii'))
def setTTFontMapping(self,face, fontname, filename, mode='all'):
from reportlab.lib.fonts import addMapping
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
if mode:
mode = mode.lower()
if fontname not in pdfmetrics._fonts:
pdfmetrics.registerFont(TTFont(fontname, filename))
if mode == 'all':
addMapping(face, 0, 0, fontname) #normal
addMapping(face, 0, 1, fontname) #italic
addMapping(face, 1, 0, fontname) #bold
addMapping(face, 1, 1, fontname) #italic and bold
elif mode in ['italic', 'oblique']:
addMapping(face, 0, 1, fontname) #italic
elif mode == 'bold':
addMapping(face, 1, 0, fontname) #bold
elif mode in ('bolditalic', 'bold italic','boldoblique', 'bold oblique'):
addMapping(face, 1, 1, fontname) #italic and bold
else:
addMapping(face, 0, 0, fontname) #normal
def _textual_image(self, node):
rc = ''
for n in node:
            rc += (etree.tostring(n) or '') + (n.tail or '')
        return base64.decodestring(rc)
def _images(self, el):
result = {}
for node in el.findall('.//image'):
rc =( node.text or '')
result[node.get('name')] = base64.decodestring(rc)
return result
def render(self, out):
el = self.etree.findall('.//docinit')
if el:
self.docinit(el)
el = self.etree.findall('.//stylesheet')
self.styles = _rml_styles(el,self.localcontext)
el = self.etree.findall('.//images')
if el:
self.images.update( self._images(el[0]) )
el = self.etree.findall('.//template')
if len(el):
pt_obj = _rml_template(self.localcontext, out, el[0], self, images=self.images, path=self.path, title=self.title)
el = utils._child_get(self.etree, self, 'story')
pt_obj.render(el)
else:
self.canvas = canvas.Canvas(out)
pd = self.etree.find('pageDrawing')[0]
pd_obj = _rml_canvas(self.canvas, self.localcontext, None, self, self.images, path=self.path, title=self.title)
pd_obj.render(pd)
self.canvas.showPage()
self.canvas.save()
class _rml_canvas(object):
def __init__(self, canvas, localcontext, doc_tmpl=None, doc=None, images=None, path='.', title=None):
if images is None:
images = {}
self.localcontext = localcontext
self.canvas = canvas
self.styles = doc.styles
self.doc_tmpl = doc_tmpl
self.doc = doc
self.images = images
self.path = path
self.title = title
if self.title:
self.canvas.setTitle(self.title)
def _textual(self, node, x=0, y=0):
text = node.text and node.text.encode('utf-8') or ''
rc = utils._process_text(self, text)
for n in node:
if n.tag == 'seq':
from reportlab.lib.sequencer import getSequencer
seq = getSequencer()
rc += str(seq.next(n.get('id')))
if n.tag == 'pageCount':
if x or y:
self.canvas.translate(x,y)
self.canvas.doForm('pageCount%s' % (self.canvas._storyCount,))
if x or y:
self.canvas.translate(-x,-y)
if n.tag == 'pageNumber':
rc += str(self.canvas.getPageNumber())
rc += utils._process_text(self, n.tail)
return rc.replace('\n','')
def _drawString(self, node):
v = utils.attr_get(node, ['x','y'])
text=self._textual(node, **v)
text = utils.xml2str(text)
try:
self.canvas.drawString(text=text, **v)
except TypeError:
_logger.error("Bad RML: <drawString> tag requires attributes 'x' and 'y'!")
raise
def _drawCenteredString(self, node):
v = utils.attr_get(node, ['x','y'])
text=self._textual(node, **v)
text = utils.xml2str(text)
self.canvas.drawCentredString(text=text, **v)
def _drawRightString(self, node):
v = utils.attr_get(node, ['x','y'])
text=self._textual(node, **v)
text = utils.xml2str(text)
self.canvas.drawRightString(text=text, **v)
def _rect(self, node):
if node.get('round'):
self.canvas.roundRect(radius=utils.unit_get(node.get('round')), **utils.attr_get(node, ['x','y','width','height'], {'fill':'bool','stroke':'bool'}))
else:
self.canvas.rect(**utils.attr_get(node, ['x','y','width','height'], {'fill':'bool','stroke':'bool'}))
def _ellipse(self, node):
x1 = utils.unit_get(node.get('x'))
x2 = utils.unit_get(node.get('width'))
y1 = utils.unit_get(node.get('y'))
y2 = utils.unit_get(node.get('height'))
self.canvas.ellipse(x1,y1,x2,y2, **utils.attr_get(node, [], {'fill':'bool','stroke':'bool'}))
def _curves(self, node):
line_str = node.text.split()
while len(line_str)>7:
self.canvas.bezier(*[utils.unit_get(l) for l in line_str[0:8]])
line_str = line_str[8:]
def _lines(self, node):
line_str = node.text.split()
lines = []
while len(line_str)>3:
lines.append([utils.unit_get(l) for l in line_str[0:4]])
line_str = line_str[4:]
self.canvas.lines(lines)
def _grid(self, node):
xlist = [utils.unit_get(s) for s in node.get('xs').split(',')]
ylist = [utils.unit_get(s) for s in node.get('ys').split(',')]
self.canvas.grid(xlist, ylist)
def _translate(self, node):
dx = utils.unit_get(node.get('dx')) or 0
dy = utils.unit_get(node.get('dy')) or 0
self.canvas.translate(dx,dy)
def _circle(self, node):
self.canvas.circle(x_cen=utils.unit_get(node.get('x')), y_cen=utils.unit_get(node.get('y')), r=utils.unit_get(node.get('radius')), **utils.attr_get(node, [], {'fill':'bool','stroke':'bool'}))
def _place(self, node):
flows = _rml_flowable(self.doc, self.localcontext, images=self.images, path=self.path, title=self.title, canvas=self.canvas).render(node)
infos = utils.attr_get(node, ['x','y','width','height'])
infos['y']+=infos['height']
for flow in flows:
w,h = flow.wrap(infos['width'], infos['height'])
if w<=infos['width'] and h<=infos['height']:
infos['y']-=h
flow.drawOn(self.canvas,infos['x'],infos['y'])
infos['height']-=h
else:
raise ValueError("Not enough space")
def _line_mode(self, node):
ljoin = {'round':1, 'mitered':0, 'bevelled':2}
lcap = {'default':0, 'round':1, 'square':2}
if node.get('width'):
self.canvas.setLineWidth(utils.unit_get(node.get('width')))
if node.get('join'):
self.canvas.setLineJoin(ljoin[node.get('join')])
if node.get('cap'):
self.canvas.setLineCap(lcap[node.get('cap')])
        if node.get('miterLimit'):
            self.canvas.setMiterLimit(utils.unit_get(node.get('miterLimit')))
if node.get('dash'):
dashes = node.get('dash').split(',')
for x in range(len(dashes)):
dashes[x]=utils.unit_get(dashes[x])
            self.canvas.setDash(dashes)
def _image(self, node):
import urllib
import urlparse
from reportlab.lib.utils import ImageReader
nfile = node.get('file')
if not nfile:
if node.get('name'):
image_data = self.images[node.get('name')]
_logger.debug("Image %s used", node.get('name'))
s = StringIO(image_data)
else:
newtext = node.text
if self.localcontext:
res = utils._regex.findall(newtext)
for key in res:
newtext = eval(key, {}, self.localcontext) or ''
image_data = None
if newtext:
image_data = base64.decodestring(newtext)
if image_data:
s = StringIO(image_data)
else:
_logger.debug("No image data!")
return False
else:
if nfile in self.images:
s = StringIO(self.images[nfile])
else:
try:
up = urlparse.urlparse(str(nfile))
except ValueError:
up = False
if up and up.scheme:
# RFC: do we really want to open external URLs?
# Are we safe from cross-site scripting or attacks?
_logger.debug("Retrieve image from %s", nfile)
u = urllib.urlopen(str(nfile))
s = StringIO(u.read())
else:
_logger.debug("Open image file %s ", nfile)
s = _open_image(nfile, path=self.path)
try:
img = ImageReader(s)
(sx,sy) = img.getSize()
_logger.debug("Image is %dx%d", sx, sy)
args = { 'x': 0.0, 'y': 0.0, 'mask': 'auto'}
for tag in ('width','height','x','y'):
if node.get(tag):
args[tag] = utils.unit_get(node.get(tag))
if ('width' in args) and (not 'height' in args):
args['height'] = sy * args['width'] / sx
elif ('height' in args) and (not 'width' in args):
args['width'] = sx * args['height'] / sy
elif ('width' in args) and ('height' in args):
                if (float(args['width'])/args['height'])>(float(sx)/sy):
args['width'] = sx * args['height'] / sy
else:
args['height'] = sy * args['width'] / sx
self.canvas.drawImage(img, **args)
finally:
s.close()
# self.canvas._doc.SaveToFile(self.canvas._filename, self.canvas)
def _path(self, node):
self.path = self.canvas.beginPath()
self.path.moveTo(**utils.attr_get(node, ['x','y']))
for n in utils._child_get(node, self):
if not n.text :
if n.tag=='moveto':
vals = utils.text_get(n).split()
self.path.moveTo(utils.unit_get(vals[0]), utils.unit_get(vals[1]))
elif n.tag=='curvesto':
vals = utils.text_get(n).split()
while len(vals)>5:
pos=[]
while len(pos)<6:
pos.append(utils.unit_get(vals.pop(0)))
self.path.curveTo(*pos)
elif n.text:
data = n.text.split() # Not sure if I must merge all TEXT_NODE ?
while len(data)>1:
x = utils.unit_get(data.pop(0))
y = utils.unit_get(data.pop(0))
self.path.lineTo(x,y)
if (not node.get('close')) or utils.bool_get(node.get('close')):
self.path.close()
self.canvas.drawPath(self.path, **utils.attr_get(node, [], {'fill':'bool','stroke':'bool'}))
def setFont(self, node):
fontname = select_fontname(node.get('name'), self.canvas._fontname)
return self.canvas.setFont(fontname, utils.unit_get(node.get('size')))
def render(self, node):
tags = {
'drawCentredString': self._drawCenteredString,
'drawRightString': self._drawRightString,
'drawString': self._drawString,
'rect': self._rect,
'ellipse': self._ellipse,
'lines': self._lines,
'grid': self._grid,
'curves': self._curves,
'fill': lambda node: self.canvas.setFillColor(color.get(node.get('color'))),
'stroke': lambda node: self.canvas.setStrokeColor(color.get(node.get('color'))),
'setFont': self.setFont ,
'place': self._place,
'circle': self._circle,
'lineMode': self._line_mode,
'path': self._path,
'rotate': lambda node: self.canvas.rotate(float(node.get('degrees'))),
'translate': self._translate,
'image': self._image
}
for n in utils._child_get(node, self):
if n.tag in tags:
tags[n.tag](n)
class _rml_draw(object):
def __init__(self, localcontext, node, styles, images=None, path='.', title=None):
if images is None:
images = {}
self.localcontext = localcontext
self.node = node
self.styles = styles
self.canvas = None
self.images = images
self.path = path
self.canvas_title = title
def render(self, canvas, doc):
canvas.saveState()
cnv = _rml_canvas(canvas, self.localcontext, doc, self.styles, images=self.images, path=self.path, title=self.canvas_title)
cnv.render(self.node)
canvas.restoreState()
class _rml_Illustration(platypus.flowables.Flowable):
def __init__(self, node, localcontext, styles, self2):
self.localcontext = (localcontext or {}).copy()
self.node = node
self.styles = styles
self.width = utils.unit_get(node.get('width'))
self.height = utils.unit_get(node.get('height'))
self.self2 = self2
def wrap(self, *args):
return self.width, self.height
def draw(self):
drw = _rml_draw(self.localcontext ,self.node,self.styles, images=self.self2.images, path=self.self2.path, title=self.self2.title)
drw.render(self.canv, None)
# Workaround for issue #15: https://bitbucket.org/rptlab/reportlab/issue/15/infinite-pages-produced-when-splitting
original_pto_split = platypus.flowables.PTOContainer.split
def split(self, availWidth, availHeight):
res = original_pto_split(self, availWidth, availHeight)
if len(res) > 2 and len(self._content) > 0:
header = self._content[0]._ptoinfo.header
trailer = self._content[0]._ptoinfo.trailer
if isinstance(res[-2], platypus.flowables.UseUpSpace) and len(header + trailer) == len(res[:-2]):
return []
return res
platypus.flowables.PTOContainer.split = split
class _rml_flowable(object):
def __init__(self, doc, localcontext, images=None, path='.', title=None, canvas=None):
if images is None:
images = {}
self.localcontext = localcontext
self.doc = doc
self.styles = doc.styles
self.images = images
self.path = path
self.title = title
self.canvas = canvas
def _textual(self, node):
rc1 = utils._process_text(self, node.text or '')
for n in utils._child_get(node,self):
txt_n = copy.deepcopy(n)
for key in txt_n.attrib.keys():
if key in ('rml_except', 'rml_loop', 'rml_tag'):
del txt_n.attrib[key]
if not n.tag == 'bullet':
if n.tag == 'pageNumber':
txt_n.text = self.canvas and str(self.canvas.getPageNumber()) or ''
else:
txt_n.text = utils.xml2str(self._textual(n))
txt_n.tail = n.tail and utils.xml2str(utils._process_text(self, n.tail.replace('\n',''))) or ''
rc1 += etree.tostring(txt_n)
return rc1
def _table(self, node):
children = utils._child_get(node,self,'tr')
if not children:
return None
length = 0
colwidths = None
rowheights = None
data = []
styles = []
posy = 0
for tr in children:
paraStyle = None
if tr.get('style'):
st = copy.deepcopy(self.styles.table_styles[tr.get('style')])
for si in range(len(st._cmds)):
s = list(st._cmds[si])
s[1] = (s[1][0],posy)
s[2] = (s[2][0],posy)
st._cmds[si] = tuple(s)
styles.append(st)
if tr.get('paraStyle'):
paraStyle = self.styles.styles[tr.get('paraStyle')]
data2 = []
posx = 0
for td in utils._child_get(tr, self,'td'):
if td.get('style'):
st = copy.deepcopy(self.styles.table_styles[td.get('style')])
                    for si in range(len(st._cmds)):
                        s = list(st._cmds[si])
                        s[1] = (posx, posy)
                        s[2] = (posx, posy)
                        st._cmds[si] = tuple(s)
styles.append(st)
if td.get('paraStyle'):
# TODO: merge styles
paraStyle = self.styles.styles[td.get('paraStyle')]
posx += 1
flow = []
for n in utils._child_get(td, self):
if n.tag == etree.Comment:
n.text = ''
continue
fl = self._flowable(n, extra_style=paraStyle)
if isinstance(fl,list):
flow += fl
else:
flow.append( fl )
if not len(flow):
flow = self._textual(td)
data2.append( flow )
if len(data2)>length:
length=len(data2)
for ab in data:
while len(ab)<length:
ab.append('')
while len(data2)<length:
data2.append('')
data.append( data2 )
posy += 1
if node.get('colWidths'):
assert length == len(node.get('colWidths').split(','))
colwidths = [utils.unit_get(f.strip()) for f in node.get('colWidths').split(',')]
if node.get('rowHeights'):
rowheights = [utils.unit_get(f.strip()) for f in node.get('rowHeights').split(',')]
if len(rowheights) == 1:
rowheights = rowheights[0]
table = platypus.LongTable(data = data, colWidths=colwidths, rowHeights=rowheights, **(utils.attr_get(node, ['splitByRow'] ,{'repeatRows':'int','repeatCols':'int'})))
if node.get('style'):
table.setStyle(self.styles.table_styles[node.get('style')])
for s in styles:
table.setStyle(s)
return table
def _illustration(self, node):
return _rml_Illustration(node, self.localcontext, self.styles, self)
def _textual_image(self, node):
return base64.decodestring(node.text)
def _pto(self, node):
sub_story = []
pto_header = None
pto_trailer = None
for node in utils._child_get(node, self):
if node.tag == etree.Comment:
node.text = ''
continue
elif node.tag=='pto_header':
pto_header = self.render(node)
elif node.tag=='pto_trailer':
pto_trailer = self.render(node)
else:
flow = self._flowable(node)
if flow:
if isinstance(flow,list):
sub_story = sub_story + flow
else:
sub_story.append(flow)
return platypus.flowables.PTOContainer(sub_story, trailer=pto_trailer, header=pto_header)
def _flowable(self, node, extra_style=None):
if node.tag=='pto':
return self._pto(node)
if node.tag=='para':
style = self.styles.para_style_get(node)
if extra_style:
style.__dict__.update(extra_style)
text_node = self._textual(node).strip().replace('\n\n', '\n').replace('\n', '<br/>')
instance = platypus.Paragraph(text_node, style, **(utils.attr_get(node, [], {'bulletText':'str'})))
result = [instance]
if LooseVersion(reportlab.Version) > LooseVersion('3.0') and not instance.getPlainText().strip() and instance.text.strip():
result.append(platypus.Paragraph(' <br/>', style, **(utils.attr_get(node, [], {'bulletText': 'str'}))))
return result
elif node.tag=='barCode':
try:
from reportlab.graphics.barcode import code128
from reportlab.graphics.barcode import code39
from reportlab.graphics.barcode import code93
from reportlab.graphics.barcode import common
from reportlab.graphics.barcode import fourstate
from reportlab.graphics.barcode import usps
from reportlab.graphics.barcode import createBarcodeDrawing
except ImportError:
_logger.warning("Cannot use barcode renderers:", exc_info=True)
return None
args = utils.attr_get(node, [], {'ratio':'float','xdim':'unit','height':'unit','checksum':'int','quiet':'int','width':'unit','stop':'bool','bearers':'int','barWidth':'float','barHeight':'float'})
codes = {
'codabar': lambda x: common.Codabar(x, **args),
'code11': lambda x: common.Code11(x, **args),
'code128': lambda x: code128.Code128(str(x), **args),
'standard39': lambda x: code39.Standard39(str(x), **args),
'standard93': lambda x: code93.Standard93(str(x), **args),
'i2of5': lambda x: common.I2of5(x, **args),
'extended39': lambda x: code39.Extended39(str(x), **args),
'extended93': lambda x: code93.Extended93(str(x), **args),
'msi': lambda x: common.MSI(x, **args),
'fim': lambda x: usps.FIM(x, **args),
'postnet': lambda x: usps.POSTNET(x, **args),
'ean13': lambda x: createBarcodeDrawing('EAN13', value=str(x), **args),
'qrcode': lambda x: createBarcodeDrawing('QR', value=x, **args),
}
code = 'code128'
if node.get('code'):
code = node.get('code').lower()
return codes[code](self._textual(node))
elif node.tag=='name':
self.styles.names[ node.get('id')] = node.get('value')
return None
elif node.tag=='xpre':
style = self.styles.para_style_get(node)
return platypus.XPreformatted(self._textual(node), style, **(utils.attr_get(node, [], {'bulletText':'str','dedent':'int','frags':'int'})))
elif node.tag=='pre':
style = self.styles.para_style_get(node)
return platypus.Preformatted(self._textual(node), style, **(utils.attr_get(node, [], {'bulletText':'str','dedent':'int'})))
elif node.tag=='illustration':
return self._illustration(node)
elif node.tag=='blockTable':
return self._table(node)
elif node.tag=='title':
styles = reportlab.lib.styles.getSampleStyleSheet()
style = styles['Title']
return platypus.Paragraph(self._textual(node), style, **(utils.attr_get(node, [], {'bulletText':'str'})))
elif re.match('^h([1-9]+[0-9]*)$', (node.tag or '')):
styles = reportlab.lib.styles.getSampleStyleSheet()
style = styles['Heading'+str(node.tag[1:])]
return platypus.Paragraph(self._textual(node), style, **(utils.attr_get(node, [], {'bulletText':'str'})))
elif node.tag=='image':
image_data = False
if not node.get('file'):
if node.get('name'):
if node.get('name') in self.doc.images:
_logger.debug("Image %s read ", node.get('name'))
image_data = self.doc.images[node.get('name')].read()
else:
_logger.warning("Image %s not defined", node.get('name'))
return False
else:
import base64
newtext = node.text
if self.localcontext:
newtext = utils._process_text(self, node.text or '')
image_data = base64.decodestring(newtext)
if not image_data:
_logger.debug("No inline image data")
return False
image = StringIO(image_data)
else:
_logger.debug("Image get from file %s", node.get('file'))
image = _open_image(node.get('file'), path=self.doc.path)
return platypus.Image(image, mask=(250,255,250,255,250,255), **(utils.attr_get(node, ['width','height'])))
elif node.tag=='spacer':
if node.get('width'):
width = utils.unit_get(node.get('width'))
else:
width = utils.unit_get('1cm')
length = utils.unit_get(node.get('length'))
return platypus.Spacer(width=width, height=length)
elif node.tag=='section':
return self.render(node)
elif node.tag == 'pageNumberReset':
return PageReset()
elif node.tag in ('pageBreak', 'nextPage'):
return platypus.PageBreak()
elif node.tag=='condPageBreak':
return platypus.CondPageBreak(**(utils.attr_get(node, ['height'])))
elif node.tag=='setNextTemplate':
return platypus.NextPageTemplate(str(node.get('name')))
elif node.tag=='nextFrame':
return platypus.CondPageBreak(1000) # TODO: change the 1000 !
elif node.tag == 'setNextFrame':
from reportlab.platypus.doctemplate import NextFrameFlowable
return NextFrameFlowable(str(node.get('name')))
elif node.tag == 'currentFrame':
from reportlab.platypus.doctemplate import CurrentFrameFlowable
return CurrentFrameFlowable(str(node.get('name')))
elif node.tag == 'frameEnd':
return EndFrameFlowable()
elif node.tag == 'hr':
width_hr=node.get('width') or '100%'
color_hr=node.get('color') or 'black'
thickness_hr=node.get('thickness') or 1
lineCap_hr=node.get('lineCap') or 'round'
return platypus.flowables.HRFlowable(width=width_hr,color=color.get(color_hr),thickness=float(thickness_hr),lineCap=str(lineCap_hr))
else:
sys.stderr.write('Warning: flowable not yet implemented: %s !\n' % (node.tag,))
return None
def render(self, node_story):
def process_story(node_story):
sub_story = []
for node in utils._child_get(node_story, self):
if node.tag == etree.Comment:
node.text = ''
continue
flow = self._flowable(node)
if flow:
if isinstance(flow,list):
sub_story = sub_story + flow
else:
sub_story.append(flow)
return sub_story
return process_story(node_story)
class EndFrameFlowable(ActionFlowable):
def __init__(self,resume=0):
ActionFlowable.__init__(self,('frameEnd',resume))
class TinyDocTemplate(platypus.BaseDocTemplate):
def beforeDocument(self):
# Store some useful value directly inside canvas, so it's available
# on flowable drawing (needed for proper PageCount handling)
self.canv._doPageReset = False
self.canv._storyCount = 0
def ___handle_pageBegin(self):
self.page += 1
self.pageTemplate.beforeDrawPage(self.canv,self)
self.pageTemplate.checkPageSize(self.canv,self)
self.pageTemplate.onPage(self.canv,self)
for f in self.pageTemplate.frames: f._reset()
self.beforePage()
self._curPageFlowableCount = 0
if hasattr(self,'_nextFrameIndex'):
del self._nextFrameIndex
for f in self.pageTemplate.frames:
if f.id == 'first':
self.frame = f
break
self.handle_frameBegin()
def afterPage(self):
if isinstance(self.canv, NumberedCanvas):
# save current page states before eventual reset
self.canv._saved_page_states.append(dict(self.canv.__dict__))
if self.canv._doPageReset:
# Following a <pageReset/> tag:
# - we reset page number to 0
# - we add an new PageCount flowable (relative to the current
# story number), but not for NumeredCanvas at is handle page
# count itself)
# NOTE: _rml_template render() method add a PageReset flowable at end
# of each story, so we're sure to pass here at least once per story.
if not isinstance(self.canv, NumberedCanvas):
self.handle_flowable([ PageCount(story_count=self.canv._storyCount) ])
self.canv._pageCount = self.page
self.page = 0
self.canv._flag = True
self.canv._pageNumber = 0
self.canv._doPageReset = False
self.canv._storyCount += 1
class _rml_template(object):
def __init__(self, localcontext, out, node, doc, images=None, path='.', title=None):
if images is None:
images = {}
if not localcontext:
localcontext={'internal_header':True}
self.localcontext = localcontext
self.images= images
self.path = path
self.title = title
pagesize_map = {'a4': A4,
'us_letter': letter
}
pageSize = A4
if self.localcontext.get('company'):
pageSize = pagesize_map.get(self.localcontext.get('company').rml_paper_format, A4)
if node.get('pageSize'):
ps = map(lambda x:x.strip(), node.get('pageSize').replace(')', '').replace('(', '').split(','))
pageSize = ( utils.unit_get(ps[0]),utils.unit_get(ps[1]) )
self.doc_tmpl = TinyDocTemplate(out, pagesize=pageSize, **utils.attr_get(node, ['leftMargin','rightMargin','topMargin','bottomMargin'], {'allowSplitting':'int','showBoundary':'bool','rotation':'int','title':'str','author':'str'}))
self.page_templates = []
self.styles = doc.styles
self.doc = doc
self.image=[]
pts = node.findall('pageTemplate')
for pt in pts:
frames = []
for frame_el in pt.findall('frame'):
frame = platypus.Frame( **(utils.attr_get(frame_el, ['x1','y1', 'width','height', 'leftPadding', 'rightPadding', 'bottomPadding', 'topPadding'], {'id':'str', 'showBoundary':'bool'})) )
if utils.attr_get(frame_el, ['last']):
frame.lastFrame = True
frames.append( frame )
try :
gr = pt.findall('pageGraphics')\
or pt[1].findall('pageGraphics')
except Exception: # FIXME: be even more specific, perhaps?
gr=''
if len(gr):
# self.image=[ n for n in utils._child_get(gr[0], self) if n.tag=='image' or not self.localcontext]
drw = _rml_draw(self.localcontext,gr[0], self.doc, images=images, path=self.path, title=self.title)
self.page_templates.append( platypus.PageTemplate(frames=frames, onPage=drw.render, **utils.attr_get(pt, [], {'id':'str'}) ))
else:
drw = _rml_draw(self.localcontext,node,self.doc,title=self.title)
self.page_templates.append( platypus.PageTemplate(frames=frames,onPage=drw.render, **utils.attr_get(pt, [], {'id':'str'}) ))
self.doc_tmpl.addPageTemplates(self.page_templates)
def render(self, node_stories):
if self.localcontext and not self.localcontext.get('internal_header',False):
del self.localcontext['internal_header']
fis = []
r = _rml_flowable(self.doc,self.localcontext, images=self.images, path=self.path, title=self.title, canvas=None)
story_cnt = 0
for node_story in node_stories:
if story_cnt > 0:
fis.append(platypus.PageBreak())
fis += r.render(node_story)
# end of story numbering computation
fis.append(PageReset())
story_cnt += 1
try:
if self.localcontext and self.localcontext.get('internal_header',False):
self.doc_tmpl.afterFlowable(fis)
self.doc_tmpl.build(fis,canvasmaker=NumberedCanvas)
else:
self.doc_tmpl.build(fis)
except platypus.doctemplate.LayoutError, e:
e.name = 'Print Error'
e.value = 'The document you are trying to print contains a table row that does not fit on one page. Please try to split it in smaller rows or contact your administrator.'
raise
def parseNode(rml, localcontext=None, fout=None, images=None, path='.', title=None):
node = etree.XML(rml)
r = _rml_doc(node, localcontext, images, path, title=title)
#try to override some font mappings
try:
from customfonts import SetCustomFonts
SetCustomFonts(r)
except ImportError:
# means there is no custom fonts mapping in this system.
pass
except Exception:
_logger.warning('Cannot set font mapping', exc_info=True)
fp = StringIO()
r.render(fp)
return fp.getvalue()
def parseString(rml, localcontext=None, fout=None, images=None, path='.', title=None):
node = etree.XML(rml)
r = _rml_doc(node, localcontext, images, path, title=title)
#try to override some font mappings
try:
from customfonts import SetCustomFonts
SetCustomFonts(r)
except Exception:
pass
if fout:
fp = file(fout,'wb')
r.render(fp)
fp.close()
return fout
else:
fp = StringIO()
r.render(fp)
return fp.getvalue()
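# Example usage of parseString (illustrative sketch; assumes 'report.rml'
# holds a valid RML document for this engine):
#   pdf_bytes = parseString(file('report.rml', 'r').read())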
def trml2pdf_help():
print 'Usage: trml2pdf input.rml >output.pdf'
print 'Render the standard input (RML) and output a PDF file'
sys.exit(0)
if __name__=="__main__":
if len(sys.argv)>1:
if sys.argv[1]=='--help':
trml2pdf_help()
print parseString(file(sys.argv[1], 'r').read()),
else:
print 'Usage: trml2pdf input.rml >output.pdf'
print 'Try \'trml2pdf --help\' for more information.'
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 4,453,524,872,520,911,400 | 41.864096 | 238 | 0.546434 | false |
wangyum/tensorflow | tensorflow/contrib/distributions/python/kernel_tests/gamma_test.py | 1 | 14770 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import special
from scipy import stats
from tensorflow.contrib.distributions.python.ops import gamma as gamma_lib
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.platform import test
class GammaTest(test.TestCase):
def testGammaShape(self):
with self.test_session():
alpha = constant_op.constant([3.0] * 5)
beta = constant_op.constant(11.0)
gamma = gamma_lib.Gamma(concentration=alpha, rate=beta)
self.assertEqual(gamma.batch_shape_tensor().eval(), (5,))
self.assertEqual(gamma.batch_shape, tensor_shape.TensorShape([5]))
self.assertAllEqual(gamma.event_shape_tensor().eval(), [])
self.assertEqual(gamma.event_shape, tensor_shape.TensorShape([]))
def testGammaLogPDF(self):
with self.test_session():
batch_size = 6
alpha = constant_op.constant([2.0] * batch_size)
beta = constant_op.constant([3.0] * batch_size)
alpha_v = 2.0
beta_v = 3.0
x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)
gamma = gamma_lib.Gamma(concentration=alpha, rate=beta)
expected_log_pdf = stats.gamma.logpdf(x, alpha_v, scale=1 / beta_v)
log_pdf = gamma.log_prob(x)
self.assertEqual(log_pdf.get_shape(), (6,))
self.assertAllClose(log_pdf.eval(), expected_log_pdf)
pdf = gamma.prob(x)
self.assertEqual(pdf.get_shape(), (6,))
self.assertAllClose(pdf.eval(), np.exp(expected_log_pdf))
def testGammaLogPDFMultidimensional(self):
with self.test_session():
batch_size = 6
alpha = constant_op.constant([[2.0, 4.0]] * batch_size)
beta = constant_op.constant([[3.0, 4.0]] * batch_size)
alpha_v = np.array([2.0, 4.0])
beta_v = np.array([3.0, 4.0])
x = np.array([[2.5, 2.5, 4.0, 0.1, 1.0, 2.0]], dtype=np.float32).T
gamma = gamma_lib.Gamma(concentration=alpha, rate=beta)
expected_log_pdf = stats.gamma.logpdf(x, alpha_v, scale=1 / beta_v)
log_pdf = gamma.log_prob(x)
log_pdf_values = log_pdf.eval()
self.assertEqual(log_pdf.get_shape(), (6, 2))
self.assertAllClose(log_pdf_values, expected_log_pdf)
pdf = gamma.prob(x)
pdf_values = pdf.eval()
self.assertEqual(pdf.get_shape(), (6, 2))
self.assertAllClose(pdf_values, np.exp(expected_log_pdf))
def testGammaLogPDFMultidimensionalBroadcasting(self):
with self.test_session():
batch_size = 6
alpha = constant_op.constant([[2.0, 4.0]] * batch_size)
beta = constant_op.constant(3.0)
alpha_v = np.array([2.0, 4.0])
beta_v = 3.0
x = np.array([[2.5, 2.5, 4.0, 0.1, 1.0, 2.0]], dtype=np.float32).T
gamma = gamma_lib.Gamma(concentration=alpha, rate=beta)
expected_log_pdf = stats.gamma.logpdf(x, alpha_v, scale=1 / beta_v)
log_pdf = gamma.log_prob(x)
log_pdf_values = log_pdf.eval()
self.assertEqual(log_pdf.get_shape(), (6, 2))
self.assertAllClose(log_pdf_values, expected_log_pdf)
pdf = gamma.prob(x)
pdf_values = pdf.eval()
self.assertEqual(pdf.get_shape(), (6, 2))
self.assertAllClose(pdf_values, np.exp(expected_log_pdf))
def testGammaCDF(self):
with self.test_session():
batch_size = 6
alpha = constant_op.constant([2.0] * batch_size)
beta = constant_op.constant([3.0] * batch_size)
alpha_v = 2.0
beta_v = 3.0
x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)
gamma = gamma_lib.Gamma(concentration=alpha, rate=beta)
expected_cdf = stats.gamma.cdf(x, alpha_v, scale=1 / beta_v)
cdf = gamma.cdf(x)
self.assertEqual(cdf.get_shape(), (6,))
self.assertAllClose(cdf.eval(), expected_cdf)
def testGammaMean(self):
with self.test_session():
alpha_v = np.array([1.0, 3.0, 2.5])
beta_v = np.array([1.0, 4.0, 5.0])
gamma = gamma_lib.Gamma(concentration=alpha_v, rate=beta_v)
expected_means = stats.gamma.mean(alpha_v, scale=1 / beta_v)
self.assertEqual(gamma.mean().get_shape(), (3,))
self.assertAllClose(gamma.mean().eval(), expected_means)
def testGammaModeAllowNanStatsIsFalseWorksWhenAllBatchMembersAreDefined(self):
with self.test_session():
alpha_v = np.array([5.5, 3.0, 2.5])
beta_v = np.array([1.0, 4.0, 5.0])
gamma = gamma_lib.Gamma(concentration=alpha_v, rate=beta_v)
expected_modes = (alpha_v - 1) / beta_v
self.assertEqual(gamma.mode().get_shape(), (3,))
self.assertAllClose(gamma.mode().eval(), expected_modes)
def testGammaModeAllowNanStatsFalseRaisesForUndefinedBatchMembers(self):
with self.test_session():
# Mode will not be defined for the first entry.
alpha_v = np.array([0.5, 3.0, 2.5])
beta_v = np.array([1.0, 4.0, 5.0])
gamma = gamma_lib.Gamma(concentration=alpha_v,
rate=beta_v,
allow_nan_stats=False)
with self.assertRaisesOpError("x < y"):
gamma.mode().eval()
def testGammaModeAllowNanStatsIsTrueReturnsNaNforUndefinedBatchMembers(self):
with self.test_session():
# Mode will not be defined for the first entry.
alpha_v = np.array([0.5, 3.0, 2.5])
beta_v = np.array([1.0, 4.0, 5.0])
gamma = gamma_lib.Gamma(concentration=alpha_v,
rate=beta_v,
allow_nan_stats=True)
expected_modes = (alpha_v - 1) / beta_v
expected_modes[0] = np.nan
self.assertEqual(gamma.mode().get_shape(), (3,))
self.assertAllClose(gamma.mode().eval(), expected_modes)
def testGammaVariance(self):
with self.test_session():
alpha_v = np.array([1.0, 3.0, 2.5])
beta_v = np.array([1.0, 4.0, 5.0])
gamma = gamma_lib.Gamma(concentration=alpha_v, rate=beta_v)
expected_variances = stats.gamma.var(alpha_v, scale=1 / beta_v)
self.assertEqual(gamma.variance().get_shape(), (3,))
self.assertAllClose(gamma.variance().eval(), expected_variances)
def testGammaStd(self):
with self.test_session():
alpha_v = np.array([1.0, 3.0, 2.5])
beta_v = np.array([1.0, 4.0, 5.0])
gamma = gamma_lib.Gamma(concentration=alpha_v, rate=beta_v)
expected_stddev = stats.gamma.std(alpha_v, scale=1. / beta_v)
self.assertEqual(gamma.stddev().get_shape(), (3,))
self.assertAllClose(gamma.stddev().eval(), expected_stddev)
def testGammaEntropy(self):
with self.test_session():
alpha_v = np.array([1.0, 3.0, 2.5])
beta_v = np.array([1.0, 4.0, 5.0])
expected_entropy = stats.gamma.entropy(alpha_v, scale=1 / beta_v)
gamma = gamma_lib.Gamma(concentration=alpha_v, rate=beta_v)
self.assertEqual(gamma.entropy().get_shape(), (3,))
self.assertAllClose(gamma.entropy().eval(), expected_entropy)
def testGammaSampleSmallAlpha(self):
with session.Session():
alpha_v = 0.05
beta_v = 1.0
alpha = constant_op.constant(alpha_v)
beta = constant_op.constant(beta_v)
n = 100000
gamma = gamma_lib.Gamma(concentration=alpha, rate=beta)
samples = gamma.sample(n, seed=137)
sample_values = samples.eval()
self.assertEqual(samples.get_shape(), (n,))
self.assertEqual(sample_values.shape, (n,))
self.assertAllClose(
sample_values.mean(),
stats.gamma.mean(
alpha_v, scale=1 / beta_v),
atol=.01)
self.assertAllClose(
sample_values.var(),
stats.gamma.var(alpha_v, scale=1 / beta_v),
atol=.15)
self.assertTrue(self._kstest(alpha_v, beta_v, sample_values))
def testGammaSample(self):
with session.Session():
alpha_v = 4.0
beta_v = 3.0
alpha = constant_op.constant(alpha_v)
beta = constant_op.constant(beta_v)
n = 100000
gamma = gamma_lib.Gamma(concentration=alpha, rate=beta)
samples = gamma.sample(n, seed=137)
sample_values = samples.eval()
self.assertEqual(samples.get_shape(), (n,))
self.assertEqual(sample_values.shape, (n,))
self.assertAllClose(
sample_values.mean(),
stats.gamma.mean(
alpha_v, scale=1 / beta_v),
atol=.01)
self.assertAllClose(
sample_values.var(),
stats.gamma.var(alpha_v, scale=1 / beta_v),
atol=.15)
self.assertTrue(self._kstest(alpha_v, beta_v, sample_values))
def testGammaSampleMultiDimensional(self):
with session.Session():
alpha_v = np.array([np.arange(1, 101, dtype=np.float32)]) # 1 x 100
beta_v = np.array([np.arange(1, 11, dtype=np.float32)]).T # 10 x 1
gamma = gamma_lib.Gamma(concentration=alpha_v, rate=beta_v)
n = 10000
samples = gamma.sample(n, seed=137)
sample_values = samples.eval()
self.assertEqual(samples.get_shape(), (n, 10, 100))
self.assertEqual(sample_values.shape, (n, 10, 100))
zeros = np.zeros_like(alpha_v + beta_v) # 10 x 100
alpha_bc = alpha_v + zeros
beta_bc = beta_v + zeros
self.assertAllClose(
sample_values.mean(axis=0),
stats.gamma.mean(
alpha_bc, scale=1 / beta_bc),
rtol=.035)
self.assertAllClose(
sample_values.var(axis=0),
stats.gamma.var(alpha_bc, scale=1 / beta_bc),
atol=4.5)
fails = 0
trials = 0
for ai, a in enumerate(np.reshape(alpha_v, [-1])):
for bi, b in enumerate(np.reshape(beta_v, [-1])):
s = sample_values[:, bi, ai]
trials += 1
fails += 0 if self._kstest(a, b, s) else 1
self.assertLess(fails, trials * 0.03)
def _kstest(self, alpha, beta, samples):
# Uses the Kolmogorov-Smirnov test for goodness of fit.
ks, _ = stats.kstest(samples, stats.gamma(alpha, scale=1 / beta).cdf)
# Return True when the test passes.
return ks < 0.02
def testGammaPdfOfSampleMultiDims(self):
with session.Session() as sess:
gamma = gamma_lib.Gamma(concentration=[7., 11.], rate=[[5.], [6.]])
num = 50000
samples = gamma.sample(num, seed=137)
pdfs = gamma.prob(samples)
sample_vals, pdf_vals = sess.run([samples, pdfs])
self.assertEqual(samples.get_shape(), (num, 2, 2))
self.assertEqual(pdfs.get_shape(), (num, 2, 2))
self.assertAllClose(
stats.gamma.mean(
[[7., 11.], [7., 11.]], scale=1 / np.array([[5., 5.], [6., 6.]])),
sample_vals.mean(axis=0),
atol=.1)
self.assertAllClose(
stats.gamma.var([[7., 11.], [7., 11.]],
scale=1 / np.array([[5., 5.], [6., 6.]])),
sample_vals.var(axis=0),
atol=.1)
self._assertIntegral(sample_vals[:, 0, 0], pdf_vals[:, 0, 0], err=0.02)
self._assertIntegral(sample_vals[:, 0, 1], pdf_vals[:, 0, 1], err=0.02)
self._assertIntegral(sample_vals[:, 1, 0], pdf_vals[:, 1, 0], err=0.02)
self._assertIntegral(sample_vals[:, 1, 1], pdf_vals[:, 1, 1], err=0.02)
def _assertIntegral(self, sample_vals, pdf_vals, err=1e-3):
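    # Sanity check via the trapezoid rule: the pdf evaluated at the sorted
    # samples should numerically integrate to ~1 over the sampled range.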
s_p = zip(sample_vals, pdf_vals)
prev = (0, 0)
total = 0
for k in sorted(s_p, key=lambda x: x[0]):
pair_pdf = (k[1] + prev[1]) / 2
total += (k[0] - prev[0]) * pair_pdf
prev = k
self.assertNear(1., total, err=err)
def testGammaNonPositiveInitializationParamsRaises(self):
with self.test_session():
alpha_v = constant_op.constant(0.0, name="alpha")
beta_v = constant_op.constant(1.0, name="beta")
gamma = gamma_lib.Gamma(concentration=alpha_v,
rate=beta_v,
validate_args=True)
with self.assertRaisesOpError("alpha"):
gamma.mean().eval()
alpha_v = constant_op.constant(1.0, name="alpha")
beta_v = constant_op.constant(0.0, name="beta")
gamma = gamma_lib.Gamma(concentration=alpha_v,
rate=beta_v,
validate_args=True)
with self.assertRaisesOpError("beta"):
gamma.mean().eval()
def testGammaWithSoftplusConcentrationRate(self):
with self.test_session():
alpha_v = constant_op.constant([0.0, -2.1], name="alpha")
beta_v = constant_op.constant([1.0, -3.6], name="beta")
gamma = gamma_lib.GammaWithSoftplusConcentrationRate(
concentration=alpha_v, rate=beta_v)
self.assertAllEqual(nn_ops.softplus(alpha_v).eval(),
gamma.concentration.eval())
self.assertAllEqual(nn_ops.softplus(beta_v).eval(),
gamma.rate.eval())
def testGammaGammaKL(self):
alpha0 = np.array([3.])
beta0 = np.array([1., 2., 3., 1.5, 2.5, 3.5])
alpha1 = np.array([0.4])
beta1 = np.array([0.5, 1., 1.5, 2., 2.5, 3.])
# Build graph.
with self.test_session() as sess:
g0 = gamma_lib.Gamma(concentration=alpha0, rate=beta0)
g1 = gamma_lib.Gamma(concentration=alpha1, rate=beta1)
x = g0.sample(int(1e4), seed=0)
kl_sample = math_ops.reduce_mean(g0.log_prob(x) - g1.log_prob(x), 0)
kl_actual = kullback_leibler.kl_divergence(g0, g1)
# Execute graph.
[kl_sample_, kl_actual_] = sess.run([kl_sample, kl_actual])
kl_expected = ((alpha0 - alpha1) * special.digamma(alpha0)
+ special.gammaln(alpha1)
- special.gammaln(alpha0)
+ alpha1 * np.log(beta0)
- alpha1 * np.log(beta1)
+ alpha0 * (beta1 / beta0 - 1.))
self.assertEqual(beta0.shape, kl_actual.get_shape())
self.assertAllClose(kl_expected, kl_actual_, atol=0., rtol=1e-6)
self.assertAllClose(kl_sample_, kl_actual_, atol=0., rtol=1e-2)
if __name__ == "__main__":
test.main()
| apache-2.0 | -814,847,510,712,591,200 | 39.355191 | 80 | 0.606567 | false |
rikirenz/inspire-next | tests/integration/test_detailed_records.py | 2 | 1956 | # -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2017 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
from __future__ import absolute_import, division, print_function
from invenio_records.models import RecordMetadata
from inspirehep.modules.migrator.models import InspireProdRecords
def test_all_records_were_loaded(app):
records = [record.json for record in RecordMetadata.query.all()]
expected = 42
result = len(records)
assert expected == result
def test_all_records_are_valid(app):
    # ``is False`` would compare Python object identity (a constant), not the
    # column value; SQLAlchemy needs ``is_(False)`` to emit the intended SQL.
    invalid = InspireProdRecords.query.filter(InspireProdRecords.valid.is_(False)).values(InspireProdRecords.recid)
recids = [el[0] for el in invalid]
assert recids == []
def test_all_records_are_there(app_client):
failed = []
for record in [record.json for record in RecordMetadata.query.all()]:
try:
absolute_url = record['self']['$ref']
relative_url = absolute_url.partition('api')[2]
response = app_client.get(relative_url)
assert response.status_code == 200
except Exception:
failed.append(record['control_number'])
assert failed == []
| gpl-3.0 | -4,749,621,418,016,953,000 | 32.152542 | 113 | 0.713701 | false |
gurneyalex/OpenUpgrade | addons/base_import/tests/test_cases.py | 101 | 13059 | # -*- encoding: utf-8 -*-
import unittest2
from openerp.tests.common import TransactionCase
from .. import models
ID_FIELD = {'id': 'id', 'name': 'id', 'string': "External ID", 'required': False, 'fields': []}
def make_field(name='value', string='unknown', required=False, fields=[]):
return [
ID_FIELD,
{'id': name, 'name': name, 'string': string, 'required': required, 'fields': fields},
]
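# Example (sketch): make_field(name='name', string='Name') returns ID_FIELD
# followed by the dict describing a non-required 'name' field, which is the
# shape get_fields() is expected to produce.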
class test_basic_fields(TransactionCase):
def get_fields(self, field):
return self.registry('base_import.import')\
.get_fields(self.cr, self.uid, 'base_import.tests.models.' + field)
def test_base(self):
""" A basic field is not required """
self.assertEqual(self.get_fields('char'), make_field())
def test_required(self):
""" Required fields should be flagged (so they can be fill-required) """
self.assertEqual(self.get_fields('char.required'), make_field(required=True))
def test_readonly(self):
""" Readonly fields should be filtered out"""
self.assertEqual(self.get_fields('char.readonly'), [ID_FIELD])
def test_readonly_states(self):
""" Readonly fields with states should not be filtered out"""
self.assertEqual(self.get_fields('char.states'), make_field())
def test_readonly_states_noreadonly(self):
""" Readonly fields with states having nothing to do with
readonly should still be filtered out"""
self.assertEqual(self.get_fields('char.noreadonly'), [ID_FIELD])
def test_readonly_states_stillreadonly(self):
""" Readonly fields with readonly states leaving them readonly
always... filtered out"""
self.assertEqual(self.get_fields('char.stillreadonly'), [ID_FIELD])
def test_m2o(self):
""" M2O fields should allow import of themselves (name_get),
their id and their xid"""
self.assertEqual(self.get_fields('m2o'), make_field(fields=[
{'id': 'value', 'name': 'id', 'string': 'External ID', 'required': False, 'fields': []},
{'id': 'value', 'name': '.id', 'string': 'Database ID', 'required': False, 'fields': []},
]))
def test_m2o_required(self):
""" If an m2o field is required, its three sub-fields are
required as well (the client has to handle that: requiredness
is id-based)
"""
self.assertEqual(self.get_fields('m2o.required'), make_field(required=True, fields=[
{'id': 'value', 'name': 'id', 'string': 'External ID', 'required': True, 'fields': []},
{'id': 'value', 'name': '.id', 'string': 'Database ID', 'required': True, 'fields': []},
]))
class test_o2m(TransactionCase):
def get_fields(self, field):
return self.registry('base_import.import')\
.get_fields(self.cr, self.uid, 'base_import.tests.models.' + field)
def test_shallow(self):
self.assertEqual(self.get_fields('o2m'), make_field(fields=[
{'id': 'id', 'name': 'id', 'string': 'External ID', 'required': False, 'fields': []},
# FIXME: should reverse field be ignored?
{'id': 'parent_id', 'name': 'parent_id', 'string': 'unknown', 'required': False, 'fields': [
{'id': 'parent_id', 'name': 'id', 'string': 'External ID', 'required': False, 'fields': []},
{'id': 'parent_id', 'name': '.id', 'string': 'Database ID', 'required': False, 'fields': []},
]},
{'id': 'value', 'name': 'value', 'string': 'unknown', 'required': False, 'fields': []},
]))
class test_match_headers_single(TransactionCase):
def test_match_by_name(self):
match = self.registry('base_import.import')._match_header(
'f0', [{'name': 'f0'}], {})
self.assertEqual(match, [{'name': 'f0'}])
def test_match_by_string(self):
match = self.registry('base_import.import')._match_header(
'some field', [{'name': 'bob', 'string': "Some Field"}], {})
self.assertEqual(match, [{'name': 'bob', 'string': "Some Field"}])
def test_nomatch(self):
match = self.registry('base_import.import')._match_header(
'should not be', [{'name': 'bob', 'string': "wheee"}], {})
self.assertEqual(match, [])
def test_recursive_match(self):
f = {
'name': 'f0',
'string': "My Field",
'fields': [
{'name': 'f0', 'string': "Sub field 0", 'fields': []},
{'name': 'f1', 'string': "Sub field 2", 'fields': []},
]
}
match = self.registry('base_import.import')._match_header(
'f0/f1', [f], {})
self.assertEqual(match, [f, f['fields'][1]])
def test_recursive_nomatch(self):
""" Match first level, fail to match second level
"""
f = {
'name': 'f0',
'string': "My Field",
'fields': [
{'name': 'f0', 'string': "Sub field 0", 'fields': []},
{'name': 'f1', 'string': "Sub field 2", 'fields': []},
]
}
match = self.registry('base_import.import')._match_header(
'f0/f2', [f], {})
self.assertEqual(match, [])
class test_match_headers_multiple(TransactionCase):
def test_noheaders(self):
self.assertEqual(
self.registry('base_import.import')._match_headers(
[], [], {}),
(None, None)
)
def test_nomatch(self):
self.assertEqual(
self.registry('base_import.import')._match_headers(
iter([
['foo', 'bar', 'baz', 'qux'],
['v1', 'v2', 'v3', 'v4'],
]),
[],
{'headers': True}),
(
['foo', 'bar', 'baz', 'qux'],
dict.fromkeys(range(4))
)
)
def test_mixed(self):
self.assertEqual(
self.registry('base_import.import')._match_headers(
iter(['foo bar baz qux/corge'.split()]),
[
{'name': 'bar', 'string': 'Bar'},
{'name': 'bob', 'string': 'Baz'},
{'name': 'qux', 'string': 'Qux', 'fields': [
{'name': 'corge', 'fields': []},
]}
],
{'headers': True}),
(['foo', 'bar', 'baz', 'qux/corge'], {
0: None,
1: ['bar'],
2: ['bob'],
3: ['qux', 'corge'],
})
)
class test_preview(TransactionCase):
def make_import(self):
Import = self.registry('base_import.import')
id = Import.create(self.cr, self.uid, {
'res_model': 'res.users',
'file': u"로그인,언어\nbob,1\n".encode('euc_kr'),
})
return Import, id
def test_encoding(self):
Import, id = self.make_import()
result = Import.parse_preview(self.cr, self.uid, id, {
'quoting': '"',
'separator': ',',
})
self.assertTrue('error' in result)
    def test_csv_errors_quoting(self):
Import, id = self.make_import()
result = Import.parse_preview(self.cr, self.uid, id, {
'quoting': 'foo',
'separator': ',',
'encoding': 'euc_kr',
})
self.assertTrue('error' in result)
    def test_csv_errors_separator(self):
Import, id = self.make_import()
result = Import.parse_preview(self.cr, self.uid, id, {
'quoting': '"',
'separator': 'bob',
'encoding': 'euc_kr',
})
self.assertTrue('error' in result)
def test_success(self):
Import = self.registry('base_import.import')
id = Import.create(self.cr, self.uid, {
'res_model': 'base_import.tests.models.preview',
'file': 'name,Some Value,Counter\n'
'foo,1,2\n'
'bar,3,4\n'
'qux,5,6\n'
})
result = Import.parse_preview(self.cr, self.uid, id, {
'quoting': '"',
'separator': ',',
'headers': True,
})
self.assertEqual(result['matches'], {0: ['name'], 1: ['somevalue'], 2: None})
self.assertEqual(result['headers'], ['name', 'Some Value', 'Counter'])
# Order depends on iteration order of fields_get
self.assertItemsEqual(result['fields'], [
{'id': 'id', 'name': 'id', 'string': 'External ID', 'required':False, 'fields': []},
{'id': 'name', 'name': 'name', 'string': 'Name', 'required':False, 'fields': []},
{'id': 'somevalue', 'name': 'somevalue', 'string': 'Some Value', 'required':True, 'fields': []},
{'id': 'othervalue', 'name': 'othervalue', 'string': 'Other Variable', 'required':False, 'fields': []},
])
self.assertEqual(result['preview'], [
['foo', '1', '2'],
['bar', '3', '4'],
['qux', '5', '6'],
])
# Ensure we only have the response fields we expect
self.assertItemsEqual(result.keys(), ['matches', 'headers', 'fields', 'preview'])
class test_convert_import_data(TransactionCase):
""" Tests conversion of base_import.import input into data which
can be fed to Model.import_data
"""
def test_all(self):
Import = self.registry('base_import.import')
id = Import.create(self.cr, self.uid, {
'res_model': 'base_import.tests.models.preview',
'file': 'name,Some Value,Counter\n'
'foo,1,2\n'
'bar,3,4\n'
'qux,5,6\n'
})
record = Import.browse(self.cr, self.uid, id)
data, fields = Import._convert_import_data(
record, ['name', 'somevalue', 'othervalue'],
{'quoting': '"', 'separator': ',', 'headers': True,})
self.assertItemsEqual(fields, ['name', 'somevalue', 'othervalue'])
self.assertItemsEqual(data, [
('foo', '1', '2'),
('bar', '3', '4'),
('qux', '5', '6'),
])
def test_filtered(self):
""" If ``False`` is provided as field mapping for a column,
that column should be removed from importable data
"""
Import = self.registry('base_import.import')
id = Import.create(self.cr, self.uid, {
'res_model': 'base_import.tests.models.preview',
'file': 'name,Some Value,Counter\n'
'foo,1,2\n'
'bar,3,4\n'
'qux,5,6\n'
})
record = Import.browse(self.cr, self.uid, id)
data, fields = Import._convert_import_data(
record, ['name', False, 'othervalue'],
{'quoting': '"', 'separator': ',', 'headers': True,})
self.assertItemsEqual(fields, ['name', 'othervalue'])
self.assertItemsEqual(data, [
('foo', '2'),
('bar', '4'),
('qux', '6'),
])
def test_norow(self):
""" If a row is composed only of empty values (due to having
filtered out non-empty values from it), it should be removed
"""
Import = self.registry('base_import.import')
id = Import.create(self.cr, self.uid, {
'res_model': 'base_import.tests.models.preview',
'file': 'name,Some Value,Counter\n'
'foo,1,2\n'
',3,\n'
',5,6\n'
})
record = Import.browse(self.cr, self.uid, id)
data, fields = Import._convert_import_data(
record, ['name', False, 'othervalue'],
{'quoting': '"', 'separator': ',', 'headers': True,})
self.assertItemsEqual(fields, ['name', 'othervalue'])
self.assertItemsEqual(data, [
('foo', '2'),
('', '6'),
])
def test_nofield(self):
Import = self.registry('base_import.import')
id = Import.create(self.cr, self.uid, {
'res_model': 'base_import.tests.models.preview',
'file': 'name,Some Value,Counter\n'
'foo,1,2\n'
})
record = Import.browse(self.cr, self.uid, id)
self.assertRaises(
ValueError,
Import._convert_import_data,
record, [],
{'quoting': '"', 'separator': ',', 'headers': True,})
def test_falsefields(self):
Import = self.registry('base_import.import')
id = Import.create(self.cr, self.uid, {
'res_model': 'base_import.tests.models.preview',
'file': 'name,Some Value,Counter\n'
'foo,1,2\n'
})
record = Import.browse(self.cr, self.uid, id)
self.assertRaises(
ValueError,
Import._convert_import_data,
record, [False, False, False],
{'quoting': '"', 'separator': ',', 'headers': True,})
| agpl-3.0 | 8,766,445,772,598,901,000 | 37.154971 | 115 | 0.504943 | false |
nvbn/python-social-auth | social/tests/backends/test_github.py | 11 | 4899 | import json
from httpretty import HTTPretty
from social.exceptions import AuthFailed
from social.tests.backends.oauth import OAuth2Test
class GithubOAuth2Test(OAuth2Test):
backend_path = 'social.backends.github.GithubOAuth2'
user_data_url = 'https://api.github.com/user'
expected_username = 'foobar'
access_token_body = json.dumps({
'access_token': 'foobar',
'token_type': 'bearer'
})
user_data_body = json.dumps({
'login': 'foobar',
'id': 1,
'avatar_url': 'https://github.com/images/error/foobar_happy.gif',
'gravatar_id': 'somehexcode',
'url': 'https://api.github.com/users/foobar',
'name': 'monalisa foobar',
'company': 'GitHub',
'blog': 'https://github.com/blog',
'location': 'San Francisco',
'email': '[email protected]',
'hireable': False,
'bio': 'There once was...',
'public_repos': 2,
'public_gists': 1,
'followers': 20,
'following': 0,
'html_url': 'https://github.com/foobar',
'created_at': '2008-01-14T04:33:35Z',
'type': 'User',
'total_private_repos': 100,
'owned_private_repos': 100,
'private_gists': 81,
'disk_usage': 10000,
'collaborators': 8,
'plan': {
'name': 'Medium',
'space': 400,
'collaborators': 10,
'private_repos': 20
}
})
def test_login(self):
self.do_login()
def test_partial_pipeline(self):
self.do_partial_pipeline()
class GithubOAuth2NoEmailTest(GithubOAuth2Test):
user_data_body = json.dumps({
'login': 'foobar',
'id': 1,
'avatar_url': 'https://github.com/images/error/foobar_happy.gif',
'gravatar_id': 'somehexcode',
'url': 'https://api.github.com/users/foobar',
'name': 'monalisa foobar',
'company': 'GitHub',
'blog': 'https://github.com/blog',
'location': 'San Francisco',
'email': '',
'hireable': False,
'bio': 'There once was...',
'public_repos': 2,
'public_gists': 1,
'followers': 20,
'following': 0,
'html_url': 'https://github.com/foobar',
'created_at': '2008-01-14T04:33:35Z',
'type': 'User',
'total_private_repos': 100,
'owned_private_repos': 100,
'private_gists': 81,
'disk_usage': 10000,
'collaborators': 8,
'plan': {
'name': 'Medium',
'space': 400,
'collaborators': 10,
'private_repos': 20
}
})
def test_login(self):
url = 'https://api.github.com/user/emails'
HTTPretty.register_uri(HTTPretty.GET, url, status=200,
body=json.dumps(['[email protected]']),
content_type='application/json')
self.do_login()
def test_login_next_format(self):
url = 'https://api.github.com/user/emails'
HTTPretty.register_uri(HTTPretty.GET, url, status=200,
body=json.dumps([{'email': '[email protected]'}]),
content_type='application/json')
self.do_login()
def test_partial_pipeline(self):
self.do_partial_pipeline()
class GithubOrganizationOAuth2Test(GithubOAuth2Test):
backend_path = 'social.backends.github.GithubOrganizationOAuth2'
def auth_handlers(self, start_url):
url = 'https://api.github.com/orgs/foobar/members/foobar'
HTTPretty.register_uri(HTTPretty.GET, url, status=204, body='')
return super(GithubOrganizationOAuth2Test, self).auth_handlers(
start_url
)
def test_login(self):
self.strategy.set_settings({'SOCIAL_AUTH_GITHUB_ORG_NAME': 'foobar'})
self.do_login()
def test_partial_pipeline(self):
self.strategy.set_settings({'SOCIAL_AUTH_GITHUB_ORG_NAME': 'foobar'})
self.do_partial_pipeline()
class GithubOrganizationOAuth2FailTest(GithubOAuth2Test):
backend_path = 'social.backends.github.GithubOrganizationOAuth2'
def auth_handlers(self, start_url):
url = 'https://api.github.com/orgs/foobar/members/foobar'
HTTPretty.register_uri(HTTPretty.GET, url, status=404,
body='{"message": "Not Found"}',
content_type='application/json')
return super(GithubOrganizationOAuth2FailTest, self).auth_handlers(
start_url
)
def test_login(self):
self.strategy.set_settings({'SOCIAL_AUTH_GITHUB_ORG_NAME': 'foobar'})
self.do_login.when.called_with().should.throw(AuthFailed)
def test_partial_pipeline(self):
self.strategy.set_settings({'SOCIAL_AUTH_GITHUB_ORG_NAME': 'foobar'})
self.do_partial_pipeline.when.called_with().should.throw(AuthFailed)
| bsd-3-clause | -3,150,295,932,942,579,700 | 32.326531 | 77 | 0.569912 | false |
lucuma/solution | solution/utils.py | 2 | 2728 | # coding=utf-8
import datetime
import re
from xml.sax.saxutils import quoteattr
from markupsafe import Markup, escape_silent
from ._compat import to_unicode, iteritems
class FakeMultiDict(dict):
"""Adds a fake `getlist` method to a regular dict; or acts as a proxy to
Webob's MultiDict `getall` method.
"""
def __getattr__(self, attr):
try:
return self[attr]
except KeyError as error:
raise AttributeError(error)
def getlist(self, name):
if hasattr(self, 'getall'):
return self.getall(name)
value = self.get(name)
if value is None:
return []
return [value]
def escape(value):
return escape_silent(to_unicode(value))
def get_html_attrs(kwargs=None):
"""Generate HTML attributes from the provided keyword arguments.
The output value is sorted by the passed keys, to provide consistent
output. Because of the frequent use of the normally reserved keyword
`class`, `classes` is used instead. Also, all underscores are translated
to regular dashes.
Set any property with a `True` value.
    >>> get_html_attrs({'id': 'text1', 'classes': 'myclass',
    ...     'data_id': 1, 'checked': True})
u'class="myclass" data-id="1" id="text1" checked'
"""
kwargs = kwargs or {}
attrs = []
props = []
classes = kwargs.get('classes', '').strip()
if classes:
classes = ' '.join(re.split(r'\s+', classes))
classes = to_unicode(quoteattr(classes))
attrs.append('class=%s' % classes)
try:
del kwargs['classes']
except KeyError:
pass
for key, value in iteritems(kwargs):
key = key.replace('_', '-')
key = to_unicode(key)
if isinstance(value, bool):
if value is True:
props.append(key)
else:
value = quoteattr(Markup(value))
attrs.append(u'%s=%s' % (key, value))
attrs.sort()
props.sort()
attrs.extend(props)
return u' '.join(attrs)
def get_obj_value(obj, name, default=None):
# The field name could conflict with a native method
# if `obj` is a dictionary instance
if isinstance(obj, dict):
return obj.get(name, default)
return getattr(obj, name, default)
def set_obj_value(obj, name, value):
# The field name could conflict with a native method
# if `obj` is a dictionary instance
if isinstance(obj, dict):
obj[name] = value
else:
setattr(obj, name, value)
def json_serial(obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, datetime.date):
serial = obj.isoformat()
        return serial
    raise TypeError("Type %s not serializable" % type(obj))
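# Example (sketch): json.dumps({'when': datetime.date(2020, 1, 2)},
# default=json_serial) returns '{"when": "2020-01-02"}'.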
| mit | -1,893,859,089,562,527,500 | 26.28 | 76 | 0.613636 | false |
tallakahath/pymatgen | pymatgen/util/coord_utils.py | 2 | 15753 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
from six.moves import zip
import itertools
import numpy as np
import math
from . import coord_utils_cython as cuc
"""
Utilities for manipulating coordinates or list of coordinates, under periodic
boundary conditions or otherwise. Many of these are heavily vectorized in
numpy for performance.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "Nov 27, 2011"
# array size threshold for looping instead of broadcasting
LOOP_THRESHOLD = 1e6
def find_in_coord_list(coord_list, coord, atol=1e-8):
"""
Find the indices of matches of a particular coord in a coord_list.
Args:
coord_list: List of coords to test
coord: Specific coordinates
atol: Absolute tolerance. Defaults to 1e-8. Accepts both scalar and
array.
Returns:
Indices of matches, e.g., [0, 1, 2, 3]. Empty list if not found.
"""
if len(coord_list) == 0:
return []
diff = np.array(coord_list) - np.array(coord)[None, :]
return np.where(np.all(np.abs(diff) < atol, axis=1))[0]
def in_coord_list(coord_list, coord, atol=1e-8):
"""
Tests if a particular coord is within a coord_list.
Args:
coord_list: List of coords to test
coord: Specific coordinates
atol: Absolute tolerance. Defaults to 1e-8. Accepts both scalar and
array.
Returns:
True if coord is in the coord list.
"""
return len(find_in_coord_list(coord_list, coord, atol=atol)) > 0
def is_coord_subset(subset, superset, atol=1e-8):
"""
Tests if all coords in subset are contained in superset.
Doesn't use periodic boundary conditions
Args:
subset, superset: List of coords
Returns:
True if all of subset is in superset.
"""
c1 = np.array(subset)
c2 = np.array(superset)
is_close = np.all(np.abs(c1[:, None, :] - c2[None, :, :]) < atol, axis=-1)
any_close = np.any(is_close, axis=-1)
return np.all(any_close)
def coord_list_mapping(subset, superset, atol=1e-8):
"""
Gives the index mapping from a subset to a superset.
Subset and superset cannot contain duplicate rows
Args:
subset, superset: List of coords
Returns:
list of indices such that superset[indices] = subset
"""
c1 = np.array(subset)
c2 = np.array(superset)
inds = np.where(np.all(np.isclose(c1[:, None, :], c2[None, :, :], atol=atol),
axis=2))[1]
result = c2[inds]
if not np.allclose(c1, result, atol=atol):
if not is_coord_subset(subset, superset):
raise ValueError("subset is not a subset of superset")
if not result.shape == c1.shape:
raise ValueError("Something wrong with the inputs, likely duplicates "
"in superset")
return inds
def coord_list_mapping_pbc(subset, superset, atol=1e-8):
"""
Gives the index mapping from a subset to a superset.
Superset cannot contain duplicate matching rows
Args:
subset, superset: List of frac_coords
Returns:
list of indices such that superset[indices] = subset
"""
    atol = np.array([1., 1., 1.]) * atol
return cuc.coord_list_mapping_pbc(subset, superset, atol)
def get_linear_interpolated_value(x_values, y_values, x):
"""
Returns an interpolated value by linear interpolation between two values.
This method is written to avoid dependency on scipy, which causes issues on
threading servers.
Args:
x_values: Sequence of x values.
y_values: Corresponding sequence of y values
x: Get value at particular x
Returns:
Value at x.
"""
a = np.array(sorted(zip(x_values, y_values), key=lambda d: d[0]))
ind = np.where(a[:, 0] >= x)[0]
if len(ind) == 0 or ind[0] == 0:
raise ValueError("x is out of range of provided x_values")
i = ind[0]
x1, x2 = a[i - 1][0], a[i][0]
y1, y2 = a[i - 1][1], a[i][1]
return y1 + (y2 - y1) / (x2 - x1) * (x - x1)
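# Example (sketch): get_linear_interpolated_value([0, 2], [0, 4], 1) -> 2.0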
def all_distances(coords1, coords2):
"""
Returns the distances between two lists of coordinates
Args:
coords1: First set of cartesian coordinates.
coords2: Second set of cartesian coordinates.
Returns:
2d array of cartesian distances. E.g the distance between
coords1[i] and coords2[j] is distances[i,j]
"""
c1 = np.array(coords1)
c2 = np.array(coords2)
z = (c1[:, None, :] - c2[None, :, :]) ** 2
return np.sum(z, axis=-1) ** 0.5
def pbc_diff(fcoords1, fcoords2):
"""
Returns the 'fractional distance' between two coordinates taking into
account periodic boundary conditions.
Args:
fcoords1: First set of fractional coordinates. e.g., [0.5, 0.6,
0.7] or [[1.1, 1.2, 4.3], [0.5, 0.6, 0.7]]. It can be a single
coord or any array of coords.
fcoords2: Second set of fractional coordinates.
Returns:
Fractional distance. Each coordinate must have the property that
abs(a) <= 0.5. Examples:
pbc_diff([0.1, 0.1, 0.1], [0.3, 0.5, 0.9]) = [-0.2, -0.4, 0.2]
pbc_diff([0.9, 0.1, 1.01], [0.3, 0.5, 0.9]) = [-0.4, -0.4, 0.11]
"""
fdist = np.subtract(fcoords1, fcoords2)
return fdist - np.round(fdist)
def pbc_shortest_vectors(lattice, fcoords1, fcoords2, mask=None,
return_d2=False):
"""
Returns the shortest vectors between two lists of coordinates taking into
account periodic boundary conditions and the lattice.
Args:
lattice: lattice to use
fcoords1: First set of fractional coordinates. e.g., [0.5, 0.6, 0.7]
or [[1.1, 1.2, 4.3], [0.5, 0.6, 0.7]]. It can be a single
coord or any array of coords.
fcoords2: Second set of fractional coordinates.
mask (boolean array): Mask of matches that are not allowed.
i.e. if mask[1,2] == True, then subset[1] cannot be matched
to superset[2]
return_d2 (boolean): whether to also return the squared distances
Returns:
array of displacement vectors from fcoords1 to fcoords2
first index is fcoords1 index, second is fcoords2 index
"""
return cuc.pbc_shortest_vectors(lattice, fcoords1, fcoords2, mask,
return_d2)
def find_in_coord_list_pbc(fcoord_list, fcoord, atol=1e-8):
"""
Get the indices of all points in a fractional coord list that are
equal to a fractional coord (with a tolerance), taking into account
periodic boundary conditions.
Args:
fcoord_list: List of fractional coords
fcoord: A specific fractional coord to test.
atol: Absolute tolerance. Defaults to 1e-8.
Returns:
Indices of matches, e.g., [0, 1, 2, 3]. Empty list if not found.
"""
if len(fcoord_list) == 0:
return []
fcoords = np.tile(fcoord, (len(fcoord_list), 1))
fdist = fcoord_list - fcoords
fdist -= np.round(fdist)
return np.where(np.all(np.abs(fdist) < atol, axis=1))[0]
def in_coord_list_pbc(fcoord_list, fcoord, atol=1e-8):
"""
Tests if a particular fractional coord is within a fractional coord_list.
Args:
fcoord_list: List of fractional coords to test
fcoord: A specific fractional coord to test.
atol: Absolute tolerance. Defaults to 1e-8.
Returns:
True if coord is in the coord list.
"""
return len(find_in_coord_list_pbc(fcoord_list, fcoord, atol=atol)) > 0
def is_coord_subset_pbc(subset, superset, atol=1e-8, mask=None):
"""
Tests if all fractional coords in subset are contained in superset.
Args:
subset, superset: List of fractional coords
atol (float or size 3 array): Tolerance for matching
mask (boolean array): Mask of matches that are not allowed.
i.e. if mask[1,2] == True, then subset[1] cannot be matched
to superset[2]
Returns:
True if all of subset is in superset.
"""
c1 = np.array(subset, dtype=np.float64)
c2 = np.array(superset, dtype=np.float64)
if mask is not None:
m = np.array(mask, dtype=np.int)
else:
m = np.zeros((len(subset), len(superset)), dtype=np.int)
atol = np.zeros(3, dtype=np.float64) + atol
return cuc.is_coord_subset_pbc(c1, c2, atol, m)
def lattice_points_in_supercell(supercell_matrix):
"""
Returns the list of points on the original lattice contained in the
supercell in fractional coordinates (with the supercell basis).
e.g. [[2,0,0],[0,1,0],[0,0,1]] returns [[0,0,0],[0.5,0,0]]
Args:
supercell_matrix: 3x3 matrix describing the supercell
Returns:
numpy array of the fractional coordinates
"""
diagonals = np.array(
[[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1],
[1, 1, 0], [1, 1, 1]])
d_points = np.dot(diagonals, supercell_matrix)
mins = np.min(d_points, axis=0)
maxes = np.max(d_points, axis=0) + 1
ar = np.arange(mins[0], maxes[0])[:, None] * \
np.array([1, 0, 0])[None, :]
br = np.arange(mins[1], maxes[1])[:, None] * \
np.array([0, 1, 0])[None, :]
cr = np.arange(mins[2], maxes[2])[:, None] * \
np.array([0, 0, 1])[None, :]
all_points = ar[:, None, None] + br[None, :, None] + cr[None, None, :]
all_points = all_points.reshape((-1, 3))
frac_points = np.dot(all_points, np.linalg.inv(supercell_matrix))
tvects = frac_points[np.all(frac_points < 1 - 1e-10, axis=1)
& np.all(frac_points >= -1e-10, axis=1)]
assert len(tvects) == round(abs(np.linalg.det(supercell_matrix)))
return tvects
def barycentric_coords(coords, simplex):
"""
Converts a list of coordinates to barycentric coordinates, given a
simplex with d+1 points. Only works for d >= 2.
Args:
coords: list of n coords to transform, shape should be (n,d)
simplex: list of coordinates that form the simplex, shape should be
(d+1, d)
Returns:
a LIST of barycentric coordinates (even if the original input was 1d)
"""
coords = np.atleast_2d(coords)
t = np.transpose(simplex[:-1, :]) - np.transpose(simplex[-1, :])[:, None]
all_but_one = np.transpose(
np.linalg.solve(t, np.transpose(coords - simplex[-1])))
last_coord = 1 - np.sum(all_but_one, axis=-1)[:, None]
return np.append(all_but_one, last_coord, axis=-1)
def get_angle(v1, v2, units="degrees"):
"""
Calculates the angle between two vectors.
Args:
v1: Vector 1
v2: Vector 2
units: "degrees" or "radians". Defaults to "degrees".
Returns:
Angle between them in degrees.
"""
d = np.dot(v1, v2) / np.linalg.norm(v1) / np.linalg.norm(v2)
d = min(d, 1)
d = max(d, -1)
angle = math.acos(d)
if units == "degrees":
return math.degrees(angle)
elif units == "radians":
return angle
else:
raise ValueError("Invalid units {}".format(units))
class Simplex(object):
"""
A generalized simplex object. See http://en.wikipedia.org/wiki/Simplex.
.. attribute: space_dim
Dimension of the space. Usually, this is 1 more than the simplex_dim.
.. attribute: simplex_dim
Dimension of the simplex coordinate space.
"""
def __init__(self, coords):
"""
Initializes a Simplex from vertex coordinates.
Args:
coords ([[float]]): Coords of the vertices of the simplex. E.g.,
[[1, 2, 3], [2, 4, 5], [6, 7, 8], [8, 9, 10].
"""
self._coords = np.array(coords)
self.space_dim, self.simplex_dim = self._coords.shape
self.origin = self._coords[-1]
if self.space_dim == self.simplex_dim + 1:
# precompute augmented matrix for calculating bary_coords
self._aug = np.concatenate([coords, np.ones((self.space_dim, 1))],
axis=-1)
self._aug_inv = np.linalg.inv(self._aug)
@property
def volume(self):
"""
Volume of the simplex.
"""
return abs(np.linalg.det(self._aug)) / math.factorial(self.simplex_dim)
def bary_coords(self, point):
try:
return np.dot(np.concatenate([point, [1]]), self._aug_inv)
except AttributeError:
raise ValueError('Simplex is not full-dimensional')
def point_from_bary_coords(self, bary_coords):
try:
return np.dot(bary_coords, self._aug[:, :-1])
except AttributeError:
raise ValueError('Simplex is not full-dimensional')
def in_simplex(self, point, tolerance=1e-8):
"""
Checks if a point is in the simplex using the standard barycentric
coordinate system algorithm.
Taking an arbitrary vertex as an origin, we compute the basis for the
simplex from this origin by subtracting all other vertices from the
origin. We then project the point into this coordinate system and
determine the linear decomposition coefficients in this coordinate
system. If the coeffs satisfy that all coeffs >= 0, the composition
is in the facet.
Args:
point ([float]): Point to test
tolerance (float): Tolerance to test if point is in simplex.
"""
return (self.bary_coords(point) >= -tolerance).all()
def line_intersection(self, point1, point2, tolerance=1e-8):
"""
Computes the intersection points of a line with a simplex
Args:
point1, point2 ([float]): Points that determine the line
Returns:
points where the line intersects the simplex (0, 1, or 2)
"""
b1 = self.bary_coords(point1)
b2 = self.bary_coords(point2)
l = b1 - b2
# don't use barycentric dimension where line is parallel to face
valid = np.abs(l) > 1e-10
# array of all the barycentric coordinates on the line where
# one of the values is 0
possible = b1 - (b1[valid] / l[valid])[:, None] * l
barys = []
for p in possible:
# it's only an intersection if its in the simplex
if (p >= -tolerance).all():
found = False
# don't return duplicate points
for b in barys:
if np.all(np.abs(b - p) < tolerance):
found = True
break
if not found:
barys.append(p)
assert len(barys) < 3
return [self.point_from_bary_coords(b) for b in barys]
def __eq__(self, other):
for p in itertools.permutations(self._coords):
if np.allclose(p, other.coords):
return True
return False
def __hash__(self):
return len(self._coords)
def __repr__(self):
output = ["{}-simplex in {}D space".format(self.simplex_dim,
self.space_dim),
"Vertices:"]
for coord in self._coords:
output.append("\t({})".format(", ".join(map(str, coord))))
return "\n".join(output)
def __str__(self):
return self.__repr__()
@property
def coords(self):
"""
Returns a copy of the vertex coordinates in the simplex.
"""
return self._coords.copy()
| mit | -412,714,762,024,709,500 | 31.547521 | 81 | 0.594807 | false |
idooley/AnalogCrossover | calculator/high_pass_with_gain.py | 1 | 3175 | #!/opt/local/bin/python3.4
# Loads some common capacitor and resistor values and then exhaustively searches the space of possible combinations to find a good combination.
import re,sys,math,blist
from blist import sortedset
def convertToValue(x) :
y = x.strip()
if y.endswith("pF") :
return float(y.replace("pF","")) * 0.000000000001
if y.endswith("nF") :
return float(y.replace("nF","")) * 0.000000001
if y.endswith("uF") :
return float(y.replace("uF","")) * 0.000001
if y.endswith("mF") :
return float(y.replace("mF","")) * 0.001
if y.endswith("K") :
return float(y.replace("K","")) * 1000.0
if y.endswith("M") :
return float(y.replace("M","")) *1000000.0
return float(y)
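# Examples for convertToValue (sketch):
#   convertToValue("4K") -> 4000.0
#   convertToValue("47nF") -> 4.7e-08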
def resistancesComposedOfTwoResistors(x) :
results = sortedset()
for v in x:
results.add(v)
for v1 in x:
for v2 in x:
            r = v2 / v1
if r > 0.33 and r < 3.0:
results.add((v1*v2)/(v1+v2))
return results
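# Note: (v1*v2)/(v1+v2) above is the parallel-resistor formula; pairs are
# only combined when their ratio lies between 1:3 and 3:1.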
def main() :
if len(sys.argv) != 3:
print ("usage: " + sys.argv[0] + " frequency gain")
capfile = open("capacitorValues.txt", "r")
resfile = open("resistor_E24_values.txt", "r")
capValues = []
for line in capfile.readlines():
capValues = capValues + [convertToValue(x) for x in re.split("\s*", line) if (x is not None and x is not "")]
capValues.sort()
resValues = []
for line in resfile.readlines():
resValues = resValues + [convertToValue(x) for x in re.split("\s*", line) if (x is not None and x is not "")]
resValues.sort()
# Filter to specific ranges
resValues = [x for x in resValues if (x >= convertToValue("200") and x <= convertToValue("4K")) ]
capValues = [x for x in capValues if (x >= convertToValue("47nF") and x <= convertToValue("120nF")) ]
qTol = 0.01
qExpected = 1.0 / math.sqrt(2.0)
kTol = 0.05
kExpected = float(sys.argv[2]) # target gain
fTol = 10.0
fExpected = float(sys.argv[1]) # target frequency
print ("num resistor values: ", len(resValues))
expandedResValues = resistancesComposedOfTwoResistors(resValues)
print ("num resistor pair values: ", len(expandedResValues))
# print expandedResValues
resValues1 = resValues
resValues2 = resValues
resValues3 = [1000]
resValues4 = expandedResValues
for R1 in resValues1:
for R2 in resValues2:
for R3 in resValues3:
for R4 in resValues4:
                    # R4 = R3  # (uncomment to restrict the search to a fixed 2x gain)
for C1 in capValues:
C2 = C1
k = 1.0 + R4 / R3
if abs(k - kExpected) < kTol:
try:
q = math.sqrt(C1*C2*R1*R2)/(R2*C2+R2*C1+R1*C2*(1-k))
if abs(q - qExpected) < qTol:
f = 1.0/(2.0*3.1415927*math.sqrt(C1*C2*R1*R2))
if abs(f - fExpected) < fTol:
print (R1, R2, R3, R4, C1, C2)
print ("q=", q)
print ("f=", f)
except:
pass
if __name__ == "__main__":
main()
| bsd-3-clause | -2,791,232,877,052,321,000 | 32.072917 | 143 | 0.547402 | false |
cebrusfs/217gdb | pwndbg/strings.py | 5 | 1331 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Functionality for resolving ASCII printable strings within
the debuggee's address space.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import string
import gdb
import pwndbg.events
import pwndbg.memory
import pwndbg.typeinfo
length = 15
@pwndbg.events.stop
def update_length():
r"""
Unfortunately there's not a better way to get at this info.
>>> gdb.execute('show print elements', from_tty=False, to_string=True)
'Limit on string chars or array elements to print is 21.\n'
"""
global length
message = gdb.execute('show print elements', from_tty=False, to_string=True)
message = message.split()[-1]
message = message.strip('.')
if message == 'unlimited':
length = 0
else:
length = int(message)
def get(address, maxlen = None):
if maxlen is None:
maxlen = length
try:
sz = pwndbg.memory.string(address, maxlen)
sz = sz.decode('latin-1', 'replace')
if not sz or not all(s in string.printable for s in sz):
return None
    except Exception:
return None
if len(sz) < maxlen or not maxlen:
return sz
return sz[:maxlen] + '...'
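# Example for get() (sketch): get(0x400000) returns the printable string at
# that address, truncated with '...' at gdb's 'print elements' limit, or
# None if the memory there is not printable.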
| mit | 4,524,889,484,868,914,000 | 23.2 | 80 | 0.647633 | false |
crs4/blast-python | BlastPython/blaster.py | 1 | 1869 | # BEGIN_COPYRIGHT
#
# Copyright (C) 2014 CRS4.
#
# This file is part of blast-python.
#
# blast-python is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# blast-python is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# blast-python. If not, see <http://www.gnu.org/licenses/>.
#
# END_COPYRIGHT
import ncbi_toolkit
class blaster(object):
def __init__(self, query, **kw):
# program type must be passed to the constructor to ensure correct
# default settings for all other options
program = kw.pop('Program', ncbi_toolkit.EProgram.eBlastn)
self.query = query
self.blast_engine = ncbi_toolkit.CBl2Seq(
ncbi_toolkit.SSeqLoc(),
ncbi_toolkit.SSeqLoc(),
program
)
self.blast_engine.SetQuery(query)
self.query_already_setup = False
opts = self.blast_engine.SetOptions()
for k in kw:
opts[k] = kw[k]
def get_options(self):
return self.blast_engine.GetOptions()
def set_options(self):
return self.blast_engine.SetOptions()
def blast(self, subject):
"""
Blast on subject and return results.
"""
self.blast_engine.SetSubject(subject)
self.blast_engine.SetupSearch(self.query_already_setup)
self.query_already_setup = True
self.blast_engine.ScanDB()
return subject, self.blast_engine.GetResults()
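# Example usage (illustrative sketch; `query` and `subject` are assumed to be
# ncbi_toolkit.SSeqLoc objects built elsewhere):
#   b = blaster(query, Program=ncbi_toolkit.EProgram.eBlastn)
#   subject, results = b.blast(subject)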
| gpl-3.0 | -5,738,635,644,341,038,000 | 32.375 | 78 | 0.662921 | false |
40223117cda/w16cdaa | static/Brython3.1.3-20150514-095342/Lib/platform.py | 620 | 51006 | #!/usr/bin/env python3
""" This module tries to retrieve as much platform-identifying data as
possible. It makes this information available via function APIs.
If called from the command line, it prints the platform
    information concatenated as a single string to stdout. The output
    format is usable as part of a filename.
"""
# This module is maintained by Marc-Andre Lemburg <[email protected]>.
# If you find problems, please submit bug reports/patches via the
# Python bug tracker (http://bugs.python.org) and assign them to "lemburg".
#
# Still needed:
# * more support for WinCE
# * support for MS-DOS (PythonDX ?)
# * support for Amiga and other still unsupported platforms running Python
# * support for additional Linux distributions
#
# Many thanks to all those who helped adding platform-specific
# checks (in no particular order):
#
# Charles G Waldman, David Arnold, Gordon McMillan, Ben Darnell,
# Jeff Bauer, Cliff Crawford, Ivan Van Laningham, Josef
# Betancourt, Randall Hopper, Karl Putland, John Farrell, Greg
# Andruk, Just van Rossum, Thomas Heller, Mark R. Levinson, Mark
# Hammond, Bill Tutt, Hans Nowak, Uwe Zessin (OpenVMS support),
# Colin Kong, Trent Mick, Guido van Rossum, Anthony Baxter
#
# History:
#
# <see CVS and SVN checkin messages for history>
#
# 1.0.7 - added DEV_NULL
# 1.0.6 - added linux_distribution()
# 1.0.5 - fixed Java support to allow running the module on Jython
# 1.0.4 - added IronPython support
# 1.0.3 - added normalization of Windows system name
# 1.0.2 - added more Windows support
# 1.0.1 - reformatted to make doc.py happy
# 1.0.0 - reformatted a bit and checked into Python CVS
# 0.8.0 - added sys.version parser and various new access
# APIs (python_version(), python_compiler(), etc.)
# 0.7.2 - fixed architecture() to use sizeof(pointer) where available
# 0.7.1 - added support for Caldera OpenLinux
# 0.7.0 - some fixes for WinCE; untabified the source file
# 0.6.2 - support for OpenVMS - requires version 1.5.2-V006 or higher and
# vms_lib.getsyi() configured
# 0.6.1 - added code to prevent 'uname -p' on platforms which are
# known not to support it
# 0.6.0 - fixed win32_ver() to hopefully work on Win95,98,NT and Win2k;
# did some cleanup of the interfaces - some APIs have changed
# 0.5.5 - fixed another type in the MacOS code... should have
# used more coffee today ;-)
# 0.5.4 - fixed a few typos in the MacOS code
# 0.5.3 - added experimental MacOS support; added better popen()
# workarounds in _syscmd_ver() -- still not 100% elegant
# though
# 0.5.2 - fixed uname() to return '' instead of 'unknown' in all
# return values (the system uname command tends to return
#         'unknown' instead of just leaving the field empty)
# 0.5.1 - included code for slackware dist; added exception handlers
# to cover up situations where platforms don't have os.popen
# (e.g. Mac) or fail on socket.gethostname(); fixed libc
# detection RE
# 0.5.0 - changed the API names referring to system commands to *syscmd*;
# added java_ver(); made syscmd_ver() a private
# API (was system_ver() in previous versions) -- use uname()
# instead; extended the win32_ver() to also return processor
# type information
# 0.4.0 - added win32_ver() and modified the platform() output for WinXX
# 0.3.4 - fixed a bug in _follow_symlinks()
# 0.3.3 - fixed popen() and "file" command invocation bugs
# 0.3.2 - added architecture() API and support for it in platform()
# 0.3.1 - fixed syscmd_ver() RE to support Windows NT
# 0.3.0 - added system alias support
# 0.2.3 - removed 'wince' again... oh well.
# 0.2.2 - added 'wince' to syscmd_ver() supported platforms
# 0.2.1 - added cache logic and changed the platform string format
# 0.2.0 - changed the API to use functions instead of module globals
# since some action take too long to be run on module import
# 0.1.0 - first release
#
# You can always get the latest version of this module at:
#
# http://www.egenix.com/files/python/platform.py
#
# If that URL should fail, try contacting the author.
__copyright__ = """
Copyright (c) 1999-2000, Marc-Andre Lemburg; mailto:[email protected]
Copyright (c) 2000-2010, eGenix.com Software GmbH; mailto:[email protected]
Permission to use, copy, modify, and distribute this software and its
documentation for any purpose and without fee or royalty is hereby granted,
provided that the above copyright notice appear in all copies and that
both that copyright notice and this permission notice appear in
supporting documentation or portions thereof, including modifications,
that you make.
EGENIX.COM SOFTWARE GMBH DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE !
"""
__version__ = '1.0.7'
import collections
import sys, os, re, subprocess
### Globals & Constants
# Determine the platform's /dev/null device
try:
DEV_NULL = os.devnull
except AttributeError:
# os.devnull was added in Python 2.4, so emulate it for earlier
# Python versions
if sys.platform in ('dos','win32','win16','os2'):
# Use the old CP/M NUL as device name
DEV_NULL = 'NUL'
else:
# Standard Unix uses /dev/null
DEV_NULL = '/dev/null'
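# Example (sketch): DEV_NULL can be opened for writing to silence a child
# process, e.g. subprocess.call(cmd, stderr=open(DEV_NULL, 'w')).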
### Platform specific APIs
_libc_search = re.compile(b'(__libc_init)'
b'|'
b'(GLIBC_([0-9.]+))'
b'|'
br'(libc(_\w+)?\.so(?:\.(\d[0-9.]*))?)', re.ASCII)
def libc_ver(executable=sys.executable,lib='',version='',
chunksize=16384):
""" Tries to determine the libc version that the file executable
(which defaults to the Python interpreter) is linked against.
Returns a tuple of strings (lib,version) which default to the
given parameters in case the lookup fails.
Note that the function has intimate knowledge of how different
libc versions add symbols to the executable and thus is probably
        only usable for executables compiled using gcc.
The file is read and scanned in chunks of chunksize bytes.
"""
if hasattr(os.path, 'realpath'):
# Python 2.2 introduced os.path.realpath(); it is used
# here to work around problems with Cygwin not being
# able to open symlinks for reading
executable = os.path.realpath(executable)
f = open(executable,'rb')
binary = f.read(chunksize)
pos = 0
while 1:
if b'libc' in binary or b'GLIBC' in binary:
m = _libc_search.search(binary,pos)
else:
m = None
if not m:
binary = f.read(chunksize)
if not binary:
break
pos = 0
continue
libcinit,glibc,glibcversion,so,threads,soversion = [
s.decode('latin1') if s is not None else s
for s in m.groups()]
if libcinit and not lib:
lib = 'libc'
elif glibc:
if lib != 'glibc':
lib = 'glibc'
version = glibcversion
elif glibcversion > version:
version = glibcversion
elif so:
if lib != 'glibc':
lib = 'libc'
if soversion and soversion > version:
version = soversion
if threads and version[-len(threads):] != threads:
version = version + threads
pos = m.end()
f.close()
return lib,version
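# Editor's illustration (not part of the original module): typical output on
# a glibc-based Linux host. The exact values below are assumptions and will
# vary per machine:
#
#     >>> libc_ver()
#     ('glibc', '2.17')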
def _dist_try_harder(distname,version,id):
""" Tries some special tricks to get the distribution
information in case the default method fails.
Currently supports older SuSE Linux, Caldera OpenLinux and
Slackware Linux distributions.
"""
if os.path.exists('/var/adm/inst-log/info'):
# SuSE Linux stores distribution information in that file
distname = 'SuSE'
for line in open('/var/adm/inst-log/info'):
tv = line.split()
if len(tv) == 2:
tag,value = tv
else:
continue
if tag == 'MIN_DIST_VERSION':
version = value.strip()
elif tag == 'DIST_IDENT':
values = value.split('-')
id = values[2]
return distname,version,id
if os.path.exists('/etc/.installed'):
        # Caldera OpenLinux has some info in that file (thanks to Colin Kong)
for line in open('/etc/.installed'):
pkg = line.split('-')
if len(pkg) >= 2 and pkg[0] == 'OpenLinux':
# XXX does Caldera support non Intel platforms ? If yes,
# where can we find the needed id ?
return 'OpenLinux',pkg[1],id
if os.path.isdir('/usr/lib/setup'):
# Check for slackware version tag file (thanks to Greg Andruk)
verfiles = os.listdir('/usr/lib/setup')
for n in range(len(verfiles)-1, -1, -1):
if verfiles[n][:14] != 'slack-version-':
del verfiles[n]
if verfiles:
verfiles.sort()
distname = 'slackware'
version = verfiles[-1][14:]
return distname,version,id
return distname,version,id
_release_filename = re.compile(r'(\w+)[-_](release|version)', re.ASCII)
_lsb_release_version = re.compile(r'(.+)'
' release '
'([\d.]+)'
'[^(]*(?:\((.+)\))?', re.ASCII)
_release_version = re.compile(r'([^0-9]+)'
'(?: release )?'
'([\d.]+)'
'[^(]*(?:\((.+)\))?', re.ASCII)
# See also http://www.novell.com/coolsolutions/feature/11251.html
# and http://linuxmafia.com/faq/Admin/release-files.html
# and http://data.linux-ntfs.org/rpm/whichrpm
# and http://www.die.net/doc/linux/man/man1/lsb_release.1.html
_supported_dists = (
'SuSE', 'debian', 'fedora', 'redhat', 'centos',
'mandrake', 'mandriva', 'rocks', 'slackware', 'yellowdog', 'gentoo',
'UnitedLinux', 'turbolinux', 'arch', 'mageia')
def _parse_release_file(firstline):
# Default to empty 'version' and 'id' strings. Both defaults are used
# when 'firstline' is empty. 'id' defaults to empty when an id can not
# be deduced.
version = ''
id = ''
# Parse the first line
m = _lsb_release_version.match(firstline)
if m is not None:
# LSB format: "distro release x.x (codename)"
return tuple(m.groups())
# Pre-LSB format: "distro x.x (codename)"
m = _release_version.match(firstline)
if m is not None:
return tuple(m.groups())
# Unknown format... take the first two words
l = firstline.strip().split()
if l:
version = l[0]
if len(l) > 1:
id = l[1]
return '', version, id
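# Editor's illustration (inputs invented for the example, not part of the
# original module):
#
#     >>> _parse_release_file('Fedora release 20 (Heisenbug)')
#     ('Fedora', '20', 'Heisenbug')
#     >>> _parse_release_file('CentOS release 6.5 (Final)')
#     ('CentOS', '6.5', 'Final')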
def linux_distribution(distname='', version='', id='',
                       supported_dists=_supported_dists,
                       full_distribution_name=1):
    """ Tries to determine the name of the Linux OS distribution.
The function first looks for a distribution release file in
/etc and then reverts to _dist_try_harder() in case no
suitable files are found.
supported_dists may be given to define the set of Linux
distributions to look for. It defaults to a list of currently
supported Linux distributions identified by their release file
name.
If full_distribution_name is true (default), the full
distribution read from the OS is returned. Otherwise the short
name taken from supported_dists is used.
Returns a tuple (distname,version,id) which default to the
args given as parameters.
"""
try:
etc = os.listdir('/etc')
except os.error:
# Probably not a Unix system
return distname,version,id
etc.sort()
for file in etc:
m = _release_filename.match(file)
if m is not None:
_distname,dummy = m.groups()
if _distname in supported_dists:
distname = _distname
break
else:
return _dist_try_harder(distname,version,id)
# Read the first line
with open('/etc/'+file, 'r') as f:
firstline = f.readline()
_distname, _version, _id = _parse_release_file(firstline)
if _distname and full_distribution_name:
distname = _distname
if _version:
version = _version
if _id:
id = _id
return distname, version, id
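# Editor's illustration (assumed values for a hypothetical CentOS host; the
# real result depends on the release files found under /etc):
#
#     >>> linux_distribution()
#     ('CentOS', '6.5', 'Final')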
# To maintain backwards compatibility:
def dist(distname='',version='',id='',
         supported_dists=_supported_dists):
    """ Tries to determine the name of the Linux OS distribution.
The function first looks for a distribution release file in
/etc and then reverts to _dist_try_harder() in case no
suitable files are found.
Returns a tuple (distname,version,id) which default to the
args given as parameters.
"""
return linux_distribution(distname, version, id,
supported_dists=supported_dists,
full_distribution_name=0)
def popen(cmd, mode='r', bufsize=-1):
""" Portable popen() interface.
"""
import warnings
warnings.warn('use os.popen instead', DeprecationWarning, stacklevel=2)
return os.popen(cmd, mode, bufsize)
def _norm_version(version, build=''):
""" Normalize the version and build strings and return a single
version string using the format major.minor.build (or patchlevel).
"""
l = version.split('.')
if build:
l.append(build)
    try:
        # list() forces evaluation so that non-numeric components raise
        # ValueError here, inside the try block (a bare map() is lazy on
        # Python 3, deferring the error until after the except clause)
        ints = list(map(int, l))
except ValueError:
strings = l
else:
strings = list(map(str,ints))
version = '.'.join(strings[:3])
return version
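# Editor's illustration (invented inputs, not part of the original module):
#
#     >>> _norm_version('5.00.2195')
#     '5.0.2195'
#     >>> _norm_version('4.10', build='1998')
#     '4.10.1998'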
_ver_output = re.compile(r'(?:([\w ]+) ([\w.]+) '
'.*'
'\[.* ([\d.]+)\])')
# Examples of VER command output:
#
# Windows 2000: Microsoft Windows 2000 [Version 5.00.2195]
# Windows XP: Microsoft Windows XP [Version 5.1.2600]
# Windows Vista: Microsoft Windows [Version 6.0.6002]
#
# Note that the "Version" string gets localized on different
# Windows versions.
def _syscmd_ver(system='', release='', version='',
supported_platforms=('win32','win16','dos','os2')):
""" Tries to figure out the OS version used and returns
a tuple (system,release,version).
It uses the "ver" shell command for this which is known
        to exist on Windows, DOS and OS/2. XXX Others too ?
In case this fails, the given parameters are used as
defaults.
"""
if sys.platform not in supported_platforms:
return system,release,version
# Try some common cmd strings
for cmd in ('ver','command /c ver','cmd /c ver'):
try:
pipe = popen(cmd)
info = pipe.read()
if pipe.close():
raise os.error('command failed')
# XXX How can I suppress shell errors from being written
# to stderr ?
except os.error as why:
#print 'Command %s failed: %s' % (cmd,why)
continue
except IOError as why:
#print 'Command %s failed: %s' % (cmd,why)
continue
else:
break
else:
return system,release,version
# Parse the output
info = info.strip()
m = _ver_output.match(info)
if m is not None:
system,release,version = m.groups()
# Strip trailing dots from version and release
if release[-1] == '.':
release = release[:-1]
if version[-1] == '.':
version = version[:-1]
# Normalize the version and build strings (eliminating additional
# zeros)
version = _norm_version(version)
return system,release,version
def _win32_getvalue(key,name,default=''):
""" Read a value for name from the registry key.
In case this fails, default is returned.
"""
try:
# Use win32api if available
from win32api import RegQueryValueEx
except ImportError:
# On Python 2.0 and later, emulate using winreg
import winreg
RegQueryValueEx = winreg.QueryValueEx
try:
return RegQueryValueEx(key,name)
except:
return default
def win32_ver(release='',version='',csd='',ptype=''):
""" Get additional version information from the Windows Registry
and return a tuple (version,csd,ptype) referring to version
number, CSD level (service pack), and OS type (multi/single
processor).
As a hint: ptype returns 'Uniprocessor Free' on single
processor NT machines and 'Multiprocessor Free' on multi
processor machines. The 'Free' refers to the OS version being
free of debugging code. It could also state 'Checked' which
means the OS version uses debugging code, i.e. code that
checks arguments, ranges, etc. (Thomas Heller).
Note: this function works best with Mark Hammond's win32
package installed, but also on Python 2.3 and later. It
obviously only runs on Win32 compatible platforms.
"""
# XXX Is there any way to find out the processor type on WinXX ?
# XXX Is win32 available on Windows CE ?
#
# Adapted from code posted by Karl Putland to comp.lang.python.
#
# The mappings between reg. values and release names can be found
# here: http://msdn.microsoft.com/library/en-us/sysinfo/base/osversioninfo_str.asp
# Import the needed APIs
try:
import win32api
from win32api import RegQueryValueEx, RegOpenKeyEx, \
RegCloseKey, GetVersionEx
from win32con import HKEY_LOCAL_MACHINE, VER_PLATFORM_WIN32_NT, \
VER_PLATFORM_WIN32_WINDOWS, VER_NT_WORKSTATION
except ImportError:
# Emulate the win32api module using Python APIs
try:
sys.getwindowsversion
except AttributeError:
# No emulation possible, so return the defaults...
return release,version,csd,ptype
else:
# Emulation using winreg (added in Python 2.0) and
# sys.getwindowsversion() (added in Python 2.3)
import winreg
GetVersionEx = sys.getwindowsversion
RegQueryValueEx = winreg.QueryValueEx
RegOpenKeyEx = winreg.OpenKeyEx
RegCloseKey = winreg.CloseKey
HKEY_LOCAL_MACHINE = winreg.HKEY_LOCAL_MACHINE
VER_PLATFORM_WIN32_WINDOWS = 1
VER_PLATFORM_WIN32_NT = 2
VER_NT_WORKSTATION = 1
VER_NT_SERVER = 3
REG_SZ = 1
# Find out the registry key and some general version infos
winver = GetVersionEx()
maj,min,buildno,plat,csd = winver
version = '%i.%i.%i' % (maj,min,buildno & 0xFFFF)
if hasattr(winver, "service_pack"):
if winver.service_pack != "":
csd = 'SP%s' % winver.service_pack_major
else:
if csd[:13] == 'Service Pack ':
csd = 'SP' + csd[13:]
if plat == VER_PLATFORM_WIN32_WINDOWS:
regkey = 'SOFTWARE\\Microsoft\\Windows\\CurrentVersion'
# Try to guess the release name
if maj == 4:
if min == 0:
release = '95'
elif min == 10:
release = '98'
elif min == 90:
release = 'Me'
else:
release = 'postMe'
elif maj == 5:
release = '2000'
elif plat == VER_PLATFORM_WIN32_NT:
regkey = 'SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion'
if maj <= 4:
release = 'NT'
elif maj == 5:
if min == 0:
release = '2000'
elif min == 1:
release = 'XP'
elif min == 2:
release = '2003Server'
else:
release = 'post2003'
elif maj == 6:
if hasattr(winver, "product_type"):
product_type = winver.product_type
else:
product_type = VER_NT_WORKSTATION
# Without an OSVERSIONINFOEX capable sys.getwindowsversion(),
# or help from the registry, we cannot properly identify
# non-workstation versions.
try:
key = RegOpenKeyEx(HKEY_LOCAL_MACHINE, regkey)
name, type = RegQueryValueEx(key, "ProductName")
# Discard any type that isn't REG_SZ
if type == REG_SZ and name.find("Server") != -1:
product_type = VER_NT_SERVER
except WindowsError:
# Use default of VER_NT_WORKSTATION
pass
if min == 0:
if product_type == VER_NT_WORKSTATION:
release = 'Vista'
else:
release = '2008Server'
elif min == 1:
if product_type == VER_NT_WORKSTATION:
release = '7'
else:
release = '2008ServerR2'
elif min == 2:
if product_type == VER_NT_WORKSTATION:
release = '8'
else:
release = '2012Server'
else:
release = 'post2012Server'
else:
if not release:
# E.g. Win3.1 with win32s
release = '%i.%i' % (maj,min)
return release,version,csd,ptype
# Open the registry key
try:
keyCurVer = RegOpenKeyEx(HKEY_LOCAL_MACHINE, regkey)
# Get a value to make sure the key exists...
RegQueryValueEx(keyCurVer, 'SystemRoot')
except:
return release,version,csd,ptype
# Parse values
#subversion = _win32_getvalue(keyCurVer,
# 'SubVersionNumber',
# ('',1))[0]
#if subversion:
# release = release + subversion # 95a, 95b, etc.
build = _win32_getvalue(keyCurVer,
'CurrentBuildNumber',
('',1))[0]
ptype = _win32_getvalue(keyCurVer,
'CurrentType',
(ptype,1))[0]
# Normalize version
version = _norm_version(version,build)
# Close key
RegCloseKey(keyCurVer)
return release,version,csd,ptype
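# Editor's illustration -- plausible output on a hypothetical Windows 7 SP1
# machine (all values below are assumptions, not guaranteed):
#
#     >>> win32_ver()
#     ('7', '6.1.7601', 'SP1', 'Multiprocessor Free')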
def _mac_ver_lookup(selectors,default=None):
from _gestalt import gestalt
l = []
append = l.append
for selector in selectors:
try:
append(gestalt(selector))
except (RuntimeError, OSError):
append(default)
return l
def _bcd2str(bcd):
return hex(bcd)[2:]
def _mac_ver_gestalt():
"""
Thanks to Mark R. Levinson for mailing documentation links and
code examples for this function. Documentation for the
gestalt() API is available online at:
http://www.rgaros.nl/gestalt/
"""
# Check whether the version info module is available
try:
import _gestalt
except ImportError:
return None
# Get the infos
sysv, sysa = _mac_ver_lookup(('sysv','sysa'))
# Decode the infos
if sysv:
major = (sysv & 0xFF00) >> 8
minor = (sysv & 0x00F0) >> 4
patch = (sysv & 0x000F)
if (major, minor) >= (10, 4):
            # the 'sysv' gestalt cannot return patchlevels
# higher than 9. Apple introduced 3 new
# gestalt codes in 10.4 to deal with this
# issue (needed because patch levels can
# run higher than 9, such as 10.4.11)
major,minor,patch = _mac_ver_lookup(('sys1','sys2','sys3'))
release = '%i.%i.%i' %(major, minor, patch)
else:
release = '%s.%i.%i' % (_bcd2str(major),minor,patch)
if sysa:
machine = {0x1: '68k',
0x2: 'PowerPC',
0xa: 'i386'}.get(sysa,'')
versioninfo=('', '', '')
return release,versioninfo,machine
def _mac_ver_xml():
fn = '/System/Library/CoreServices/SystemVersion.plist'
if not os.path.exists(fn):
return None
try:
import plistlib
except ImportError:
return None
pl = plistlib.readPlist(fn)
release = pl['ProductVersion']
versioninfo=('', '', '')
machine = os.uname().machine
if machine in ('ppc', 'Power Macintosh'):
# for compatibility with the gestalt based code
machine = 'PowerPC'
return release,versioninfo,machine
def mac_ver(release='',versioninfo=('','',''),machine=''):
""" Get MacOS version information and return it as tuple (release,
versioninfo, machine) with versioninfo being a tuple (version,
dev_stage, non_release_version).
Entries which cannot be determined are set to the parameter values
which default to ''. All tuple entries are strings.
"""
# First try reading the information from an XML file which should
# always be present
info = _mac_ver_xml()
if info is not None:
return info
# If that doesn't work for some reason fall back to reading the
# information using gestalt calls.
info = _mac_ver_gestalt()
if info is not None:
return info
# If that also doesn't work return the default values
return release,versioninfo,machine
def _java_getprop(name,default):
from java.lang import System
try:
value = System.getProperty(name)
if value is None:
return default
return value
except AttributeError:
return default
def java_ver(release='',vendor='',vminfo=('','',''),osinfo=('','','')):
""" Version interface for Jython.
Returns a tuple (release,vendor,vminfo,osinfo) with vminfo being
a tuple (vm_name,vm_release,vm_vendor) and osinfo being a
tuple (os_name,os_version,os_arch).
Values which cannot be determined are set to the defaults
given as parameters (which all default to '').
"""
# Import the needed APIs
try:
import java.lang
except ImportError:
return release,vendor,vminfo,osinfo
vendor = _java_getprop('java.vendor', vendor)
release = _java_getprop('java.version', release)
vm_name, vm_release, vm_vendor = vminfo
vm_name = _java_getprop('java.vm.name', vm_name)
vm_vendor = _java_getprop('java.vm.vendor', vm_vendor)
vm_release = _java_getprop('java.vm.version', vm_release)
vminfo = vm_name, vm_release, vm_vendor
os_name, os_version, os_arch = osinfo
os_arch = _java_getprop('java.os.arch', os_arch)
os_name = _java_getprop('java.os.name', os_name)
os_version = _java_getprop('java.os.version', os_version)
osinfo = os_name, os_version, os_arch
return release, vendor, vminfo, osinfo
### System name aliasing
def system_alias(system,release,version):
""" Returns (system,release,version) aliased to common
marketing names used for some systems.
It also does some reordering of the information in some cases
where it would otherwise cause confusion.
"""
if system == 'Rhapsody':
# Apple's BSD derivative
# XXX How can we determine the marketing release number ?
return 'MacOS X Server',system+release,version
elif system == 'SunOS':
# Sun's OS
if release < '5':
# These releases use the old name SunOS
return system,release,version
# Modify release (marketing release = SunOS release - 3)
l = release.split('.')
if l:
try:
major = int(l[0])
except ValueError:
pass
else:
major = major - 3
l[0] = str(major)
release = '.'.join(l)
if release < '6':
system = 'Solaris'
else:
# XXX Whatever the new SunOS marketing name is...
system = 'Solaris'
elif system == 'IRIX64':
# IRIX reports IRIX64 on platforms with 64-bit support; yet it
# is really a version and not a different platform, since 32-bit
        # apps are also supported.
system = 'IRIX'
if version:
version = version + ' (64bit)'
else:
version = '64bit'
elif system in ('win32','win16'):
# In case one of the other tricks
system = 'Windows'
return system,release,version
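# Editor's illustration (invented inputs, not part of the original module):
#
#     >>> system_alias('SunOS', '5.8', 'Generic_108528-15')
#     ('Solaris', '2.8', 'Generic_108528-15')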
### Various internal helpers
def _platform(*args):
""" Helper to format the platform string in a filename
compatible format e.g. "system-version-machine".
"""
# Format the platform string
platform = '-'.join(x.strip() for x in filter(len, args))
# Cleanup some possible filename obstacles...
platform = platform.replace(' ','_')
platform = platform.replace('/','-')
platform = platform.replace('\\','-')
platform = platform.replace(':','-')
platform = platform.replace(';','-')
platform = platform.replace('"','-')
platform = platform.replace('(','-')
platform = platform.replace(')','-')
# No need to report 'unknown' information...
platform = platform.replace('unknown','')
# Fold '--'s and remove trailing '-'
while 1:
cleaned = platform.replace('--','-')
if cleaned == platform:
break
platform = cleaned
while platform[-1] == '-':
platform = platform[:-1]
return platform
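# Editor's illustration (invented inputs, not part of the original module):
#
#     >>> _platform('Windows', 'XP', '5.1.2600', 'Uniprocessor Free')
#     'Windows-XP-5.1.2600-Uniprocessor_Free'
#     >>> _platform('Linux', 'unknown', 'x86_64')   # 'unknown' is dropped
#     'Linux-x86_64'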
def _node(default=''):
""" Helper to determine the node name of this machine.
"""
try:
import socket
except ImportError:
# No sockets...
return default
try:
return socket.gethostname()
except socket.error:
# Still not working...
return default
def _follow_symlinks(filepath):
""" In case filepath is a symlink, follow it until a
real file is reached.
"""
filepath = os.path.abspath(filepath)
while os.path.islink(filepath):
filepath = os.path.normpath(
os.path.join(os.path.dirname(filepath),os.readlink(filepath)))
return filepath
def _syscmd_uname(option,default=''):
""" Interface to the system's uname command.
"""
if sys.platform in ('dos','win32','win16','os2'):
# XXX Others too ?
return default
try:
f = os.popen('uname %s 2> %s' % (option, DEV_NULL))
except (AttributeError,os.error):
return default
output = f.read().strip()
rc = f.close()
if not output or rc:
return default
else:
return output
def _syscmd_file(target,default=''):
""" Interface to the system's file command.
The function uses the -b option of the file command to have it
        omit the filename in its output. Symlinks are followed. It returns
        default in case the command fails.
"""
if sys.platform in ('dos','win32','win16','os2'):
# XXX Others too ?
return default
target = _follow_symlinks(target)
try:
proc = subprocess.Popen(['file', target],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
except (AttributeError,os.error):
return default
output = proc.communicate()[0].decode('latin-1')
rc = proc.wait()
if not output or rc:
return default
else:
return output
### Information about the used architecture
# Default values for architecture; non-empty strings override the
# defaults given as parameters
_default_architecture = {
'win32': ('','WindowsPE'),
'win16': ('','Windows'),
'dos': ('','MSDOS'),
}
def architecture(executable=sys.executable,bits='',linkage=''):
""" Queries the given executable (defaults to the Python interpreter
binary) for various architecture information.
Returns a tuple (bits,linkage) which contains information about
the bit architecture and the linkage format used for the
executable. Both values are returned as strings.
Values that cannot be determined are returned as given by the
parameter presets. If bits is given as '', the sizeof(pointer)
(or sizeof(long) on Python version < 1.5.2) is used as
indicator for the supported pointer size.
The function relies on the system's "file" command to do the
actual work. This is available on most if not all Unix
platforms. On some non-Unix platforms where the "file" command
does not exist and the executable is set to the Python interpreter
binary defaults from _default_architecture are used.
"""
# Use the sizeof(pointer) as default number of bits if nothing
# else is given as default.
if not bits:
import struct
try:
size = struct.calcsize('P')
except struct.error:
# Older installations can only query longs
size = struct.calcsize('l')
bits = str(size*8) + 'bit'
# Get data from the 'file' system command
if executable:
fileout = _syscmd_file(executable, '')
else:
fileout = ''
if not fileout and \
executable == sys.executable:
# "file" command did not return anything; we'll try to provide
# some sensible defaults then...
if sys.platform in _default_architecture:
b,l = _default_architecture[sys.platform]
if b:
bits = b
if l:
linkage = l
return bits,linkage
if 'executable' not in fileout:
# Format not supported
return bits,linkage
# Bits
if '32-bit' in fileout:
bits = '32bit'
elif 'N32' in fileout:
# On Irix only
bits = 'n32bit'
elif '64-bit' in fileout:
bits = '64bit'
# Linkage
if 'ELF' in fileout:
linkage = 'ELF'
elif 'PE' in fileout:
# E.g. Windows uses this format
if 'Windows' in fileout:
linkage = 'WindowsPE'
else:
linkage = 'PE'
elif 'COFF' in fileout:
linkage = 'COFF'
elif 'MS-DOS' in fileout:
linkage = 'MSDOS'
else:
# XXX the A.OUT format also falls under this class...
pass
return bits,linkage
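# Editor's illustration (assumed output for a typical 64-bit Linux build of
# Python; the actual values depend on the interpreter binary queried):
#
#     >>> architecture()
#     ('64bit', 'ELF')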
### Portable uname() interface
uname_result = collections.namedtuple("uname_result",
"system node release version machine processor")
_uname_cache = None
def uname():
""" Fairly portable uname interface. Returns a tuple
of strings (system,node,release,version,machine,processor)
identifying the underlying platform.
Note that unlike the os.uname function this also returns
possible processor information as an additional tuple entry.
Entries which cannot be determined are set to ''.
"""
global _uname_cache
no_os_uname = 0
if _uname_cache is not None:
return _uname_cache
processor = ''
# Get some infos from the builtin os.uname API...
try:
system,node,release,version,machine = os.uname()
except AttributeError:
no_os_uname = 1
if no_os_uname or not list(filter(None, (system, node, release, version, machine))):
        # Hmm, so there is either no uname or uname has returned
        # 'unknown's... we'll have to poke around the system then.
if no_os_uname:
system = sys.platform
release = ''
version = ''
node = _node()
machine = ''
use_syscmd_ver = 1
# Try win32_ver() on win32 platforms
if system == 'win32':
release,version,csd,ptype = win32_ver()
if release and version:
use_syscmd_ver = 0
# Try to use the PROCESSOR_* environment variables
# available on Win XP and later; see
# http://support.microsoft.com/kb/888731 and
# http://www.geocities.com/rick_lively/MANUALS/ENV/MSWIN/PROCESSI.HTM
if not machine:
# WOW64 processes mask the native architecture
if "PROCESSOR_ARCHITEW6432" in os.environ:
machine = os.environ.get("PROCESSOR_ARCHITEW6432", '')
else:
machine = os.environ.get('PROCESSOR_ARCHITECTURE', '')
if not processor:
processor = os.environ.get('PROCESSOR_IDENTIFIER', machine)
# Try the 'ver' system command available on some
# platforms
if use_syscmd_ver:
system,release,version = _syscmd_ver(system)
# Normalize system to what win32_ver() normally returns
# (_syscmd_ver() tends to return the vendor name as well)
if system == 'Microsoft Windows':
system = 'Windows'
elif system == 'Microsoft' and release == 'Windows':
# Under Windows Vista and Windows Server 2008,
# Microsoft changed the output of the ver command. The
# release is no longer printed. This causes the
# system and release to be misidentified.
system = 'Windows'
if '6.0' == version[:3]:
release = 'Vista'
else:
release = ''
# In case we still don't know anything useful, we'll try to
# help ourselves
if system in ('win32','win16'):
if not version:
if system == 'win32':
version = '32bit'
else:
version = '16bit'
system = 'Windows'
elif system[:4] == 'java':
release,vendor,vminfo,osinfo = java_ver()
system = 'Java'
version = ', '.join(vminfo)
if not version:
version = vendor
# System specific extensions
if system == 'OpenVMS':
# OpenVMS seems to have release and version mixed up
if not release or release == '0':
release = version
version = ''
# Get processor information
try:
import vms_lib
except ImportError:
pass
else:
csid, cpu_number = vms_lib.getsyi('SYI$_CPU',0)
if (cpu_number >= 128):
processor = 'Alpha'
else:
processor = 'VAX'
if not processor:
# Get processor information from the uname system command
processor = _syscmd_uname('-p','')
    # If any unknowns still exist, replace them with empty strings, which
    # are more portable
if system == 'unknown':
system = ''
if node == 'unknown':
node = ''
if release == 'unknown':
release = ''
if version == 'unknown':
version = ''
if machine == 'unknown':
machine = ''
if processor == 'unknown':
processor = ''
# normalize name
if system == 'Microsoft' and release == 'Windows':
system = 'Windows'
release = 'Vista'
_uname_cache = uname_result(system,node,release,version,machine,processor)
return _uname_cache
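# Editor's illustration (field values are assumptions for a hypothetical
# Linux host, not part of the original module):
#
#     >>> uname()
#     uname_result(system='Linux', node='myhost', release='3.10.0',
#                  version='#1 SMP ...', machine='x86_64', processor='x86_64')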
### Direct interfaces to some of the uname() return values
def system():
""" Returns the system/OS name, e.g. 'Linux', 'Windows' or 'Java'.
An empty string is returned if the value cannot be determined.
"""
return uname().system
def node():
""" Returns the computer's network name (which may not be fully
qualified)
An empty string is returned if the value cannot be determined.
"""
return uname().node
def release():
""" Returns the system's release, e.g. '2.2.0' or 'NT'
An empty string is returned if the value cannot be determined.
"""
return uname().release
def version():
""" Returns the system's release version, e.g. '#3 on degas'
An empty string is returned if the value cannot be determined.
"""
return uname().version
def machine():
""" Returns the machine type, e.g. 'i386'
An empty string is returned if the value cannot be determined.
"""
return uname().machine
def processor():
""" Returns the (true) processor name, e.g. 'amdk6'
An empty string is returned if the value cannot be
determined. Note that many platforms do not provide this
information or simply return the same value as for machine(),
e.g. NetBSD does this.
"""
return uname().processor
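# Editor's illustration (assumed outputs on the same hypothetical Linux host
# as above):
#
#     >>> system(), release(), machine()
#     ('Linux', '3.10.0', 'x86_64')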
### Various APIs for extracting information from sys.version
_sys_version_parser = re.compile(
r'([\w.+]+)\s*'
'\(#?([^,]+),\s*([\w ]+),\s*([\w :]+)\)\s*'
'\[([^\]]+)\]?', re.ASCII)
_ironpython_sys_version_parser = re.compile(
r'IronPython\s*'
'([\d\.]+)'
'(?: \(([\d\.]+)\))?'
' on (.NET [\d\.]+)', re.ASCII)
# IronPython covering 2.6 and 2.7
_ironpython26_sys_version_parser = re.compile(
r'([\d.]+)\s*'
'\(IronPython\s*'
'[\d.]+\s*'
'\(([\d.]+)\) on ([\w.]+ [\d.]+(?: \(\d+-bit\))?)\)'
)
_pypy_sys_version_parser = re.compile(
r'([\w.+]+)\s*'
'\(#?([^,]+),\s*([\w ]+),\s*([\w :]+)\)\s*'
'\[PyPy [^\]]+\]?')
_sys_version_cache = {}
def _sys_version(sys_version=None):
""" Returns a parsed version of Python's sys.version as tuple
(name, version, branch, revision, buildno, builddate, compiler)
referring to the Python implementation name, version, branch,
revision, build number, build date/time as string and the compiler
identification string.
Note that unlike the Python sys.version, the returned value
for the Python version will always include the patchlevel (it
defaults to '.0').
The function returns empty strings for tuple entries that
cannot be determined.
sys_version may be given to parse an alternative version
string, e.g. if the version was read from a different Python
interpreter.
"""
# Get the Python version
if sys_version is None:
sys_version = sys.version
# Try the cache first
result = _sys_version_cache.get(sys_version, None)
if result is not None:
return result
# Parse it
if 'Brython' in sys_version:
        # Brython
name = 'Brython'
_parser=re.compile("^(\d+\.\d+\.\d+)[^[]+\[(.*)\]")
match=_parser.match(sys_version)
if match is None:
raise ValueError(
'failed to parse Brython sys.version: %s' %
repr(sys_version))
#version, alt_version, compiler = match.groups()
version, compiler = match.groups()
alt_version = ''
buildno = ''
builddate = ''
elif 'IronPython' in sys_version:
# IronPython
name = 'IronPython'
if sys_version.startswith('IronPython'):
match = _ironpython_sys_version_parser.match(sys_version)
else:
match = _ironpython26_sys_version_parser.match(sys_version)
if match is None:
raise ValueError(
'failed to parse IronPython sys.version: %s' %
repr(sys_version))
version, alt_version, compiler = match.groups()
buildno = ''
builddate = ''
elif sys.platform.startswith('java'):
# Jython
name = 'Jython'
match = _sys_version_parser.match(sys_version)
if match is None:
raise ValueError(
'failed to parse Jython sys.version: %s' %
repr(sys_version))
version, buildno, builddate, buildtime, _ = match.groups()
compiler = sys.platform
elif "PyPy" in sys_version:
# PyPy
name = "PyPy"
match = _pypy_sys_version_parser.match(sys_version)
if match is None:
raise ValueError("failed to parse PyPy sys.version: %s" %
repr(sys_version))
version, buildno, builddate, buildtime = match.groups()
compiler = ""
else:
# CPython
match = _sys_version_parser.match(sys_version)
if match is None:
raise ValueError(
'failed to parse CPython sys.version: %s' %
repr(sys_version))
version, buildno, builddate, buildtime, compiler = \
match.groups()
name = 'CPython'
builddate = builddate + ' ' + buildtime
if hasattr(sys, '_mercurial'):
_, branch, revision = sys._mercurial
elif hasattr(sys, 'subversion'):
# sys.subversion was added in Python 2.5
_, branch, revision = sys.subversion
else:
branch = ''
revision = ''
# Add the patchlevel version if missing
l = version.split('.')
if len(l) == 2:
l.append('0')
version = '.'.join(l)
# Build and cache the result
result = (name, version, branch, revision, buildno, builddate, compiler)
_sys_version_cache[sys_version] = result
return result
def python_implementation():
""" Returns a string identifying the Python implementation.
Currently, the following implementations are identified:
'CPython' (C implementation of Python),
'IronPython' (.NET implementation of Python),
'Jython' (Java implementation of Python),
'PyPy' (Python implementation of Python).
"""
return _sys_version()[0]
def python_version():
""" Returns the Python version as string 'major.minor.patchlevel'
Note that unlike the Python sys.version, the returned value
will always include the patchlevel (it defaults to 0).
"""
return _sys_version()[1]
def python_version_tuple():
""" Returns the Python version as tuple (major, minor, patchlevel)
of strings.
Note that unlike the Python sys.version, the returned value
will always include the patchlevel (it defaults to 0).
"""
return tuple(_sys_version()[1].split('.'))
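# Editor's illustration (the numbers depend on the running interpreter; a
# hypothetical 3.3.0 is assumed here):
#
#     >>> python_version_tuple()
#     ('3', '3', '0')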
def python_branch():
""" Returns a string identifying the Python implementation
branch.
For CPython this is the Subversion branch from which the
Python binary was built.
If not available, an empty string is returned.
"""
return _sys_version()[2]
def python_revision():
""" Returns a string identifying the Python implementation
revision.
For CPython this is the Subversion revision from which the
Python binary was built.
If not available, an empty string is returned.
"""
return _sys_version()[3]
def python_build():
""" Returns a tuple (buildno, builddate) stating the Python
build number and date as strings.
"""
return _sys_version()[4:6]
def python_compiler():
""" Returns a string identifying the compiler used for compiling
Python.
"""
return _sys_version()[6]
### The Opus Magnum of platform strings :-)
_platform_cache = {}
def platform(aliased=0, terse=0):
""" Returns a single string identifying the underlying platform
with as much useful information as possible (but no more :).
The output is intended to be human readable rather than
machine parseable. It may look different on different
platforms and this is intended.
If "aliased" is true, the function will use aliases for
various platforms that report system names which differ from
their common names, e.g. SunOS will be reported as
Solaris. The system_alias() function is used to implement
this.
Setting terse to true causes the function to return only the
absolute minimum information needed to identify the platform.
"""
result = _platform_cache.get((aliased, terse), None)
if result is not None:
return result
# Get uname information and then apply platform specific cosmetics
# to it...
system,node,release,version,machine,processor = uname()
if machine == processor:
processor = ''
if aliased:
system,release,version = system_alias(system,release,version)
if system == 'Windows':
# MS platforms
rel,vers,csd,ptype = win32_ver(version)
if terse:
platform = _platform(system,release)
else:
platform = _platform(system,release,version,csd)
elif system in ('Linux',):
# Linux based systems
distname,distversion,distid = dist('')
if distname and not terse:
platform = _platform(system,release,machine,processor,
'with',
distname,distversion,distid)
else:
# If the distribution name is unknown check for libc vs. glibc
libcname,libcversion = libc_ver(sys.executable)
platform = _platform(system,release,machine,processor,
'with',
libcname+libcversion)
elif system == 'Java':
# Java platforms
r,v,vminfo,(os_name,os_version,os_arch) = java_ver()
if terse or not os_name:
platform = _platform(system,release,version)
else:
platform = _platform(system,release,version,
'on',
os_name,os_version,os_arch)
elif system == 'MacOS':
# MacOS platforms
if terse:
platform = _platform(system,release)
else:
platform = _platform(system,release,machine)
else:
# Generic handler
if terse:
platform = _platform(system,release)
else:
bits,linkage = architecture(sys.executable)
platform = _platform(system,release,machine,processor,bits,linkage)
_platform_cache[(aliased, terse)] = platform
return platform
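# Editor's illustration (assumed, machine-dependent outputs):
#
#     >>> platform()
#     'Linux-3.10.0-x86_64-with-centos-6.5-Final'
#     >>> platform(aliased=1, terse=1)
#     'Linux-3.10.0'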
### Command line interface
if __name__ == '__main__':
# Default is to print the aliased verbose platform string
terse = ('terse' in sys.argv or '--terse' in sys.argv)
    aliased = ('nonaliased' not in sys.argv and '--nonaliased' not in sys.argv)
print(platform(aliased,terse))
sys.exit(0)
| gpl-3.0 | 6,549,492,661,857,998,000 | 31.843529 | 88 | 0.585245 | false |
SuriyaaKudoIsc/olympia | scripts/update/update.py | 1 | 5688 | import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from commander.deploy import hostgroups, task
import commander_settings as settings
_src_dir = lambda *p: os.path.join(settings.SRC_DIR, *p)
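# For illustration: _src_dir('locale') evaluates to
# os.path.join(settings.SRC_DIR, 'locale'), i.e. a path inside the checkout.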
VIRTUALENV = os.path.join(os.path.dirname(settings.SRC_DIR), 'venv')
@task
def create_virtualenv(ctx):
with ctx.lcd(settings.SRC_DIR):
status = ctx.local('git diff HEAD@{1} HEAD --name-only')
if 'requirements/' in status.out:
venv = VIRTUALENV
if not venv.startswith('/data'):
raise Exception('venv must start with /data')
ctx.local('rm -rf %s' % venv)
ctx.local('virtualenv --distribute --never-download %s' % venv)
ctx.local('%s/bin/pip install --exists-action=w --no-deps --no-index '
'--download-cache=/tmp/pip-cache -f %s '
'-r %s/requirements/prod.txt' %
(venv, settings.PYREPO, settings.SRC_DIR))
if getattr(settings, 'LOAD_TESTING', False):
ctx.local('%s/bin/pip install --exists-action=w --no-deps '
'--no-index --download-cache=/tmp/pip-cache -f %s '
'-r %s/requirements/load.txt' %
(venv, settings.PYREPO, settings.SRC_DIR))
# make sure this always runs
ctx.local("rm -f %s/lib/python2.6/no-global-site-packages.txt" % venv)
ctx.local("%s/bin/python /usr/bin/virtualenv --relocatable %s" %
(venv, venv))
@task
def update_locales(ctx):
with ctx.lcd(_src_dir("locale")):
ctx.local("svn revert -R .")
ctx.local("svn up")
ctx.local("./compile-mo.sh .")
@task
def loadtest(ctx, repo=''):
if hasattr(settings, 'MARTEAU'):
os.environ['MACAUTH_USER'] = settings.MARTEAU_USER
os.environ['MACAUTH_SECRET'] = settings.MARTEAU_SECRET
ctx.local('%s %s --server %s' % (settings.MARTEAU, repo,
settings.MARTEAU_SERVER))
@task
def update_products(ctx):
with ctx.lcd(settings.SRC_DIR):
ctx.local('%s manage.py update_product_details' % settings.PYTHON)
@task
def compress_assets(ctx, arg=''):
with ctx.lcd(settings.SRC_DIR):
ctx.local("%s manage.py compress_assets -t %s" % (settings.PYTHON,
arg))
@task
def collectstatic(ctx):
with ctx.lcd(settings.SRC_DIR):
ctx.local("%s manage.py collectstatic --noinput" % (settings.PYTHON, ))
@task
def schematic(ctx):
with ctx.lcd(settings.SRC_DIR):
ctx.local("%s %s/bin/schematic migrations" %
(settings.PYTHON, VIRTUALENV))
@task
def update_code(ctx, ref='origin/master'):
with ctx.lcd(settings.SRC_DIR):
ctx.local("git fetch && git fetch -t")
ctx.local("git reset --hard %s" % ref)
@task
def update_info(ctx, ref='origin/master'):
with ctx.lcd(settings.SRC_DIR):
ctx.local("git status")
ctx.local("git log -1")
ctx.local("/bin/bash -c "
"'source /etc/bash_completion.d/git && __git_ps1'")
ctx.local('git show -s {0} --pretty="format:%h" '
'> media/git-rev.txt'.format(ref))
@task
def checkin_changes(ctx):
ctx.local(settings.DEPLOY_SCRIPT)
@task
def disable_cron(ctx):
ctx.local("rm -f /etc/cron.d/%s" % settings.CRON_NAME)
@task
def install_cron(ctx):
with ctx.lcd(settings.SRC_DIR):
ctx.local('%s ./scripts/crontab/gen-cron.py '
'-z %s -u apache -p %s > /etc/cron.d/.%s' %
(settings.PYTHON, settings.SRC_DIR,
settings.PYTHON, settings.CRON_NAME))
ctx.local('mv /etc/cron.d/.%s /etc/cron.d/%s' % (settings.CRON_NAME,
settings.CRON_NAME))
@hostgroups(settings.WEB_HOSTGROUP,
remote_kwargs={'ssh_key': settings.SSH_KEY})
def sync_code(ctx):
ctx.remote(settings.REMOTE_UPDATE_SCRIPT)
@hostgroups(settings.WEB_HOSTGROUP,
remote_kwargs={'ssh_key': settings.SSH_KEY})
def restart_workers(ctx):
for gservice in settings.GUNICORN:
ctx.remote("/sbin/service %s graceful" % gservice)
for gservice in getattr(settings, 'MULTI_GUNICORN', []):
ctx.remote("/sbin/service %s-a graceful" % gservice)
ctx.remote("/sbin/service %s-b graceful" % gservice)
@task
def deploy_app(ctx):
sync_code()
restart_workers()
@hostgroups(settings.CELERY_HOSTGROUP,
remote_kwargs={'ssh_key': settings.SSH_KEY})
def update_celery(ctx):
ctx.remote(settings.REMOTE_UPDATE_SCRIPT)
if getattr(settings, 'CELERY_SERVICE_PREFIX', False):
ctx.remote("/sbin/service %s restart" % settings.CELERY_SERVICE_PREFIX)
ctx.remote("/sbin/service %s-devhub restart" %
settings.CELERY_SERVICE_PREFIX)
ctx.remote("/sbin/service %s-priority restart" %
settings.CELERY_SERVICE_PREFIX)
@task
def deploy(ctx):
install_cron()
checkin_changes()
deploy_app()
update_celery()
with ctx.lcd(settings.SRC_DIR):
ctx.local('%s manage.py cron cleanup_validation_results' %
settings.PYTHON)
@task
def pre_update(ctx, ref=settings.UPDATE_REF):
ctx.local('date')
disable_cron()
update_code(ref)
update_info(ref)
@task
def update(ctx):
create_virtualenv()
update_locales()
update_products()
compress_assets()
collectstatic()
schematic()
with ctx.lcd(settings.SRC_DIR):
ctx.local('%s manage.py dump_apps' % settings.PYTHON)
ctx.local('%s manage.py statsd_ping --key=update' % settings.PYTHON)
| bsd-3-clause | -2,612,005,332,908,767,000 | 28.936842 | 79 | 0.594233 | false |
windygu/xbmc-addon | plugin.video.xunlei/mechanize/_html.py | 132 | 20888 | """HTML handling.
Copyright 2003-2006 John J. Lee <[email protected]>
This code is free software; you can redistribute it and/or modify it under
the terms of the BSD or ZPL 2.1 licenses (see the file COPYING.txt
included with the distribution).
"""
import codecs
import copy
import htmlentitydefs
import re
import _sgmllib_copy as sgmllib
import _beautifulsoup
import _form
from _headersutil import split_header_words, is_html as _is_html
import _request
import _rfc3986
DEFAULT_ENCODING = "latin-1"
COMPRESS_RE = re.compile(r"\s+")
class CachingGeneratorFunction(object):
"""Caching wrapper around a no-arguments iterable."""
def __init__(self, iterable):
self._cache = []
# wrap iterable to make it non-restartable (otherwise, repeated
# __call__ would give incorrect results)
self._iterator = iter(iterable)
def __call__(self):
cache = self._cache
for item in cache:
yield item
for item in self._iterator:
cache.append(item)
yield item
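# Editor's illustration (not part of the original module): the wrapper
# replays cached items on repeated calls, so the underlying iterator is
# consumed only once.
#
#     >>> f = CachingGeneratorFunction(x * x for x in range(3))
#     >>> list(f()), list(f())
#     ([0, 1, 4], [0, 1, 4])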
class EncodingFinder:
def __init__(self, default_encoding):
self._default_encoding = default_encoding
def encoding(self, response):
# HTTPEquivProcessor may be in use, so both HTTP and HTTP-EQUIV
# headers may be in the response. HTTP-EQUIV headers come last,
# so try in order from first to last.
for ct in response.info().getheaders("content-type"):
for k, v in split_header_words([ct])[0]:
if k == "charset":
encoding = v
try:
codecs.lookup(v)
except LookupError:
continue
else:
return encoding
return self._default_encoding
class ResponseTypeFinder:
def __init__(self, allow_xhtml):
self._allow_xhtml = allow_xhtml
def is_html(self, response, encoding):
ct_hdrs = response.info().getheaders("content-type")
url = response.geturl()
# XXX encoding
return _is_html(ct_hdrs, url, self._allow_xhtml)
class Args(object):
# idea for this argument-processing trick is from Peter Otten
def __init__(self, args_map):
self.__dict__["dictionary"] = dict(args_map)
def __getattr__(self, key):
try:
return self.dictionary[key]
except KeyError:
return getattr(self.__class__, key)
def __setattr__(self, key, value):
if key == "dictionary":
raise AttributeError()
self.dictionary[key] = value
def form_parser_args(
select_default=False,
form_parser_class=None,
request_class=None,
backwards_compat=False,
):
return Args(locals())
class Link:
def __init__(self, base_url, url, text, tag, attrs):
assert None not in [url, tag, attrs]
self.base_url = base_url
self.absolute_url = _rfc3986.urljoin(base_url, url)
self.url, self.text, self.tag, self.attrs = url, text, tag, attrs
def __cmp__(self, other):
try:
for name in "url", "text", "tag", "attrs":
if getattr(self, name) != getattr(other, name):
return -1
except AttributeError:
return -1
return 0
def __repr__(self):
return "Link(base_url=%r, url=%r, text=%r, tag=%r, attrs=%r)" % (
self.base_url, self.url, self.text, self.tag, self.attrs)
class LinksFactory:
def __init__(self,
link_parser_class=None,
link_class=Link,
urltags=None,
):
import _pullparser
if link_parser_class is None:
link_parser_class = _pullparser.TolerantPullParser
self.link_parser_class = link_parser_class
self.link_class = link_class
if urltags is None:
urltags = {
"a": "href",
"area": "href",
"frame": "src",
"iframe": "src",
}
self.urltags = urltags
self._response = None
self._encoding = None
def set_response(self, response, base_url, encoding):
self._response = response
self._encoding = encoding
self._base_url = base_url
def links(self):
"""Return an iterator that provides links of the document."""
response = self._response
encoding = self._encoding
base_url = self._base_url
p = self.link_parser_class(response, encoding=encoding)
try:
for token in p.tags(*(self.urltags.keys()+["base"])):
if token.type == "endtag":
continue
if token.data == "base":
base_href = dict(token.attrs).get("href")
if base_href is not None:
base_url = base_href
continue
attrs = dict(token.attrs)
tag = token.data
text = None
# XXX use attr_encoding for ref'd doc if that doc does not
# provide one by other means
#attr_encoding = attrs.get("charset")
url = attrs.get(self.urltags[tag]) # XXX is "" a valid URL?
if not url:
# Probably an <A NAME="blah"> link or <AREA NOHREF...>.
# For our purposes a link is something with a URL, so
# ignore this.
continue
url = _rfc3986.clean_url(url, encoding)
if tag == "a":
if token.type != "startendtag":
# hmm, this'd break if end tag is missing
text = p.get_compressed_text(("endtag", tag))
# but this doesn't work for e.g.
# <a href="blah"><b>Andy</b></a>
#text = p.get_compressed_text()
yield Link(base_url, url, text, tag, token.attrs)
except sgmllib.SGMLParseError, exc:
raise _form.ParseError(exc)
class FormsFactory:
"""Makes a sequence of objects satisfying HTMLForm interface.
After calling .forms(), the .global_form attribute is a form object
containing all controls not a descendant of any FORM element.
For constructor argument docs, see ParseResponse argument docs.
"""
def __init__(self,
select_default=False,
form_parser_class=None,
request_class=None,
backwards_compat=False,
):
self.select_default = select_default
if form_parser_class is None:
form_parser_class = _form.FormParser
self.form_parser_class = form_parser_class
if request_class is None:
request_class = _request.Request
self.request_class = request_class
self.backwards_compat = backwards_compat
self._response = None
self.encoding = None
self.global_form = None
def set_response(self, response, encoding):
self._response = response
self.encoding = encoding
self.global_form = None
def forms(self):
encoding = self.encoding
forms = _form.ParseResponseEx(
self._response,
select_default=self.select_default,
form_parser_class=self.form_parser_class,
request_class=self.request_class,
encoding=encoding,
_urljoin=_rfc3986.urljoin,
_urlparse=_rfc3986.urlsplit,
_urlunparse=_rfc3986.urlunsplit,
)
self.global_form = forms[0]
return forms[1:]
class TitleFactory:
def __init__(self):
self._response = self._encoding = None
def set_response(self, response, encoding):
self._response = response
self._encoding = encoding
def _get_title_text(self, parser):
import _pullparser
text = []
tok = None
while 1:
try:
tok = parser.get_token()
except _pullparser.NoMoreTokensError:
break
if tok.type == "data":
text.append(str(tok))
elif tok.type == "entityref":
t = unescape("&%s;" % tok.data,
parser._entitydefs, parser.encoding)
text.append(t)
elif tok.type == "charref":
t = unescape_charref(tok.data, parser.encoding)
text.append(t)
elif tok.type in ["starttag", "endtag", "startendtag"]:
tag_name = tok.data
if tok.type == "endtag" and tag_name == "title":
break
text.append(str(tok))
return COMPRESS_RE.sub(" ", "".join(text).strip())
def title(self):
import _pullparser
p = _pullparser.TolerantPullParser(
self._response, encoding=self._encoding)
try:
try:
p.get_tag("title")
except _pullparser.NoMoreTokensError:
return None
else:
return self._get_title_text(p)
except sgmllib.SGMLParseError, exc:
raise _form.ParseError(exc)
def unescape(data, entities, encoding):
if data is None or "&" not in data:
return data
def replace_entities(match):
ent = match.group()
if ent[1] == "#":
return unescape_charref(ent[2:-1], encoding)
repl = entities.get(ent[1:-1])
if repl is not None:
repl = unichr(repl)
if type(repl) != type(""):
try:
repl = repl.encode(encoding)
except UnicodeError:
repl = ent
else:
repl = ent
return repl
return re.sub(r"&#?[A-Za-z0-9]+?;", replace_entities, data)
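# Editor's illustration (invented inputs; 0x3C is '<'):
#
#     >>> unescape("a &lt; b", {"lt": 0x3C}, "ascii")
#     'a < b'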
def unescape_charref(data, encoding):
name, base = data, 10
if name.startswith("x"):
name, base= name[1:], 16
uc = unichr(int(name, base))
if encoding is None:
return uc
else:
try:
repl = uc.encode(encoding)
except UnicodeError:
repl = "&#%s;" % data
return repl
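# Editor's illustration (invented inputs, not part of the original module):
#
#     >>> unescape_charref('65', None)
#     u'A'
#     >>> unescape_charref('x41', 'ascii')
#     'A'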
class MechanizeBs(_beautifulsoup.BeautifulSoup):
_entitydefs = htmlentitydefs.name2codepoint
# don't want the magic Microsoft-char workaround
PARSER_MASSAGE = [(re.compile('(<[^<>]*)/>'),
lambda(x):x.group(1) + ' />'),
(re.compile('<!\s+([^<>]*)>'),
lambda(x):'<!' + x.group(1) + '>')
]
def __init__(self, encoding, text=None, avoidParserProblems=True,
initialTextIsEverything=True):
self._encoding = encoding
_beautifulsoup.BeautifulSoup.__init__(
self, text, avoidParserProblems, initialTextIsEverything)
def handle_charref(self, ref):
t = unescape("&#%s;"%ref, self._entitydefs, self._encoding)
self.handle_data(t)
def handle_entityref(self, ref):
t = unescape("&%s;"%ref, self._entitydefs, self._encoding)
self.handle_data(t)
def unescape_attrs(self, attrs):
escaped_attrs = []
for key, val in attrs:
val = unescape(val, self._entitydefs, self._encoding)
escaped_attrs.append((key, val))
return escaped_attrs
class RobustLinksFactory:
compress_re = COMPRESS_RE
def __init__(self,
link_parser_class=None,
link_class=Link,
urltags=None,
):
if link_parser_class is None:
link_parser_class = MechanizeBs
self.link_parser_class = link_parser_class
self.link_class = link_class
if urltags is None:
urltags = {
"a": "href",
"area": "href",
"frame": "src",
"iframe": "src",
}
self.urltags = urltags
self._bs = None
self._encoding = None
self._base_url = None
def set_soup(self, soup, base_url, encoding):
self._bs = soup
self._base_url = base_url
self._encoding = encoding
def links(self):
bs = self._bs
base_url = self._base_url
encoding = self._encoding
for ch in bs.recursiveChildGenerator():
if (isinstance(ch, _beautifulsoup.Tag) and
ch.name in self.urltags.keys()+["base"]):
link = ch
attrs = bs.unescape_attrs(link.attrs)
attrs_dict = dict(attrs)
if link.name == "base":
base_href = attrs_dict.get("href")
if base_href is not None:
base_url = base_href
continue
url_attr = self.urltags[link.name]
url = attrs_dict.get(url_attr)
if not url:
continue
url = _rfc3986.clean_url(url, encoding)
text = link.fetchText(lambda t: True)
if not text:
# follow _pullparser's weird behaviour rigidly
if link.name == "a":
text = ""
else:
text = None
else:
text = self.compress_re.sub(" ", " ".join(text).strip())
yield Link(base_url, url, text, link.name, attrs)
class RobustFormsFactory(FormsFactory):
def __init__(self, *args, **kwds):
args = form_parser_args(*args, **kwds)
if args.form_parser_class is None:
args.form_parser_class = _form.RobustFormParser
FormsFactory.__init__(self, **args.dictionary)
def set_response(self, response, encoding):
self._response = response
self.encoding = encoding
class RobustTitleFactory:
def __init__(self):
self._bs = self._encoding = None
def set_soup(self, soup, encoding):
self._bs = soup
self._encoding = encoding
def title(self):
title = self._bs.first("title")
if title == _beautifulsoup.Null:
return None
else:
inner_html = "".join([str(node) for node in title.contents])
return COMPRESS_RE.sub(" ", inner_html.strip())
class Factory:
"""Factory for forms, links, etc.
This interface may expand in future.
Public methods:
set_request_class(request_class)
set_response(response)
forms()
links()
Public attributes:
Note that accessing these attributes may raise ParseError.
encoding: string specifying the encoding of response if it contains a text
document (this value is left unspecified for documents that do not have
an encoding, e.g. an image file)
is_html: true if response contains an HTML document (XHTML may be
regarded as HTML too)
title: page title, or None if no title or not HTML
global_form: form object containing all controls that are not descendants
of any FORM element, or None if the forms_factory does not support
supplying a global form
"""
LAZY_ATTRS = ["encoding", "is_html", "title", "global_form"]
def __init__(self, forms_factory, links_factory, title_factory,
encoding_finder=EncodingFinder(DEFAULT_ENCODING),
response_type_finder=ResponseTypeFinder(allow_xhtml=False),
):
"""
Pass keyword arguments only.
        encoding_finder: object whose .encoding(response) method returns the
        character encoding to use; the default EncodingFinder falls back to
        latin-1 when no encoding can be determined (or guessed) from the
        response. You should turn on HTTP-EQUIV handling if you want the
        best chance of getting the encoding right. The fallback encoding
        (currently latin-1) may change in future.
"""
self._forms_factory = forms_factory
self._links_factory = links_factory
self._title_factory = title_factory
self._encoding_finder = encoding_finder
self._response_type_finder = response_type_finder
self.set_response(None)
def set_request_class(self, request_class):
"""Set request class (mechanize.Request by default).
HTMLForm instances returned by .forms() will return instances of this
class when .click()ed.
"""
self._forms_factory.request_class = request_class
def set_response(self, response):
"""Set response.
The response must either be None or implement the same interface as
objects returned by mechanize.urlopen().
"""
self._response = response
self._forms_genf = self._links_genf = None
self._get_title = None
for name in self.LAZY_ATTRS:
try:
delattr(self, name)
except AttributeError:
pass
def __getattr__(self, name):
if name not in self.LAZY_ATTRS:
return getattr(self.__class__, name)
if name == "encoding":
self.encoding = self._encoding_finder.encoding(
copy.copy(self._response))
return self.encoding
elif name == "is_html":
self.is_html = self._response_type_finder.is_html(
copy.copy(self._response), self.encoding)
return self.is_html
elif name == "title":
if self.is_html:
self.title = self._title_factory.title()
else:
self.title = None
return self.title
elif name == "global_form":
self.forms()
return self.global_form
def forms(self):
"""Return iterable over HTMLForm-like objects.
Raises mechanize.ParseError on failure.
"""
# this implementation sets .global_form as a side-effect, for benefit
# of __getattr__ impl
if self._forms_genf is None:
try:
self._forms_genf = CachingGeneratorFunction(
self._forms_factory.forms())
except: # XXXX define exception!
self.set_response(self._response)
raise
self.global_form = getattr(
self._forms_factory, "global_form", None)
return self._forms_genf()
def links(self):
"""Return iterable over mechanize.Link-like objects.
Raises mechanize.ParseError on failure.
"""
if self._links_genf is None:
try:
self._links_genf = CachingGeneratorFunction(
self._links_factory.links())
except: # XXXX define exception!
self.set_response(self._response)
raise
return self._links_genf()
class DefaultFactory(Factory):
"""Based on sgmllib."""
def __init__(self, i_want_broken_xhtml_support=False):
Factory.__init__(
self,
forms_factory=FormsFactory(),
links_factory=LinksFactory(),
title_factory=TitleFactory(),
response_type_finder=ResponseTypeFinder(
allow_xhtml=i_want_broken_xhtml_support),
)
def set_response(self, response):
Factory.set_response(self, response)
if response is not None:
self._forms_factory.set_response(
copy.copy(response), self.encoding)
self._links_factory.set_response(
copy.copy(response), response.geturl(), self.encoding)
self._title_factory.set_response(
copy.copy(response), self.encoding)
class RobustFactory(Factory):
"""Based on BeautifulSoup, hopefully a bit more robust to bad HTML than is
DefaultFactory.
"""
def __init__(self, i_want_broken_xhtml_support=False,
soup_class=None):
Factory.__init__(
self,
forms_factory=RobustFormsFactory(),
links_factory=RobustLinksFactory(),
title_factory=RobustTitleFactory(),
response_type_finder=ResponseTypeFinder(
allow_xhtml=i_want_broken_xhtml_support),
)
if soup_class is None:
soup_class = MechanizeBs
self._soup_class = soup_class
def set_response(self, response):
Factory.set_response(self, response)
if response is not None:
data = response.read()
soup = self._soup_class(self.encoding, data)
self._forms_factory.set_response(
copy.copy(response), self.encoding)
self._links_factory.set_soup(
soup, response.geturl(), self.encoding)
self._title_factory.set_soup(soup, self.encoding)
| apache-2.0 | 7,856,159,933,690,499,000 | 32.208267 | 78 | 0.548209 | false |
bitifirefly/edx-platform | common/djangoapps/cache_toolbox/templatetags/cache_toolbox.py | 239 | 2059 | from django import template
from django.core.cache import cache
from django.template import Node, TemplateSyntaxError, Variable
from django.template import resolve_variable
register = template.Library()
class CacheNode(Node):
def __init__(self, nodelist, expire_time, key):
self.nodelist = nodelist
self.expire_time = Variable(expire_time)
self.key = key
def render(self, context):
key = resolve_variable(self.key, context)
expire_time = int(self.expire_time.resolve(context))
value = cache.get(key)
if value is None:
value = self.nodelist.render(context)
cache.set(key, value, expire_time)
return value
@register.tag
def cachedeterministic(parser, token):
"""
This will cache the contents of a template fragment for a given amount of
time, just like {% cache .. %} except that the key is deterministic and not
mangled or run through MD5.
Usage::
{% cachedeterministic [expire_time] [key] %}
.. some expensive processing ..
{% endcachedeterministic %}
"""
nodelist = parser.parse(('endcachedeterministic',))
parser.delete_first_token()
tokens = token.contents.split()
if len(tokens) != 3:
raise TemplateSyntaxError(u"'%r' tag requires 2 arguments." % tokens[0])
return CacheNode(nodelist, tokens[1], tokens[2])
class ShowIfCachedNode(Node):
def __init__(self, key):
self.key = key
def render(self, context):
key = resolve_variable(self.key, context)
return cache.get(key) or ''
@register.tag
def showifcached(parser, token):
"""
Show content if it exists in the cache, otherwise display nothing.
The key is entirely deterministic and not mangled or run through MD5 (cf.
{% cache %})
Usage::
{% showifcached [key] %}
"""
tokens = token.contents.split()
if len(tokens) != 2:
raise TemplateSyntaxError(u"'%r' tag requires 1 argument." % tokens[0])
return ShowIfCachedNode(tokens[1])
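# Hypothetical usage sketch (template content and the `fragment_key` context
# variable are invented for illustration): because both tags resolve the same
# deterministic key, one template can populate the cache while another merely
# displays the fragment if it is already warm:
#
#   {% cachedeterministic 300 fragment_key %}
#       ... expensive rendering ...
#   {% endcachedeterministic %}
#
#   {% showifcached fragment_key %}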
| agpl-3.0 | -5,734,013,720,264,495,000 | 27.205479 | 80 | 0.647402 | false |
g12mcgov/home-assistant | homeassistant/components/thermostat/demo.py | 19 | 2080 | """
homeassistant.components.thermostat.demo
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Demo platform that offers a fake thermostat.
"""
from homeassistant.components.thermostat import ThermostatDevice
from homeassistant.const import TEMP_CELCIUS, TEMP_FAHRENHEIT
def setup_platform(hass, config, add_devices, discovery_info=None):
""" Sets up the Demo thermostats. """
add_devices([
DemoThermostat("Nest", 21, TEMP_CELCIUS, False, 19),
DemoThermostat("Thermostat", 68, TEMP_FAHRENHEIT, True, 77),
])
# pylint: disable=too-many-arguments
class DemoThermostat(ThermostatDevice):
""" Represents a HeatControl thermostat. """
def __init__(self, name, target_temperature, unit_of_measurement,
away, current_temperature):
self._name = name
self._target_temperature = target_temperature
self._unit_of_measurement = unit_of_measurement
self._away = away
self._current_temperature = current_temperature
@property
def should_poll(self):
""" No polling needed for a demo thermostat. """
return False
@property
def name(self):
""" Returns the name. """
return self._name
@property
def unit_of_measurement(self):
""" Returns the unit of measurement. """
return self._unit_of_measurement
@property
def current_temperature(self):
""" Returns the current temperature. """
return self._current_temperature
@property
def target_temperature(self):
""" Returns the temperature we try to reach. """
return self._target_temperature
@property
def is_away_mode_on(self):
""" Returns if away mode is on. """
return self._away
def set_temperature(self, temperature):
""" Set new target temperature. """
self._target_temperature = temperature
def turn_away_mode_on(self):
""" Turns away mode on. """
self._away = True
def turn_away_mode_off(self):
""" Turns away mode off. """
self._away = False
| mit | 5,450,782,867,154,622,000 | 28.295775 | 69 | 0.622596 | false |
drewokane/seaborn | seaborn/tests/test_utils.py | 11 | 11338 | """Tests for plotting utilities."""
import warnings
import tempfile
import shutil
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import nose
import nose.tools as nt
from nose.tools import assert_equal, raises
import numpy.testing as npt
import pandas.util.testing as pdt
from distutils.version import LooseVersion
pandas_has_categoricals = LooseVersion(pd.__version__) >= "0.15"
from pandas.util.testing import network
try:
from bs4 import BeautifulSoup
except ImportError:
BeautifulSoup = None
from . import PlotTestCase
from .. import utils, rcmod
from ..utils import get_dataset_names, load_dataset
a_norm = np.random.randn(100)
def test_pmf_hist_basics():
"""Test the function to return barplot args for pmf hist."""
out = utils.pmf_hist(a_norm)
assert_equal(len(out), 3)
x, h, w = out
assert_equal(len(x), len(h))
# Test simple case
a = np.arange(10)
x, h, w = utils.pmf_hist(a, 10)
nose.tools.assert_true(np.all(h == h[0]))
def test_pmf_hist_widths():
"""Test histogram width is correct."""
x, h, w = utils.pmf_hist(a_norm)
assert_equal(x[1] - x[0], w)
def test_pmf_hist_normalization():
"""Test that output data behaves like a PMF."""
x, h, w = utils.pmf_hist(a_norm)
nose.tools.assert_almost_equal(sum(h), 1)
nose.tools.assert_less_equal(h.max(), 1)
def test_pmf_hist_bins():
"""Test bin specification."""
x, h, w = utils.pmf_hist(a_norm, 20)
assert_equal(len(x), 20)
def test_ci_to_errsize():
"""Test behavior of ci_to_errsize."""
cis = [[.5, .5],
[1.25, 1.5]]
heights = [1, 1.5]
actual_errsize = np.array([[.5, 1],
[.25, 0]])
test_errsize = utils.ci_to_errsize(cis, heights)
npt.assert_array_equal(actual_errsize, test_errsize)
def test_desaturate():
"""Test color desaturation."""
out1 = utils.desaturate("red", .5)
assert_equal(out1, (.75, .25, .25))
out2 = utils.desaturate("#00FF00", .5)
assert_equal(out2, (.25, .75, .25))
out3 = utils.desaturate((0, 0, 1), .5)
assert_equal(out3, (.25, .25, .75))
out4 = utils.desaturate("red", .5)
assert_equal(out4, (.75, .25, .25))
@raises(ValueError)
def test_desaturation_prop():
"""Test that pct outside of [0, 1] raises exception."""
utils.desaturate("blue", 50)
def test_saturate():
"""Test performance of saturation function."""
out = utils.saturate((.75, .25, .25))
assert_equal(out, (1, 0, 0))
def test_iqr():
"""Test the IQR function."""
a = np.arange(5)
iqr = utils.iqr(a)
assert_equal(iqr, 2)
class TestSpineUtils(PlotTestCase):
sides = ["left", "right", "bottom", "top"]
outer_sides = ["top", "right"]
inner_sides = ["left", "bottom"]
offset = 10
original_position = ("outward", 0)
offset_position = ("outward", offset)
def test_despine(self):
f, ax = plt.subplots()
for side in self.sides:
nt.assert_true(ax.spines[side].get_visible())
utils.despine()
for side in self.outer_sides:
nt.assert_true(~ax.spines[side].get_visible())
for side in self.inner_sides:
nt.assert_true(ax.spines[side].get_visible())
utils.despine(**dict(zip(self.sides, [True] * 4)))
for side in self.sides:
nt.assert_true(~ax.spines[side].get_visible())
def test_despine_specific_axes(self):
f, (ax1, ax2) = plt.subplots(2, 1)
utils.despine(ax=ax2)
for side in self.sides:
nt.assert_true(ax1.spines[side].get_visible())
for side in self.outer_sides:
nt.assert_true(~ax2.spines[side].get_visible())
for side in self.inner_sides:
nt.assert_true(ax2.spines[side].get_visible())
def test_despine_with_offset(self):
f, ax = plt.subplots()
for side in self.sides:
nt.assert_equal(ax.spines[side].get_position(),
self.original_position)
utils.despine(ax=ax, offset=self.offset)
for side in self.sides:
is_visible = ax.spines[side].get_visible()
new_position = ax.spines[side].get_position()
if is_visible:
nt.assert_equal(new_position, self.offset_position)
else:
nt.assert_equal(new_position, self.original_position)
def test_despine_with_offset_specific_axes(self):
f, (ax1, ax2) = plt.subplots(2, 1)
utils.despine(offset=self.offset, ax=ax2)
for side in self.sides:
nt.assert_equal(ax1.spines[side].get_position(),
self.original_position)
if ax2.spines[side].get_visible():
nt.assert_equal(ax2.spines[side].get_position(),
self.offset_position)
else:
nt.assert_equal(ax2.spines[side].get_position(),
self.original_position)
def test_despine_trim_spines(self):
f, ax = plt.subplots()
ax.plot([1, 2, 3], [1, 2, 3])
ax.set_xlim(.75, 3.25)
utils.despine(trim=True)
for side in self.inner_sides:
bounds = ax.spines[side].get_bounds()
nt.assert_equal(bounds, (1, 3))
def test_despine_trim_inverted(self):
f, ax = plt.subplots()
ax.plot([1, 2, 3], [1, 2, 3])
ax.set_ylim(.85, 3.15)
ax.invert_yaxis()
utils.despine(trim=True)
for side in self.inner_sides:
bounds = ax.spines[side].get_bounds()
nt.assert_equal(bounds, (1, 3))
def test_despine_trim_noticks(self):
f, ax = plt.subplots()
ax.plot([1, 2, 3], [1, 2, 3])
ax.set_yticks([])
utils.despine(trim=True)
nt.assert_equal(ax.get_yticks().size, 0)
def test_offset_spines_warns(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", category=UserWarning)
f, ax = plt.subplots()
utils.offset_spines(offset=self.offset)
nt.assert_true('deprecated' in str(w[0].message))
nt.assert_true(issubclass(w[0].category, UserWarning))
def test_offset_spines(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", category=UserWarning)
f, ax = plt.subplots()
for side in self.sides:
nt.assert_equal(ax.spines[side].get_position(),
self.original_position)
utils.offset_spines(offset=self.offset)
for side in self.sides:
nt.assert_equal(ax.spines[side].get_position(),
self.offset_position)
def test_offset_spines_specific_axes(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", category=UserWarning)
f, (ax1, ax2) = plt.subplots(2, 1)
utils.offset_spines(offset=self.offset, ax=ax2)
for side in self.sides:
nt.assert_equal(ax1.spines[side].get_position(),
self.original_position)
nt.assert_equal(ax2.spines[side].get_position(),
self.offset_position)
def test_ticklabels_overlap():
rcmod.set()
f, ax = plt.subplots(figsize=(2, 2))
f.tight_layout() # This gets the Agg renderer working
assert not utils.axis_ticklabels_overlap(ax.get_xticklabels())
big_strings = "abcdefgh", "ijklmnop"
ax.set_xlim(-.5, 1.5)
ax.set_xticks([0, 1])
ax.set_xticklabels(big_strings)
assert utils.axis_ticklabels_overlap(ax.get_xticklabels())
x, y = utils.axes_ticklabels_overlap(ax)
assert x
assert not y
def test_categorical_order():
x = ["a", "c", "c", "b", "a", "d"]
y = [3, 2, 5, 1, 4]
order = ["a", "b", "c", "d"]
out = utils.categorical_order(x)
nt.assert_equal(out, ["a", "c", "b", "d"])
out = utils.categorical_order(x, order)
nt.assert_equal(out, order)
out = utils.categorical_order(x, ["b", "a"])
nt.assert_equal(out, ["b", "a"])
out = utils.categorical_order(np.array(x))
nt.assert_equal(out, ["a", "c", "b", "d"])
out = utils.categorical_order(pd.Series(x))
nt.assert_equal(out, ["a", "c", "b", "d"])
out = utils.categorical_order(y)
nt.assert_equal(out, [1, 2, 3, 4, 5])
out = utils.categorical_order(np.array(y))
nt.assert_equal(out, [1, 2, 3, 4, 5])
out = utils.categorical_order(pd.Series(y))
nt.assert_equal(out, [1, 2, 3, 4, 5])
if pandas_has_categoricals:
x = pd.Categorical(x, order)
out = utils.categorical_order(x)
nt.assert_equal(out, list(x.categories))
x = pd.Series(x)
out = utils.categorical_order(x)
nt.assert_equal(out, list(x.cat.categories))
out = utils.categorical_order(x, ["b", "a"])
nt.assert_equal(out, ["b", "a"])
x = ["a", np.nan, "c", "c", "b", "a", "d"]
out = utils.categorical_order(x)
nt.assert_equal(out, ["a", "c", "b", "d"])
if LooseVersion(pd.__version__) >= "0.15":
def check_load_dataset(name):
ds = load_dataset(name, cache=False)
assert(isinstance(ds, pd.DataFrame))
def check_load_cached_dataset(name):
        # Test the caching using a temporary file.
# With Python 3.2+, we could use the tempfile.TemporaryDirectory()
# context manager instead of this try...finally statement
tmpdir = tempfile.mkdtemp()
try:
# download and cache
ds = load_dataset(name, cache=True, data_home=tmpdir)
# use cached version
ds2 = load_dataset(name, cache=True, data_home=tmpdir)
pdt.assert_frame_equal(ds, ds2)
finally:
shutil.rmtree(tmpdir)
@network(url="https://github.com/mwaskom/seaborn-data")
def test_get_dataset_names():
if not BeautifulSoup:
raise nose.SkipTest("No BeautifulSoup available for parsing html")
names = get_dataset_names()
assert(len(names) > 0)
assert(u"titanic" in names)
@network(url="https://github.com/mwaskom/seaborn-data")
def test_load_datasets():
if not BeautifulSoup:
raise nose.SkipTest("No BeautifulSoup available for parsing html")
# Heavy test to verify that we can load all available datasets
for name in get_dataset_names():
            # unfortunately @network somehow obscures this generator so it
            # does not take effect, so we need to call it explicitly
# yield check_load_dataset, name
check_load_dataset(name)
@network(url="https://github.com/mwaskom/seaborn-data")
def test_load_cached_datasets():
if not BeautifulSoup:
raise nose.SkipTest("No BeautifulSoup available for parsing html")
# Heavy test to verify that we can load all available datasets
for name in get_dataset_names():
            # unfortunately @network somehow obscures this generator so it
            # does not take effect, so we need to call it explicitly
# yield check_load_dataset, name
check_load_cached_dataset(name)
| bsd-3-clause | 2,918,748,942,632,722,400 | 29.643243 | 78 | 0.5867 | false |
cbclab/MDT | mdt/gui/maps_visualizer/config_tabs/tab_general.py | 1 | 18316 | import copy
import os
from PyQt5.QtCore import pyqtSlot, Qt, QPoint, QUrl
from PyQt5.QtGui import QDesktopServices
from PyQt5.QtWidgets import QWidget, QAbstractItemView, QMenu, QWidgetAction, QLabel
from mdt.gui.maps_visualizer.actions import SetDimension, SetSliceIndex, SetVolumeIndex, SetColormap, SetRotate, \
SetZoom, SetShowAxis, SetColorBarNmrTicks, SetMapsToShow, SetFont, SetInterpolation, SetFlipud, SetPlotTitle, \
SetGeneralMask, NewDataAction, SetShowPlotColorbars, SetColorbarLocation, SetShowPlotTitles
from mdt.gui.maps_visualizer.base import DataConfigModel
from mdt.gui.maps_visualizer.design.ui_TabGeneral import Ui_TabGeneral
from mdt.gui.utils import blocked_signals, TimedUpdate, split_long_path_elements
from mdt.visualization.maps.base import Zoom, Point2d, DataInfo, Font, MapPlotConfig
__author__ = 'Robbert Harms'
__date__ = "2016-09-03"
__maintainer__ = "Robbert Harms"
__email__ = "[email protected]"
class TabGeneral(QWidget, Ui_TabGeneral):
def __init__(self, controller, parent=None):
super().__init__(parent)
self.setupUi(self)
self._controller = controller
self._controller.model_updated.connect(self.update_model)
self._previous_model = None
current_model = self._controller.get_model()
self.general_display_order.setDragDropMode(QAbstractItemView.InternalMove)
self.general_display_order.setSelectionMode(QAbstractItemView.SingleSelection)
self.general_colormap.addItems(current_model.get_config().get_available_colormaps())
self.general_rotate.addItems(['0', '90', '180', '270'])
self.general_rotate.setCurrentText(str(current_model.get_config().rotate))
self.general_DisplayOrder.set_collapse(True)
self.general_Miscellaneous.set_collapse(True)
self.general_Zoom.set_collapse(True)
self.general_Font.set_collapse(True)
self.general_Colorbar.set_collapse(True)
self.general_dimension.valueChanged.connect(lambda v: self._controller.apply_action(SetDimension(v)))
self.general_slice_index.valueChanged.connect(lambda v: self._controller.apply_action(SetSliceIndex(v)))
self.general_volume_index.valueChanged.connect(lambda v: self._controller.apply_action(SetVolumeIndex(v)))
self.general_colormap.currentIndexChanged.connect(
lambda i: self._controller.apply_action(SetColormap(self.general_colormap.itemText(i))))
self.general_rotate.currentIndexChanged.connect(
lambda i: self._controller.apply_action(SetRotate(int(self.general_rotate.itemText(i)))))
self._map_selection_timer = TimedUpdate(self._update_maps_to_show)
self.general_map_selection.itemSelectionChanged.connect(
lambda: self._map_selection_timer.add_delayed_callback(500))
self.general_map_selection.setContextMenuPolicy(Qt.CustomContextMenu)
self.general_map_selection.customContextMenuRequested.connect(self.select_maps_context_menu)
self.general_deselect_all_maps.clicked.connect(self._deleselect_all_maps)
self.general_invert_map_selection.clicked.connect(self._invert_map_selection)
self.general_zoom_x_0.valueChanged.connect(self._update_zoom)
self.general_zoom_x_1.valueChanged.connect(self._update_zoom)
self.general_zoom_y_0.valueChanged.connect(self._update_zoom)
self.general_zoom_y_1.valueChanged.connect(self._update_zoom)
self.plot_title.textEdited.connect(lambda txt: self._controller.apply_action(SetPlotTitle(txt)))
self.general_zoom_reset.clicked.connect(lambda: self._controller.apply_action(SetZoom(Zoom.no_zoom())))
self.general_zoom_fit.clicked.connect(self._zoom_fit)
self.general_display_order.items_reordered.connect(self._reorder_maps)
self.general_show_axis.clicked.connect(lambda: self._controller.apply_action(
SetShowAxis(self.general_show_axis.isChecked())))
self.general_font_family.addItems(Font.font_names())
self.general_font_family.currentTextChanged.connect(
lambda v: self._controller.apply_action(SetFont(
self._controller.get_model().get_config().font.get_updated(family=v))))
self.general_font_size.valueChanged.connect(
lambda: self._controller.apply_action(SetFont(
self._controller.get_model().get_config().font.get_updated(size=self.general_font_size.value()))))
self.general_interpolation.addItems(current_model.get_config().get_available_interpolations())
self.general_interpolation.currentTextChanged.connect(
lambda v: self._controller.apply_action(SetInterpolation(v)))
self.general_flipud.clicked.connect(lambda: self._controller.apply_action(
SetFlipud(self.general_flipud.isChecked())))
self.general_show_plot_titles.clicked.connect(lambda: self._controller.apply_action(
SetShowPlotTitles(self.general_show_plot_titles.isChecked())))
self.mask_name.currentIndexChanged.connect(self._update_mask_name)
self.general_colorbar_nmr_ticks.valueChanged.connect(
lambda v: self._controller.apply_action(SetColorBarNmrTicks(v)))
self.general_show_colorbar.clicked.connect(lambda: self._controller.apply_action(
SetShowPlotColorbars(self.general_show_colorbar.isChecked())))
self.general_colorbar_location.currentTextChanged.connect(
lambda v: self._controller.apply_action(SetColorbarLocation(v.lower())))
@pyqtSlot(DataConfigModel)
def update_model(self, model):
data = model.get_data()
config = model.get_config()
if not self._previous_model or model.get_data() != self._previous_model.get_data():
self._previous_model = model
self._update_data(data)
self._update_config(data, config)
def select_maps_context_menu(self, position):
global_position = self.general_map_selection.mapToGlobal(position)
def get_header_action(parent, map_name):
label = QLabel(map_name)
font = label.font()
font.setBold(True)
label.setFont(font)
label.setStyleSheet('color: black; margin:5px; margin-left: 15px;')
action = QWidgetAction(parent)
action.setDisabled(True)
action.setDefaultWidget(label)
return action
if self.general_map_selection.count():
row = self.general_map_selection.indexAt(position)
if row:
element = self.general_map_selection.item(row.row())
if element:
map_name = element.data(Qt.UserRole)
file_path = self._controller.get_model().get_data().get_file_path(map_name)
menu = QMenu()
menu.addAction(get_header_action(menu, map_name))
menu.addSeparator()
show_in_folder = menu.addAction('&Show in folder', lambda:
QDesktopServices.openUrl(QUrl.fromLocalFile(os.path.dirname(file_path))))
if file_path is None:
show_in_folder.setEnabled(False)
menu.addAction('Use as &mask', lambda: self._controller.apply_action(SetGeneralMask(map_name)))
menu.addAction('R&emove', lambda: self._controller.apply_action(
NewDataAction(self._controller.get_model().get_data().get_updated(removals=[map_name]))))
menu.exec(global_position)
@pyqtSlot()
def _reorder_maps(self):
items = [self.general_display_order.item(ind) for ind in range(self.general_display_order.count())]
map_names = [item.data(Qt.UserRole) for item in items]
self._controller.apply_action(SetMapsToShow(map_names))
@pyqtSlot()
def _update_maps_to_show(self):
current_model = self._controller.get_model()
map_names = copy.copy(current_model.get_config().maps_to_show)
for item in [self.general_map_selection.item(ind) for ind in range(self.general_map_selection.count())]:
map_name = item.data(Qt.UserRole)
if item.isSelected():
if map_name not in map_names:
self._insert_alphabetically(map_name, map_names)
else:
if map_name in map_names:
map_names.remove(map_name)
self._controller.apply_action(SetMapsToShow(map_names))
@pyqtSlot()
def _deleselect_all_maps(self):
self._controller.apply_action(SetMapsToShow([]))
@pyqtSlot()
def _invert_map_selection(self):
current_model = self._controller.get_model()
self._controller.apply_action(SetMapsToShow(
list(set(current_model.get_data().get_map_names()).difference(
set(current_model.get_config().maps_to_show)))))
@pyqtSlot()
def _zoom_fit(self):
current_model = self._controller.get_model()
data_info = current_model.get_data()
config = current_model.get_config()
def add_padding(bounding_box, max_x, max_y):
bounding_box[0].x = max(bounding_box[0].x - 1, 0)
bounding_box[0].y = max(bounding_box[0].y - 1, 0)
bounding_box[1].y = min(bounding_box[1].y + 2, max_y)
bounding_box[1].x = min(bounding_box[1].x + 2, max_x)
return bounding_box
if config.maps_to_show or len(data_info.get_map_names()):
bounding_box = data_info.get_bounding_box(config.dimension, config.slice_index,
config.volume_index, config.rotate, config.maps_to_show)
max_y = data_info.get_max_y_index(config.dimension, rotate=config.rotate, map_names=config.maps_to_show)
max_x = data_info.get_max_x_index(config.dimension, rotate=config.rotate, map_names=config.maps_to_show)
if not config.flipud:
# Since the renderer plots with a left top coordinate system,
# we need to flip the y coordinates upside down by default.
tmp = max_y - bounding_box[0].y
bounding_box[0].y = max_y - bounding_box[1].y
bounding_box[1].y = tmp
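                # Worked example: with max_y = 10, a box spanning y in [3, 5]
                # maps to [10 - 5, 10 - 3] = [5, 7].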
bounding_box = add_padding(bounding_box, max_x, max_y)
self._controller.apply_action(SetZoom(Zoom(*bounding_box)))
@pyqtSlot()
def _update_zoom(self):
np0x, np0y = self.general_zoom_x_0.value(), self.general_zoom_y_0.value()
np1x, np1y = self.general_zoom_x_1.value(), self.general_zoom_y_1.value()
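        # Clamp so the second corner never precedes the first, keeping the
        # zoom rectangle well-formed.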
if np0x > np1x:
np1x = np0x
if np0y > np1y:
np1y = np0y
self._controller.apply_action(SetZoom(Zoom.from_coords(np0x, np0y, np1x, np1y)))
@staticmethod
def _insert_alphabetically(new_item, item_list):
for ind, item in enumerate(item_list):
if item > new_item:
item_list.insert(ind, new_item)
return
item_list.append(new_item)
@pyqtSlot(int)
def _update_mask_name(self, index):
if index == 0:
self._controller.apply_action(SetGeneralMask(None))
else:
self._controller.apply_action(SetGeneralMask(self.mask_name.itemText(index)))
def _update_data(self, data_info):
sorted_keys = list(sorted(data_info.get_map_names()))
if len(data_info.get_map_names()):
self.general_info_nmr_maps.setText(str(len(data_info.get_map_names())))
else:
self.general_info_nmr_maps.setText('0')
with blocked_signals(self.general_map_selection):
self.general_map_selection.clear()
self.general_map_selection.addItems(sorted_keys)
for index, map_name in enumerate(sorted_keys):
item = self.general_map_selection.item(index)
item.setData(Qt.UserRole, map_name)
with blocked_signals(self.mask_name):
self.mask_name.clear()
self.mask_name.insertItem(0, '-- None --')
self.mask_name.insertItems(1, sorted_keys)
def _update_config(self, data, config):
map_names = config.maps_to_show
with blocked_signals(self.general_dimension):
try:
max_dimension = data.get_max_dimension(map_names)
self.general_dimension.setMaximum(max_dimension)
self.maximumDimension.setText(str(max_dimension))
except ValueError:
self.general_dimension.setMaximum(0)
self.maximumDimension.setText(str(0))
self.general_dimension.setValue(config.dimension)
with blocked_signals(self.general_slice_index):
try:
max_slice = data.get_max_slice_index(config.dimension, map_names)
self.general_slice_index.setMaximum(max_slice)
self.maximumIndex.setText(str(max_slice))
except ValueError:
self.general_slice_index.setMaximum(0)
self.maximumIndex.setText(str(0))
self.general_slice_index.setValue(config.slice_index)
with blocked_signals(self.general_volume_index):
try:
max_volume = data.get_max_volume_index(map_names)
self.general_volume_index.setMaximum(max_volume)
self.maximumVolume.setText(str(max_volume))
except ValueError:
self.general_volume_index.setMaximum(0)
self.maximumVolume.setText(str(0))
self.general_volume_index.setValue(config.volume_index)
with blocked_signals(self.general_colormap):
self.general_colormap.setCurrentText(config.colormap)
with blocked_signals(self.general_rotate):
self.general_rotate.setCurrentText(str(config.rotate))
if self.general_map_selection.count():
for map_name, map_config in config.map_plot_options.items():
if map_config.title:
index = list(sorted(data.get_map_names())).index(map_name)
item = self.general_map_selection.item(index)
item.setData(Qt.DisplayRole, map_name + ' (' + map_config.title + ')')
self.general_map_selection.blockSignals(True)
for index, map_name in enumerate(list(sorted(data.get_map_names()))):
item = self.general_map_selection.item(index)
if item:
item.setSelected(map_name in map_names)
self.general_map_selection.blockSignals(False)
try:
max_x = data.get_max_x_index(config.dimension, config.rotate, map_names)
max_y = data.get_max_y_index(config.dimension, config.rotate, map_names)
with blocked_signals(self.general_zoom_x_0, self.general_zoom_x_1,
self.general_zoom_y_0, self.general_zoom_y_1):
self.general_zoom_x_0.setMaximum(max_x)
self.general_zoom_x_0.setValue(config.zoom.p0.x)
self.general_zoom_x_1.setMaximum(max_x)
self.general_zoom_x_1.setMinimum(config.zoom.p0.x)
self.general_zoom_x_1.setValue(config.zoom.p1.x)
self.general_zoom_y_0.setMaximum(max_y)
self.general_zoom_y_0.setValue(config.zoom.p0.y)
self.general_zoom_y_1.setMaximum(max_y)
self.general_zoom_y_1.setMinimum(config.zoom.p0.y)
self.general_zoom_y_1.setValue(config.zoom.p1.y)
if config.zoom.p0.x == 0 and config.zoom.p1.x == 0:
self.general_zoom_x_1.setValue(max_x)
if config.zoom.p0.y == 0 and config.zoom.p1.y == 0:
self.general_zoom_y_1.setValue(max_y)
except ValueError:
pass
with blocked_signals(self.plot_title):
self.plot_title.setText(config.title)
with blocked_signals(self.general_display_order):
self.general_display_order.clear()
self.general_display_order.addItems(map_names)
for index, map_name in enumerate(map_names):
item = self.general_display_order.item(index)
item.setData(Qt.UserRole, map_name)
if map_name in config.map_plot_options and config.map_plot_options[map_name].title:
title = config.map_plot_options[map_name].title
item.setData(Qt.DisplayRole, map_name + ' (' + title + ')')
with blocked_signals(self.general_show_axis):
self.general_show_axis.setChecked(config.show_axis)
with blocked_signals(self.general_colorbar_nmr_ticks):
self.general_colorbar_nmr_ticks.setValue(config.colorbar_settings.get_preferred('nmr_ticks'))
with blocked_signals(self.general_show_colorbar):
self.general_show_colorbar.setChecked(config.colorbar_settings.get_preferred('visible'))
with blocked_signals(self.general_colorbar_location):
self.general_colorbar_location.setCurrentText(config.colorbar_settings.get_preferred('location').title())
with blocked_signals(self.general_show_plot_titles):
self.general_show_plot_titles.setChecked(config.show_titles)
with blocked_signals(self.general_font_family):
self.general_font_family.setCurrentText(config.font.family)
with blocked_signals(self.general_font_size):
self.general_font_size.setValue(config.font.size)
with blocked_signals(self.general_interpolation):
self.general_interpolation.setCurrentText(config.interpolation)
with blocked_signals(self.general_flipud):
self.general_flipud.setChecked(config.flipud)
with blocked_signals(self.mask_name):
if config.mask_name and config.mask_name in data.get_map_names():
for ind in range(self.mask_name.count()):
if self.mask_name.itemText(ind) == config.mask_name:
self.mask_name.setCurrentIndex(ind)
break
else:
self.mask_name.setCurrentIndex(0)
| lgpl-3.0 | -4,285,017,083,319,772,000 | 44.79 | 117 | 0.634582 | false |
kiszk/spark | python/pyspark/mllib/util.py | 18 | 19611 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import numpy as np
if sys.version > '3':
xrange = range
basestring = str
from pyspark import SparkContext, since
from pyspark.mllib.common import callMLlibFunc, inherit_doc
from pyspark.mllib.linalg import Vectors, SparseVector, _convert_to_vector
from pyspark.sql import DataFrame
class MLUtils(object):
"""
Helper methods to load, save and pre-process data used in MLlib.
.. versionadded:: 1.0.0
"""
@staticmethod
def _parse_libsvm_line(line):
"""
Parses a line in LIBSVM format into (label, indices, values).
"""
items = line.split(None)
label = float(items[0])
nnz = len(items) - 1
indices = np.zeros(nnz, dtype=np.int32)
values = np.zeros(nnz)
for i in xrange(nnz):
index, value = items[1 + i].split(":")
indices[i] = int(index) - 1
values[i] = float(value)
return label, indices, values
@staticmethod
def _convert_labeled_point_to_libsvm(p):
"""Converts a LabeledPoint to a string in LIBSVM format."""
from pyspark.mllib.regression import LabeledPoint
assert isinstance(p, LabeledPoint)
items = [str(p.label)]
v = _convert_to_vector(p.features)
if isinstance(v, SparseVector):
nnz = len(v.indices)
for i in xrange(nnz):
items.append(str(v.indices[i] + 1) + ":" + str(v.values[i]))
else:
for i in xrange(len(v)):
items.append(str(i + 1) + ":" + str(v[i]))
return " ".join(items)
@staticmethod
@since("1.0.0")
def loadLibSVMFile(sc, path, numFeatures=-1, minPartitions=None):
"""
Loads labeled data in the LIBSVM format into an RDD of
LabeledPoint. The LIBSVM format is a text-based format used by
LIBSVM and LIBLINEAR. Each line represents a labeled sparse
feature vector using the following format:
label index1:value1 index2:value2 ...
where the indices are one-based and in ascending order. This
method parses each line into a LabeledPoint, where the feature
indices are converted to zero-based.
:param sc: Spark context
:param path: file or directory path in any Hadoop-supported file
system URI
:param numFeatures: number of features, which will be determined
from the input data if a nonpositive value
is given. This is useful when the dataset is
already split into multiple files and you
want to load them separately, because some
                            features may not be present in certain files,
which leads to inconsistent feature
dimensions.
:param minPartitions: min number of partitions
:return: labeled data stored as an RDD of LabeledPoint
>>> from tempfile import NamedTemporaryFile
>>> from pyspark.mllib.util import MLUtils
>>> from pyspark.mllib.regression import LabeledPoint
>>> tempFile = NamedTemporaryFile(delete=True)
>>> _ = tempFile.write(b"+1 1:1.0 3:2.0 5:3.0\\n-1\\n-1 2:4.0 4:5.0 6:6.0")
>>> tempFile.flush()
>>> examples = MLUtils.loadLibSVMFile(sc, tempFile.name).collect()
>>> tempFile.close()
>>> examples[0]
LabeledPoint(1.0, (6,[0,2,4],[1.0,2.0,3.0]))
>>> examples[1]
LabeledPoint(-1.0, (6,[],[]))
>>> examples[2]
LabeledPoint(-1.0, (6,[1,3,5],[4.0,5.0,6.0]))
"""
from pyspark.mllib.regression import LabeledPoint
lines = sc.textFile(path, minPartitions)
parsed = lines.map(lambda l: MLUtils._parse_libsvm_line(l))
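        # If no dimensionality was supplied, infer it as the largest
        # zero-based index seen plus one; parsed is cached because the
        # inference adds an extra pass over the data.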
if numFeatures <= 0:
parsed.cache()
numFeatures = parsed.map(lambda x: -1 if x[1].size == 0 else x[1][-1]).reduce(max) + 1
return parsed.map(lambda x: LabeledPoint(x[0], Vectors.sparse(numFeatures, x[1], x[2])))
@staticmethod
@since("1.0.0")
def saveAsLibSVMFile(data, dir):
"""
Save labeled data in LIBSVM format.
:param data: an RDD of LabeledPoint to be saved
:param dir: directory to save the data
>>> from tempfile import NamedTemporaryFile
>>> from fileinput import input
>>> from pyspark.mllib.regression import LabeledPoint
>>> from glob import glob
>>> from pyspark.mllib.util import MLUtils
>>> examples = [LabeledPoint(1.1, Vectors.sparse(3, [(0, 1.23), (2, 4.56)])),
... LabeledPoint(0.0, Vectors.dense([1.01, 2.02, 3.03]))]
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> MLUtils.saveAsLibSVMFile(sc.parallelize(examples), tempFile.name)
>>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
'0.0 1:1.01 2:2.02 3:3.03\\n1.1 1:1.23 3:4.56\\n'
"""
lines = data.map(lambda p: MLUtils._convert_labeled_point_to_libsvm(p))
lines.saveAsTextFile(dir)
@staticmethod
@since("1.1.0")
def loadLabeledPoints(sc, path, minPartitions=None):
"""
Load labeled points saved using RDD.saveAsTextFile.
:param sc: Spark context
:param path: file or directory path in any Hadoop-supported file
system URI
:param minPartitions: min number of partitions
:return: labeled data stored as an RDD of LabeledPoint
>>> from tempfile import NamedTemporaryFile
>>> from pyspark.mllib.util import MLUtils
>>> from pyspark.mllib.regression import LabeledPoint
>>> examples = [LabeledPoint(1.1, Vectors.sparse(3, [(0, -1.23), (2, 4.56e-7)])),
... LabeledPoint(0.0, Vectors.dense([1.01, 2.02, 3.03]))]
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> sc.parallelize(examples, 1).saveAsTextFile(tempFile.name)
>>> MLUtils.loadLabeledPoints(sc, tempFile.name).collect()
[LabeledPoint(1.1, (3,[0,2],[-1.23,4.56e-07])), LabeledPoint(0.0, [1.01,2.02,3.03])]
"""
minPartitions = minPartitions or min(sc.defaultParallelism, 2)
return callMLlibFunc("loadLabeledPoints", sc, path, minPartitions)
@staticmethod
@since("1.5.0")
def appendBias(data):
"""
Returns a new vector with `1.0` (bias) appended to
the end of the input vector.
"""
vec = _convert_to_vector(data)
if isinstance(vec, SparseVector):
newIndices = np.append(vec.indices, len(vec))
newValues = np.append(vec.values, 1.0)
return SparseVector(len(vec) + 1, newIndices, newValues)
else:
return _convert_to_vector(np.append(vec.toArray(), 1.0))
@staticmethod
@since("1.5.0")
def loadVectors(sc, path):
"""
Loads vectors saved using `RDD[Vector].saveAsTextFile`
with the default number of partitions.
"""
return callMLlibFunc("loadVectors", sc, path)
@staticmethod
@since("2.0.0")
def convertVectorColumnsToML(dataset, *cols):
"""
Converts vector columns in an input DataFrame from the
:py:class:`pyspark.mllib.linalg.Vector` type to the new
:py:class:`pyspark.ml.linalg.Vector` type under the `spark.ml`
package.
:param dataset:
input dataset
:param cols:
a list of vector columns to be converted.
New vector columns will be ignored. If unspecified, all old
            vector columns will be converted except nested ones.
:return:
the input dataset with old vector columns converted to the
new vector type
>>> import pyspark
>>> from pyspark.mllib.linalg import Vectors
>>> from pyspark.mllib.util import MLUtils
>>> df = spark.createDataFrame(
... [(0, Vectors.sparse(2, [1], [1.0]), Vectors.dense(2.0, 3.0))],
... ["id", "x", "y"])
>>> r1 = MLUtils.convertVectorColumnsToML(df).first()
>>> isinstance(r1.x, pyspark.ml.linalg.SparseVector)
True
>>> isinstance(r1.y, pyspark.ml.linalg.DenseVector)
True
>>> r2 = MLUtils.convertVectorColumnsToML(df, "x").first()
>>> isinstance(r2.x, pyspark.ml.linalg.SparseVector)
True
>>> isinstance(r2.y, pyspark.mllib.linalg.DenseVector)
True
"""
if not isinstance(dataset, DataFrame):
raise TypeError("Input dataset must be a DataFrame but got {}.".format(type(dataset)))
return callMLlibFunc("convertVectorColumnsToML", dataset, list(cols))
@staticmethod
@since("2.0.0")
def convertVectorColumnsFromML(dataset, *cols):
"""
Converts vector columns in an input DataFrame to the
:py:class:`pyspark.mllib.linalg.Vector` type from the new
:py:class:`pyspark.ml.linalg.Vector` type under the `spark.ml`
package.
:param dataset:
input dataset
:param cols:
a list of vector columns to be converted.
Old vector columns will be ignored. If unspecified, all new
vector columns will be converted except nested ones.
:return:
the input dataset with new vector columns converted to the
old vector type
>>> import pyspark
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.mllib.util import MLUtils
>>> df = spark.createDataFrame(
... [(0, Vectors.sparse(2, [1], [1.0]), Vectors.dense(2.0, 3.0))],
... ["id", "x", "y"])
>>> r1 = MLUtils.convertVectorColumnsFromML(df).first()
>>> isinstance(r1.x, pyspark.mllib.linalg.SparseVector)
True
>>> isinstance(r1.y, pyspark.mllib.linalg.DenseVector)
True
>>> r2 = MLUtils.convertVectorColumnsFromML(df, "x").first()
>>> isinstance(r2.x, pyspark.mllib.linalg.SparseVector)
True
>>> isinstance(r2.y, pyspark.ml.linalg.DenseVector)
True
"""
if not isinstance(dataset, DataFrame):
raise TypeError("Input dataset must be a DataFrame but got {}.".format(type(dataset)))
return callMLlibFunc("convertVectorColumnsFromML", dataset, list(cols))
@staticmethod
@since("2.0.0")
def convertMatrixColumnsToML(dataset, *cols):
"""
Converts matrix columns in an input DataFrame from the
:py:class:`pyspark.mllib.linalg.Matrix` type to the new
:py:class:`pyspark.ml.linalg.Matrix` type under the `spark.ml`
package.
:param dataset:
input dataset
:param cols:
a list of matrix columns to be converted.
New matrix columns will be ignored. If unspecified, all old
            matrix columns will be converted except nested ones.
:return:
the input dataset with old matrix columns converted to the
new matrix type
>>> import pyspark
>>> from pyspark.mllib.linalg import Matrices
>>> from pyspark.mllib.util import MLUtils
>>> df = spark.createDataFrame(
... [(0, Matrices.sparse(2, 2, [0, 2, 3], [0, 1, 1], [2, 3, 4]),
... Matrices.dense(2, 2, range(4)))], ["id", "x", "y"])
>>> r1 = MLUtils.convertMatrixColumnsToML(df).first()
>>> isinstance(r1.x, pyspark.ml.linalg.SparseMatrix)
True
>>> isinstance(r1.y, pyspark.ml.linalg.DenseMatrix)
True
>>> r2 = MLUtils.convertMatrixColumnsToML(df, "x").first()
>>> isinstance(r2.x, pyspark.ml.linalg.SparseMatrix)
True
>>> isinstance(r2.y, pyspark.mllib.linalg.DenseMatrix)
True
"""
if not isinstance(dataset, DataFrame):
raise TypeError("Input dataset must be a DataFrame but got {}.".format(type(dataset)))
return callMLlibFunc("convertMatrixColumnsToML", dataset, list(cols))
@staticmethod
@since("2.0.0")
def convertMatrixColumnsFromML(dataset, *cols):
"""
Converts matrix columns in an input DataFrame to the
:py:class:`pyspark.mllib.linalg.Matrix` type from the new
:py:class:`pyspark.ml.linalg.Matrix` type under the `spark.ml`
package.
:param dataset:
input dataset
:param cols:
a list of matrix columns to be converted.
Old matrix columns will be ignored. If unspecified, all new
matrix columns will be converted except nested ones.
:return:
the input dataset with new matrix columns converted to the
old matrix type
>>> import pyspark
>>> from pyspark.ml.linalg import Matrices
>>> from pyspark.mllib.util import MLUtils
>>> df = spark.createDataFrame(
... [(0, Matrices.sparse(2, 2, [0, 2, 3], [0, 1, 1], [2, 3, 4]),
... Matrices.dense(2, 2, range(4)))], ["id", "x", "y"])
>>> r1 = MLUtils.convertMatrixColumnsFromML(df).first()
>>> isinstance(r1.x, pyspark.mllib.linalg.SparseMatrix)
True
>>> isinstance(r1.y, pyspark.mllib.linalg.DenseMatrix)
True
>>> r2 = MLUtils.convertMatrixColumnsFromML(df, "x").first()
>>> isinstance(r2.x, pyspark.mllib.linalg.SparseMatrix)
True
>>> isinstance(r2.y, pyspark.ml.linalg.DenseMatrix)
True
"""
if not isinstance(dataset, DataFrame):
raise TypeError("Input dataset must be a DataFrame but got {}.".format(type(dataset)))
return callMLlibFunc("convertMatrixColumnsFromML", dataset, list(cols))
class Saveable(object):
"""
Mixin for models and transformers which may be saved as files.
.. versionadded:: 1.3.0
"""
def save(self, sc, path):
"""
Save this model to the given path.
This saves:
* human-readable (JSON) model metadata to path/metadata/
* Parquet formatted data to path/data/
        The model may be loaded using :py:meth:`Loader.load`.
:param sc: Spark context used to save model data.
:param path: Path specifying the directory in which to save
this model. If the directory already exists,
this method throws an exception.
"""
raise NotImplementedError
@inherit_doc
class JavaSaveable(Saveable):
"""
Mixin for models that provide save() through their Scala
implementation.
.. versionadded:: 1.3.0
"""
@since("1.3.0")
def save(self, sc, path):
"""Save this model to the given path."""
if not isinstance(sc, SparkContext):
raise TypeError("sc should be a SparkContext, got type %s" % type(sc))
if not isinstance(path, basestring):
raise TypeError("path should be a basestring, got type %s" % type(path))
self._java_model.save(sc._jsc.sc(), path)
class Loader(object):
"""
Mixin for classes which can load saved models from files.
.. versionadded:: 1.3.0
"""
@classmethod
def load(cls, sc, path):
"""
Load a model from the given path. The model should have been
        saved using :py:meth:`Saveable.save`.
:param sc: Spark context used for loading model files.
:param path: Path specifying the directory to which the model
was saved.
:return: model instance
"""
raise NotImplementedError
@inherit_doc
class JavaLoader(Loader):
"""
Mixin for classes which can load saved models using its Scala
implementation.
.. versionadded:: 1.3.0
"""
@classmethod
def _java_loader_class(cls):
"""
Returns the full class name of the Java loader. The default
implementation replaces "pyspark" by "org.apache.spark" in
the Python full class name.
"""
java_package = cls.__module__.replace("pyspark", "org.apache.spark")
return ".".join([java_package, cls.__name__])
@classmethod
def _load_java(cls, sc, path):
"""
Load a Java model from the given path.
"""
java_class = cls._java_loader_class()
java_obj = sc._jvm
for name in java_class.split("."):
java_obj = getattr(java_obj, name)
return java_obj.load(sc._jsc.sc(), path)
@classmethod
@since("1.3.0")
def load(cls, sc, path):
"""Load a model from the given path."""
java_model = cls._load_java(sc, path)
return cls(java_model)
class LinearDataGenerator(object):
"""Utils for generating linear data.
.. versionadded:: 1.5.0
"""
@staticmethod
@since("1.5.0")
def generateLinearInput(intercept, weights, xMean, xVariance,
nPoints, seed, eps):
"""
        :param intercept: bias factor, the term c in X'w + c
        :param weights: feature vector, the term w in X'w + c
        :param xMean: point around which the data X is centered
        :param xVariance: variance of the given data
        :param nPoints: number of points to be generated
        :param seed: random seed
        :param eps: used to scale the noise; the higher eps is set, the
            more gaussian noise is added
        :return: a list of LabeledPoints of length nPoints
"""
weights = [float(weight) for weight in weights]
xMean = [float(mean) for mean in xMean]
xVariance = [float(var) for var in xVariance]
return list(callMLlibFunc(
"generateLinearInputWrapper", float(intercept), weights, xMean,
xVariance, int(nPoints), int(seed), float(eps)))
@staticmethod
@since("1.5.0")
def generateLinearRDD(sc, nexamples, nfeatures, eps,
nParts=2, intercept=0.0):
"""
Generate an RDD of LabeledPoints.
"""
return callMLlibFunc(
"generateLinearRDDWrapper", sc, int(nexamples), int(nfeatures),
float(eps), int(nParts), float(intercept))
def _test():
import doctest
from pyspark.sql import SparkSession
globs = globals().copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
spark = SparkSession.builder\
.master("local[2]")\
.appName("mllib.util tests")\
.getOrCreate()
globs['spark'] = spark
globs['sc'] = spark.sparkContext
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 | 8,811,259,484,909,665,000 | 36.497132 | 98 | 0.601856 | false |
kostaspl/SpiderMonkey38 | python/mozbuild/mozbuild/test/backend/test_recursivemake.py | 1 | 29117 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import unicode_literals
import json
import os
import unittest
from mozpack.manifests import (
InstallManifest,
)
from mozunit import main
from mozbuild.backend.recursivemake import (
RecursiveMakeBackend,
RecursiveMakeTraversal,
)
from mozbuild.frontend.emitter import TreeMetadataEmitter
from mozbuild.frontend.reader import BuildReader
from mozbuild.test.backend.common import BackendTester
import mozpack.path as mozpath
class TestRecursiveMakeTraversal(unittest.TestCase):
def test_traversal(self):
traversal = RecursiveMakeTraversal()
traversal.add('', dirs=['A', 'B', 'C'])
traversal.add('', dirs=['D'])
traversal.add('A')
traversal.add('B', dirs=['E', 'F'])
traversal.add('C', dirs=['G', 'H'])
traversal.add('D', dirs=['I', 'K'])
traversal.add('D', dirs=['J', 'L'])
traversal.add('E')
traversal.add('F')
traversal.add('G')
traversal.add('H')
traversal.add('I', dirs=['M', 'N'])
traversal.add('J', dirs=['O', 'P'])
traversal.add('K', dirs=['Q', 'R'])
traversal.add('L', dirs=['S'])
traversal.add('M')
traversal.add('N', dirs=['T'])
traversal.add('O')
traversal.add('P', dirs=['U'])
traversal.add('Q')
traversal.add('R', dirs=['V'])
traversal.add('S', dirs=['W'])
traversal.add('T')
traversal.add('U')
traversal.add('V')
traversal.add('W', dirs=['X'])
traversal.add('X')
parallels = set(('G', 'H', 'I', 'J', 'O', 'P', 'Q', 'R', 'U'))
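        # The filter callback returns, for each directory, a triple of
        # (node to emit, subdirs traversed in parallel, subdirs traversed
        # sequentially); here every directory in `parallels` is parallel.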
def filter(current, subdirs):
return (current, [d for d in subdirs.dirs if d in parallels],
[d for d in subdirs.dirs if d not in parallels])
start, deps = traversal.compute_dependencies(filter)
self.assertEqual(start, ('X',))
self.maxDiff = None
self.assertEqual(deps, {
'A': ('',),
'B': ('A',),
'C': ('F',),
'D': ('G', 'H'),
'E': ('B',),
'F': ('E',),
'G': ('C',),
'H': ('C',),
'I': ('D',),
'J': ('D',),
'K': ('T', 'O', 'U'),
'L': ('Q', 'V'),
'M': ('I',),
'N': ('M',),
'O': ('J',),
'P': ('J',),
'Q': ('K',),
'R': ('K',),
'S': ('L',),
'T': ('N',),
'U': ('P',),
'V': ('R',),
'W': ('S',),
'X': ('W',),
})
self.assertEqual(list(traversal.traverse('', filter)),
['', 'A', 'B', 'E', 'F', 'C', 'G', 'H', 'D', 'I',
'M', 'N', 'T', 'J', 'O', 'P', 'U', 'K', 'Q', 'R',
'V', 'L', 'S', 'W', 'X'])
self.assertEqual(list(traversal.traverse('C', filter)),
['C', 'G', 'H'])
def test_traversal_2(self):
traversal = RecursiveMakeTraversal()
traversal.add('', dirs=['A', 'B', 'C'])
traversal.add('A')
traversal.add('B', dirs=['D', 'E', 'F'])
traversal.add('C', dirs=['G', 'H', 'I'])
traversal.add('D')
traversal.add('E')
traversal.add('F')
traversal.add('G')
traversal.add('H')
traversal.add('I')
start, deps = traversal.compute_dependencies()
self.assertEqual(start, ('I',))
self.assertEqual(deps, {
'A': ('',),
'B': ('A',),
'C': ('F',),
'D': ('B',),
'E': ('D',),
'F': ('E',),
'G': ('C',),
'H': ('G',),
'I': ('H',),
})
def test_traversal_filter(self):
traversal = RecursiveMakeTraversal()
traversal.add('', dirs=['A', 'B', 'C'])
traversal.add('A')
traversal.add('B', dirs=['D', 'E', 'F'])
traversal.add('C', dirs=['G', 'H', 'I'])
traversal.add('D')
traversal.add('E')
traversal.add('F')
traversal.add('G')
traversal.add('H')
traversal.add('I')
def filter(current, subdirs):
if current == 'B':
current = None
return current, [], subdirs.dirs
start, deps = traversal.compute_dependencies(filter)
self.assertEqual(start, ('I',))
self.assertEqual(deps, {
'A': ('',),
'C': ('F',),
'D': ('A',),
'E': ('D',),
'F': ('E',),
'G': ('C',),
'H': ('G',),
'I': ('H',),
})
class TestRecursiveMakeBackend(BackendTester):
def test_basic(self):
"""Ensure the RecursiveMakeBackend works without error."""
env = self._consume('stub0', RecursiveMakeBackend)
self.assertTrue(os.path.exists(mozpath.join(env.topobjdir,
'backend.RecursiveMakeBackend')))
self.assertTrue(os.path.exists(mozpath.join(env.topobjdir,
'backend.RecursiveMakeBackend.pp')))
def test_output_files(self):
"""Ensure proper files are generated."""
env = self._consume('stub0', RecursiveMakeBackend)
expected = ['', 'dir1', 'dir2']
for d in expected:
out_makefile = mozpath.join(env.topobjdir, d, 'Makefile')
out_backend = mozpath.join(env.topobjdir, d, 'backend.mk')
self.assertTrue(os.path.exists(out_makefile))
self.assertTrue(os.path.exists(out_backend))
def test_makefile_conversion(self):
"""Ensure Makefile.in is converted properly."""
env = self._consume('stub0', RecursiveMakeBackend)
p = mozpath.join(env.topobjdir, 'Makefile')
lines = [l.strip() for l in open(p, 'rt').readlines()[1:] if not l.startswith('#')]
self.assertEqual(lines, [
'DEPTH := .',
'topsrcdir := %s' % env.topsrcdir,
'srcdir := %s' % env.topsrcdir,
'VPATH := %s' % env.topsrcdir,
'relativesrcdir := .',
'include $(DEPTH)/config/autoconf.mk',
'',
'FOO := foo',
'',
'include $(topsrcdir)/config/recurse.mk',
])
def test_missing_makefile_in(self):
"""Ensure missing Makefile.in results in Makefile creation."""
env = self._consume('stub0', RecursiveMakeBackend)
p = mozpath.join(env.topobjdir, 'dir2', 'Makefile')
self.assertTrue(os.path.exists(p))
lines = [l.strip() for l in open(p, 'rt').readlines()]
self.assertEqual(len(lines), 9)
self.assertTrue(lines[0].startswith('# THIS FILE WAS AUTOMATICALLY'))
def test_backend_mk(self):
"""Ensure backend.mk file is written out properly."""
env = self._consume('stub0', RecursiveMakeBackend)
p = mozpath.join(env.topobjdir, 'backend.mk')
lines = [l.strip() for l in open(p, 'rt').readlines()[2:]]
self.assertEqual(lines, [
'DIRS := dir1 dir2',
'TEST_DIRS := dir3',
])
def test_mtime_no_change(self):
"""Ensure mtime is not updated if file content does not change."""
env = self._consume('stub0', RecursiveMakeBackend)
makefile_path = mozpath.join(env.topobjdir, 'Makefile')
backend_path = mozpath.join(env.topobjdir, 'backend.mk')
makefile_mtime = os.path.getmtime(makefile_path)
backend_mtime = os.path.getmtime(backend_path)
reader = BuildReader(env)
emitter = TreeMetadataEmitter(env)
backend = RecursiveMakeBackend(env)
backend.consume(emitter.emit(reader.read_topsrcdir()))
self.assertEqual(os.path.getmtime(makefile_path), makefile_mtime)
self.assertEqual(os.path.getmtime(backend_path), backend_mtime)
def test_substitute_config_files(self):
"""Ensure substituted config files are produced."""
env = self._consume('substitute_config_files', RecursiveMakeBackend)
p = mozpath.join(env.topobjdir, 'foo')
self.assertTrue(os.path.exists(p))
lines = [l.strip() for l in open(p, 'rt').readlines()]
self.assertEqual(lines, [
'TEST = foo',
])
def test_variable_passthru(self):
"""Ensure variable passthru is written out correctly."""
env = self._consume('variable_passthru', RecursiveMakeBackend)
backend_path = mozpath.join(env.topobjdir, 'backend.mk')
lines = [l.strip() for l in open(backend_path, 'rt').readlines()[2:]]
expected = {
'DISABLE_STL_WRAPPING': [
'DISABLE_STL_WRAPPING := 1',
],
'EXTRA_COMPONENTS': [
'EXTRA_COMPONENTS += bar.js',
'EXTRA_COMPONENTS += foo.js',
],
'EXTRA_PP_COMPONENTS': [
'EXTRA_PP_COMPONENTS += bar.pp.js',
'EXTRA_PP_COMPONENTS += foo.pp.js',
],
'FAIL_ON_WARNINGS': [
'FAIL_ON_WARNINGS := 1',
],
'MSVC_ENABLE_PGO': [
'MSVC_ENABLE_PGO := 1',
],
'VISIBILITY_FLAGS': [
'VISIBILITY_FLAGS :=',
],
'RCFILE': [
'RCFILE := foo.rc',
],
'RESFILE': [
'RESFILE := bar.res',
],
'RCINCLUDE': [
'RCINCLUDE := bar.rc',
],
'DEFFILE': [
'DEFFILE := baz.def',
],
'USE_STATIC_LIBS': [
'USE_STATIC_LIBS := 1',
],
'MOZBUILD_CFLAGS': [
'MOZBUILD_CFLAGS += -fno-exceptions',
'MOZBUILD_CFLAGS += -w',
],
'MOZBUILD_CXXFLAGS': [
'MOZBUILD_CXXFLAGS += -fcxx-exceptions',
'MOZBUILD_CXXFLAGS += -include foo.h',
],
'MOZBUILD_LDFLAGS': [
'MOZBUILD_LDFLAGS += -framework Foo',
'MOZBUILD_LDFLAGS += -x',
'MOZBUILD_LDFLAGS += -DELAYLOAD:foo.dll',
'MOZBUILD_LDFLAGS += -DELAYLOAD:bar.dll',
],
'WIN32_EXE_LDFLAGS': [
'WIN32_EXE_LDFLAGS += -subsystem:console',
],
}
for var, val in expected.items():
# print("test_variable_passthru[%s]" % (var))
found = [str for str in lines if str.startswith(var)]
self.assertEqual(found, val)
def test_sources(self):
"""Ensure SOURCES and HOST_SOURCES are handled properly."""
env = self._consume('sources', RecursiveMakeBackend)
backend_path = mozpath.join(env.topobjdir, 'backend.mk')
lines = [l.strip() for l in open(backend_path, 'rt').readlines()[2:]]
expected = {
'ASFILES': [
'ASFILES += bar.s',
'ASFILES += foo.asm',
],
'CMMSRCS': [
'CMMSRCS += bar.mm',
'CMMSRCS += foo.mm',
],
'CSRCS': [
'CSRCS += bar.c',
'CSRCS += foo.c',
],
'HOST_CPPSRCS': [
'HOST_CPPSRCS += bar.cpp',
'HOST_CPPSRCS += foo.cpp',
],
'HOST_CSRCS': [
'HOST_CSRCS += bar.c',
'HOST_CSRCS += foo.c',
],
'SSRCS': [
'SSRCS += baz.S',
'SSRCS += foo.S',
],
}
for var, val in expected.items():
found = [str for str in lines if str.startswith(var)]
self.assertEqual(found, val)
def test_exports(self):
"""Ensure EXPORTS is handled properly."""
env = self._consume('exports', RecursiveMakeBackend)
# EXPORTS files should appear in the dist_include install manifest.
m = InstallManifest(path=mozpath.join(env.topobjdir,
'_build_manifests', 'install', 'dist_include'))
self.assertEqual(len(m), 7)
self.assertIn('foo.h', m)
self.assertIn('mozilla/mozilla1.h', m)
self.assertIn('mozilla/dom/dom2.h', m)
def test_generated_files(self):
"""Ensure GENERATED_FILES is handled properly."""
env = self._consume('generated-files', RecursiveMakeBackend)
backend_path = mozpath.join(env.topobjdir, 'backend.mk')
lines = [l.strip() for l in open(backend_path, 'rt').readlines()[2:]]
expected = [
'GENERATED_FILES += bar.c',
'bar.c: %s/generate-bar.py' % env.topsrcdir,
'$(call py_action,file_generate,%s/generate-bar.py bar.c)' % env.topsrcdir,
'',
'GENERATED_FILES += foo.c',
'foo.c: %s/generate-foo.py %s/foo-data' % (env.topsrcdir, env.topsrcdir),
'$(call py_action,file_generate,%s/generate-foo.py foo.c %s/foo-data)' % (env.topsrcdir, env.topsrcdir),
'',
'GENERATED_FILES += quux.c',
]
self.maxDiff = None
self.assertEqual(lines, expected)
def test_resources(self):
"""Ensure RESOURCE_FILES is handled properly."""
env = self._consume('resources', RecursiveMakeBackend)
# RESOURCE_FILES should appear in the dist_bin install manifest.
m = InstallManifest(path=os.path.join(env.topobjdir,
'_build_manifests', 'install', 'dist_bin'))
self.assertEqual(len(m), 10)
self.assertIn('res/foo.res', m)
self.assertIn('res/fonts/font1.ttf', m)
self.assertIn('res/fonts/desktop/desktop2.ttf', m)
self.assertIn('res/bar.res', m)
self.assertIn('res/tests/test.manifest', m)
self.assertIn('res/tests/extra.manifest', m)
def test_js_preference_files(self):
"""Ensure PREF_JS_EXPORTS is written out correctly."""
env = self._consume('js_preference_files', RecursiveMakeBackend)
backend_path = os.path.join(env.topobjdir, 'backend.mk')
lines = [l.strip() for l in open(backend_path, 'rt').readlines()]
# Avoid positional parameter and async related breakage
var = 'PREF_JS_EXPORTS'
found = [val for val in lines if val.startswith(var)]
# Assignment[aa], append[cc], conditional[valid]
expected = ('aa/aa.js', 'bb/bb.js', 'cc/cc.js', 'dd/dd.js', 'valid_val/prefs.js')
expected_top = ('ee/ee.js', 'ff/ff.js')
self.assertEqual(found,
['PREF_JS_EXPORTS += $(topsrcdir)/%s' % val for val in expected_top] +
['PREF_JS_EXPORTS += $(srcdir)/%s' % val for val in expected])
def test_test_manifests_files_written(self):
"""Ensure test manifests get turned into files."""
env = self._consume('test-manifests-written', RecursiveMakeBackend)
tests_dir = mozpath.join(env.topobjdir, '_tests')
m_master = mozpath.join(tests_dir, 'testing', 'mochitest', 'tests', 'mochitest.ini')
x_master = mozpath.join(tests_dir, 'xpcshell', 'xpcshell.ini')
self.assertTrue(os.path.exists(m_master))
self.assertTrue(os.path.exists(x_master))
lines = [l.strip() for l in open(x_master, 'rt').readlines()]
self.assertEqual(lines, [
'; THIS FILE WAS AUTOMATICALLY GENERATED. DO NOT MODIFY BY HAND.',
'',
'[include:dir1/xpcshell.ini]',
'[include:xpcshell.ini]',
])
all_tests_path = mozpath.join(env.topobjdir, 'all-tests.json')
self.assertTrue(os.path.exists(all_tests_path))
with open(all_tests_path, 'rt') as fh:
o = json.load(fh)
self.assertIn('xpcshell.js', o)
self.assertIn('dir1/test_bar.js', o)
self.assertEqual(len(o['xpcshell.js']), 1)
def test_test_manifest_pattern_matches_recorded(self):
"""Pattern matches in test manifests' support-files should be recorded."""
env = self._consume('test-manifests-written', RecursiveMakeBackend)
m = InstallManifest(path=mozpath.join(env.topobjdir,
'_build_manifests', 'install', 'tests'))
# This is not the most robust test in the world, but it gets the job
# done.
entries = [e for e in m._dests.keys() if '**' in e]
self.assertEqual(len(entries), 1)
self.assertIn('support/**', entries[0])
def test_xpidl_generation(self):
"""Ensure xpidl files and directories are written out."""
env = self._consume('xpidl', RecursiveMakeBackend)
# Install manifests should contain entries.
install_dir = mozpath.join(env.topobjdir, '_build_manifests',
'install')
self.assertTrue(os.path.isfile(mozpath.join(install_dir, 'dist_idl')))
self.assertTrue(os.path.isfile(mozpath.join(install_dir, 'xpidl')))
m = InstallManifest(path=mozpath.join(install_dir, 'dist_idl'))
self.assertEqual(len(m), 2)
self.assertIn('bar.idl', m)
self.assertIn('foo.idl', m)
m = InstallManifest(path=mozpath.join(install_dir, 'xpidl'))
self.assertIn('.deps/my_module.pp', m)
m = InstallManifest(path=os.path.join(install_dir, 'dist_bin'))
self.assertIn('components/my_module.xpt', m)
m = InstallManifest(path=mozpath.join(install_dir, 'dist_include'))
self.assertIn('foo.h', m)
p = mozpath.join(env.topobjdir, 'config/makefiles/xpidl')
self.assertTrue(os.path.isdir(p))
self.assertTrue(os.path.isfile(mozpath.join(p, 'Makefile')))
def test_old_install_manifest_deleted(self):
# Simulate an install manifest from a previous backend version. Ensure
# it is deleted.
env = self._get_environment('stub0')
purge_dir = mozpath.join(env.topobjdir, '_build_manifests', 'install')
manifest_path = mozpath.join(purge_dir, 'old_manifest')
os.makedirs(purge_dir)
m = InstallManifest()
m.write(path=manifest_path)
self.assertTrue(os.path.exists(manifest_path))
self._consume('stub0', RecursiveMakeBackend, env)
self.assertFalse(os.path.exists(manifest_path))
def test_install_manifests_written(self):
env, objs = self._emit('stub0')
backend = RecursiveMakeBackend(env)
m = InstallManifest()
backend._install_manifests['testing'] = m
m.add_symlink(__file__, 'self')
backend.consume(objs)
man_dir = mozpath.join(env.topobjdir, '_build_manifests', 'install')
self.assertTrue(os.path.isdir(man_dir))
expected = ['testing']
for e in expected:
full = mozpath.join(man_dir, e)
self.assertTrue(os.path.exists(full))
m2 = InstallManifest(path=full)
self.assertEqual(m, m2)
def test_ipdl_sources(self):
"""Test that IPDL_SOURCES are written to ipdlsrcs.mk correctly."""
env = self._consume('ipdl_sources', RecursiveMakeBackend)
manifest_path = mozpath.join(env.topobjdir,
'ipc', 'ipdl', 'ipdlsrcs.mk')
lines = [l.strip() for l in open(manifest_path, 'rt').readlines()]
# Handle Windows paths correctly
topsrcdir = env.topsrcdir.replace(os.sep, '/')
expected = [
"ALL_IPDLSRCS := %s/bar/bar.ipdl %s/bar/bar2.ipdlh %s/foo/foo.ipdl %s/foo/foo2.ipdlh" % tuple([topsrcdir] * 4),
"CPPSRCS := UnifiedProtocols0.cpp",
"IPDLDIRS := %s/bar %s/foo" % (topsrcdir, topsrcdir),
]
        found = [line for line in lines if line.startswith(('ALL_IPDLSRCS',
                                                            'CPPSRCS',
                                                            'IPDLDIRS'))]
self.assertEqual(found, expected)
def test_defines(self):
"""Test that DEFINES are written to backend.mk correctly."""
env = self._consume('defines', RecursiveMakeBackend)
backend_path = mozpath.join(env.topobjdir, 'backend.mk')
lines = [l.strip() for l in open(backend_path, 'rt').readlines()[2:]]
var = 'DEFINES'
defines = [val for val in lines if val.startswith(var)]
expected = ['DEFINES += -DFOO -DBAZ=\'"ab\'\\\'\'cd"\' -UQUX -DBAR=7 -DVALUE=\'xyz\'']
self.assertEqual(defines, expected)
def test_local_includes(self):
"""Test that LOCAL_INCLUDES are written to backend.mk correctly."""
env = self._consume('local_includes', RecursiveMakeBackend)
backend_path = mozpath.join(env.topobjdir, 'backend.mk')
lines = [l.strip() for l in open(backend_path, 'rt').readlines()[2:]]
expected = [
'LOCAL_INCLUDES += -I$(topsrcdir)/bar/baz',
'LOCAL_INCLUDES += -I$(srcdir)/foo',
]
        found = [line for line in lines if line.startswith('LOCAL_INCLUDES')]
self.assertEqual(found, expected)
def test_generated_includes(self):
"""Test that GENERATED_INCLUDES are written to backend.mk correctly."""
env = self._consume('generated_includes', RecursiveMakeBackend)
backend_path = mozpath.join(env.topobjdir, 'backend.mk')
lines = [l.strip() for l in open(backend_path, 'rt').readlines()[2:]]
topobjdir = env.topobjdir.replace('\\', '/')
expected = [
'LOCAL_INCLUDES += -I%s/bar/baz' % topobjdir,
'LOCAL_INCLUDES += -Ifoo',
]
        found = [line for line in lines if line.startswith('LOCAL_INCLUDES')]
self.assertEqual(found, expected)
def test_final_target(self):
"""Test that FINAL_TARGET is written to backend.mk correctly."""
env = self._consume('final_target', RecursiveMakeBackend)
final_target_rule = "FINAL_TARGET = $(if $(XPI_NAME),$(DIST)/xpi-stage/$(XPI_NAME),$(DIST)/bin)$(DIST_SUBDIR:%=/%)"
expected = dict()
expected[env.topobjdir] = []
expected[mozpath.join(env.topobjdir, 'both')] = [
'XPI_NAME = mycrazyxpi',
'DIST_SUBDIR = asubdir',
final_target_rule
]
expected[mozpath.join(env.topobjdir, 'dist-subdir')] = [
'DIST_SUBDIR = asubdir',
final_target_rule
]
expected[mozpath.join(env.topobjdir, 'xpi-name')] = [
'XPI_NAME = mycrazyxpi',
final_target_rule
]
expected[mozpath.join(env.topobjdir, 'final-target')] = [
'FINAL_TARGET = $(DEPTH)/random-final-target'
]
for key, expected_rules in expected.iteritems():
backend_path = mozpath.join(key, 'backend.mk')
lines = [l.strip() for l in open(backend_path, 'rt').readlines()[2:]]
            found = [line for line in lines if
                     line.startswith('FINAL_TARGET') or line.startswith('XPI_NAME') or
                     line.startswith('DIST_SUBDIR')]
self.assertEqual(found, expected_rules)
def test_config(self):
"""Test that CONFIGURE_SUBST_FILES and CONFIGURE_DEFINE_FILES are
properly handled."""
env = self._consume('test_config', RecursiveMakeBackend)
self.assertEqual(
open(os.path.join(env.topobjdir, 'file'), 'r').readlines(), [
'#ifdef foo\n',
'bar baz\n',
'@bar@\n',
])
self.assertEqual(
open(os.path.join(env.topobjdir, 'file.h'), 'r').readlines(), [
'/* Comment */\n',
'#define foo\n',
'#define foo baz qux\n',
'#define foo baz qux\n',
'#define bar\n',
'#define bar 42\n',
'/* #undef bar */\n',
'\n',
'# define baz 1\n',
'\n',
'#ifdef foo\n',
'# define foo baz qux\n',
'# define foo baz qux\n',
' # define foo baz qux \n',
'#endif\n',
])
def test_jar_manifests(self):
env = self._consume('jar-manifests', RecursiveMakeBackend)
with open(os.path.join(env.topobjdir, 'backend.mk'), 'rb') as fh:
lines = fh.readlines()
lines = [line.rstrip() for line in lines]
self.assertIn('JAR_MANIFEST := %s/jar.mn' % env.topsrcdir, lines)
def test_extra_js_modules(self):
env = self._consume('extra-js-modules', RecursiveMakeBackend)
with open(os.path.join(env.topobjdir, 'backend.mk'), 'rb') as fh:
lines = fh.readlines()
lines = [line.rstrip() for line in lines]
self.maxDiff = None
expected = [
'extra_js__FILES := module1.js module2.js',
'extra_js__DEST = $(FINAL_TARGET)/modules/',
'extra_js__TARGET := misc',
'INSTALL_TARGETS += extra_js_',
'extra_js_submodule_FILES := module3.js module4.js',
'extra_js_submodule_DEST = $(FINAL_TARGET)/modules/submodule',
'extra_js_submodule_TARGET := misc',
'INSTALL_TARGETS += extra_js_submodule',
'extra_pp_js_ := pp-module1.js',
'extra_pp_js__PATH = $(FINAL_TARGET)/modules/',
'extra_pp_js__TARGET := misc',
'PP_TARGETS += extra_pp_js_',
'extra_pp_js_ppsub := pp-module2.js',
'extra_pp_js_ppsub_PATH = $(FINAL_TARGET)/modules/ppsub',
'extra_pp_js_ppsub_TARGET := misc',
'PP_TARGETS += extra_pp_js_ppsub',
]
found = [line for line in lines if line.startswith(('extra_',
'INSTALL_TARGETS',
'PP_TARGETS'))]
self.assertEqual(expected, found)
def test_test_manifests_duplicate_support_files(self):
"""Ensure duplicate support-files in test manifests work."""
env = self._consume('test-manifests-duplicate-support-files',
RecursiveMakeBackend)
p = os.path.join(env.topobjdir, '_build_manifests', 'install', 'tests')
m = InstallManifest(p)
self.assertIn('testing/mochitest/tests/support-file.txt', m)
def test_android_eclipse(self):
env = self._consume('android_eclipse', RecursiveMakeBackend)
with open(mozpath.join(env.topobjdir, 'backend.mk'), 'rb') as fh:
lines = fh.readlines()
lines = [line.rstrip() for line in lines]
# Dependencies first.
self.assertIn('ANDROID_ECLIPSE_PROJECT_main1: target1 target2', lines)
self.assertIn('ANDROID_ECLIPSE_PROJECT_main4: target3 target4', lines)
command_template = '\t$(call py_action,process_install_manifest,' + \
'--no-remove --no-remove-all-directory-symlinks ' + \
'--no-remove-empty-directories %s %s.manifest)'
# Commands second.
for project_name in ['main1', 'main2', 'library1', 'library2']:
stem = '%s/android_eclipse/%s' % (env.topobjdir, project_name)
self.assertIn(command_template % (stem, stem), lines)
# Projects declared in subdirectories.
with open(mozpath.join(env.topobjdir, 'subdir', 'backend.mk'), 'rb') as fh:
lines = fh.readlines()
lines = [line.rstrip() for line in lines]
self.assertIn('ANDROID_ECLIPSE_PROJECT_submain: subtarget1 subtarget2', lines)
for project_name in ['submain', 'sublibrary']:
# Destination and install manifest are relative to topobjdir.
stem = '%s/android_eclipse/%s' % (env.topobjdir, project_name)
self.assertIn(command_template % (stem, stem), lines)
def test_install_manifests_package_tests(self):
"""Ensure test suites honor package_tests=False."""
env = self._consume('test-manifests-package-tests', RecursiveMakeBackend)
tests_dir = mozpath.join(env.topobjdir, '_tests')
all_tests_path = mozpath.join(env.topobjdir, 'all-tests.json')
self.assertTrue(os.path.exists(all_tests_path))
with open(all_tests_path, 'rt') as fh:
o = json.load(fh)
self.assertIn('mochitest.js', o)
self.assertIn('not_packaged.java', o)
man_dir = mozpath.join(env.topobjdir, '_build_manifests', 'install')
self.assertTrue(os.path.isdir(man_dir))
full = mozpath.join(man_dir, 'tests')
self.assertTrue(os.path.exists(full))
m = InstallManifest(path=full)
# Only mochitest.js should be in the install manifest.
self.assertTrue('testing/mochitest/tests/mochitest.js' in m)
# The path is odd here because we do not normalize at test manifest
# processing time. This is a fragile test because there's currently no
# way to iterate the manifest.
self.assertFalse('instrumentation/./not_packaged.java' in m)
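    def _example_manifest_roundtrip(self, tmpdir):
        # Illustrative sketch added for exposition; not part of the original
        # suite. It condenses the write/load/compare round trip that
        # test_install_manifests_written exercises above. `tmpdir` is assumed
        # to be an existing writable directory.
        m = InstallManifest()
        m.add_symlink(__file__, 'self')
        path = mozpath.join(tmpdir, 'example.manifest')
        m.write(path=path)
        return m == InstallManifest(path=path)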
if __name__ == '__main__':
main()
| mpl-2.0 | -2,563,099,380,182,302,700 | 36.42545 | 123 | 0.538414 | false |
Mazecreator/tensorflow | tensorflow/compiler/tests/xla_device_test.py | 76 | 1639 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test cases for XLA devices."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class XlaDeviceTest(test.TestCase):
def testCopies(self):
"""Tests that copies between GPU and XLA devices work."""
if not test.is_gpu_available():
return
with session_lib.Session() as sess:
x = array_ops.placeholder(dtypes.float32, [2])
with ops.device("GPU"):
y = x * 2
with ops.device("device:XLA_CPU:0"):
z = y * y
with ops.device("GPU"):
w = y + z
result = sess.run(w, {x: [1.5, 0.5]})
self.assertAllClose(result, [12., 2.], rtol=1e-3)
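  def _exampleXlaCpuSquare(self):
    # Illustrative sketch added for exposition; not part of the original test.
    # Same device-placement pattern as testCopies, but everything stays on
    # XLA_CPU so no GPU is required. Only imports already present in this
    # file are used.
    with session_lib.Session() as sess:
      x = array_ops.placeholder(dtypes.float32, [2])
      with ops.device("device:XLA_CPU:0"):
        y = x * x
      return sess.run(y, {x: [3.0, 4.0]})  # expected: [9.0, 16.0]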
if __name__ == "__main__":
test.main()
| apache-2.0 | 4,938,639,235,002,573,000 | 33.145833 | 80 | 0.66687 | false |
datakortet/django-cms | cms/tests/security.py | 11 | 8935 | from __future__ import with_statement
from cms.api import create_page, add_plugin
from cms.models.pluginmodel import CMSPlugin
from cms.plugins.text.models import Text
from cms.test_utils.testcases import (CMSTestCase, URL_CMS_PLUGIN_ADD,
URL_CMS_PLUGIN_EDIT, URL_CMS_PLUGIN_REMOVE)
from django.conf import settings
from django.core.urlresolvers import reverse
class SecurityTests(CMSTestCase):
"""
Test security issues by trying some naive requests to add/alter/delete data.
"""
def get_data(self):
page = create_page("page", "nav_playground.html", "en")
placeholder = page.placeholders.get(slot='body')
superuser = self.get_superuser()
staff = self.get_staff_user_with_no_permissions()
return page, placeholder, superuser, staff
def test_add(self):
"""
Test adding a plugin to a *PAGE*.
"""
page, placeholder, superuser, staff = self.get_data()
plugin_data = {
'plugin_type':"TextPlugin",
'language':settings.LANGUAGES[0][0],
'placeholder':page.placeholders.get(slot="body").pk,
}
self.assertEqual(CMSPlugin.objects.count(), 0)
# log the user out and post the plugin data to the cms add-plugin URL.
self.client.logout()
response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
# since the user is not logged in, they should be prompted to log in.
self.assertTemplateUsed(response, 'admin/login.html')
self.assertEqual(CMSPlugin.objects.count(), 0)
# now log a staff user without permissions in and do the same as above.
self.client.login(username='staff', password='staff')
response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
# the user is logged in and the security check fails, so it should 403.
self.assertEqual(response.status_code, 403)
self.assertEqual(CMSPlugin.objects.count(), 0)
def test_edit(self):
"""
Test editing a *PAGE* plugin
"""
page, placeholder, superuser, staff = self.get_data()
# create the plugin using a superuser
plugin = add_plugin(placeholder, 'TextPlugin', 'en', body='body')
plugin_data = {
'plugin_id': plugin.pk,
'body': 'newbody',
}
self.assertEqual(plugin.body, 'body') # check the body is as expected.
# log the user out, try to edit the plugin
self.client.logout()
url = URL_CMS_PLUGIN_EDIT + '%s/' % plugin.pk
response = self.client.post(url, plugin_data)
# since the user is not logged in, they should be prompted to log in.
self.assertTemplateUsed(response, 'admin/login.html')
plugin = self.reload(plugin)
self.assertEqual(plugin.body, 'body')
# now log a staff user without permissions in and do the same as above.
self.client.login(username='staff', password='staff')
response = self.client.post(url, plugin_data)
# the user is logged in and the security check fails, so it should 403.
self.assertEqual(response.status_code, 403)
plugin = self.reload(plugin)
self.assertEqual(plugin.body, 'body')
def test_delete(self):
"""
Test deleting a *PAGE* plugin
"""
page, placeholder, superuser, staff = self.get_data()
plugin = add_plugin(placeholder, 'TextPlugin', 'en', body='body')
plugin_data = {
'plugin_id': plugin.pk,
}
plugin = self.reload(plugin)
self.assertEqual(plugin.body, 'body')
# log the user out, try to remove the plugin
self.client.logout()
response = self.client.post(URL_CMS_PLUGIN_REMOVE, plugin_data)
# since the user is not logged in, they should be prompted to log in.
self.assertTemplateUsed(response, 'admin/login.html')
self.assertEqual(CMSPlugin.objects.count(), 1)
plugin = self.reload(plugin)
self.assertEqual(plugin.body, 'body')
# now log a staff user without permissions in and do the same as above.
self.client.login(username='staff', password='staff')
response = self.client.post(URL_CMS_PLUGIN_REMOVE, plugin_data)
# the user is logged in and the security check fails, so it should 403.
self.assertEqual(response.status_code, 403)
self.assertEqual(CMSPlugin.objects.count(), 1)
plugin = self.reload(plugin)
self.assertEqual(plugin.body, 'body')
def test_add_ph(self):
"""
Test adding a *NON PAGE* plugin
"""
page, placeholder, superuser, staff = self.get_data()
plugin_data = {
'plugin_type':"TextPlugin",
'language':settings.LANGUAGES[0][0],
'placeholder':page.placeholders.get(slot="body").pk,
}
url = reverse('admin:placeholderapp_example1_add_plugin')
self.assertEqual(CMSPlugin.objects.count(), 0)
# log the user out and try to add a plugin using PlaceholderAdmin
self.client.logout()
response = self.client.post(url, plugin_data)
# since the user is not logged in, they should be prompted to log in.
self.assertTemplateUsed(response, 'admin/login.html')
self.assertEqual(CMSPlugin.objects.count(), 0)
# now log a staff user without permissions in and do the same as above.
self.client.login(username='staff', password='staff')
response = self.client.post(url, plugin_data)
# the user is logged in and the security check fails, so it should 403.
self.assertEqual(response.status_code, 403)
self.assertEqual(CMSPlugin.objects.count(), 0)
def test_edit_ph(self):
"""
Test editing a *NON PAGE* plugin
"""
page, placeholder, superuser, staff = self.get_data()
plugin = add_plugin(placeholder, 'TextPlugin', 'en', body='body')
url = reverse('admin:placeholderapp_example1_edit_plugin', args=(plugin.pk,))
plugin_data = {
'body': 'newbody',
'language': 'en',
'plugin_id': plugin.pk,
}
plugin = self.reload(plugin)
self.assertEqual(plugin.body, 'body')
# log the user out and try to edit a plugin using PlaceholderAdmin
self.client.logout()
response = self.client.post(url, plugin_data)
# since the user is not logged in, they should be prompted to log in.
self.assertTemplateUsed(response, 'admin/login.html')
plugin = self.reload(plugin)
self.assertEqual(plugin.body, 'body')
# now log a staff user without permissions in and do the same as above.
self.client.login(username='staff', password='staff')
response = self.client.post(url, plugin_data)
# the user is logged in and the security check fails, so it should 403.
self.assertEqual(response.status_code, 403)
plugin = self.reload(plugin)
self.assertEqual(plugin.body, 'body')
def test_delete_ph(self):
page, placeholder, superuser, staff = self.get_data()
plugin = add_plugin(placeholder, 'TextPlugin', 'en', body='body')
plugin_data = {
'plugin_id': plugin.pk,
}
plugin = self.reload(plugin)
self.assertEqual(plugin.body, 'body')
url = reverse('admin:placeholderapp_example1_remove_plugin')
# log the user out and try to remove a plugin using PlaceholderAdmin
self.client.logout()
response = self.client.post(url, plugin_data)
# since the user is not logged in, they should be prompted to log in.
self.assertTemplateUsed(response, 'admin/login.html')
self.assertEqual(CMSPlugin.objects.count(), 1)
# now log a staff user without permissions in and do the same as above.
self.client.login(username='staff', password='staff')
response = self.client.post(url, plugin_data)
# the user is logged in and the security check fails, so it should 403.
self.assertEqual(response.status_code, 403)
self.assertEqual(CMSPlugin.objects.count(), 1)
def test_text_plugin_xss(self):
page, placeholder, superuser, staff = self.get_data()
with self.login_user_context(superuser):
plugin = add_plugin(placeholder, 'TextPlugin', 'en', body='body')
# ACTUAL TEST STARTS HERE.
data = {
"body": "<div onload='do_evil_stuff();'>divcontent</div><a href='javascript:do_evil_stuff()'>acontent</a>"
}
edit_url = '%s%s/' % (URL_CMS_PLUGIN_EDIT, plugin.pk)
response = self.client.post(edit_url, data)
        self.assertEqual(response.status_code, 200)
txt = Text.objects.all()[0]
        self.assertEqual(txt.body, '<div>divcontent</div><a>acontent</a>')
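    def _assert_requires_login(self, url, data):
        # Illustrative helper added for exposition; not part of the original
        # suite. It factors out the "anonymous POST is redirected to the
        # admin login page" assertion that the tests above repeat.
        self.client.logout()
        response = self.client.post(url, data)
        self.assertTemplateUsed(response, 'admin/login.html')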
| bsd-3-clause | 7,899,653,857,456,746,000 | 46.026316 | 122 | 0.625182 | false |
arkmaxim/grpc | src/python/grpcio_tests/tests/unit/beta/_connectivity_channel_test.py | 15 | 2117 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests of grpc.beta._connectivity_channel."""
import unittest
from grpc.beta import interfaces
class ConnectivityStatesTest(unittest.TestCase):
def testBetaConnectivityStates(self):
self.assertIsNotNone(interfaces.ChannelConnectivity.IDLE)
self.assertIsNotNone(interfaces.ChannelConnectivity.CONNECTING)
self.assertIsNotNone(interfaces.ChannelConnectivity.READY)
self.assertIsNotNone(interfaces.ChannelConnectivity.TRANSIENT_FAILURE)
self.assertIsNotNone(interfaces.ChannelConnectivity.FATAL_FAILURE)
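  def _example_list_states(self):
    # Illustrative sketch added for exposition; not part of the original test.
    # It assumes ChannelConnectivity is a standard enum.Enum (as in grpc.beta
    # at the time), so iterating the class yields every connectivity state.
    return list(interfaces.ChannelConnectivity)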
if __name__ == '__main__':
unittest.main(verbosity=2)
| bsd-3-clause | -180,901,055,696,816,800 | 43.104167 | 74 | 0.787435 | false |
mzdaniel/oh-mainline | vendor/packages/Django/django/db/backends/mysql/client.py | 524 | 1380 | import os
import sys
from django.db.backends import BaseDatabaseClient
class DatabaseClient(BaseDatabaseClient):
executable_name = 'mysql'
def runshell(self):
settings_dict = self.connection.settings_dict
args = [self.executable_name]
db = settings_dict['OPTIONS'].get('db', settings_dict['NAME'])
user = settings_dict['OPTIONS'].get('user', settings_dict['USER'])
passwd = settings_dict['OPTIONS'].get('passwd', settings_dict['PASSWORD'])
host = settings_dict['OPTIONS'].get('host', settings_dict['HOST'])
port = settings_dict['OPTIONS'].get('port', settings_dict['PORT'])
defaults_file = settings_dict['OPTIONS'].get('read_default_file')
# Seems to be no good way to set sql_mode with CLI.
if defaults_file:
args += ["--defaults-file=%s" % defaults_file]
if user:
args += ["--user=%s" % user]
if passwd:
args += ["--password=%s" % passwd]
if host:
if '/' in host:
args += ["--socket=%s" % host]
else:
args += ["--host=%s" % host]
if port:
args += ["--port=%s" % port]
if db:
args += [db]
if os.name == 'nt':
sys.exit(os.system(" ".join(args)))
else:
os.execvp(self.executable_name, args)
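# Illustrative sketch added for exposition; not part of Django. With settings
# like the dict below (keys mirror the ones runshell() reads; the values are
# made up), the command assembled above is roughly:
#   mysql --user=u --password=p --host=h --port=3306 db
_EXAMPLE_SETTINGS_DICT = {
    'NAME': 'db',
    'USER': 'u',
    'PASSWORD': 'p',
    'HOST': 'h',
    'PORT': '3306',
    'OPTIONS': {},
}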
| agpl-3.0 | -3,163,106,734,556,864,500 | 33.5 | 82 | 0.538406 | false |
stefanhahmann/pybossa | test/test_repository/test_user_repository.py | 6 | 8682 | # -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2014 SF Isle of Man Limited
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, see <http://www.gnu.org/licenses/>.
# Cache global variables for timeouts
from default import Test, db
from nose.tools import assert_raises
from factories import UserFactory
from pybossa.repositories import UserRepository
from pybossa.exc import WrongObjectError, DBIntegrityError
class TestUserRepository(Test):
def setUp(self):
super(TestUserRepository, self).setUp()
self.user_repo = UserRepository(db)
def test_get_return_none_if_no_user(self):
"""Test get method returns None if there is no user with the
specified id"""
user = self.user_repo.get(200)
assert user is None, user
def test_get_returns_user(self):
"""Test get method returns a user if exists"""
user = UserFactory.create()
retrieved_user = self.user_repo.get(user.id)
assert user == retrieved_user, retrieved_user
def test_get_by_name_return_none_if_no_user(self):
"""Test get_by_name returns None when a user with the specified
name does not exist"""
user = self.user_repo.get_by_name('thisuserdoesnotexist')
assert user is None, user
def test_get_by_name_returns_the_user(self):
"""Test get_by_name returns a user if exists"""
user = UserFactory.create()
retrieved_user = self.user_repo.get_by_name(user.name)
assert user == retrieved_user, retrieved_user
def test_get_by(self):
"""Test get_by returns a user with the specified attribute"""
user = UserFactory.create(name='Jon Snow')
retrieved_user = self.user_repo.get_by(name=user.name)
assert user == retrieved_user, retrieved_user
def test_get_by_returns_none_if_no_user(self):
"""Test get_by returns None if no user matches the query"""
UserFactory.create(name='Tyrion Lannister')
user = self.user_repo.get_by(name='no_name')
assert user is None, user
    def test_get_all_returns_list_of_all_users(self):
"""Test get_all returns a list of all the existing users"""
users = UserFactory.create_batch(3)
retrieved_users = self.user_repo.get_all()
assert isinstance(retrieved_users, list)
assert len(retrieved_users) == len(users), retrieved_users
for user in retrieved_users:
assert user in users, user
def test_filter_by_no_matches(self):
"""Test filter_by returns an empty list if no users match the query"""
UserFactory.create(name='reek', fullname='Theon Greyjoy')
retrieved_users = self.user_repo.filter_by(name='asha')
assert isinstance(retrieved_users, list)
assert len(retrieved_users) == 0, retrieved_users
def test_filter_by_one_condition(self):
"""Test filter_by returns a list of users that meet the filtering
condition"""
UserFactory.create_batch(3, locale='es')
should_be_missing = UserFactory.create(locale='fr')
retrieved_users = self.user_repo.filter_by(locale='es')
assert len(retrieved_users) == 3, retrieved_users
assert should_be_missing not in retrieved_users, retrieved_users
def test_filter_by_multiple_conditions(self):
"""Test filter_by supports multiple-condition queries"""
UserFactory.create_batch(2, locale='es', privacy_mode=True)
user = UserFactory.create(locale='es', privacy_mode=False)
retrieved_users = self.user_repo.filter_by(locale='es',
privacy_mode=False)
assert len(retrieved_users) == 1, retrieved_users
assert user in retrieved_users, retrieved_users
def test_filter_by_limit_offset(self):
"""Test that filter_by supports limit and offset options"""
UserFactory.create_batch(4)
all_users = self.user_repo.filter_by()
first_two = self.user_repo.filter_by(limit=2)
last_two = self.user_repo.filter_by(limit=2, offset=2)
assert len(first_two) == 2, first_two
assert len(last_two) == 2, last_two
assert first_two == all_users[:2]
assert last_two == all_users[2:]
def test_search_by_name_returns_list(self):
"""Test search_by_name returns a list with search results"""
search = self.user_repo.search_by_name('')
assert isinstance(search, list), search.__class__
def test_search_by_name(self):
"""Test search_by_name returns a list with the user if searching by
either its name or fullname"""
user = UserFactory.create(name='greenseer', fullname='Jojen Reed')
search_by_name = self.user_repo.search_by_name('greenseer')
search_by_fullname = self.user_repo.search_by_name('Jojen Reed')
assert user in search_by_name, search_by_name
assert user in search_by_fullname, search_by_fullname
def test_search_by_name_capital_lower_letters(self):
"""Test search_by_name works the same with capital or lower letters"""
user_capitals = UserFactory.create(name='JOJEN')
user_lowers = UserFactory.create(name='meera')
search_lower = self.user_repo.search_by_name('jojen')
search_capital = self.user_repo.search_by_name('MEERA')
assert user_capitals in search_lower, search_lower
assert user_lowers in search_capital, search_capital
def test_search_by_name_substrings(self):
"""Test search_by_name works when searching by a substring"""
user = UserFactory.create(name='Hodor')
search = self.user_repo.search_by_name('odo')
assert user in search, search
def test_search_by_name_empty_string(self):
"""Test search_by_name returns an empty list when searching by '' """
user = UserFactory.create(name='Brandon')
search = self.user_repo.search_by_name('')
assert len(search) == 0, search
def test_total_users_no_users(self):
"""Test total_users return 0 if there are no users"""
count = self.user_repo.total_users()
assert count == 0, count
def test_total_users_count(self):
"""Test total_users return 1 if there is one user"""
UserFactory.create()
count = self.user_repo.total_users()
assert count == 1, count
def test_save(self):
"""Test save persist the user"""
user = UserFactory.build()
assert self.user_repo.get(user.id) is None
self.user_repo.save(user)
assert self.user_repo.get(user.id) == user, "User not saved"
def test_save_fails_if_integrity_error(self):
"""Test save raises a DBIntegrityError if the instance to be saved lacks
a required value"""
user = UserFactory.build(name=None)
assert_raises(DBIntegrityError, self.user_repo.save, user)
def test_save_only_saves_users(self):
"""Test save raises a WrongObjectError when an object which is not
a User instance is saved"""
bad_object = dict()
assert_raises(WrongObjectError, self.user_repo.save, bad_object)
def test_update(self):
"""Test update persists the changes made to the user"""
user = UserFactory.create(locale='en')
user.locale = 'it'
self.user_repo.update(user)
updated_user = self.user_repo.get(user.id)
assert updated_user.locale == 'it', updated_user
def test_update_fails_if_integrity_error(self):
"""Test update raises a DBIntegrityError if the instance to be updated
lacks a required value"""
user = UserFactory.create()
user.name = None
assert_raises(DBIntegrityError, self.user_repo.update, user)
def test_update_only_updates_users(self):
"""Test update raises a WrongObjectError when an object which is not
a User instance is updated"""
bad_object = dict()
assert_raises(WrongObjectError, self.user_repo.update, bad_object)
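    def _example_save_get_roundtrip(self):
        # Illustrative sketch added for exposition; not part of the original
        # suite. It condenses the save/get round trip the tests above
        # exercise, using the repository and factory already imported here.
        user = UserFactory.build()
        self.user_repo.save(user)
        assert self.user_repo.get_by_name(user.name) == user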
| agpl-3.0 | 1,027,715,820,202,144,600 | 29.356643 | 80 | 0.651578 | false |
albertliangcode/DiceRoll | venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/sbcsgroupprober.py | 2936 | 3291 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetgroupprober import CharSetGroupProber
from .sbcharsetprober import SingleByteCharSetProber
from .langcyrillicmodel import (Win1251CyrillicModel, Koi8rModel,
Latin5CyrillicModel, MacCyrillicModel,
Ibm866Model, Ibm855Model)
from .langgreekmodel import Latin7GreekModel, Win1253GreekModel
from .langbulgarianmodel import Latin5BulgarianModel, Win1251BulgarianModel
from .langhungarianmodel import Latin2HungarianModel, Win1250HungarianModel
from .langthaimodel import TIS620ThaiModel
from .langhebrewmodel import Win1255HebrewModel
from .hebrewprober import HebrewProber
class SBCSGroupProber(CharSetGroupProber):
def __init__(self):
CharSetGroupProber.__init__(self)
self._mProbers = [
SingleByteCharSetProber(Win1251CyrillicModel),
SingleByteCharSetProber(Koi8rModel),
SingleByteCharSetProber(Latin5CyrillicModel),
SingleByteCharSetProber(MacCyrillicModel),
SingleByteCharSetProber(Ibm866Model),
SingleByteCharSetProber(Ibm855Model),
SingleByteCharSetProber(Latin7GreekModel),
SingleByteCharSetProber(Win1253GreekModel),
SingleByteCharSetProber(Latin5BulgarianModel),
SingleByteCharSetProber(Win1251BulgarianModel),
SingleByteCharSetProber(Latin2HungarianModel),
SingleByteCharSetProber(Win1250HungarianModel),
SingleByteCharSetProber(TIS620ThaiModel),
]
hebrewProber = HebrewProber()
logicalHebrewProber = SingleByteCharSetProber(Win1255HebrewModel,
False, hebrewProber)
visualHebrewProber = SingleByteCharSetProber(Win1255HebrewModel, True,
hebrewProber)
hebrewProber.set_model_probers(logicalHebrewProber, visualHebrewProber)
self._mProbers.extend([hebrewProber, logicalHebrewProber,
visualHebrewProber])
self.reset()
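# Illustrative usage sketch added for exposition; not part of the original
# module. feed()/get_charset_name()/get_confidence() are the standard
# CharSetProber API used throughout this package.
def _example_detect(byte_string):
    prober = SBCSGroupProber()
    prober.feed(byte_string)
    return prober.get_charset_name(), prober.get_confidence()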
| mit | -6,324,612,056,457,639,000 | 46.695652 | 79 | 0.69766 | false |
rsunder10/PopularityBased-SearchEngine | lib/python3.4/site-packages/django/core/cache/backends/locmem.py | 586 | 4287 | "Thread-safe in-memory cache backend."
import time
from contextlib import contextmanager
from django.core.cache.backends.base import DEFAULT_TIMEOUT, BaseCache
from django.utils.synch import RWLock
try:
from django.utils.six.moves import cPickle as pickle
except ImportError:
import pickle
# Global in-memory store of cache data. Keyed by name, to provide
# multiple named local memory caches.
_caches = {}
_expire_info = {}
_locks = {}
@contextmanager
def dummy():
"""A context manager that does nothing special."""
yield
class LocMemCache(BaseCache):
def __init__(self, name, params):
BaseCache.__init__(self, params)
self._cache = _caches.setdefault(name, {})
self._expire_info = _expire_info.setdefault(name, {})
self._lock = _locks.setdefault(name, RWLock())
def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
with self._lock.writer():
if self._has_expired(key):
self._set(key, pickled, timeout)
return True
return False
def get(self, key, default=None, version=None, acquire_lock=True):
key = self.make_key(key, version=version)
self.validate_key(key)
pickled = None
with (self._lock.reader() if acquire_lock else dummy()):
if not self._has_expired(key):
pickled = self._cache[key]
if pickled is not None:
try:
return pickle.loads(pickled)
except pickle.PickleError:
return default
with (self._lock.writer() if acquire_lock else dummy()):
try:
del self._cache[key]
del self._expire_info[key]
except KeyError:
pass
return default
def _set(self, key, value, timeout=DEFAULT_TIMEOUT):
if len(self._cache) >= self._max_entries:
self._cull()
self._cache[key] = value
self._expire_info[key] = self.get_backend_timeout(timeout)
def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
with self._lock.writer():
self._set(key, pickled, timeout)
def incr(self, key, delta=1, version=None):
with self._lock.writer():
value = self.get(key, version=version, acquire_lock=False)
if value is None:
raise ValueError("Key '%s' not found" % key)
new_value = value + delta
key = self.make_key(key, version=version)
pickled = pickle.dumps(new_value, pickle.HIGHEST_PROTOCOL)
self._cache[key] = pickled
return new_value
def has_key(self, key, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
with self._lock.reader():
if not self._has_expired(key):
return True
with self._lock.writer():
try:
del self._cache[key]
del self._expire_info[key]
except KeyError:
pass
return False
def _has_expired(self, key):
exp = self._expire_info.get(key, -1)
if exp is None or exp > time.time():
return False
return True
def _cull(self):
if self._cull_frequency == 0:
self.clear()
else:
doomed = [k for (i, k) in enumerate(self._cache) if i % self._cull_frequency == 0]
for k in doomed:
self._delete(k)
def _delete(self, key):
try:
del self._cache[key]
except KeyError:
pass
try:
del self._expire_info[key]
except KeyError:
pass
def delete(self, key, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
with self._lock.writer():
self._delete(key)
def clear(self):
self._cache.clear()
self._expire_info.clear()
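# Illustrative usage sketch added for exposition; not part of Django. The
# (name, params) signature matches __init__ above; 'example' is arbitrary.
def _example_usage():
    cache = LocMemCache('example', {})
    cache.set('key', 'value', timeout=30)
    assert cache.get('key') == 'value'
    cache.add('counter', 0)
    assert cache.incr('counter') == 1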
| mit | 8,109,650,493,505,624,000 | 30.291971 | 94 | 0.567996 | false |
dony71/OpenWrt_Backfire | scripts/dl_cleanup.py | 31 | 5878 | #!/usr/bin/env python
"""
# OpenWRT download directory cleanup utility.
# Delete all but the very last version of the program tarballs.
#
# Copyright (c) 2010 Michael Buesch <[email protected]>
"""
import sys
import os
import re
import getopt
# Commandline options
opt_dryrun = False
def parseVer_1234(match, filepath):
progname = match.group(1)
progversion = (int(match.group(2)) << 64) |\
(int(match.group(3)) << 48) |\
(int(match.group(4)) << 32) |\
(int(match.group(5)) << 16)
return (progname, progversion)
def parseVer_123(match, filepath):
progname = match.group(1)
try:
patchlevel = match.group(5)
except (IndexError), e:
patchlevel = None
if patchlevel:
patchlevel = ord(patchlevel[0])
else:
patchlevel = 0
progversion = (int(match.group(2)) << 64) |\
(int(match.group(3)) << 48) |\
(int(match.group(4)) << 32) |\
patchlevel
return (progname, progversion)
def parseVer_12(match, filepath):
progname = match.group(1)
try:
patchlevel = match.group(4)
except (IndexError), e:
patchlevel = None
if patchlevel:
patchlevel = ord(patchlevel[0])
else:
patchlevel = 0
progversion = (int(match.group(2)) << 64) |\
(int(match.group(3)) << 48) |\
patchlevel
return (progname, progversion)
def parseVer_r(match, filepath):
progname = match.group(1)
progversion = (int(match.group(2)) << 64)
return (progname, progversion)
def parseVer_ymd(match, filepath):
progname = match.group(1)
progversion = (int(match.group(2)) << 64) |\
(int(match.group(3)) << 48) |\
(int(match.group(4)) << 32)
return (progname, progversion)
def parseVer_GIT(match, filepath):
progname = match.group(1)
st = os.stat(filepath)
progversion = int(st.st_mtime) << 64
return (progname, progversion)
extensions = (
".tar.gz",
".tar.bz2",
".orig.tar.gz",
".orig.tar.bz2",
".zip",
".tgz",
".tbz",
)
versionRegex = (
(re.compile(r"(.+)[-_]([0-9a-fA-F]{40,40})"), parseVer_GIT), # xxx-GIT_SHASUM
(re.compile(r"(.+)[-_](\d+)\.(\d+)\.(\d+)\.(\d+)"), parseVer_1234), # xxx-1.2.3.4
(re.compile(r"(.+)[-_](\d\d\d\d)-?(\d\d)-?(\d\d)"), parseVer_ymd), # xxx-YYYY-MM-DD
(re.compile(r"(.+)[-_](\d+)\.(\d+)\.(\d+)(\w?)"), parseVer_123), # xxx-1.2.3a
(re.compile(r"(.+)[-_](\d+)_(\d+)_(\d+)"), parseVer_123), # xxx-1_2_3
(re.compile(r"(.+)[-_](\d+)\.(\d+)(\w?)"), parseVer_12), # xxx-1.2a
(re.compile(r"(.+)[-_]r?(\d+)"), parseVer_r), # xxx-r1111
)
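# Illustrative helper added for exposition; not used by the cleanup logic
# below. It runs a bare filename through the regex table above and returns
# (progname, packed_version): each parser packs the version fields into one
# big integer so that plain >= comparison orders releases. Note the GIT
# pattern additionally stat()s the path, so it needs a real file.
def exampleParse(filename):
	for (regex, parseVersion) in versionRegex:
		match = regex.match(filename)
		if match:
			return parseVersion(match, filename)
	return None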
blacklist = [
("linux", re.compile(r"linux-.*")),
("gcc", re.compile(r"gcc-.*")),
("wl_apsta", re.compile(r"wl_apsta.*")),
(".fw", re.compile(r".*\.fw")),
(".arm", re.compile(r".*\.arm")),
(".bin", re.compile(r".*\.bin")),
("rt-firmware", re.compile(r"RT[\d\w]+_Firmware.*")),
]
class EntryParseError(Exception): pass
class Entry:
def __init__(self, directory, filename):
self.directory = directory
self.filename = filename
self.progname = ""
self.fileext = ""
for ext in extensions:
if filename.endswith(ext):
filename = filename[0:0-len(ext)]
self.fileext = ext
break
else:
print self.filename, "has an unknown file-extension"
raise EntryParseError("ext")
for (regex, parseVersion) in versionRegex:
match = regex.match(filename)
if match:
(self.progname, self.version) = parseVersion(
match, directory + "/" + filename + self.fileext)
break
else:
print self.filename, "has an unknown version pattern"
raise EntryParseError("ver")
def deleteFile(self):
path = (self.directory + "/" + self.filename).replace("//", "/")
print "Deleting", path
if not opt_dryrun:
os.unlink(path)
def __eq__(self, y):
return self.filename == y.filename
def __ge__(self, y):
return self.version >= y.version
def usage():
print "OpenWRT download directory cleanup utility"
print "Usage: " + sys.argv[0] + " [OPTIONS] <path/to/dl>"
print ""
print " -d|--dry-run Do a dry-run. Don't delete any files"
print " -B|--show-blacklist Show the blacklist and exit"
print " -w|--whitelist ITEM Remove ITEM from blacklist"
def main(argv):
global opt_dryrun
try:
(opts, args) = getopt.getopt(argv[1:],
"hdBw:",
[ "help", "dry-run", "show-blacklist", "whitelist=", ])
if len(args) != 1:
			raise getopt.GetoptError("expected exactly one argument: <path/to/dl>")
except getopt.GetoptError:
usage()
return 1
directory = args[0]
for (o, v) in opts:
if o in ("-h", "--help"):
usage()
return 0
if o in ("-d", "--dry-run"):
opt_dryrun = True
if o in ("-w", "--whitelist"):
for i in range(0, len(blacklist)):
(name, regex) = blacklist[i]
if name == v:
del blacklist[i]
break
else:
print "Whitelist error: Item", v,\
"is not in blacklist"
return 1
if o in ("-B", "--show-blacklist"):
for (name, regex) in blacklist:
print name
return 0
# Create a directory listing and parse the file names.
entries = []
for filename in os.listdir(directory):
if filename == "." or filename == "..":
continue
for (name, regex) in blacklist:
if regex.match(filename):
if opt_dryrun:
print filename, "is blacklisted"
break
else:
try:
entries.append(Entry(directory, filename))
except (EntryParseError), e: pass
# Create a map of programs
progmap = {}
for entry in entries:
if entry.progname in progmap.keys():
progmap[entry.progname].append(entry)
else:
progmap[entry.progname] = [entry,]
# Traverse the program map and delete everything but the last version
for prog in progmap:
lastVersion = None
versions = progmap[prog]
for version in versions:
if lastVersion is None or version >= lastVersion:
lastVersion = version
if lastVersion:
for version in versions:
if version != lastVersion:
version.deleteFile()
if opt_dryrun:
print "Keeping", lastVersion.filename
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
| gpl-2.0 | -7,498,248,065,641,068,000 | 25.00885 | 84 | 0.618067 | false |
weimingtom/python-for-android | python3-alpha/python3-src/Lib/plat-linux3/DLFCN.py | 171 | 1625 | # Generated by h2py from /usr/include/dlfcn.h
_DLFCN_H = 1
# Included from features.h
_FEATURES_H = 1
__USE_ANSI = 1
__FAVOR_BSD = 1
_ISOC99_SOURCE = 1
_POSIX_SOURCE = 1
_POSIX_C_SOURCE = 199506
_XOPEN_SOURCE = 600
_XOPEN_SOURCE_EXTENDED = 1
_LARGEFILE64_SOURCE = 1
_BSD_SOURCE = 1
_SVID_SOURCE = 1
_BSD_SOURCE = 1
_SVID_SOURCE = 1
__USE_ISOC99 = 1
_POSIX_SOURCE = 1
_POSIX_C_SOURCE = 2
_POSIX_C_SOURCE = 199506
__USE_POSIX = 1
__USE_POSIX2 = 1
__USE_POSIX199309 = 1
__USE_POSIX199506 = 1
__USE_XOPEN = 1
__USE_XOPEN_EXTENDED = 1
__USE_UNIX98 = 1
_LARGEFILE_SOURCE = 1
__USE_XOPEN2K = 1
__USE_ISOC99 = 1
__USE_XOPEN_EXTENDED = 1
__USE_LARGEFILE = 1
__USE_LARGEFILE64 = 1
__USE_FILE_OFFSET64 = 1
__USE_MISC = 1
__USE_BSD = 1
__USE_SVID = 1
__USE_GNU = 1
__USE_REENTRANT = 1
__STDC_IEC_559__ = 1
__STDC_IEC_559_COMPLEX__ = 1
__STDC_ISO_10646__ = 200009
__GNU_LIBRARY__ = 6
__GLIBC__ = 2
__GLIBC_MINOR__ = 2
# Included from sys/cdefs.h
_SYS_CDEFS_H = 1
def __PMT(args): return args
def __P(args): return args
def __PMT(args): return args
def __STRING(x): return #x
__flexarr = []
__flexarr = [0]
__flexarr = []
__flexarr = [1]
def __ASMNAME(cname): return __ASMNAME2 (__USER_LABEL_PREFIX__, cname)
def __attribute__(xyz): return
def __attribute_format_arg__(x): return __attribute__ ((__format_arg__ (x)))
def __attribute_format_arg__(x): return
__USE_LARGEFILE = 1
__USE_LARGEFILE64 = 1
__USE_EXTERN_INLINES = 1
# Included from gnu/stubs.h
# Included from bits/dlfcn.h
RTLD_LAZY = 0x00001
RTLD_NOW = 0x00002
RTLD_BINDING_MASK = 0x3
RTLD_NOLOAD = 0x00004
RTLD_GLOBAL = 0x00100
RTLD_LOCAL = 0
RTLD_NODELETE = 0x01000
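# Illustrative usage note added for exposition; not part of the generated
# file. The RTLD_* flags above feed the dynamic loader, e.g. via ctypes:
#   import ctypes
#   libm = ctypes.CDLL("libm.so.6", mode=RTLD_LAZY | RTLD_GLOBAL)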
| apache-2.0 | 3,264,714,216,779,279,000 | 18.578313 | 76 | 0.652308 | false |
alexproca/askbot-devel | askbot/management/commands/askbot_rebuild_index.py | 9 | 4020 | import sys
from optparse import make_option
from django.core.management import get_commands, load_command_class
from django.utils.translation import activate as activate_language
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
try:
from haystack.management.commands.clear_index import Command as ClearCommand
from haystack.management.commands.update_index import Command as UpdateCommand
haystack_option_list = [option for option in UpdateCommand.base_options if option.get_opt_string() != '--verbosity'] + \
[option for option in ClearCommand.base_options if not option.get_opt_string() in ['--using', '--verbosity']]
except ImportError:
haystack_option_list = []
class Command(BaseCommand):
help = "Completely rebuilds the search index by removing the old data and then updating."
base_options = [make_option("-l", "--language", action="store", type="string", dest="language",
help='Language to user, in language code format'),]
option_list = list(BaseCommand.option_list) + haystack_option_list + base_options
def handle(self, *args, **options):
lang_code = options.get('language', settings.LANGUAGE_CODE.lower())
options['using'] = ['default_%s' % lang_code[:2],]
activate_language(lang_code)
klass = self._get_command_class('clear_index')
klass.handle(*args, **options)
klass = self._get_command_class('update_index')
klass.handle(*args, **options)
def _get_command_class(self, name):
try:
app_name = get_commands()[name]
if isinstance(app_name, BaseCommand):
# If the command is already loaded, use it directly.
klass = app_name
else:
klass = load_command_class(app_name, name)
except KeyError:
raise CommandError("Unknown command: %r" % name)
return klass
def execute(self, *args, **options):
"""
Try to execute this command, performing model validation if
needed (as controlled by the attribute
``self.requires_model_validation``). If the command raises a
``CommandError``, intercept it and print it sensibly to
stderr.
"""
show_traceback = options.get('traceback', False)
if self.can_import_settings:
try:
#language part used to be here
pass
except ImportError, e:
# If settings should be available, but aren't,
# raise the error and quit.
if show_traceback:
traceback.print_exc()
else:
sys.stderr.write(smart_str(self.style.ERROR('Error: %s\n' % e)))
sys.exit(1)
try:
self.stdout = options.get('stdout', sys.stdout)
self.stderr = options.get('stderr', sys.stderr)
if self.requires_model_validation:
self.validate()
output = self.handle(*args, **options)
if output:
if self.output_transaction:
# This needs to be imported here, because it relies on
# settings.
from django.db import connections, DEFAULT_DB_ALIAS
connection = connections[options.get('database', DEFAULT_DB_ALIAS)]
if connection.ops.start_transaction_sql():
self.stdout.write(self.style.SQL_KEYWORD(connection.ops.start_transaction_sql()) + '\n')
self.stdout.write(output)
if self.output_transaction:
self.stdout.write('\n' + self.style.SQL_KEYWORD("COMMIT;") + '\n')
except CommandError, e:
if show_traceback:
traceback.print_exc()
else:
self.stderr.write(smart_str(self.style.ERROR('Error: %s\n' % e)))
sys.exit(1)
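def _example_rebuild(lang='en'):
    # Illustrative sketch added for exposition; not part of the command. A
    # programmatic invocation equivalent to running
    # `manage.py askbot_rebuild_index -l <lang>` from the shell.
    from django.core.management import call_command
    call_command('askbot_rebuild_index', language=lang)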
| gpl-3.0 | 7,014,667,089,417,214,000 | 42.695652 | 127 | 0.590796 | false |
oceanobservatories/mi-instrument | mi/dataset/parser/test/test_cg_stc_eng_stc.py | 5 | 16780 | #!/usr/bin/env python
"""
@package mi.dataset.parser.test.test_cg_stc_eng_stc
@file marine-integrations/mi/dataset/parser/test/test_cg_stc_eng_stc.py
@author Mike Nicoletti
@brief Test code for a Cg_stc_eng_stc data parser
"""
import os
import re
import ntplib
from nose.plugins.attrib import attr
from mi.core.exceptions import SampleException
from mi.core.instrument.dataset_data_particle import DataParticleKey
from mi.core.log import get_logger
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.parser.cg_stc_eng_stc import CgStcEngStcParser, CgStcEngStcParserDataParticle
from mi.dataset.parser.cg_stc_eng_stc import CgStcEngStcParserDataParticleKey
from mi.dataset.test.test_parser import ParserUnitTestCase
from mi.dataset.driver.cg_stc_eng.stc.resource import RESOURCE_PATH
log = get_logger()
@attr('UNIT', group='mi')
class CgParserUnitTestCase(ParserUnitTestCase):
"""
Cg_stc_eng_stc Parser unit test suite
"""
def state_callback(self, state, file_ingested):
""" Call back method to watch what comes in via the position callback """
self.state_callback_value = state
self.file_ingested_value = file_ingested
def pub_callback(self, pub):
""" Call back method to watch what comes in via the publish callback """
self.publish_callback_value = pub
def exception_callback(self, exception):
""" Callback method to watch what comes in via the exception callback """
self.exception_callback_value = exception
def setUp(self):
ParserUnitTestCase.setUp(self)
self.config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.cg_stc_eng_stc',
DataSetDriverConfigKeys.PARTICLE_CLASS: 'CgStcEngStcParserDataParticle'
}
# Define test data particles and their associated timestamps which will be
# compared with returned results
fid = open(os.path.join(RESOURCE_PATH, 'stc_status.txt'))
data = fid.read()
fid.close()
utime_grp = re.search(r'Platform.utime=(.+?)(\r\n?|\n)', data)
self.timestamp_a = ntplib.system_to_ntp_time(float(utime_grp.group(1)))
self.particle_a = CgStcEngStcParserDataParticle(data,
internal_timestamp=self.timestamp_a,
preferred_timestamp=DataParticleKey.INTERNAL_TIMESTAMP)
self.comparison_list = [{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_PLATFORM_TIME,
DataParticleKey.VALUE: '2013/10/04 16:07:02.253'},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_PLATFORM_UTIME,
DataParticleKey.VALUE: 1380902822.253},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_MSG_CNTS_C_GPS,
DataParticleKey.VALUE: 83},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_MSG_CNTS_C_NTP,
DataParticleKey.VALUE: 0},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_MSG_CNTS_C_PPS,
DataParticleKey.VALUE: 4},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_MSG_CNTS_C_POWER_SYS,
DataParticleKey.VALUE: 0},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_MSG_CNTS_C_SUPERV,
DataParticleKey.VALUE: 7},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_MSG_CNTS_C_TELEM,
DataParticleKey.VALUE: 0},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_ERR_C_GPS,
DataParticleKey.VALUE: 1},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_ERR_C_PPS,
DataParticleKey.VALUE: 1},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_ERR_C_TELEM_SYS,
DataParticleKey.VALUE: 3},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_ERRMSG_C_GPS,
DataParticleKey.VALUE: '***Warning, BAD GPS CHECKSUM'},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_ERRMSG_C_PPS,
DataParticleKey.VALUE: 'C_PPS: Warning: Pulse delta [790] above warning level [500], still within window [900]'},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_ERRMSG_C_TELEM_SYS,
DataParticleKey.VALUE: ' "***Error turning on fb1 [ret=No Device]'},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_CPU_UPTIME,
DataParticleKey.VALUE: '0 days 00:01:22'},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_CPU_LOAD1,
DataParticleKey.VALUE: 1.03},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_CPU_LOAD5,
DataParticleKey.VALUE: 0.36},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_CPU_LOAD15,
DataParticleKey.VALUE: 0.12},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_MEMORY_RAM,
DataParticleKey.VALUE: 127460},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_MEMORY_FREE,
DataParticleKey.VALUE: 93396},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_NPROC,
DataParticleKey.VALUE: 76},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_EFLAG,
DataParticleKey.VALUE: 0},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_MAIN_V,
DataParticleKey.VALUE: 17.90},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_MAIN_C,
DataParticleKey.VALUE: 379.20},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_BAT_V,
DataParticleKey.VALUE: 0.0},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_BAT_C,
DataParticleKey.VALUE: 0.0},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_TEMP1,
DataParticleKey.VALUE: 25.0},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_TEMP2,
DataParticleKey.VALUE: 23.3},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_HUMID,
DataParticleKey.VALUE: 31.6},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_PRESS,
DataParticleKey.VALUE: 14.7},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_GF_ENA,
DataParticleKey.VALUE: 15},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_GFLT1,
DataParticleKey.VALUE: 7.7},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_GFLT2,
DataParticleKey.VALUE: 5.2},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_GFLT3,
DataParticleKey.VALUE: 2.8},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_GFLT4,
DataParticleKey.VALUE: 4.0},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_LD_ENA,
DataParticleKey.VALUE: 3},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_GPS_DATE,
DataParticleKey.VALUE: 41013},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_GPS_TIME,
DataParticleKey.VALUE: 160701},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_GPS_LATSTR,
DataParticleKey.VALUE: '4132.1353 N'},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_GPS_LONSTR,
DataParticleKey.VALUE: '07038.8306 W'},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_GPS_LAT,
DataParticleKey.VALUE: 41.535588},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_GPS_LON,
DataParticleKey.VALUE: -70.647177},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_GPS_SPD,
DataParticleKey.VALUE: 0.0}]
# uncomment the following to write the above comparison list in yml format to a file
# self.write_comparison_to_yml()
self.file_ingested_value = None
self.state_callback_value = None
self.publish_callback_value = None
self.exception_callback_value = None
def write_comparison_to_yml(self):
"""
        Helper method to create a yml file for driver tests
"""
fid = open('particle.yml', 'a')
fid.write('header:\n')
fid.write(' particle_object: CgStcEngStcParserDataParticle\n')
fid.write(' particle_type: cg_stc_eng_stc\n')
fid.write('data:\n')
fid.write(' - _index: 1\n')
fid.write(' internal_timestamp: 0.0\n')
for item in self.comparison_list:
if isinstance(item.get('value'), float):
fid.write(' %s: %16.20f\n' % (item.get('value_id'), item.get('value')))
else:
fid.write(' %s: %s\n' % (item.get('value_id'), item.get('value')))
fid.close()
def assert_result(self, result, particle, ingested):
if result[0].raw_data == particle.raw_data:
log.debug("raw data match")
log.debug("comparing result %s, particle %s", result[0].contents, particle.contents)
self.assertEqual(result, [particle])
self.assertEqual(self.file_ingested_value, ingested)
self.assert_(isinstance(self.publish_callback_value, list))
self.assertEqual(self.publish_callback_value[0], particle)
def test_simple(self):
"""
Read test data and pull out data particles one at a time.
Assert that the results are those we expected.
"""
stream_handle = open(os.path.join(RESOURCE_PATH, 'stc_status.txt'))
self.parser = CgStcEngStcParser(self.config, None, stream_handle,
self.state_callback, self.pub_callback,
self.exception_callback)
result = self.parser.get_records(1)
self.assert_result(result, self.particle_a, True)
# no data left, do not move the position
result = self.parser.get_records(1)
self.assertEqual(result, [])
self.assert_(isinstance(self.publish_callback_value, list))
self.assertEqual(self.publish_callback_value[0], self.particle_a)
self.assertEqual(self.exception_callback_value, None)
def test_simple_particles(self):
"""
Read test data and pull out data particles one at a time.
Assert that the results are those we expected.
"""
stream_handle = open(os.path.join(RESOURCE_PATH, 'stc_status2.txt'))
self.parser = CgStcEngStcParser(self.config, None, stream_handle,
self.state_callback, self.pub_callback,
self.exception_callback)
result = self.parser.get_records(1)
self.assert_particles(result, 'stc_first2.result.yml', RESOURCE_PATH)
self.assertEqual(self.exception_callback_value, None)
def test_get_many(self):
"""
Read test data and try to pull out multiple data particles at one time,
but we should only get 1 .
Assert that the results are those we expected.
"""
stream_handle = open(os.path.join(RESOURCE_PATH, 'stc_status.txt'))
self.parser = CgStcEngStcParser(self.config, None, stream_handle,
self.state_callback, self.pub_callback,
self.exception_callback)
result = self.parser.get_records(4)
self.assert_result(result, self.particle_a, True)
self.assertEqual(len(self.publish_callback_value), 1)
# no data left, do not move the position
result = self.parser.get_records(1)
self.assertEqual(result, [])
self.assert_(isinstance(self.publish_callback_value, list))
self.assertEqual(self.publish_callback_value[0], self.particle_a)
self.assertEqual(self.exception_callback_value, None)
def test_generate(self):
"""
Ensure we can generate the particle dictionary and compare it to expected ones
"""
stream_handle = open(os.path.join(RESOURCE_PATH, 'stc_status.txt'))
self.parser = CgStcEngStcParser(self.config, None, stream_handle,
self.state_callback, self.pub_callback,
self.exception_callback)
result = self.parser.get_records(1)
res_dict = result[0].generate_dict()
# assert two lists of generated dictionaries are the same
for cdict in self.comparison_list:
for rdict in res_dict['values']:
if cdict.get('value_id') == rdict.get('value_id'):
if cdict.get('value') != rdict.get('value'):
log.error("mismatch for key %s, values '%s' '%s'", cdict.get('value_id'),
cdict.get('value'),
rdict.get('value'))
self.fail("mismatch for key %s, values '%s', '%s'" % (cdict.get('value_id'),
cdict.get('value'),
rdict.get('value')))
def test_bad_data(self):
"""
Ensure that the missing timestamp field causes a sample exception
"""
with self.assertRaises(SampleException):
stream_handle = open(os.path.join(RESOURCE_PATH, 'stc_status_missing_time.txt'))
self.parser = CgStcEngStcParser(self.config, None, stream_handle,
self.state_callback, self.pub_callback,
self.exception_callback)
self.parser.get_records(1)
def test_encoding(self):
"""
Create an encoding error in the data and make sure an encoding error shows up
"""
stream_handle = open(os.path.join(RESOURCE_PATH, 'stc_status_bad_encode.txt'))
self.parser = CgStcEngStcParser(self.config, None, stream_handle,
self.state_callback, self.pub_callback,
self.exception_callback)
result = self.parser.get_records(1)
errors = result[0].get_encoding_errors()
log.debug("encoding errors: %s", errors)
self.assertNotEqual(errors, [])
| bsd-2-clause | -5,599,959,587,224,635,000 | 57.466899 | 146 | 0.566329 | false |
foss-for-synopsys-dwc-arc-processors/RIOT | tests/lwip/tests/01-run.py | 15 | 11444 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2016 Martine Lenders <[email protected]>
#
# Distributed under terms of the MIT license.
from __future__ import print_function
import argparse
import os, sys
import random
import pexpect
import subprocess
import time
import types
DEFAULT_TIMEOUT = 5
class Strategy(object):
def __init__(self, func=None):
        if func is not None:
if sys.version_info < (3,):
self.__class__.execute = types.MethodType(func, self, self.__class__)
else:
self.__class__.execute = types.MethodType(func, self)
def execute(self, *args, **kwargs):
raise NotImplementedError()
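# Minimal usage sketch: a Strategy can be built from a plain function instead
# of subclassing (the callable below is hypothetical):
#
#   def print_app(self, application):
#       print("executing for %s" % application)
#
#   s = Strategy(func=print_app)
#   s.execute("tests/lwip")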
class ApplicationStrategy(Strategy):
def __init__(self, app_dir=os.getcwd(), func=None):
super(ApplicationStrategy, self).__init__(func)
self.app_dir = app_dir
class BoardStrategy(Strategy):
def __init__(self, board, func=None):
super(BoardStrategy, self).__init__(func)
self.board = board
    def _run_make(self, application, make_targets, env=None):
        make_env = os.environ.copy()
        if env is not None:
            make_env.update(env)
        make_env.update(self.board.to_env())
        cmd = ("make", "-C", application) + make_targets
        print(' '.join(cmd))
        print(subprocess.check_output(cmd, env=make_env))
def execute(self, application):
super(BoardStrategy, self).execute(application)
class CleanStrategy(BoardStrategy):
def execute(self, application, env=None):
        super(CleanStrategy, self)._run_make(application, ("-B", "clean"), env)
class BuildStrategy(BoardStrategy):
def execute(self, application, env=None):
        super(BuildStrategy, self)._run_make(application, ("all",), env)
class FlashStrategy(BoardStrategy):
def execute(self, application, env=None):
        super(FlashStrategy, self)._run_make(application, ("all",), env)
class ResetStrategy(BoardStrategy):
def execute(self, application, env=None):
        super(ResetStrategy, self)._run_make(application, ("reset",), env)
class Board(object):
def __init__(self, name, port=None, serial=None, clean=None,
build=None, flash=None,
reset=None, term=None):
def _reset_native_execute(obj, application, env=None, *args, **kwargs):
pass
        if (name == "native") and (reset is None):
reset = _reset_native_execute
self.name = name
self.port = port
self.serial = serial
self.clean_strategy = CleanStrategy(self, clean)
self.build_strategy = BuildStrategy(self, build)
self.flash_strategy = FlashStrategy(self, flash)
self.reset_strategy = ResetStrategy(self, reset)
def __len__(self):
return 1
    def __iter__(self):
        # a single Board behaves as a one-element group when iterated
        return iter([self])
def __repr__(self):
return ("<Board %s,port=%s,serial=%s>" %
(repr(self.name), repr(self.port), repr(self.serial)))
def to_env(self):
env = {}
if self.name:
env['BOARD'] = self.name
if self.port:
env['PORT'] = self.port
if self.serial:
env['SERIAL'] = self.serial
return env
    def clean(self, application=os.getcwd(), env=None):
        self.clean_strategy.execute(application, env)
def build(self, application=os.getcwd(), env=None):
self.build_strategy.execute(application, env)
def flash(self, application=os.getcwd(), env=None):
self.flash_strategy.execute(application, env)
def reset(self, application=os.getcwd(), env=None):
self.reset_strategy.execute(application, env)
class BoardGroup(object):
def __init__(self, boards):
self.boards = boards
def __len__(self):
return len(self.boards)
def __iter__(self):
return iter(self.boards)
def __repr__(self):
return str(self.boards)
def clean(self, application=os.getcwd(), env=None):
for board in self.boards:
board.clean(application, env)
def build(self, application=os.getcwd(), env=None):
for board in self.boards:
board.build(application, env)
def flash(self, application=os.getcwd(), env=None):
for board in self.boards:
board.flash(application, env)
def reset(self, application=os.getcwd(), env=None):
for board in self.boards:
board.reset(application, env)
def default_test_case(board_group, application, env=None):
    for board in board_group:
        test_env = os.environ.copy()
        if env is not None:
            test_env.update(env)
        test_env.update(board.to_env())
        with pexpect.spawnu("make", ["-C", application, "term"], env=test_env,
timeout=DEFAULT_TIMEOUT,
logfile=sys.stdout) as spawn:
spawn.expect("TEST: SUCCESS")
class TestStrategy(ApplicationStrategy):
def execute(self, board_groups, test_cases=[default_test_case],
timeout=DEFAULT_TIMEOUT, env=None):
for board_group in board_groups:
print("Testing for %s: " % board_group)
for test_case in test_cases:
                board_group.reset(self.app_dir)
                test_case(board_group, self.app_dir, env=env)
sys.stdout.write('.')
sys.stdout.flush()
print()
def get_ipv6_address(spawn):
spawn.sendline(u"ifconfig")
spawn.expect(u"[A-Za-z0-9]{2}_[0-9]+: inet6 (fe80::[0-9a-f:]+)")
return spawn.match.group(1)
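# Illustrative shell exchange that get_ipv6_address() parses (interface name
# and address are examples only):
#
#   > ifconfig
#   ET_0: inet6 fe80::3c63:beff:fe85:ca96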
def test_ipv6_send(board_group, application, env=None):
env_sender = os.environ.copy()
    if env is not None:
env_sender.update(env)
env_sender.update(board_group.boards[0].to_env())
env_receiver = os.environ.copy()
    if env is not None:
env_receiver.update(env)
env_receiver.update(board_group.boards[1].to_env())
with pexpect.spawnu("make", ["-C", application, "term"], env=env_sender,
timeout=DEFAULT_TIMEOUT) as sender, \
pexpect.spawnu("make", ["-C", application, "term"], env=env_receiver,
timeout=DEFAULT_TIMEOUT) as receiver:
ipprot = random.randint(0x00, 0xff)
receiver_ip = get_ipv6_address(receiver)
receiver.sendline(u"ip server start %d" % ipprot)
# wait for neighbor discovery to be done
time.sleep(5)
sender.sendline(u"ip send %s %d 01:23:45:67:89:ab:cd:ef" % (receiver_ip, ipprot))
sender.expect_exact(u"Success: send 8 byte over IPv6 to %s (next header: %d)" %
(receiver_ip, ipprot))
receiver.expect(u"00000000 01 23 45 67 89 AB CD EF")
def test_udpv6_send(board_group, application, env=None):
env_sender = os.environ.copy()
    if env is not None:
env_sender.update(env)
env_sender.update(board_group.boards[0].to_env())
env_receiver = os.environ.copy()
    if env is not None:
env_receiver.update(env)
env_receiver.update(board_group.boards[1].to_env())
with pexpect.spawnu("make", ["-C", application, "term"], env=env_sender,
timeout=DEFAULT_TIMEOUT) as sender, \
pexpect.spawnu("make", ["-C", application, "term"], env=env_receiver,
timeout=DEFAULT_TIMEOUT) as receiver:
port = random.randint(0x0000, 0xffff)
receiver_ip = get_ipv6_address(receiver)
receiver.sendline(u"udp server start %d" % port)
# wait for neighbor discovery to be done
time.sleep(5)
sender.sendline(u"udp send %s %d ab:cd:ef" % (receiver_ip, port))
sender.expect_exact(u"Success: send 3 byte over UDP to [%s]:%d" %
(receiver_ip, port))
receiver.expect(u"00000000 AB CD EF")
def test_tcpv6_send(board_group, application, env=None):
env_client = os.environ.copy()
    if env is not None:
env_client.update(env)
env_client.update(board_group.boards[0].to_env())
env_server = os.environ.copy()
    if env is not None:
env_server.update(env)
env_server.update(board_group.boards[1].to_env())
with pexpect.spawnu("make", ["-C", application, "term"], env=env_client,
timeout=DEFAULT_TIMEOUT) as client, \
pexpect.spawnu("make", ["-C", application, "term"], env=env_server,
timeout=DEFAULT_TIMEOUT) as server:
port = random.randint(0x0000, 0xffff)
server_ip = get_ipv6_address(server)
client_ip = get_ipv6_address(client)
server.sendline(u"tcp server start %d" % port)
# wait for neighbor discovery to be done
time.sleep(5)
client.sendline(u"tcp connect %s %d" % (server_ip, port))
server.expect(u"TCP client \\[%s\\]:[0-9]+ connected" % client_ip)
client.sendline(u"tcp send affe:abe")
client.expect_exact(u"Success: send 4 byte over TCP to server")
server.expect(u"00000000 AF FE AB E0")
client.sendline(u"tcp disconnect")
client.sendline(u"tcp send affe:abe")
client.expect_exact(u"could not send")
def test_triple_send(board_group, application, env=None):
env_sender = os.environ.copy()
    if env is not None:
env_sender.update(env)
env_sender.update(board_group.boards[0].to_env())
env_receiver = os.environ.copy()
    if env is not None:
env_receiver.update(env)
env_receiver.update(board_group.boards[1].to_env())
with pexpect.spawnu("make", ["-C", application, "term"], env=env_sender,
timeout=DEFAULT_TIMEOUT) as sender, \
pexpect.spawnu("make", ["-C", application, "term"], env=env_receiver,
timeout=DEFAULT_TIMEOUT) as receiver:
udp_port = random.randint(0x0000, 0xffff)
tcp_port = random.randint(0x0000, 0xffff)
ipprot = random.randint(0x00, 0xff)
receiver_ip = get_ipv6_address(receiver)
sender_ip = get_ipv6_address(sender)
receiver.sendline(u"ip server start %d" % ipprot)
receiver.sendline(u"udp server start %d" % udp_port)
receiver.sendline(u"tcp server start %d" % tcp_port)
# wait for neighbor discovery to be done
time.sleep(5)
sender.sendline(u"udp send %s %d 01:23" % (receiver_ip, udp_port))
sender.expect_exact(u"Success: send 2 byte over UDP to [%s]:%d" %
(receiver_ip, udp_port))
receiver.expect(u"00000000 01 23")
sender.sendline(u"ip send %s %d 01:02:03:04" % (receiver_ip, ipprot))
sender.expect_exact(u"Success: send 4 byte over IPv6 to %s (next header: %d)" %
(receiver_ip, ipprot))
receiver.expect(u"00000000 01 02 03 04")
sender.sendline(u"tcp connect %s %d" % (receiver_ip, tcp_port))
receiver.expect(u"TCP client \\[%s\\]:[0-9]+ connected" % sender_ip)
sender.sendline(u"tcp send dead:beef")
sender.expect_exact(u"Success: send 4 byte over TCP to server")
receiver.expect(u"00000000 DE AD BE EF")
if __name__ == "__main__":
    os.environ.pop('TERMFLAGS', None)  # tolerate TERMFLAGS being unset
TestStrategy().execute([BoardGroup((Board("native", "tap0"), \
Board("native", "tap1")))], \
[test_ipv6_send, test_udpv6_send, test_tcpv6_send,
test_triple_send])
| lgpl-2.1 | 6,660,543,257,646,934,000 | 37.270903 | 89 | 0.595211 | false |
UstadMobile/exelearning-extjs5-mirror | twisted/plugins/twisted_trial.py | 14 | 1880 |
from zope.interface import implements
from twisted.python.components import backwardsCompatImplements
from twisted.trial.itrial import IReporter
from twisted.plugin import IPlugin
class _Reporter(object):
implements(IPlugin, IReporter)
def __init__(self, name, module, description, longOpt, shortOpt, klass):
self.name = name
self.module = module
self.description = description
self.longOpt = longOpt
self.shortOpt = shortOpt
self.klass = klass
backwardsCompatImplements(_Reporter)
Tree = _Reporter("Tree Reporter",
"twisted.trial.reporter",
description="verbose color output (default reporter)",
longOpt="verbose",
shortOpt="v",
klass="TreeReporter")
BlackAndWhite = _Reporter("Black-And-White Reporter",
"twisted.trial.reporter",
description="Colorless verbose output",
longOpt="bwverbose",
shortOpt="o",
klass="VerboseTextReporter")
Minimal = _Reporter("Minimal Reporter",
"twisted.trial.reporter",
description="minimal summary output",
longOpt="summary",
shortOpt="s",
klass="MinimalReporter")
Classic = _Reporter("Classic Reporter",
"twisted.trial.reporter",
description="terse text output",
longOpt="text",
shortOpt="t",
klass="TextReporter")
Timing = _Reporter("Timing Reporter",
"twisted.trial.reporter",
description="Timing output",
longOpt="timing",
shortOpt=None,
klass="TimingTextReporter")
| gpl-2.0 | 3,590,320,245,898,108,400 | 33.181818 | 76 | 0.543617 | false |
thomazs/geraldo | site/newsite/site-geraldo/django/db/models/__init__.py | 14 | 1245 | from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist, ImproperlyConfigured
from django.db import connection
from django.db.models.loading import get_apps, get_app, get_models, get_model, register_models
from django.db.models.query import Q
from django.db.models.manager import Manager
from django.db.models.base import Model
from django.db.models.fields import *
from django.db.models.fields.subclassing import SubfieldBase
from django.db.models.fields.files import FileField, ImageField
from django.db.models.fields.related import ForeignKey, OneToOneField, ManyToManyField, ManyToOneRel, ManyToManyRel, OneToOneRel
from django.db.models import signals
# Admin stages.
ADD, CHANGE, BOTH = 1, 2, 3
def permalink(func):
"""
Decorator that calls urlresolvers.reverse() to return a URL using
parameters returned by the decorated function "func".
"func" should be a function that returns a tuple in one of the
following formats:
(viewname, viewargs)
(viewname, viewargs, viewkwargs)
"""
from django.core.urlresolvers import reverse
def inner(*args, **kwargs):
bits = func(*args, **kwargs)
return reverse(bits[0], None, *bits[1:3])
return inner
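# Minimal usage sketch (model and view names are hypothetical): decorate a
# model method that returns a (viewname, viewargs) tuple and the decorator
# resolves it through reverse():
#
#   class Person(Model):
#       @permalink
#       def get_absolute_url(self):
#           return ('people_detail', [str(self.id)])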
| lgpl-3.0 | -2,294,360,048,874,690,000 | 39.16129 | 128 | 0.75502 | false |
ycl2045/nova-master | nova/tests/api/openstack/compute/plugins/v3/test_agents.py | 19 | 13966 | # Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import exc
from nova.api.openstack.compute.plugins.v3 import agents
from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova import exception
from nova import test
fake_agents_list = [{'hypervisor': 'kvm', 'os': 'win',
'architecture': 'x86',
'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx',
'md5hash': 'add6bb58e139be103324d04d82d8f545',
'id': 1},
{'hypervisor': 'kvm', 'os': 'linux',
'architecture': 'x86',
'version': '16.0',
'url': 'xxx://xxxx/xxx/xxx1',
'md5hash': 'add6bb58e139be103324d04d82d8f546',
'id': 2},
{'hypervisor': 'xen', 'os': 'linux',
'architecture': 'x86',
'version': '16.0',
'url': 'xxx://xxxx/xxx/xxx2',
'md5hash': 'add6bb58e139be103324d04d82d8f547',
'id': 3},
{'hypervisor': 'xen', 'os': 'win',
'architecture': 'power',
'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx3',
'md5hash': 'add6bb58e139be103324d04d82d8f548',
'id': 4},
]
def fake_agent_build_get_all(context, hypervisor):
agent_build_all = []
for agent in fake_agents_list:
if hypervisor and hypervisor != agent['hypervisor']:
continue
agent_build_ref = models.AgentBuild()
agent_build_ref.update(agent)
agent_build_all.append(agent_build_ref)
return agent_build_all
def fake_agent_build_update(context, agent_build_id, values):
pass
def fake_agent_build_destroy(context, agent_update_id):
pass
def fake_agent_build_create(context, values):
values['id'] = 1
agent_build_ref = models.AgentBuild()
agent_build_ref.update(values)
return agent_build_ref
class FakeRequest(object):
environ = {"nova.context": context.get_admin_context()}
GET = {}
class FakeRequestWithHypervisor(object):
environ = {"nova.context": context.get_admin_context()}
GET = {'hypervisor': 'kvm'}
def fake_agent_build_create_with_exited_agent(context, values):
raise exception.AgentBuildExists(**values)
class AgentsTest(test.NoDBTestCase):
def setUp(self):
super(AgentsTest, self).setUp()
self.stubs.Set(db, "agent_build_get_all",
fake_agent_build_get_all)
self.stubs.Set(db, "agent_build_update",
fake_agent_build_update)
self.stubs.Set(db, "agent_build_destroy",
fake_agent_build_destroy)
self.stubs.Set(db, "agent_build_create",
fake_agent_build_create)
self.context = context.get_admin_context()
self.controller = agents.AgentController()
def test_agents_create(self):
req = FakeRequest()
body = {'agent': {'hypervisor': 'kvm',
'os': 'win',
'architecture': 'x86',
'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
response = {'agent': {'hypervisor': 'kvm',
'os': 'win',
'architecture': 'x86',
'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx',
'md5hash': 'add6bb58e139be103324d04d82d8f545',
'agent_id': 1}}
res_dict = self.controller.create(req, body=body)
self.assertEqual(res_dict, response)
self.assertEqual(self.controller.create.wsgi_code, 201)
def test_agents_create_with_existed_agent(self):
self.stubs.Set(db, 'agent_build_create',
fake_agent_build_create_with_exited_agent)
req = FakeRequest()
body = {'agent': {'hypervisor': 'kvm',
'os': 'win',
'architecture': 'x86',
'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
self.assertRaises(exc.HTTPConflict, self.controller.create, req,
body=body)
def test_agents_create_without_md5hash(self):
req = FakeRequest()
body = {'agent': {'hypervisor': 'kvm',
'os': 'win',
'architecture': 'x86',
'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx'}}
self.assertRaises(exception.ValidationError, self.controller.create,
req, body=body)
def test_agents_create_without_url(self):
req = FakeRequest()
body = {'agent': {'hypervisor': 'kvm',
'os': 'win',
'architecture': 'x86',
'version': '7.0',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
self.assertRaises(exception.ValidationError, self.controller.create,
req, body=body)
def test_agents_create_without_version(self):
req = FakeRequest()
body = {'agent': {'hypervisor': 'kvm',
'os': 'win',
'architecture': 'x86',
'url': 'xxx://xxxx/xxx/xxx',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
self.assertRaises(exception.ValidationError, self.controller.create,
req, body=body)
def test_agents_create_without_architecture(self):
req = FakeRequest()
body = {'agent': {'hypervisor': 'kvm',
'os': 'win',
'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
self.assertRaises(exception.ValidationError, self.controller.create,
req, body=body)
def test_agents_create_without_os(self):
req = FakeRequest()
body = {'agent': {'hypervisor': 'kvm',
'architecture': 'x86',
'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
self.assertRaises(exception.ValidationError, self.controller.create,
req, body=body)
def test_agents_create_without_hypervisor(self):
req = FakeRequest()
body = {'agent': {'os': 'win',
'architecture': 'x86',
'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
self.assertRaises(exception.ValidationError, self.controller.create,
req, body=body)
def test_agents_create_with_wrong_type(self):
req = FakeRequest()
body = {'agent': None}
self.assertRaises(exception.ValidationError, self.controller.create,
req, body=body)
def test_agents_create_with_empty_type(self):
req = FakeRequest()
body = {}
self.assertRaises(exception.ValidationError, self.controller.create,
req, body=body)
def _test_agents_create_with_invalid_length(self, key):
req = FakeRequest()
body = {'agent': {'hypervisor': 'kvm',
'os': 'win',
'architecture': 'x86',
'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
body['agent'][key] = 'x' * 256
self.assertRaises(exception.ValidationError, self.controller.create,
req, body=body)
def test_agents_create_with_invalid_length_hypervisor(self):
self._test_agents_create_with_invalid_length('hypervisor')
def test_agents_create_with_invalid_length_os(self):
self._test_agents_create_with_invalid_length('os')
def test_agents_create_with_invalid_length_architecture(self):
self._test_agents_create_with_invalid_length('architecture')
def test_agents_create_with_invalid_length_version(self):
self._test_agents_create_with_invalid_length('version')
def test_agents_create_with_invalid_length_url(self):
self._test_agents_create_with_invalid_length('url')
def test_agents_create_with_invalid_length_md5hash(self):
self._test_agents_create_with_invalid_length('md5hash')
def test_agents_delete(self):
req = FakeRequest()
self.controller.delete(req, 1)
def test_agents_list(self):
req = FakeRequest()
res_dict = self.controller.index(req)
agents_list = [{'hypervisor': 'kvm', 'os': 'win',
'architecture': 'x86',
'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx',
'md5hash': 'add6bb58e139be103324d04d82d8f545',
'agent_id': 1},
{'hypervisor': 'kvm', 'os': 'linux',
'architecture': 'x86',
'version': '16.0',
'url': 'xxx://xxxx/xxx/xxx1',
'md5hash': 'add6bb58e139be103324d04d82d8f546',
'agent_id': 2},
{'hypervisor': 'xen', 'os': 'linux',
'architecture': 'x86',
'version': '16.0',
'url': 'xxx://xxxx/xxx/xxx2',
'md5hash': 'add6bb58e139be103324d04d82d8f547',
'agent_id': 3},
{'hypervisor': 'xen', 'os': 'win',
'architecture': 'power',
'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx3',
'md5hash': 'add6bb58e139be103324d04d82d8f548',
'agent_id': 4},
]
self.assertEqual(res_dict, {'agents': agents_list})
def test_agents_list_with_hypervisor(self):
req = FakeRequestWithHypervisor()
res_dict = self.controller.index(req)
response = [{'hypervisor': 'kvm', 'os': 'win',
'architecture': 'x86',
'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx',
'md5hash': 'add6bb58e139be103324d04d82d8f545',
'agent_id': 1},
{'hypervisor': 'kvm', 'os': 'linux',
'architecture': 'x86',
'version': '16.0',
'url': 'xxx://xxxx/xxx/xxx1',
'md5hash': 'add6bb58e139be103324d04d82d8f546',
'agent_id': 2},
]
self.assertEqual(res_dict, {'agents': response})
def test_agents_update(self):
req = FakeRequest()
body = {'agent': {'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
response = {'agent': {'agent_id': 1,
'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
res_dict = self.controller.update(req, 1, body=body)
self.assertEqual(res_dict, response)
def test_agents_update_without_md5hash(self):
req = FakeRequest()
body = {'agent': {'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx'}}
self.assertRaises(exception.ValidationError, self.controller.update,
req, 1, body=body)
def test_agents_update_without_url(self):
req = FakeRequest()
body = {'agent': {'version': '7.0'}}
self.assertRaises(exception.ValidationError, self.controller.update,
req, 1, body=body)
def test_agents_update_without_version(self):
req = FakeRequest()
body = {'agent': {}}
self.assertRaises(exception.ValidationError, self.controller.update,
req, 1, body=body)
def test_agents_update_with_wrong_type(self):
req = FakeRequest()
body = {'agent': None}
self.assertRaises(exception.ValidationError, self.controller.update,
req, 1, body=body)
def test_agents_update_with_empty(self):
req = FakeRequest()
body = {}
self.assertRaises(exception.ValidationError, self.controller.update,
req, 1, body=body)
def _test_agents_update_with_invalid_length(self, key):
req = FakeRequest()
body = {'agent': {'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
body['agent'][key] = 'x' * 256
self.assertRaises(exception.ValidationError, self.controller.update,
req, 1, body=body)
def test_agents_update_with_invalid_length_version(self):
self._test_agents_update_with_invalid_length('version')
def test_agents_update_with_invalid_length_url(self):
self._test_agents_update_with_invalid_length('url')
def test_agents_update_with_invalid_length_md5hash(self):
self._test_agents_update_with_invalid_length('md5hash')
| apache-2.0 | 6,622,349,208,270,206,000 | 38.789174 | 78 | 0.532221 | false |
FTBZ/librenms | LibreNMS/service.py | 14 | 31186 | import LibreNMS
import json
import logging
import os
import subprocess
import threading
import sys
import time
import timeit
from datetime import timedelta
from logging import debug, info, warning, error, critical, exception
from platform import python_version
from time import sleep
from socket import gethostname
from signal import signal, SIGTERM
from uuid import uuid1
class PerformanceCounter(object):
"""
This is a simple counter to record execution time and number of jobs. It's unique to each
    poller instance, so does not need to be globally synchronised, just locally.
"""
def __init__(self):
self._count = 0
self._jobs = 0
self._lock = threading.Lock()
def add(self, n):
"""
Add n to the counter and increment the number of jobs by 1
:param n: Number to increment by
"""
with self._lock:
self._count += n
self._jobs += 1
def split(self, precise=False):
"""
Return the current counter value and keep going
:param precise: Whether floating point precision is desired
:return: ((INT or FLOAT), INT)
"""
return (self._count if precise else int(self._count)), self._jobs
def reset(self, precise=False):
"""
Return the current counter value and then zero it.
:param precise: Whether floating point precision is desired
:return: ((INT or FLOAT), INT)
"""
with self._lock:
c = self._count
j = self._jobs
self._count = 0
self._jobs = 0
return (c if precise else int(c)), j
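# Minimal usage sketch: worker threads record per-job execution time, and the
# stats reporter drains the counter (values illustrative):
#
#   counter = PerformanceCounter()
#   counter.add(2.5)                 # one job that took 2.5s
#   counter.add(1.2)                 # a second job
#   seconds, jobs = counter.reset()  # -> (3, 2); counter is zeroed again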
class TimeitContext(object):
"""
Wrapper around timeit to allow the timing of larger blocks of code by wrapping them in "with"
"""
def __init__(self):
self._t = timeit.default_timer()
def __enter__(self):
return self
def __exit__(self, *args):
del self._t
def delta(self):
"""
Calculate the elapsed time since the context was initialised
:return: FLOAT
"""
        if not hasattr(self, '_t'):
            raise ArithmeticError("Timer has not been started, cannot return delta")
        return timeit.default_timer() - self._t
@classmethod
def start(cls):
"""
Factory method for TimeitContext
:param cls:
:return: TimeitContext
"""
return cls()
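# Minimal usage sketch, mirroring how the pollers below time their work:
#
#   with TimeitContext.start() as t:
#       do_work()            # hypothetical workload
#       took = t.delta()     # seconds elapsed since the context started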
class ServiceConfig:
def __init__(self):
"""
Stores all of the configuration variables for the LibreNMS service in a common object
Starts with defaults, but can be populated with variables from config.php by calling populate()
"""
self._uuid = str(uuid1())
self.set_name(gethostname())
def set_name(self, name):
if name:
self.name = name.strip()
self.unique_name = "{}-{}".format(self.name, self._uuid)
class PollerConfig:
def __init__(self, workers, frequency, calculate=None):
self.workers = workers
self.frequency = frequency
self.calculate = calculate
# config variables with defaults
BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
node_id = None
name = None
unique_name = None
single_instance = True
distributed = False
group = 0
debug = False
log_level = 20
alerting = PollerConfig(1, 60)
poller = PollerConfig(24, 300)
services = PollerConfig(8, 300)
discovery = PollerConfig(16, 21600)
billing = PollerConfig(2, 300, 60)
down_retry = 60
update_frequency = 86400
master_resolution = 1
master_timeout = 10
redis_host = 'localhost'
redis_port = 6379
redis_db = 0
redis_pass = None
redis_socket = None
db_host = 'localhost'
db_port = 0
db_socket = None
db_user = 'librenms'
db_pass = ''
db_name = 'librenms'
def populate(self):
config = self._get_config_data()
# populate config variables
self.node_id = os.getenv('NODE_ID')
self.set_name(config.get('distributed_poller_name', None))
self.distributed = config.get('distributed_poller', ServiceConfig.distributed)
self.group = ServiceConfig.parse_group(config.get('distributed_poller_group', ServiceConfig.group))
# backward compatible options
self.poller.workers = config.get('poller_service_workers', ServiceConfig.poller.workers)
self.poller.frequency = config.get('poller_service_poll_frequency', ServiceConfig.poller.frequency)
self.discovery.frequency = config.get('poller_service_discover_frequency', ServiceConfig.discovery.frequency)
self.down_retry = config.get('poller_service_down_retry', ServiceConfig.down_retry)
self.log_level = config.get('poller_service_loglevel', ServiceConfig.log_level)
# new options
self.poller.workers = config.get('service_poller_workers', ServiceConfig.poller.workers)
self.poller.frequency = config.get('service_poller_frequency', ServiceConfig.poller.frequency)
self.services.workers = config.get('service_services_workers', ServiceConfig.services.workers)
self.services.frequency = config.get('service_services_frequency', ServiceConfig.services.frequency)
self.discovery.workers = config.get('service_discovery_workers', ServiceConfig.discovery.workers)
self.discovery.frequency = config.get('service_discovery_frequency', ServiceConfig.discovery.frequency)
self.billing.frequency = config.get('service_billing_frequency', ServiceConfig.billing.frequency)
self.billing.calculate = config.get('service_billing_calculate_frequency', ServiceConfig.billing.calculate)
self.down_retry = config.get('service_poller_down_retry', ServiceConfig.down_retry)
self.log_level = config.get('service_loglevel', ServiceConfig.log_level)
self.update_frequency = config.get('service_update_frequency', ServiceConfig.update_frequency)
self.redis_host = os.getenv('REDIS_HOST', config.get('redis_host', ServiceConfig.redis_host))
self.redis_db = os.getenv('REDIS_DB', config.get('redis_db', ServiceConfig.redis_db))
self.redis_pass = os.getenv('REDIS_PASSWORD', config.get('redis_pass', ServiceConfig.redis_pass))
self.redis_port = int(os.getenv('REDIS_PORT', config.get('redis_port', ServiceConfig.redis_port)))
self.redis_socket = os.getenv('REDIS_SOCKET', config.get('redis_socket', ServiceConfig.redis_socket))
self.db_host = os.getenv('DB_HOST', config.get('db_host', ServiceConfig.db_host))
self.db_name = os.getenv('DB_DATABASE', config.get('db_name', ServiceConfig.db_name))
self.db_pass = os.getenv('DB_PASSWORD', config.get('db_pass', ServiceConfig.db_pass))
self.db_port = int(os.getenv('DB_PORT', config.get('db_port', ServiceConfig.db_port)))
self.db_socket = os.getenv('DB_SOCKET', config.get('db_socket', ServiceConfig.db_socket))
self.db_user = os.getenv('DB_USERNAME', config.get('db_user', ServiceConfig.db_user))
# set convenient debug variable
self.debug = logging.getLogger().isEnabledFor(logging.DEBUG)
if not self.debug and self.log_level:
try:
logging.getLogger().setLevel(self.log_level)
except ValueError:
error("Unknown log level {}, must be one of 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'".format(self.log_level))
logging.getLogger().setLevel(logging.INFO)
def _get_config_data(self):
try:
import dotenv
env_path = "{}/.env".format(self.BASE_DIR)
info("Attempting to load .env from '%s'", env_path)
dotenv.load_dotenv(dotenv_path=env_path, verbose=True)
if not os.getenv('NODE_ID'):
raise ImportError(".env does not contain a valid NODE_ID setting.")
except ImportError as e:
exception("Could not import .env - check that the poller user can read the file, and that composer install has been run recently")
sys.exit(3)
config_cmd = ['/usr/bin/env', 'php', '{}/config_to_json.php'.format(self.BASE_DIR), '2>&1']
try:
return json.loads(subprocess.check_output(config_cmd).decode())
        except subprocess.CalledProcessError as e:
            error("ERROR: Could not load or parse configuration! {}: {}"
                  .format(subprocess.list2cmdline(e.cmd), e.output.decode()))
            sys.exit(3)
@staticmethod
def parse_group(g):
if g is None:
return [0]
elif type(g) is int:
return [g]
elif type(g) is str:
try:
return [int(x) for x in set(g.split(','))]
except ValueError:
pass
error("Could not parse group string, defaulting to 0")
return [0]
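    # Illustrative behaviour of parse_group() given the logic above:
    #
    #   parse_group(None)      # -> [0]
    #   parse_group(3)         # -> [3]
    #   parse_group("1,2,2")   # -> [1, 2] (set() collapses duplicates,
    #                          #    order is not guaranteed)
    #   parse_group("bogus")   # -> [0], with an error logged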
class Service:
config = ServiceConfig()
_fp = False
_started = False
alerting_manager = None
poller_manager = None
discovery_manager = None
services_manager = None
billing_manager = None
last_poll = {}
terminate_flag = False
def __init__(self):
self.config.populate()
threading.current_thread().name = self.config.name # rename main thread
self.attach_signals()
# init database connections different ones for different threads
self._db = LibreNMS.DB(self.config) # main
self._services_db = LibreNMS.DB(self.config) # services dispatch
self._discovery_db = LibreNMS.DB(self.config) # discovery dispatch
self._lm = self.create_lock_manager()
self.daily_timer = LibreNMS.RecurringTimer(self.config.update_frequency, self.run_maintenance, 'maintenance')
self.stats_timer = LibreNMS.RecurringTimer(self.config.poller.frequency, self.log_performance_stats, 'performance')
self.is_master = False
self.performance_stats = {'poller': PerformanceCounter(), 'discovery': PerformanceCounter(), 'services': PerformanceCounter()}
def attach_signals(self):
info("Attaching signal handlers on thread %s", threading.current_thread().name)
signal(SIGTERM, self.terminate) # capture sigterm and exit gracefully
def start(self):
debug("Performing startup checks...")
if self.config.single_instance:
self.check_single_instance() # don't allow more than one service at a time
if self._started:
raise RuntimeWarning("Not allowed to start Poller twice")
self._started = True
debug("Starting up queue managers...")
# initialize and start the worker pools
self.poller_manager = LibreNMS.QueueManager(self.config, 'poller', self.poll_device)
self.alerting_manager = LibreNMS.TimedQueueManager(self.config, 'alerting', self.poll_alerting,
self.dispatch_alerting)
self.services_manager = LibreNMS.TimedQueueManager(self.config, 'services', self.poll_services,
self.dispatch_services)
self.discovery_manager = LibreNMS.TimedQueueManager(self.config, 'discovery', self.discover_device,
self.dispatch_discovery)
self.billing_manager = LibreNMS.BillingQueueManager(self.config, self.poll_billing,
self.dispatch_poll_billing, self.dispatch_calculate_billing)
self.daily_timer.start()
self.stats_timer.start()
info("LibreNMS Service: {} started!".format(self.config.unique_name))
info("Poller group {}. Using Python {} and {} locks and queues"
.format('0 (default)' if self.config.group == [0] else self.config.group, python_version(),
'redis' if isinstance(self._lm, LibreNMS.RedisLock) else 'internal'))
info("Maintenance tasks will be run every {}".format(timedelta(seconds=self.config.update_frequency)))
# Main dispatcher loop
try:
while not self.terminate_flag:
master_lock = self._lm.lock('dispatch.master', self.config.unique_name, self.config.master_timeout, True)
if master_lock:
if not self.is_master:
info("{} is now the master dispatcher".format(self.config.name))
self.is_master = True
self.start_dispatch_timers()
devices = self.fetch_immediate_device_list()
for device in devices:
device_id = device[0]
group = device[1]
if device[2]: # polling
self.dispatch_immediate_polling(device_id, group)
if device[3]: # discovery
self.dispatch_immediate_discovery(device_id, group)
else:
if self.is_master:
info("{} is no longer the master dispatcher".format(self.config.name))
self.stop_dispatch_timers()
self.is_master = False # no longer master
sleep(self.config.master_resolution)
except KeyboardInterrupt:
pass
info("Dispatch loop terminated")
self.shutdown()
# ------------ Discovery ------------
def dispatch_immediate_discovery(self, device_id, group):
if self.discovery_manager.get_queue(group).empty() and not self.discovery_is_locked(device_id):
self.discovery_manager.post_work(device_id, group)
def dispatch_discovery(self):
devices = self.fetch_device_list()
for device in devices:
self.discovery_manager.post_work(device[0], device[1])
def discover_device(self, device_id):
if self.lock_discovery(device_id):
try:
with TimeitContext.start() as t:
info("Discovering device {}".format(device_id))
self.call_script('discovery.php', ('-h', device_id))
info('Discovery complete {}'.format(device_id))
self.report_execution_time(t.delta(), 'discovery')
except subprocess.CalledProcessError as e:
if e.returncode == 5:
info("Device {} is down, cannot discover, waiting {}s for retry"
.format(device_id, self.config.down_retry))
self.lock_discovery(device_id, True)
else:
self.unlock_discovery(device_id)
else:
self.unlock_discovery(device_id)
# ------------ Alerting ------------
def dispatch_alerting(self):
self.alerting_manager.post_work('alerts', 0)
def poll_alerting(self, _=None):
try:
info("Checking alerts")
self.call_script('alerts.php')
except subprocess.CalledProcessError as e:
if e.returncode == 1:
warning("There was an error issuing alerts: {}".format(e.output))
else:
raise
# ------------ Services ------------
def dispatch_services(self):
devices = self.fetch_services_device_list()
for device in devices:
self.services_manager.post_work(device[0], device[1])
def poll_services(self, device_id):
if self.lock_services(device_id):
try:
with TimeitContext.start() as t:
info("Checking services on device {}".format(device_id))
self.call_script('check-services.php', ('-h', device_id))
info('Services complete {}'.format(device_id))
self.report_execution_time(t.delta(), 'services')
except subprocess.CalledProcessError as e:
if e.returncode == 5:
info("Device {} is down, cannot poll service, waiting {}s for retry"
.format(device_id, self.config.down_retry))
self.lock_services(device_id, True)
else:
self.unlock_services(device_id)
else:
self.unlock_services(device_id)
# ------------ Billing ------------
def dispatch_calculate_billing(self):
self.billing_manager.post_work('calculate', 0)
def dispatch_poll_billing(self):
self.billing_manager.post_work('poll', 0)
def poll_billing(self, run_type):
if run_type == 'poll':
info("Polling billing")
self.call_script('poll-billing.php')
info("Polling billing complete")
else: # run_type == 'calculate'
info("Calculating billing")
self.call_script('billing-calculate.php')
info("Calculating billing complete")
# ------------ Polling ------------
def dispatch_immediate_polling(self, device_id, group):
if self.poller_manager.get_queue(group).empty() and not self.polling_is_locked(device_id):
self.poller_manager.post_work(device_id, group)
if self.config.debug:
cur_time = time.time()
elapsed = cur_time - self.last_poll.get(device_id, cur_time)
self.last_poll[device_id] = time.time()
# arbitrary limit to reduce spam
if elapsed > (self.config.poller.frequency - self.config.master_resolution):
debug("Dispatching polling for device {}, time since last poll {:.2f}s"
.format(device_id, elapsed))
def poll_device(self, device_id):
if self.lock_polling(device_id):
info('Polling device {}'.format(device_id))
try:
with TimeitContext.start() as t:
self.call_script('poller.php', ('-h', device_id))
self.report_execution_time(t.delta(), 'poller')
except subprocess.CalledProcessError as e:
if e.returncode == 6:
warning('Polling device {} unreachable, waiting {}s for retry'.format(device_id, self.config.down_retry))
# re-lock to set retry timer
self.lock_polling(device_id, True)
else:
error('Polling device {} failed! {}'.format(device_id, e))
self.unlock_polling(device_id)
else:
info('Polling complete {}'.format(device_id))
# self.polling_unlock(device_id)
else:
debug('Tried to poll {}, but it is locked'.format(device_id))
def fetch_services_device_list(self):
return self._services_db.query("SELECT DISTINCT(`device_id`), `poller_group` FROM `services`"
" LEFT JOIN `devices` USING (`device_id`) WHERE `disabled`=0")
def fetch_device_list(self):
return self._discovery_db.query("SELECT `device_id`, `poller_group` FROM `devices` WHERE `disabled`=0")
def fetch_immediate_device_list(self):
poller_find_time = self.config.poller.frequency - 1
discovery_find_time = self.config.discovery.frequency - 1
return self._db.query('''SELECT `device_id`,
`poller_group`,
COALESCE(`last_polled` <= DATE_ADD(DATE_ADD(NOW(), INTERVAL -%s SECOND), INTERVAL `last_polled_timetaken` SECOND), 1) AS `poll`,
COALESCE(`last_discovered` <= DATE_ADD(DATE_ADD(NOW(), INTERVAL -%s SECOND), INTERVAL `last_discovered_timetaken` SECOND), 1) AS `discover`
FROM `devices`
WHERE `disabled` = 0 AND (
`last_polled` IS NULL OR
`last_discovered` IS NULL OR
`last_polled` <= DATE_ADD(DATE_ADD(NOW(), INTERVAL -%s SECOND), INTERVAL `last_polled_timetaken` SECOND) OR
`last_discovered` <= DATE_ADD(DATE_ADD(NOW(), INTERVAL -%s SECOND), INTERVAL `last_discovered_timetaken` SECOND)
)
ORDER BY `last_polled_timetaken` DESC''', (poller_find_time, discovery_find_time, poller_find_time, discovery_find_time))
def run_maintenance(self):
"""
Runs update and cleanup tasks by calling daily.sh. Reloads the python script after the update.
Sets a schema-update lock so no distributed pollers will update until the schema has been updated.
"""
attempt = 0
wait = 5
max_runtime = 86100
max_tries = int(max_runtime / wait)
info("Waiting for schema lock")
while not self._lm.lock('schema-update', self.config.unique_name, max_runtime):
attempt += 1
if attempt >= max_tries: # don't get stuck indefinitely
warning('Reached max wait for other pollers to update, updating now')
break
sleep(wait)
info("Running maintenance tasks")
output = self.call_script('daily.sh')
info("Maintenance tasks complete\n{}".format(output))
self.restart()
# Lock Helpers #
def lock_discovery(self, device_id, retry=False):
lock_name = self.gen_lock_name('discovery', device_id)
timeout = self.config.down_retry if retry else LibreNMS.normalize_wait(self.config.discovery.frequency)
return self._lm.lock(lock_name, self.gen_lock_owner(), timeout, retry)
def unlock_discovery(self, device_id):
lock_name = self.gen_lock_name('discovery', device_id)
return self._lm.unlock(lock_name, self.gen_lock_owner())
def discovery_is_locked(self, device_id):
lock_name = self.gen_lock_name('discovery', device_id)
return self._lm.check_lock(lock_name)
def lock_polling(self, device_id, retry=False):
lock_name = self.gen_lock_name('polling', device_id)
timeout = self.config.down_retry if retry else self.config.poller.frequency
return self._lm.lock(lock_name, self.gen_lock_owner(), timeout, retry)
def unlock_polling(self, device_id):
lock_name = self.gen_lock_name('polling', device_id)
return self._lm.unlock(lock_name, self.gen_lock_owner())
def polling_is_locked(self, device_id):
lock_name = self.gen_lock_name('polling', device_id)
return self._lm.check_lock(lock_name)
def lock_services(self, device_id, retry=False):
lock_name = self.gen_lock_name('services', device_id)
timeout = self.config.down_retry if retry else self.config.services.frequency
return self._lm.lock(lock_name, self.gen_lock_owner(), timeout, retry)
def unlock_services(self, device_id):
lock_name = self.gen_lock_name('services', device_id)
return self._lm.unlock(lock_name, self.gen_lock_owner())
def services_is_locked(self, device_id):
lock_name = self.gen_lock_name('services', device_id)
return self._lm.check_lock(lock_name)
@staticmethod
def gen_lock_name(lock_class, device_id):
return '{}.device.{}'.format(lock_class, device_id)
def gen_lock_owner(self):
return "{}-{}".format(self.config.unique_name, threading.current_thread().name)
def call_script(self, script, args=()):
"""
Run a LibreNMS script. Captures all output and throws an exception if a non-zero
status is returned. Blocks parent signals (like SIGINT and SIGTERM).
:param script: the name of the executable relative to the base directory
:param args: a tuple of arguments to send to the command
:returns the output of the command
"""
if script.endswith('.php'):
# save calling the sh process
base = ('/usr/bin/env', 'php')
else:
base = ()
cmd = base + ("{}/{}".format(self.config.BASE_DIR, script),) + tuple(map(str, args))
# preexec_fn=os.setsid here keeps process signals from propagating
return subprocess.check_output(cmd, stderr=subprocess.STDOUT, preexec_fn=os.setsid, close_fds=True).decode()
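    # Minimal usage sketch (device id 42 is illustrative):
    #
    #   self.call_script('poller.php', ('-h', 42))  # /usr/bin/env php <BASE_DIR>/poller.php -h 42
    #   self.call_script('daily.sh')                # <BASE_DIR>/daily.sh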
def create_lock_manager(self):
"""
Create a new LockManager. Tries to create a Redis LockManager, but falls
back to python's internal threading lock implementation.
Exits if distributing poller is enabled and a Redis LockManager cannot be created.
:return: Instance of LockManager
"""
try:
return LibreNMS.RedisLock(namespace='librenms.lock',
host=self.config.redis_host,
port=self.config.redis_port,
db=self.config.redis_db,
password=self.config.redis_pass,
unix_socket_path=self.config.redis_socket)
except ImportError:
if self.config.distributed:
critical("ERROR: Redis connection required for distributed polling")
critical("Please install redis-py, either through your os software repository or from PyPI")
sys.exit(2)
except Exception as e:
if self.config.distributed:
critical("ERROR: Redis connection required for distributed polling")
critical("Could not connect to Redis. {}".format(e))
sys.exit(2)
return LibreNMS.ThreadingLock()
def restart(self):
"""
Stop then recreate this entire process by re-calling the original script.
Has the effect of reloading the python files from disk.
"""
if sys.version_info < (3, 4, 0):
warning("Skipping restart as running under an incompatible interpreter")
warning("Please restart manually")
return
info('Restarting service... ')
self._stop_managers_and_wait()
self._lm.unlock('dispatch.master', self.config.unique_name)
python = sys.executable
os.execl(python, python, *sys.argv)
def terminate(self, _unused=None, _=None):
"""
        Set the terminate flag to begin a clean shutdown.
:param _unused:
:param _:
"""
info("Received SIGTERM on thead %s, handling", threading.current_thread().name)
self.terminate_flag = True
def shutdown(self, _unused=None, _=None):
"""
Stop and exit, waiting for all child processes to exit.
:param _unused:
:param _:
"""
info('Shutting down, waiting for running jobs to complete...')
self.stop_dispatch_timers()
self._lm.unlock('dispatch.master', self.config.unique_name)
self.daily_timer.stop()
self.stats_timer.stop()
self._stop_managers_and_wait()
# try to release master lock
info('Shutdown of %s/%s complete', os.getpid(), threading.current_thread().name)
sys.exit(0)
def start_dispatch_timers(self):
"""
Start all dispatch timers and begin pushing events into queues.
This should only be started when we are the master dispatcher.
"""
self.alerting_manager.start_dispatch()
self.billing_manager.start_dispatch()
self.services_manager.start_dispatch()
self.discovery_manager.start_dispatch()
def stop_dispatch_timers(self):
"""
Stop all dispatch timers, this should be called when we are no longer the master dispatcher.
"""
self.alerting_manager.stop_dispatch()
self.billing_manager.stop_dispatch()
self.services_manager.stop_dispatch()
self.discovery_manager.stop_dispatch()
def _stop_managers_and_wait(self):
"""
Stop all QueueManagers, and wait for their processing threads to complete.
We send the stop signal to all QueueManagers first, then wait for them to finish.
"""
self.discovery_manager.stop()
self.poller_manager.stop()
self.services_manager.stop()
self.billing_manager.stop()
self.discovery_manager.stop_and_wait()
self.poller_manager.stop_and_wait()
self.services_manager.stop_and_wait()
self.billing_manager.stop_and_wait()
def check_single_instance(self):
"""
Check that there is only one instance of the service running on this computer.
        We do this by creating a file in the base directory (.lock.service) if it doesn't exist and
obtaining an exclusive lock on that file.
"""
lock_file = "{}/{}".format(self.config.BASE_DIR, '.lock.service')
import fcntl
self._fp = open(lock_file, 'w') # keep a reference so the file handle isn't garbage collected
self._fp.flush()
try:
fcntl.lockf(self._fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
warning("Another instance is already running, quitting.")
exit(2)
def report_execution_time(self, time, activity):
self.performance_stats[activity].add(time)
def log_performance_stats(self):
info("Counting up time spent polling")
try:
# Report on the poller instance as a whole
self._db.query('INSERT INTO poller_cluster(node_id, poller_name, poller_version, poller_groups, last_report, master) '
'values("{0}", "{1}", "{2}", "{3}", NOW(), {4}) '
'ON DUPLICATE KEY UPDATE poller_version="{2}", poller_groups="{3}", last_report=NOW(), master={4}; '
.format(self.config.node_id, self.config.name, "librenms-service", ','.join(str(g) for g in self.config.group), 1 if self.is_master else 0))
# Find our ID
self._db.query('SELECT id INTO @parent_poller_id FROM poller_cluster WHERE node_id="{0}"; '.format(self.config.node_id))
for worker_type, counter in self.performance_stats.items():
worker_seconds, devices = counter.reset()
# Record the queue state
self._db.query('INSERT INTO poller_cluster_stats(parent_poller, poller_type, depth, devices, worker_seconds, workers, frequency) '
'values(@parent_poller_id, "{0}", {1}, {2}, {3}, {4}, {5}) '
'ON DUPLICATE KEY UPDATE depth={1}, devices={2}, worker_seconds={3}, workers={4}, frequency={5}; '
.format(worker_type,
sum([getattr(self, ''.join([worker_type, '_manager'])).get_queue(group).qsize() for group in self.config.group]),
devices,
worker_seconds,
getattr(self.config, worker_type).workers,
getattr(self.config, worker_type).frequency)
)
except Exception:
exception("Unable to log performance statistics - is the database still online?")
| gpl-3.0 | 919,063,731,641,398,900 | 41.837912 | 167 | 0.595427 | false |
mmp2/megaman | doc/sphinxext/numpy_ext/edit_on_github.py | 11 | 5895 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This extension makes it easy to edit documentation on github.
It adds links associated with each docstring that go to the
corresponding view source page on Github. From there, the user can
push the "Edit" button, edit the docstring, and submit a pull request.
It has the following configuration options (to be set in the project's
``conf.py``):
* ``edit_on_github_project``
The name of the github project, in the form
"username/projectname".
* ``edit_on_github_branch``
The name of the branch to edit. If this is a released version,
this should be a git tag referring to that version. For a
dev version, it often makes sense for it to be "master". It
may also be a git hash.
* ``edit_on_github_source_root``
The location within the source tree of the root of the
Python package. Defaults to "lib".
* ``edit_on_github_doc_root``
The location within the source tree of the root of the
documentation source. Defaults to "doc", but it may make sense to
set it to "doc/source" if the project uses a separate source
directory.
* ``edit_on_github_docstring_message``
The phrase displayed in the links to edit a docstring. Defaults
to "[edit on github]".
* ``edit_on_github_page_message``
The phrase displayed in the links to edit a RST page. Defaults
to "[edit this page on github]".
* ``edit_on_github_help_message``
The phrase displayed as a tooltip on the edit links. Defaults to
"Push the Edit button on the next page"
* ``edit_on_github_skip_regex``
When the path to the .rst file matches this regular expression,
no "edit this page on github" link will be added. Defaults to
``"_.*"``.
"""
import inspect
import os
import re
import sys
from docutils import nodes
from sphinx import addnodes
def import_object(modname, name):
"""
Import the object given by *modname* and *name* and return it.
If not found, or the import fails, returns None.
"""
try:
__import__(modname)
mod = sys.modules[modname]
obj = mod
for part in name.split('.'):
obj = getattr(obj, part)
return obj
    except Exception:
return None
def get_url_base(app):
return 'http://github.com/%s/tree/%s/' % (
app.config.edit_on_github_project,
app.config.edit_on_github_branch)
def doctree_read(app, doctree):
# Get the configuration parameters
if app.config.edit_on_github_project == 'REQUIRED':
raise ValueError(
"The edit_on_github_project configuration variable must be "
"provided in the conf.py")
source_root = app.config.edit_on_github_source_root
url = get_url_base(app)
docstring_message = app.config.edit_on_github_docstring_message
# Handle the docstring-editing links
for objnode in doctree.traverse(addnodes.desc):
if objnode.get('domain') != 'py':
continue
names = set()
for signode in objnode:
if not isinstance(signode, addnodes.desc_signature):
continue
modname = signode.get('module')
if not modname:
continue
fullname = signode.get('fullname')
if fullname in names:
# only one link per name, please
continue
names.add(fullname)
obj = import_object(modname, fullname)
anchor = None
if obj is not None:
try:
lines, lineno = inspect.getsourcelines(obj)
                except Exception:
pass
else:
anchor = '#L%d' % lineno
if anchor:
real_modname = inspect.getmodule(obj).__name__
path = '%s%s%s.py%s' % (
url, source_root, real_modname.replace('.', '/'), anchor)
onlynode = addnodes.only(expr='html')
onlynode += nodes.reference(
reftitle=app.config.edit_on_github_help_message,
refuri=path)
onlynode[0] += nodes.inline(
'', '', nodes.raw('', ' ', format='html'),
nodes.Text(docstring_message),
classes=['edit-on-github', 'viewcode-link'])
signode += onlynode
def html_page_context(app, pagename, templatename, context, doctree):
if (templatename == 'page.html' and
not re.match(app.config.edit_on_github_skip_regex, pagename)):
doc_root = app.config.edit_on_github_doc_root
if doc_root != '' and not doc_root.endswith('/'):
doc_root += '/'
doc_path = os.path.relpath(doctree.get('source'), app.builder.srcdir)
url = get_url_base(app)
        page_message = app.config.edit_on_github_page_message
        context['edit_on_github'] = url + doc_root + doc_path
        context['edit_on_github_page_message'] = page_message
def setup(app):
app.add_config_value('edit_on_github_project', 'REQUIRED', True)
app.add_config_value('edit_on_github_branch', 'master', True)
app.add_config_value('edit_on_github_source_root', 'lib', True)
app.add_config_value('edit_on_github_doc_root', 'doc', True)
app.add_config_value('edit_on_github_docstring_message',
'[edit on github]', True)
app.add_config_value('edit_on_github_page_message',
'Edit This Page on Github', True)
app.add_config_value('edit_on_github_help_message',
'Push the Edit button on the next page', True)
app.add_config_value('edit_on_github_skip_regex',
'_.*', True)
app.connect('doctree-read', doctree_read)
app.connect('html-page-context', html_page_context)
| bsd-2-clause | -4,112,364,382,127,077,000 | 34.727273 | 77 | 0.604241 | false |
mbareta/edx-platform-ft | common/lib/xmodule/xmodule/modulestore/mixed.py | 10 | 43011 | """
MixedModuleStore allows for aggregation between multiple modulestores.
In this way, courses can be served up via either SplitMongoModuleStore or MongoModuleStore.
"""
import logging
from contextlib import contextmanager
import itertools
import functools
from contracts import contract, new_contract
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey, AssetKey
from opaque_keys.edx.locator import LibraryLocator
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.assetstore import AssetMetadata
from . import ModuleStoreWriteBase, ModuleStoreEnum, XMODULE_FIELDS_WITH_USAGE_KEYS
from .exceptions import ItemNotFoundError, DuplicateCourseError
from .draft_and_published import ModuleStoreDraftAndPublished
from .split_migrator import SplitMigrator
new_contract('CourseKey', CourseKey)
new_contract('AssetKey', AssetKey)
new_contract('AssetMetadata', AssetMetadata)
new_contract('LibraryLocator', LibraryLocator)
new_contract('long', long)
log = logging.getLogger(__name__)
def strip_key(func):
"""
A decorator for stripping version and branch information from return values that are, or contain, UsageKeys or
CourseKeys.
Additionally, the decorated function is called with an optional 'field_decorator' parameter that can be used
to strip any location(-containing) fields, which are not directly returned by the function.
The behavior can be controlled by passing 'remove_version' and 'remove_branch' booleans to the decorated
function's kwargs.
"""
@functools.wraps(func)
def inner(*args, **kwargs):
"""
Supported kwargs:
remove_version - If True, calls 'version_agnostic' on all return values, including those in lists and dicts.
remove_branch - If True, calls 'for_branch(None)' on all return values, including those in lists and dicts.
Note: The 'field_decorator' parameter passed to the decorated function is a function that honors the
values of these kwargs.
"""
# remove version and branch, by default
rem_vers = kwargs.pop('remove_version', True)
rem_branch = kwargs.pop('remove_branch', True)
# helper function for stripping individual values
def strip_key_func(val):
"""
Strips the version and branch information according to the settings of rem_vers and rem_branch.
Recursively calls this function if the given value has a 'location' attribute.
"""
retval = val
if rem_vers and hasattr(retval, 'version_agnostic'):
retval = retval.version_agnostic()
if rem_branch and hasattr(retval, 'for_branch'):
retval = retval.for_branch(None)
for field_name in XMODULE_FIELDS_WITH_USAGE_KEYS:
if hasattr(retval, field_name):
setattr(retval, field_name, strip_key_func(getattr(retval, field_name)))
return retval
# function for stripping both, collection of, and individual, values
def strip_key_collection(field_value):
"""
Calls strip_key_func for each element in the given value.
"""
if rem_vers or rem_branch:
if isinstance(field_value, list):
field_value = [strip_key_func(fv) for fv in field_value]
elif isinstance(field_value, dict):
for key, val in field_value.iteritems():
field_value[key] = strip_key_func(val)
else:
field_value = strip_key_func(field_value)
return field_value
# call the decorated function
retval = func(field_decorator=strip_key_collection, *args, **kwargs)
# strip the return value
return strip_key_collection(retval)
return inner
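# Editor's note: a self-contained sketch of how @strip_key behaves. `FakeKey`
# and `find_things` below are hypothetical stand-ins, not edx-platform APIs;
# real callers decorate modulestore methods that return UsageKeys/CourseKeys.
def _strip_key_demo():
    class FakeKey(object):
        def __init__(self, tag):
            self.tag = tag
        def version_agnostic(self):
            return FakeKey(self.tag + '+no-version')
        def for_branch(self, branch):
            return FakeKey(self.tag + '+no-branch')
    @strip_key
    def find_things(field_decorator=None):
        return [FakeKey('a'), FakeKey('b')]
    stripped = find_things()  # version and branch stripped by default
    raw = find_things(remove_version=False, remove_branch=False)  # opt out per call
    return stripped, raw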
def prepare_asides(func):
"""
A decorator to handle optional asides param
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
"""
Supported kwargs:
asides - list with connected asides data for the passed block
"""
if 'asides' in kwargs:
kwargs['asides'] = prepare_asides_to_store(kwargs['asides'])
return func(*args, **kwargs)
return wrapper
def prepare_asides_to_store(asides_source):
"""
Convert Asides Xblocks objects to the list of dicts (to store this information in MongoDB)
"""
asides = None
if asides_source:
asides = []
for asd in asides_source:
aside_fields = {}
for asd_field_key, asd_field_val in asd.fields.iteritems():
aside_fields[asd_field_key] = asd_field_val.read_from(asd)
asides.append({
'aside_type': asd.scope_ids.block_type,
'fields': aside_fields
})
return asides
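# Editor's note: an illustrative (hypothetical) aside showing the dict shape
# prepare_asides_to_store produces; real asides come from the XBlock runtime.
def _prepare_asides_demo():
    class FakeField(object):
        def __init__(self, value):
            self._value = value
        def read_from(self, block):
            return self._value
    class FakeAside(object):
        class scope_ids(object):
            block_type = 'tagging_aside'
        fields = {'saved_tags': FakeField({'difficulty': ['hard']})}
    # Returns: [{'aside_type': 'tagging_aside',
    #            'fields': {'saved_tags': {'difficulty': ['hard']}}}]
    return prepare_asides_to_store([FakeAside()])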
class MixedModuleStore(ModuleStoreDraftAndPublished, ModuleStoreWriteBase):
"""
    ModuleStore knows how to route requests to the right persistence modulestore
"""
def __init__(
self,
contentstore,
mappings,
stores,
i18n_service=None,
fs_service=None,
user_service=None,
create_modulestore_instance=None,
signal_handler=None,
**kwargs
):
"""
Initialize a MixedModuleStore. Here we look into our passed in kwargs which should be a
collection of other modulestore configuration information
"""
super(MixedModuleStore, self).__init__(contentstore, **kwargs)
if create_modulestore_instance is None:
raise ValueError('MixedModuleStore constructor must be passed a create_modulestore_instance function')
self.modulestores = []
self.mappings = {}
for course_id, store_name in mappings.iteritems():
try:
self.mappings[CourseKey.from_string(course_id)] = store_name
except InvalidKeyError:
try:
self.mappings[SlashSeparatedCourseKey.from_deprecated_string(course_id)] = store_name
except InvalidKeyError:
log.exception("Invalid MixedModuleStore configuration. Unable to parse course_id %r", course_id)
continue
for store_settings in stores:
key = store_settings['NAME']
store = create_modulestore_instance(
store_settings['ENGINE'],
self.contentstore,
store_settings.get('DOC_STORE_CONFIG', {}),
store_settings.get('OPTIONS', {}),
i18n_service=i18n_service,
fs_service=fs_service,
user_service=user_service,
signal_handler=signal_handler,
)
# replace all named pointers to the store into actual pointers
for course_key, store_name in self.mappings.iteritems():
if store_name == key:
self.mappings[course_key] = store
self.modulestores.append(store)
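    # Editor's note: a hedged sketch of the ``mappings``/``stores`` arguments the
    # constructor above expects (names, engines and option dicts are illustrative):
    #
    #   mappings = {'edX/DemoX/Demo_Course': 'draft'}  # pin a course to a store NAME
    #   stores = [
    #       {'NAME': 'split',
    #        'ENGINE': 'xmodule.modulestore.split_mongo.split_draft.DraftVersioningModuleStore',
    #        'DOC_STORE_CONFIG': {}, 'OPTIONS': {}},
    #       {'NAME': 'draft',
    #        'ENGINE': 'xmodule.modulestore.mongo.draft.DraftModuleStore',
    #        'DOC_STORE_CONFIG': {}, 'OPTIONS': {}},
    #   ]
    #
    # The first entry in ``stores`` becomes the default store (see default_modulestore).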
def _clean_locator_for_mapping(self, locator):
"""
In order for mapping to work, the locator must be minimal--no version, no branch--
as we never store one version or one branch in one ms and another in another ms.
:param locator: the CourseKey
"""
if hasattr(locator, 'version_agnostic'):
locator = locator.version_agnostic()
if hasattr(locator, 'branch'):
locator = locator.replace(branch=None)
return locator
def _get_modulestore_for_courselike(self, locator=None):
"""
For a given locator, look in the mapping table and see if it has been pinned
to a particular modulestore
If locator is None, returns the first (ordered) store as the default
"""
if locator is not None:
locator = self._clean_locator_for_mapping(locator)
mapping = self.mappings.get(locator, None)
if mapping is not None:
return mapping
else:
if isinstance(locator, LibraryLocator):
has_locator = lambda store: hasattr(store, 'has_library') and store.has_library(locator)
else:
has_locator = lambda store: store.has_course(locator)
for store in self.modulestores:
if has_locator(store):
self.mappings[locator] = store
return store
# return the default store
return self.default_modulestore
def _get_modulestore_by_type(self, modulestore_type):
"""
This method should only really be used by tests and migration scripts when necessary.
Returns the module store as requested by type. The type can be a value from ModuleStoreEnum.Type.
"""
for store in self.modulestores:
if store.get_modulestore_type() == modulestore_type:
return store
return None
def fill_in_run(self, course_key):
"""
Some course_keys are used without runs. This function calls the corresponding
fill_in_run function on the appropriate modulestore.
"""
store = self._get_modulestore_for_courselike(course_key)
if not hasattr(store, 'fill_in_run'):
return course_key
return store.fill_in_run(course_key)
def has_item(self, usage_key, **kwargs):
"""
        Does the course include the xblock whose id is referenced?
"""
store = self._get_modulestore_for_courselike(usage_key.course_key)
return store.has_item(usage_key, **kwargs)
@strip_key
def get_item(self, usage_key, depth=0, **kwargs):
"""
see parent doc
"""
store = self._get_modulestore_for_courselike(usage_key.course_key)
return store.get_item(usage_key, depth, **kwargs)
@strip_key
def get_items(self, course_key, **kwargs):
"""
Returns:
list of XModuleDescriptor instances for the matching items within the course with
the given course_key
NOTE: don't use this to look for courses
as the course_key is required. Use get_courses.
Args:
course_key (CourseKey): the course identifier
kwargs:
settings (dict): fields to look for which have settings scope. Follows same syntax
and rules as kwargs below
content (dict): fields to look for which have content scope. Follows same syntax and
rules as kwargs below.
qualifiers (dict): what to look for within the course.
Common qualifiers are ``category`` or any field name. if the target field is a list,
then it searches for the given value in the list not list equivalence.
Substring matching pass a regex object.
For some modulestores, ``name`` is another commonly provided key (Location based stores)
For some modulestores,
you can search by ``edited_by``, ``edited_on`` providing either a datetime for == (probably
useless) or a function accepting one arg to do inequality
"""
if not isinstance(course_key, CourseKey):
raise Exception("Must pass in a course_key when calling get_items()")
store = self._get_modulestore_for_courselike(course_key)
return store.get_items(course_key, **kwargs)
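    # Editor's note: illustrative call patterns (the course_key value is assumed):
    #   store.get_items(course_key, qualifiers={'category': 'problem'})
    #   store.get_items(course_key, settings={'display_name': re.compile(r'^Exam')})
    # ``qualifiers`` matches any field; a compiled regex gives substring matching.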
@strip_key
def get_course_summaries(self, **kwargs):
"""
Returns a list containing the course information in CourseSummary objects.
Information contains `location`, `display_name`, `locator` of the courses in this modulestore.
"""
course_summaries = {}
for store in self.modulestores:
for course_summary in store.get_course_summaries(**kwargs):
course_id = self._clean_locator_for_mapping(locator=course_summary.id)
# Check if course is indeed unique. Save it in result if unique
if course_id in course_summaries:
log.warning(
u"Modulestore %s have duplicate courses %s; skipping from result.", store, course_id
)
else:
course_summaries[course_id] = course_summary
return course_summaries.values()
@strip_key
def get_courses(self, **kwargs):
'''
Returns a list containing the top level XModuleDescriptors of the courses in this modulestore.
'''
courses = {}
for store in self.modulestores:
# filter out ones which were fetched from earlier stores but locations may not be ==
for course in store.get_courses(**kwargs):
course_id = self._clean_locator_for_mapping(course.id)
if course_id not in courses:
# course is indeed unique. save it in result
courses[course_id] = course
return courses.values()
@strip_key
def get_libraries(self, **kwargs):
"""
Returns a list containing the top level XBlock of the libraries (LibraryRoot) in this modulestore.
"""
libraries = {}
for store in self.modulestores:
if not hasattr(store, 'get_libraries'):
continue
# filter out ones which were fetched from earlier stores but locations may not be ==
for library in store.get_libraries(**kwargs):
library_id = self._clean_locator_for_mapping(library.location)
if library_id not in libraries:
# library is indeed unique. save it in result
libraries[library_id] = library
return libraries.values()
def make_course_key(self, org, course, run):
"""
Return a valid :class:`~opaque_keys.edx.keys.CourseKey` for this modulestore
that matches the supplied `org`, `course`, and `run`.
This key may represent a course that doesn't exist in this modulestore.
"""
# If there is a mapping that match this org/course/run, use that
for course_id, store in self.mappings.iteritems():
candidate_key = store.make_course_key(org, course, run)
if candidate_key == course_id:
return candidate_key
# Otherwise, return the key created by the default store
return self.default_modulestore.make_course_key(org, course, run)
def make_course_usage_key(self, course_key):
"""
Return a valid :class:`~opaque_keys.edx.keys.UsageKey` for the modulestore
that matches the supplied course_key.
"""
assert isinstance(course_key, CourseKey)
store = self._get_modulestore_for_courselike(course_key)
return store.make_course_usage_key(course_key)
@strip_key
def get_course(self, course_key, depth=0, **kwargs):
"""
returns the course module associated with the course_id. If no such course exists,
it returns None
:param course_key: must be a CourseKey
"""
assert isinstance(course_key, CourseKey)
store = self._get_modulestore_for_courselike(course_key)
try:
return store.get_course(course_key, depth=depth, **kwargs)
except ItemNotFoundError:
return None
@strip_key
@contract(library_key='LibraryLocator')
def get_library(self, library_key, depth=0, **kwargs):
"""
returns the library block associated with the given key. If no such library exists,
it returns None
:param library_key: must be a LibraryLocator
"""
try:
store = self._verify_modulestore_support(library_key, 'get_library')
return store.get_library(library_key, depth=depth, **kwargs)
except NotImplementedError:
log.exception("Modulestore configured for %s does not have get_library method", library_key)
return None
except ItemNotFoundError:
return None
@strip_key
def has_course(self, course_id, ignore_case=False, **kwargs):
"""
returns the course_id of the course if it was found, else None
Note: we return the course_id instead of a boolean here since the found course may have
a different id than the given course_id when ignore_case is True.
Args:
* course_id (CourseKey)
* ignore_case (bool): If True, do a case insensitive search. If
False, do a case sensitive search
"""
assert isinstance(course_id, CourseKey)
store = self._get_modulestore_for_courselike(course_id)
return store.has_course(course_id, ignore_case, **kwargs)
def delete_course(self, course_key, user_id):
"""
See xmodule.modulestore.__init__.ModuleStoreWrite.delete_course
"""
assert isinstance(course_key, CourseKey)
store = self._get_modulestore_for_courselike(course_key)
return store.delete_course(course_key, user_id)
@contract(asset_metadata='AssetMetadata', user_id='int|long', import_only=bool)
def save_asset_metadata(self, asset_metadata, user_id, import_only=False):
"""
Saves the asset metadata for a particular course's asset.
Args:
asset_metadata (AssetMetadata): data about the course asset data
user_id (int|long): user ID saving the asset metadata
import_only (bool): True if importing without editing, False if editing
Returns:
True if info save was successful, else False
"""
store = self._get_modulestore_for_courselike(asset_metadata.asset_id.course_key)
return store.save_asset_metadata(asset_metadata, user_id, import_only)
@contract(asset_metadata_list='list(AssetMetadata)', user_id='int|long', import_only=bool)
def save_asset_metadata_list(self, asset_metadata_list, user_id, import_only=False):
"""
Saves the asset metadata for each asset in a list of asset metadata.
Optimizes the saving of many assets.
Args:
asset_metadata_list (list(AssetMetadata)): list of data about several course assets
user_id (int|long): user ID saving the asset metadata
import_only (bool): True if importing without editing, False if editing
Returns:
True if info save was successful, else False
"""
if len(asset_metadata_list) == 0:
return True
store = self._get_modulestore_for_courselike(asset_metadata_list[0].asset_id.course_key)
return store.save_asset_metadata_list(asset_metadata_list, user_id, import_only)
@strip_key
@contract(asset_key='AssetKey')
def find_asset_metadata(self, asset_key, **kwargs):
"""
Find the metadata for a particular course asset.
Args:
asset_key (AssetKey): locator containing original asset filename
Returns:
asset metadata (AssetMetadata) -or- None if not found
"""
store = self._get_modulestore_for_courselike(asset_key.course_key)
return store.find_asset_metadata(asset_key, **kwargs)
@strip_key
@contract(course_key='CourseKey', asset_type='None | basestring', start=int, maxresults=int, sort='tuple|None')
def get_all_asset_metadata(self, course_key, asset_type, start=0, maxresults=-1, sort=None, **kwargs):
"""
Returns a list of static assets for a course.
By default all assets are returned, but start and maxresults can be provided to limit the query.
Args:
course_key (CourseKey): course identifier
asset_type (str): type of asset, such as 'asset', 'video', etc. If None, return assets of all types.
start (int): optional - start at this asset number
maxresults (int): optional - return at most this many, -1 means no limit
sort (array): optional - None means no sort
(sort_by (str), sort_order (str))
sort_by - one of 'uploadDate' or 'displayname'
sort_order - one of 'ascending' or 'descending'
Returns:
List of AssetMetadata objects.
"""
store = self._get_modulestore_for_courselike(course_key)
return store.get_all_asset_metadata(course_key, asset_type, start, maxresults, sort, **kwargs)
@contract(asset_key='AssetKey', user_id='int|long')
def delete_asset_metadata(self, asset_key, user_id):
"""
Deletes a single asset's metadata.
Arguments:
asset_id (AssetKey): locator containing original asset filename
user_id (int_long): user deleting the metadata
Returns:
Number of asset metadata entries deleted (0 or 1)
"""
store = self._get_modulestore_for_courselike(asset_key.course_key)
return store.delete_asset_metadata(asset_key, user_id)
@contract(source_course_key='CourseKey', dest_course_key='CourseKey', user_id='int|long')
def copy_all_asset_metadata(self, source_course_key, dest_course_key, user_id):
"""
Copy all the course assets from source_course_key to dest_course_key.
Arguments:
source_course_key (CourseKey): identifier of course to copy from
dest_course_key (CourseKey): identifier of course to copy to
user_id (int|long): user copying the asset metadata
"""
source_store = self._get_modulestore_for_courselike(source_course_key)
dest_store = self._get_modulestore_for_courselike(dest_course_key)
if source_store != dest_store:
with self.bulk_operations(dest_course_key):
# Get all the asset metadata in the source course.
all_assets = source_store.get_all_asset_metadata(source_course_key, 'asset')
# Store it all in the dest course.
for asset in all_assets:
new_asset_key = dest_course_key.make_asset_key('asset', asset.asset_id.path)
copied_asset = AssetMetadata(new_asset_key)
copied_asset.from_storable(asset.to_storable())
dest_store.save_asset_metadata(copied_asset, user_id)
else:
# Courses in the same modulestore can be handled by the modulestore itself.
source_store.copy_all_asset_metadata(source_course_key, dest_course_key, user_id)
@contract(asset_key='AssetKey', attr=str, user_id='int|long')
def set_asset_metadata_attr(self, asset_key, attr, value, user_id):
"""
Add/set the given attr on the asset at the given location. Value can be any type which pymongo accepts.
Arguments:
asset_key (AssetKey): asset identifier
attr (str): which attribute to set
value: the value to set it to (any type pymongo accepts such as datetime, number, string)
user_id: (int|long): user setting the attribute
Raises:
NotFoundError if no such item exists
            AttributeError if attr is one of the built-in attrs.
"""
store = self._get_modulestore_for_courselike(asset_key.course_key)
return store.set_asset_metadata_attrs(asset_key, {attr: value}, user_id)
@contract(asset_key='AssetKey', attr_dict=dict, user_id='int|long')
def set_asset_metadata_attrs(self, asset_key, attr_dict, user_id):
"""
Add/set the given dict of attrs on the asset at the given location. Value can be any type which pymongo accepts.
Arguments:
asset_key (AssetKey): asset identifier
attr_dict (dict): attribute/value pairs to set
user_id: (int|long): user setting the attributes
Raises:
NotFoundError if no such item exists
            AttributeError if attr is one of the built-in attrs.
"""
store = self._get_modulestore_for_courselike(asset_key.course_key)
return store.set_asset_metadata_attrs(asset_key, attr_dict, user_id)
@strip_key
def get_parent_location(self, location, **kwargs):
"""
returns the parent locations for a given location
"""
store = self._get_modulestore_for_courselike(location.course_key)
return store.get_parent_location(location, **kwargs)
def get_block_original_usage(self, usage_key):
"""
If a block was inherited into another structure using copy_from_template,
this will return the original block usage locator from which the
copy was inherited.
"""
try:
store = self._verify_modulestore_support(usage_key.course_key, 'get_block_original_usage')
return store.get_block_original_usage(usage_key)
except NotImplementedError:
return None, None
def get_modulestore_type(self, course_id):
"""
Returns a type which identifies which modulestore is servicing the given course_id.
The return can be one of:
"xml" (for XML based courses),
"mongo" for old-style MongoDB backed courses,
"split" for new-style split MongoDB backed courses.
"""
return self._get_modulestore_for_courselike(course_id).get_modulestore_type()
@strip_key
def get_orphans(self, course_key, **kwargs):
"""
Get all of the xblocks in the given course which have no parents and are not of types which are
usually orphaned. NOTE: may include xblocks which still have references via xblocks which don't
use children to point to their dependents.
"""
store = self._get_modulestore_for_courselike(course_key)
return store.get_orphans(course_key, **kwargs)
def get_errored_courses(self):
"""
Return a dictionary of course_dir -> [(msg, exception_str)], for each
course_dir where course loading failed.
"""
errs = {}
for store in self.modulestores:
errs.update(store.get_errored_courses())
return errs
@strip_key
def create_course(self, org, course, run, user_id, **kwargs):
"""
Creates and returns the course.
Args:
org (str): the organization that owns the course
course (str): the name of the course
run (str): the name of the run
user_id: id of the user creating the course
fields (dict): Fields to set on the course at initialization
kwargs: Any optional arguments understood by a subset of modulestores to customize instantiation
Returns: a CourseDescriptor
"""
# first make sure an existing course doesn't already exist in the mapping
course_key = self.make_course_key(org, course, run)
if course_key in self.mappings and self.mappings[course_key].has_course(course_key):
raise DuplicateCourseError(course_key, course_key)
# create the course
store = self._verify_modulestore_support(None, 'create_course')
course = store.create_course(org, course, run, user_id, **kwargs)
# add new course to the mapping
self.mappings[course_key] = store
return course
@strip_key
def create_library(self, org, library, user_id, fields, **kwargs):
"""
Creates and returns a new library.
Args:
org (str): the organization that owns the course
library (str): the code/number/name of the library
user_id: id of the user creating the course
fields (dict): Fields to set on the course at initialization - e.g. display_name
kwargs: Any optional arguments understood by a subset of modulestores to customize instantiation
Returns: a LibraryRoot
"""
# first make sure an existing course/lib doesn't already exist in the mapping
lib_key = LibraryLocator(org=org, library=library)
if lib_key in self.mappings:
raise DuplicateCourseError(lib_key, lib_key)
# create the library
store = self._verify_modulestore_support(None, 'create_library')
library = store.create_library(org, library, user_id, fields, **kwargs)
# add new library to the mapping
self.mappings[lib_key] = store
return library
@strip_key
def clone_course(self, source_course_id, dest_course_id, user_id, fields=None, **kwargs):
"""
See the superclass for the general documentation.
If cloning w/in a store, delegates to that store's clone_course which, in order to be self-
sufficient, should handle the asset copying (call the same method as this one does)
If cloning between stores,
* copy the assets
* migrate the courseware
"""
source_modulestore = self._get_modulestore_for_courselike(source_course_id)
# for a temporary period of time, we may want to hardcode dest_modulestore as split if there's a split
# to have only course re-runs go to split. This code, however, uses the config'd priority
dest_modulestore = self._get_modulestore_for_courselike(dest_course_id)
if source_modulestore == dest_modulestore:
return source_modulestore.clone_course(source_course_id, dest_course_id, user_id, fields, **kwargs)
if dest_modulestore.get_modulestore_type() == ModuleStoreEnum.Type.split:
split_migrator = SplitMigrator(dest_modulestore, source_modulestore)
split_migrator.migrate_mongo_course(source_course_id, user_id, dest_course_id.org,
dest_course_id.course, dest_course_id.run, fields, **kwargs)
# the super handles assets and any other necessities
super(MixedModuleStore, self).clone_course(source_course_id, dest_course_id, user_id, fields, **kwargs)
else:
raise NotImplementedError("No code for cloning from {} to {}".format(
source_modulestore, dest_modulestore
))
@strip_key
@prepare_asides
def create_item(self, user_id, course_key, block_type, block_id=None, fields=None, **kwargs):
"""
Creates and saves a new item in a course.
Returns the newly created item.
Args:
user_id: ID of the user creating and saving the xmodule
course_key: A :class:`~opaque_keys.edx.CourseKey` identifying which course to create
this item in
            block_type: The type of block to create
block_id: a unique identifier for the new item. If not supplied,
a new identifier will be generated
fields (dict): A dictionary specifying initial values for some or all fields
in the newly created block
"""
modulestore = self._verify_modulestore_support(course_key, 'create_item')
return modulestore.create_item(user_id, course_key, block_type, block_id=block_id, fields=fields, **kwargs)
@strip_key
@prepare_asides
def create_child(self, user_id, parent_usage_key, block_type, block_id=None, fields=None, **kwargs):
"""
Creates and saves a new xblock that is a child of the specified block
Returns the newly created item.
Args:
user_id: ID of the user creating and saving the xmodule
parent_usage_key: a :class:`~opaque_key.edx.UsageKey` identifying the
block that this item should be parented under
            block_type: The type of block to create
block_id: a unique identifier for the new item. If not supplied,
a new identifier will be generated
fields (dict): A dictionary specifying initial values for some or all fields
in the newly created block
"""
modulestore = self._verify_modulestore_support(parent_usage_key.course_key, 'create_child')
return modulestore.create_child(user_id, parent_usage_key, block_type, block_id=block_id, fields=fields, **kwargs)
@strip_key
@prepare_asides
def import_xblock(self, user_id, course_key, block_type, block_id, fields=None, runtime=None, **kwargs):
"""
See :py:meth `ModuleStoreDraftAndPublished.import_xblock`
Defer to the course's modulestore if it supports this method
"""
store = self._verify_modulestore_support(course_key, 'import_xblock')
return store.import_xblock(user_id, course_key, block_type, block_id, fields, runtime, **kwargs)
@strip_key
def copy_from_template(self, source_keys, dest_key, user_id, **kwargs):
"""
See :py:meth `SplitMongoModuleStore.copy_from_template`
"""
store = self._verify_modulestore_support(dest_key.course_key, 'copy_from_template')
return store.copy_from_template(source_keys, dest_key, user_id)
@strip_key
@prepare_asides
def update_item(self, xblock, user_id, allow_not_found=False, **kwargs):
"""
Update the xblock persisted to be the same as the given for all types of fields
(content, children, and metadata) attribute the change to the given user.
"""
store = self._verify_modulestore_support(xblock.location.course_key, 'update_item')
return store.update_item(xblock, user_id, allow_not_found, **kwargs)
@strip_key
def delete_item(self, location, user_id, **kwargs):
"""
Delete the given item from persistence. kwargs allow modulestore specific parameters.
"""
store = self._verify_modulestore_support(location.course_key, 'delete_item')
return store.delete_item(location, user_id=user_id, **kwargs)
def revert_to_published(self, location, user_id):
"""
Reverts an item to its last published version (recursively traversing all of its descendants).
If no published version exists, an InvalidVersionError is thrown.
If a published version exists but there is no draft version of this item or any of its descendants, this
method is a no-op.
:raises InvalidVersionError: if no published version exists for the location specified
"""
store = self._verify_modulestore_support(location.course_key, 'revert_to_published')
return store.revert_to_published(location, user_id)
def close_all_connections(self):
"""
Close all db connections
"""
for modulestore in self.modulestores:
modulestore.close_connections()
def _drop_database(self, database=True, collections=True, connections=True):
"""
A destructive operation to drop the underlying database and close all connections.
Intended to be used by test code for cleanup.
If database is True, then this should drop the entire database.
Otherwise, if collections is True, then this should drop all of the collections used
by this modulestore.
Otherwise, the modulestore should remove all data from the collections.
If connections is True, then close the connection to the database as well.
"""
for modulestore in self.modulestores:
# drop database if the store supports it (read-only stores do not)
if hasattr(modulestore, '_drop_database'):
modulestore._drop_database(database, collections, connections) # pylint: disable=protected-access
@strip_key
def create_xblock(self, runtime, course_key, block_type, block_id=None, fields=None, **kwargs):
"""
Create the new xmodule but don't save it. Returns the new module.
Args:
runtime: :py:class `xblock.runtime` from another xblock in the same course. Providing this
significantly speeds up processing (inheritance and subsequent persistence)
course_key: :py:class `opaque_keys.CourseKey`
block_type: :py:class `string`: the string identifying the xblock type
block_id: the string uniquely identifying the block within the given course
fields: :py:class `dict` field_name, value pairs for initializing the xblock fields. Values
should be the pythonic types not the json serialized ones.
"""
store = self._verify_modulestore_support(course_key, 'create_xblock')
return store.create_xblock(runtime, course_key, block_type, block_id, fields or {}, **kwargs)
@strip_key
def get_courses_for_wiki(self, wiki_slug, **kwargs):
"""
Return the list of courses which use this wiki_slug
:param wiki_slug: the course wiki root slug
:return: list of course keys
"""
courses = []
for modulestore in self.modulestores:
courses.extend(modulestore.get_courses_for_wiki(wiki_slug, **kwargs))
return courses
def heartbeat(self):
"""
Delegate to each modulestore and package the results for the caller.
"""
# could be done in parallel threads if needed
return dict(
itertools.chain.from_iterable(
store.heartbeat().iteritems()
for store in self.modulestores
)
)
def has_published_version(self, xblock):
"""
Returns whether this xblock is draft, public, or private.
Returns:
PublishState.draft - content is in the process of being edited, but still has a previous
version deployed to LMS
PublishState.public - content is locked and deployed to LMS
PublishState.private - content is editable and not deployed to LMS
"""
course_id = xblock.scope_ids.usage_id.course_key
store = self._get_modulestore_for_courselike(course_id)
return store.has_published_version(xblock)
@strip_key
def publish(self, location, user_id, **kwargs):
"""
Save a current draft to the underlying modulestore
Returns the newly published item.
"""
store = self._verify_modulestore_support(location.course_key, 'publish')
return store.publish(location, user_id, **kwargs)
@strip_key
def unpublish(self, location, user_id, **kwargs):
"""
        Remove the published version of the item from the underlying modulestore.
        Returns the newly unpublished item.
"""
store = self._verify_modulestore_support(location.course_key, 'unpublish')
return store.unpublish(location, user_id, **kwargs)
def convert_to_draft(self, location, user_id):
"""
Create a copy of the source and mark its revision as draft.
Note: This method is to support the Mongo Modulestore and may be deprecated.
:param location: the location of the source (its revision must be None)
"""
store = self._verify_modulestore_support(location.course_key, 'convert_to_draft')
return store.convert_to_draft(location, user_id)
def has_changes(self, xblock):
"""
Checks if the given block has unpublished changes
:param xblock: the block to check
:return: True if the draft and published versions differ
"""
store = self._verify_modulestore_support(xblock.location.course_key, 'has_changes')
return store.has_changes(xblock)
def check_supports(self, course_key, method):
"""
Verifies that the modulestore for a particular course supports a feature.
Returns True/false based on this.
"""
try:
self._verify_modulestore_support(course_key, method)
return True
except NotImplementedError:
return False
def _verify_modulestore_support(self, course_key, method):
"""
        Finds and returns the store that contains the course for the given location, verifying
that the store supports the given method.
Raises NotImplementedError if the found store does not support the given method.
"""
store = self._get_modulestore_for_courselike(course_key)
if hasattr(store, method):
return store
else:
raise NotImplementedError(u"Cannot call {} on store {}".format(method, store))
@property
def default_modulestore(self):
"""
Return the default modulestore
"""
thread_local_default_store = getattr(self.thread_cache, 'default_store', None)
if thread_local_default_store:
# return the thread-local cache, if found
return thread_local_default_store
else:
# else return the default store
return self.modulestores[0]
@contextmanager
def default_store(self, store_type):
"""
A context manager for temporarily changing the default store in the Mixed modulestore to the given store type
"""
# find the store corresponding to the given type
store = next((store for store in self.modulestores if store.get_modulestore_type() == store_type), None)
if not store:
raise Exception(u"Cannot find store of type {}".format(store_type))
prev_thread_local_store = getattr(self.thread_cache, 'default_store', None)
try:
self.thread_cache.default_store = store
yield
finally:
self.thread_cache.default_store = prev_thread_local_store
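    # Editor's note: typical (test-only) usage, sketched with an assumed
    # ``mixed_store`` instance:
    #   with mixed_store.default_store(ModuleStoreEnum.Type.split):
    #       course = mixed_store.create_course('org', 'course', 'run', user_id)
    # The previous thread-local default is restored when the block exits.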
@contextmanager
def branch_setting(self, branch_setting, course_id=None):
"""
        A context manager for temporarily setting the branch value for the given course's store
to the given branch_setting. If course_id is None, the default store is used.
"""
store = self._verify_modulestore_support(course_id, 'branch_setting')
with store.branch_setting(branch_setting, course_id):
yield
@contextmanager
def bulk_operations(self, course_id, emit_signals=True):
"""
A context manager for notifying the store of bulk operations.
If course_id is None, the default store is used.
"""
store = self._get_modulestore_for_courselike(course_id)
with store.bulk_operations(course_id, emit_signals):
yield
def ensure_indexes(self):
"""
Ensure that all appropriate indexes are created that are needed by this modulestore, or raise
an exception if unable to.
This method is intended for use by tests and administrative commands, and not
to be run during server startup.
"""
for store in self.modulestores:
store.ensure_indexes()
| agpl-3.0 | 7,529,883,029,666,540,000 | 41.797015 | 122 | 0.629095 | false |
cezary12/blaze | blaze/catalog/array_provider.py | 13 | 5440 | from __future__ import absolute_import, division, print_function
import os
from os import path
import glob
import shutil
import tempfile
from dynd import nd, ndt
from .. import array
def load_json_file_array(root, array_name):
# Load the datashape
dsfile = root + '.datashape'
if not path.isfile(dsfile):
dsfile = path.dirname(root) + '.datashape'
if not path.isfile(dsfile):
raise Exception('No datashape file found for array %s'
% array_name)
with open(dsfile) as f:
dt = ndt.type(f.read())
# Load the JSON
# TODO: Add stream support to parse_json for compressed JSON, etc.
arr = nd.parse_json(dt, nd.memmap(root + '.json'))
return array(arr)
def load_json_directory_array(root, array_name):
# Load the datashape
dsfile = root + '.datashape'
if not path.isfile(dsfile):
raise Exception('No datashape file found for array %s' % array_name)
with open(dsfile) as f:
dt = ndt.type(f.read())
# Scan for JSON files, assuming they're just #.json
# Sort them numerically
files = sorted([(int(path.splitext(path.basename(x))[0]), x)
for x in glob.glob(path.join(root, '*.json'))])
files = [x[1] for x in files]
# Make an array with an extra fixed dimension, then
# read a JSON file into each element of that array
dt = ndt.make_fixed_dim(len(files), dt)
arr = nd.empty(dt)
for i, fname in enumerate(files):
nd.parse_json(arr[i], nd.memmap(fname))
arr.flag_as_immutable()
return array(arr)
def load_json_file_list_array(root, array_name):
# Load the datashape
dsfile = root + '.datashape'
if not path.isfile(dsfile):
raise Exception('No datashape file found for array %s' % array_name)
with open(dsfile) as f:
dt = ndt.type(f.read())
# Scan for JSON files -- no assumption on file suffix
#open list of files and load into python list
files = root + '.files'
with open(files) as f:
l_files = [fs.strip() for fs in f]
# Make an array with an extra fixed dimension, then
# read a JSON file into each element of that array
dt = ndt.make_fixed_dim(len(l_files), dt)
arr = nd.empty(dt)
for i, fname in enumerate(l_files):
with open(fname) as f:
nd.parse_json(arr[i], f.read())
arr.flag_as_immutable()
return array(arr)
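# Editor's note: a hedged sketch of the on-disk layouts the three loaders above
# expect (the 'arr' prefix is illustrative):
#   arr.datashape + arr.json            -> load_json_file_array
#   arr.datashape + arr/0.json, 1.json  -> load_json_directory_array
#   arr.datashape + arr.files           -> load_json_file_list_array
#                                          (one JSON file path per line)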
class json_array_provider:
def __init__(self, root_dir):
if not path.isdir(root_dir):
raise ValueError('%s is not a valid directory' % root_dir)
self.root_dir = root_dir
self.array_cache = {}
self.session_dirs = {}
def __call__(self, array_name):
# First check that the .json file at the requested address exists
root = path.join(self.root_dir, array_name[1:])
if (not path.isfile(root + '.json') and
not path.isfile(root + '.deferred.json') and
not path.isfile(root + '.files') and
not path.isdir(root)):
return None
# If we've already read this array into cache, just return it
print('Cache has keys %s' % self.array_cache.keys())
print('Checking cache for %s' % array_name)
if array_name in self.array_cache:
print('Returning cached array %s' % array_name)
return self.array_cache[array_name]
if path.isfile(root + '.json'):
print('Loading array %s from file %s'
% (array_name, root + '.json'))
arr = load_json_file_array(root, array_name)
elif path.isfile(root + '.deferred.json'):
print('Loading deferred array %s from file %s'
% (array_name, root + '.deferred.json'))
with open(root + '.deferred.json') as f:
print(f.read())
raise RuntimeError('TODO: Deferred loading not implemented!')
elif path.isfile(root + '.files'):
print('Loading files from file list: %s' % (root + '.files'))
arr = load_json_file_list_array(root, array_name)
else:
print('Loading array %s from directory %s' % (array_name, root))
arr = load_json_directory_array(root, array_name)
self.array_cache[array_name] = arr
return arr
def create_session_dir(self):
d = tempfile.mkdtemp(prefix='.session_', dir=self.root_dir)
session_name = '/' + os.path.basename(d)
if type(session_name) is unicode:
session_name = session_name.encode('utf-8')
self.session_dirs[session_name] = d
return session_name, d
def delete_session_dir(self, session_name):
shutil.rmtree(self.session_dirs[session_name])
del self.session_dirs[session_name]
def create_deferred_array_filename(self, session_name,
prefix, cache_array):
d = tempfile.mkstemp(suffix='.deferred.json', prefix=prefix,
dir=self.session_dirs[session_name], text=True)
array_name = os.path.basename(d[1])
array_name = session_name + '/' + array_name[:array_name.find('.')]
if type(array_name) is unicode:
array_name = array_name.encode('utf-8')
if cache_array is not None:
self.array_cache[array_name] = cache_array
return (os.fdopen(d[0], "w"), array_name, d[1])
| bsd-3-clause | 7,926,381,133,068,320,000 | 35.756757 | 76 | 0.592647 | false |
rpgplanet/django-avatar | avatar/settings.py | 37 | 1294 | from django.conf import settings
try:
from PIL import Image
dir(Image) # Placate PyFlakes
except ImportError:
import Image
AVATAR_DEFAULT_SIZE = getattr(settings, 'AVATAR_DEFAULT_SIZE', 80)
AUTO_GENERATE_AVATAR_SIZES = getattr(settings, 'AUTO_GENERATE_AVATAR_SIZES', (AVATAR_DEFAULT_SIZE,))
AVATAR_RESIZE_METHOD = getattr(settings, 'AVATAR_RESIZE_METHOD', Image.ANTIALIAS)
AVATAR_STORAGE_DIR = getattr(settings, 'AVATAR_STORAGE_DIR', 'avatars')
AVATAR_GRAVATAR_BACKUP = getattr(settings, 'AVATAR_GRAVATAR_BACKUP', True)
AVATAR_GRAVATAR_DEFAULT = getattr(settings, 'AVATAR_GRAVATAR_DEFAULT', None)
AVATAR_DEFAULT_URL = getattr(settings, 'AVATAR_DEFAULT_URL', 'avatar/img/default.jpg')
AVATAR_MAX_AVATARS_PER_USER = getattr(settings, 'AVATAR_MAX_AVATARS_PER_USER', 42)
AVATAR_MAX_SIZE = getattr(settings, 'AVATAR_MAX_SIZE', 1024 * 1024)
AVATAR_THUMB_FORMAT = getattr(settings, 'AVATAR_THUMB_FORMAT', "JPEG")
AVATAR_THUMB_QUALITY = getattr(settings, 'AVATAR_THUMB_QUALITY', 85)
AVATAR_HASH_FILENAMES = getattr(settings, 'AVATAR_HASH_FILENAMES', False)
AVATAR_HASH_USERDIRNAMES = getattr(settings, 'AVATAR_HASH_USERDIRNAMES', False)
AVATAR_ALLOWED_FILE_EXTS = getattr(settings, 'AVATAR_ALLOWED_FILE_EXTS', None)
AVATAR_CACHE_TIMEOUT = getattr(settings, 'AVATAR_CACHE_TIMEOUT', 60*60)
| bsd-3-clause | -508,282,217,725,415,000 | 55.26087 | 100 | 0.76507 | false |
evaschalde/odoo | odoo.py | 257 | 5618 | #!/usr/bin/env python
#----------------------------------------------------------
# odoo cli
#
# To install your odoo development environment type:
#
# wget -O- https://raw.githubusercontent.com/odoo/odoo/8.0/odoo.py | python
#
# The setup_* subcommands used to bootstrap odoo are defined here inline and may
# only depends on the python 2.7 stdlib
#
# The rest of the subcommands are defined in odoo/cli or in <module>/cli by
# subclassing the Command object
#
#----------------------------------------------------------
import os
import re
import sys
import subprocess
GIT_HOOKS_PRE_PUSH = """
#!/usr/bin/env python2
import re
import sys
if re.search('github.com[:/]odoo/odoo.git$', sys.argv[2]):
print "Pushing to /odoo/odoo.git is forbidden, please push to odoo-dev, use --no-verify to override"
sys.exit(1)
"""
def printf(f,*l):
print "odoo:" + f % l
def run(*l):
if isinstance(l[0], list):
l = l[0]
printf("running %s", " ".join(l))
subprocess.check_call(l)
def git_locate():
# Locate git dir
# TODO add support for os.environ.get('GIT_DIR')
# check for an odoo child
if os.path.isfile('odoo/.git/config'):
os.chdir('odoo')
path = os.getcwd()
while path != os.path.abspath(os.sep):
gitconfig_path = os.path.join(path, '.git/config')
if os.path.isfile(gitconfig_path):
release_py = os.path.join(path, 'openerp/release.py')
if os.path.isfile(release_py):
break
path = os.path.dirname(path)
if path == os.path.abspath(os.sep):
path = None
return path
def cmd_setup_git():
git_dir = git_locate()
if git_dir:
printf('git repo found at %s',git_dir)
else:
run("git", "init", "odoo")
os.chdir('odoo')
git_dir = os.getcwd()
if git_dir:
# push sane config for git < 2.0, and hooks
#run('git','config','push.default','simple')
# alias
run('git','config','alias.st','status')
# merge bzr style
run('git','config','merge.commit','no')
        # make pull fast-forward only; 'pull.ff' requires git >= 2.0, so the 'pl' alias below covers git 1.x
run('git','config','pull.ff','only')
run('git','config','alias.pl','pull --ff-only')
pre_push_path = os.path.join(git_dir, '.git/hooks/pre-push')
open(pre_push_path,'w').write(GIT_HOOKS_PRE_PUSH.strip())
os.chmod(pre_push_path, 0755)
# setup odoo remote
run('git','config','remote.odoo.url','https://github.com/odoo/odoo.git')
run('git','config','remote.odoo.pushurl','[email protected]:odoo/odoo.git')
run('git','config','--add','remote.odoo.fetch','dummy')
run('git','config','--unset-all','remote.odoo.fetch')
run('git','config','--add','remote.odoo.fetch','+refs/heads/*:refs/remotes/odoo/*')
# setup odoo-dev remote
run('git','config','remote.odoo-dev.url','https://github.com/odoo-dev/odoo.git')
run('git','config','remote.odoo-dev.pushurl','[email protected]:odoo-dev/odoo.git')
run('git','remote','update')
# setup 8.0 branch
run('git','config','branch.8.0.remote','odoo')
run('git','config','branch.8.0.merge','refs/heads/8.0')
run('git','checkout','8.0')
else:
printf('no git repo found')
def cmd_setup_git_dev():
git_dir = git_locate()
if git_dir:
# setup odoo-dev remote
run('git','config','--add','remote.odoo-dev.fetch','dummy')
run('git','config','--unset-all','remote.odoo-dev.fetch')
run('git','config','--add','remote.odoo-dev.fetch','+refs/heads/*:refs/remotes/odoo-dev/*')
run('git','config','--add','remote.odoo-dev.fetch','+refs/pull/*:refs/remotes/odoo-dev/pull/*')
run('git','remote','update')
def cmd_setup_git_review():
git_dir = git_locate()
if git_dir:
# setup odoo-dev remote
run('git','config','--add','remote.odoo.fetch','dummy')
run('git','config','--unset-all','remote.odoo.fetch')
run('git','config','--add','remote.odoo.fetch','+refs/heads/*:refs/remotes/odoo/*')
run('git','config','--add','remote.odoo.fetch','+refs/tags/*:refs/remotes/odoo/tags/*')
run('git','config','--add','remote.odoo.fetch','+refs/pull/*:refs/remotes/odoo/pull/*')
def setup_deps_debian(git_dir):
debian_control_path = os.path.join(git_dir, 'debian/control')
debian_control = open(debian_control_path).read()
debs = re.findall('python-[0-9a-z]+',debian_control)
debs += ["postgresql"]
proc = subprocess.Popen(['sudo','apt-get','install'] + debs, stdin=open('/dev/tty'))
proc.communicate()
def cmd_setup_deps():
git_dir = git_locate()
if git_dir:
if os.path.isfile('/etc/debian_version'):
setup_deps_debian(git_dir)
def setup_pg_debian(git_dir):
cmd = ['sudo','su','-','postgres','-c','createuser -s %s' % os.environ['USER']]
subprocess.call(cmd)
def cmd_setup_pg():
git_dir = git_locate()
if git_dir:
if os.path.isfile('/etc/debian_version'):
setup_pg_debian(git_dir)
def cmd_setup():
cmd_setup_git()
cmd_setup_deps()
cmd_setup_pg()
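# Editor's note: subcommands are derived from the cmd_* names above (see main()),
# so, assuming this file is saved as odoo.py, usage looks like:
#   python odoo.py setup        # git clone + distro deps + postgres user
#   python odoo.py setup_git    # git remotes and hooks only
#   python odoo.py setup_deps   # distro packages only
# Any other invocation falls through to openerp.cli.main().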
def main():
# regsitry of commands
g = globals()
cmds = dict([(i[4:],g[i]) for i in g if i.startswith('cmd_')])
# if curl URL | python2 then use command setup
if len(sys.argv) == 1 and __file__ == '<stdin>':
cmd_setup()
elif len(sys.argv) == 2 and sys.argv[1] in cmds:
cmds[sys.argv[1]]()
else:
import openerp
openerp.cli.main()
if __name__ == "__main__":
main()
| agpl-3.0 | -5,820,298,900,065,875,000 | 33.89441 | 104 | 0.577252 | false |
internetarchive/warctools | hanzo/warctools/warc.py | 1 | 11905 | """An object to represent warc records, using the abstract record in
record.py"""
import re
import hashlib
from hanzo.warctools.record import ArchiveRecord, ArchiveParser
from hanzo.warctools.archive_detect import register_record_type
import uuid
bad_lines = 5 # when to give up looking for the version stamp
@ArchiveRecord.HEADERS(
DATE=b'WARC-Date',
TYPE=b'WARC-Type',
ID=b'WARC-Record-ID',
CONCURRENT_TO=b'WARC-Concurrent-To',
REFERS_TO=b'WARC-Refers-To',
REFERS_TO_TARGET_URI=b'WARC-Refers-To-Target-URI',
REFERS_TO_DATE=b'WARC-Refers-To-Date',
CONTENT_LENGTH=b'Content-Length',
CONTENT_TYPE=b'Content-Type',
URL=b'WARC-Target-URI',
BLOCK_DIGEST=b'WARC-Block-Digest',
PAYLOAD_DIGEST=b'WARC-Payload-Digest',
IP_ADDRESS=b'WARC-IP-Address',
FILENAME=b'WARC-Filename',
WARCINFO_ID=b'WARC-Warcinfo-ID',
PROFILE=b'WARC-Profile'
)
class WarcRecord(ArchiveRecord):
# Pylint is very bad at decorators, E1101 is the message that says
# a member variable does not exist
# pylint: disable-msg=E1101
VERSION = b"WARC/1.0"
VERSION18 = b"WARC/0.18"
VERSION17 = b"WARC/0.17"
RESPONSE = b"response"
RESOURCE = b"resource"
REQUEST = b"request"
REVISIT = b"revisit"
METADATA = b"metadata"
CONVERSION = b"conversion"
WARCINFO = b"warcinfo"
PROFILE_IDENTICAL_PAYLOAD_DIGEST = b"http://netpreserve.org/warc/1.0/revisit/identical-payload-digest"
TRAILER = b'\r\n\r\n'
def __init__(self, version=VERSION, headers=None, content=None,
errors=None, content_file=None):
"""
WarcRecord constructor.
Either content or content_file must be provided, but not both. If
content, which is a tuple (content_type, content_buffer), is provided,
when writing the warc record, any Content-Type and Content-Length that
appear in the supplied headers are ignored, and the values content[0]
and len(content[1]), respectively, are used.
When reading, the caller can stream content_file or use content, which is
lazily filled using content_file, and after which content_file is
unavailable.
"""
ArchiveRecord.__init__(self, headers, content, errors)
self.version = version
self.content_file = content_file
@property
def id(self):
return self.get_header(self.ID)
def _write_to(self, out, nl):
"""WARC Format:
VERSION NL
(Key: Value NL)*
NL
CONTENT NL
NL
don't write multi line headers
"""
out.write(self.version)
out.write(nl)
for k, v in self.headers:
if self.content_file is not None or k not in (self.CONTENT_TYPE, self.CONTENT_LENGTH):
out.write(k)
out.write(b": ")
out.write(v)
out.write(nl)
if self.content_file is not None:
out.write(nl) # end of header blank nl
while True:
buf = self.content_file.read(8192)
if buf == b'': break
out.write(buf)
else:
# if content tuple is provided, set Content-Type and
# Content-Length based on the values in the tuple
content_type, content_buffer = self.content
if content_type:
out.write(self.CONTENT_TYPE)
out.write(b": ")
out.write(content_type)
out.write(nl)
if content_buffer is None:
content_buffer = b""
content_length = len(content_buffer)
out.write(self.CONTENT_LENGTH)
out.write(b": ")
out.write(str(content_length).encode('ascii'))
out.write(nl)
out.write(nl) # end of header blank nl
if content_buffer:
out.write(content_buffer)
# end of record nl nl
out.write(nl)
out.write(nl)
out.flush()
def repair(self):
pass
def validate(self):
return self.errors
@classmethod
def make_parser(self):
return WarcParser()
def block_digest(self, content_buffer):
block_hash = hashlib.sha256()
block_hash.update(content_buffer)
digest = "sha256:%s" % block_hash.hexdigest()
return digest
@staticmethod
def warc_uuid(text):
return "<urn:uuid:{}>".format(uuid.UUID(hashlib.sha1(text).hexdigest()[0:32])).encode('ascii')
@staticmethod
def random_warc_uuid():
return "<urn:uuid:{}>".format(uuid.uuid4()).encode('ascii')
def rx(pat):
"""Helper to compile regexps with IGNORECASE option set."""
return re.compile(pat, flags=re.IGNORECASE)
version_rx = rx(br'^(?P<prefix>.*?)(?P<version>\s*WARC/(?P<number>.*?))'
b'(?P<nl>\r\n|\r|\n)\\Z')
# a header is key: <ws> value plus any following lines with leading whitespace
header_rx = rx(br'^(?P<name>.*?):\s?(?P<value>.*?)' b'(?P<nl>\r\n|\r|\n)\\Z')
value_rx = rx(br'^\s+(?P<value>.+?)' b'(?P<nl>\r\n|\r|\n)\\Z')
nl_rx = rx(b'^(?P<nl>\r\n|\r|\n\\Z)')
length_rx = rx(b'^' + WarcRecord.CONTENT_LENGTH + b'$')  # pylint: disable-msg=E1101
type_rx = rx(b'^' + WarcRecord.CONTENT_TYPE + b'$') # pylint: disable-msg=E1101
required_headers = set((
WarcRecord.TYPE.lower(), # pylint: disable-msg=E1101
WarcRecord.ID.lower(), # pylint: disable-msg=E1101
WarcRecord.CONTENT_LENGTH.lower(), # pylint: disable-msg=E1101
WarcRecord.DATE.lower(), # pylint: disable-msg=E1101
))
class WarcParser(ArchiveParser):
KNOWN_VERSIONS = set((b'1.0', b'0.17', b'0.18'))
def parse(self, stream, offset, line=None):
"""Reads a warc record from the stream, returns a tuple
(record, errors). Either records is null or errors is
null. Any record-specific errors are contained in the record -
errors is only used when *nothing* could be parsed"""
# pylint: disable-msg=E1101
errors = []
version = None
# find WARC/.*
if line is None:
line = stream.readline()
while line:
match = version_rx.match(line)
if match:
version = match.group('version')
if offset is not None:
offset += len(match.group('prefix'))
break
else:
if offset is not None:
offset += len(line)
if not nl_rx.match(line):
errors.append(('ignored line', line))
if len(errors) > bad_lines:
errors.append(('too many errors, giving up hope',))
return (None, errors, offset)
line = stream.readline()
if not line:
if version:
errors.append(('warc version but no headers', version))
return (None, errors, offset)
if line:
content_length = 0
content_type = None
record = WarcRecord(errors=errors, version=version)
if match.group('nl') != b'\x0d\x0a':
record.error('incorrect newline in version', match.group('nl'))
if match.group('number') not in self.KNOWN_VERSIONS:
record.error('version field is not known (%s)'
% (",".join(self.KNOWN_VERSIONS)),
match.group('number'))
prefix = match.group('prefix')
if prefix:
record.error('bad prefix on WARC version header', prefix)
#Read headers
line = stream.readline()
while line and not nl_rx.match(line):
#print 'header', repr(line)
match = header_rx.match(line)
if match:
if match.group('nl') != b'\x0d\x0a':
record.error('incorrect newline in header',
match.group('nl'))
name = match.group('name').strip()
value = [match.group('value').strip()]
#print 'match',name, value
line = stream.readline()
match = value_rx.match(line)
while match:
#print 'follow', repr(line)
if match.group('nl') != b'\x0d\x0a':
record.error('incorrect newline in follow header',
line, match.group('nl'))
value.append(match.group('value').strip())
line = stream.readline()
match = value_rx.match(line)
value = b" ".join(value)
record.headers.append((name, value))
if type_rx.match(name):
if value:
content_type = value
else:
record.error('invalid header', name, value)
elif length_rx.match(name):
try:
#print name, value
content_length = int(value)
#print content_length
except ValueError:
record.error('invalid header', name, value)
# have read blank line following headers
record.content_file = stream
record.content_file.bytes_to_eoc = content_length
# check mandatory headers
# WARC-Type WARC-Date WARC-Record-ID Content-Length
return (record, (), offset)
blank_rx = rx(br'^$')
register_record_type(version_rx, WarcRecord)
register_record_type(blank_rx, WarcRecord)
def make_response(id, date, url, content, request_id):
# pylint: disable-msg=E1101
headers = [
(WarcRecord.TYPE, WarcRecord.RESPONSE),
(WarcRecord.ID, id),
(WarcRecord.DATE, date),
(WarcRecord.URL, url),
]
if request_id:
headers.append((WarcRecord.CONCURRENT_TO, request_id))
record = WarcRecord(headers=headers, content=content)
return record
def make_request(request_id, date, url, content, response_id):
# pylint: disable-msg=E1101
headers = [
(WarcRecord.TYPE, WarcRecord.REQUEST),
(WarcRecord.ID, request_id),
(WarcRecord.DATE, date),
(WarcRecord.URL, url),
]
if response_id:
headers.append((WarcRecord.CONCURRENT_TO, response_id))
record = WarcRecord(headers=headers, content=content)
return record
def make_metadata(meta_id, date, content, concurrent_to=None, url=None):
# pylint: disable-msg=E1101
headers = [
(WarcRecord.TYPE, WarcRecord.METADATA),
(WarcRecord.ID, meta_id),
(WarcRecord.DATE, date),
]
if concurrent_to:
headers.append((WarcRecord.CONCURRENT_TO, concurrent_to))
if url:
headers.append((WarcRecord.URL, url))
record = WarcRecord(headers=headers, content=content)
return record
def make_conversion(conv_id, date, content, refers_to=None, url=None):
# pylint: disable-msg=E1101
headers = [
(WarcRecord.TYPE, WarcRecord.CONVERSION),
(WarcRecord.ID, conv_id),
(WarcRecord.DATE, date),
]
if refers_to:
headers.append((WarcRecord.REFERS_TO, refers_to))
if url:
headers.append((WarcRecord.URL, url))
record = WarcRecord(headers=headers, content=content)
return record
def warc_datetime_str(d):
s = d.isoformat()
if '.' in s:
s = s[:s.find('.')]
return (s + 'Z').encode('utf-8')
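def _make_response_demo():
    # Editor's note: illustrative wiring of the factory helpers above; the URL and
    # body are hypothetical.
    import datetime
    date = warc_datetime_str(datetime.datetime(2014, 1, 1, 12, 0))  # b'2014-01-01T12:00:00Z'
    return make_response(WarcRecord.random_warc_uuid(), date, b'http://example.com/',
                         (b'text/html', b'<html></html>'), None)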
| mit | 4,394,548,616,748,940,300 | 31.616438 | 106 | 0.549517 | false |
an7oine/WinVHS | Cygwin/lib/python2.7/encodings/euc_jis_2004.py | 816 | 1051 | #
# euc_jis_2004.py: Python Unicode Codec for EUC_JIS_2004
#
# Written by Hye-Shik Chang <[email protected]>
#
import _codecs_jp, codecs
import _multibytecodec as mbc
codec = _codecs_jp.getcodec('euc_jis_2004')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='euc_jis_2004',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
| gpl-3.0 | -4,974,546,387,813,277,000 | 25.948718 | 74 | 0.699334 | false |
jgmanzanas/CMNT_004_15 | project-addons/flask_middleware_connector/__openerp__.py | 1 | 1575 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 Comunitea All Rights Reserved
# $Omar Castiñeira Saavedra <[email protected]>$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': "Flask middleware connector",
'version': '1.0',
'category': 'Connector',
'description': """Connect to Visiotech flask middleware using Odoo connector""",
'author': 'Comunitea',
'website': 'www.comunitea.com',
"depends": ['base', 'product', 'connector', 'stock', 'custom_partner', 'crm_claim_rma', 'product_virtual_stock_conservative'],
"data": ["views/middleware_view.xml", "views/product_view.xml", 'views/res_users.xml',
"views/product_brand.xml", "views/claim_line_view.xml", "security/ir.model.access.csv"],
"installable": True
}
| agpl-3.0 | 3,590,445,221,710,274,000 | 46.69697 | 130 | 0.61817 | false |
pwong-mapr/private-hue | desktop/core/ext-py/Django-1.4.5/django/conf/locale/cs/formats.py | 86 | 1313 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. E Y'
TIME_FORMAT = 'G:i:s'
DATETIME_FORMAT = 'j. E Y G:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y G:i:s'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
'%Y-%m-%d', '%y-%m-%d', # '2006-10-25', '06-10-25'
# '%d. %B %Y', '%d. %b. %Y', # '25. October 2006', '25. Oct. 2006'
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = u'\xa0' # non-breaking space
NUMBER_GROUPING = 3
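# Illustrative check (not part of the locale file): Django feeds these format
# strings to datetime.strptime-style parsing, so each entry can be verified
# directly; the sample timestamp below is an arbitrary assumption.
#
# >>> import datetime
# >>> datetime.datetime.strptime('25.10.2006 14:30', '%d.%m.%Y %H:%M')
# datetime.datetime(2006, 10, 25, 14, 30)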
| apache-2.0 | -6,246,536,851,668,346,000 | 34.486486 | 77 | 0.555217 | false |
MauHernandez/cyclope | cyclope/apps/contacts/urls.py | 2 | 1044 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010-2015 Código Sur Sociedad Civil.
# All rights reserved.
#
# This file is part of Cyclope.
#
# Cyclope is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Cyclope is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls import patterns, url
from cyclope.views import ContentDeleteView
urlpatterns = patterns(
'',
url(r'^(?P<content_type>contact)/(?P<slug>[\w-]+)/delete/$', ContentDeleteView.as_view(), {'app': 'contacts'}, name='contacts-delete'),
)
| gpl-3.0 | -3,042,254,299,071,432,000 | 36.25 | 139 | 0.730585 | false |
nke001/attention-lvcsr | libs/blocks-extras/blocks/extras/bricks/__init__.py | 7 | 1928 | from theano import shared, tensor
from blocks.bricks import Feedforward
from blocks.bricks.base import application, lazy
from blocks.extras.initialization import PermutationMatrix
from blocks.extras.utils import check_valid_permutation
from blocks.utils import shared_floatx
class FixedPermutation(Feedforward):
"""Perform a fixed permutation of the input features.
Parameters
----------
order : ndarray-like
A 1-dimensional container containing a permutation
on the integers.
dot : bool, optional
Whether or not to perform the permutation by matrix
multiplication. This may be faster in some circumstances
but requires allocation of a permutation matrix.
"""
@lazy(allocation=['order'])
def __init__(self, order, dot=True, **kwargs):
self.order = order
self._dot = dot
super(FixedPermutation, self).__init__(**kwargs)
def _allocate(self):
self.order = check_valid_permutation(self.order)
if self.input_dim != len(self.order):
raise ValueError("input_dim does not match length of order "
"vector")
# No roles assigned here, since these are not learnable parameters.
if self._dot:
shape = (self.order.shape[0], self.order.shape[0])
self._matrix = shared_floatx(
PermutationMatrix(self.order).generate(None, shape))
else:
order = self.order.astype('int32')
assert order.min() == 0 # Catch highly unlikely downcast issue.
self._permutation = shared(order)
@property
def input_dim(self):
return len(self.order)
@application(inputs=['input_'], outputs=['output_'])
def apply(self, input_):
if self._dot:
return tensor.dot(input_, self._matrix)
else:
return tensor.take(input_, self._permutation, axis=1)
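# Minimal usage sketch (illustrative only, assuming a working Theano/Blocks
# install; the permutation order below is arbitrary):
#
#     import numpy
#     from theano import tensor, function
#     x = tensor.matrix('x')
#     brick = FixedPermutation(order=[2, 0, 1])
#     brick.allocate()                       # builds the permutation matrix
#     f = function([x], brick.apply(x))
#     f(numpy.arange(6, dtype='float32').reshape(2, 3))  # columns permuted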
| mit | 5,851,748,505,730,599,000 | 35.377358 | 76 | 0.634336 | false |
jn2840/bitcoin | qa/rpc-tests/mempool_packages.py | 1 | 7342 | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The Beardcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Test descendant package tracking code
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
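# e.g. satoshi_round(Decimal('0.123456789')) == Decimal('0.12345678'):
# amounts are truncated (ROUND_DOWN) to the 8 decimal places of a satoshi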
class MempoolPackagesTest(BitcoinTestFramework):
def setup_network(self):
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-maxorphantx=1000", "-relaypriority=0", "-debug"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-maxorphantx=1000", "-relaypriority=0", "-limitancestorcount=5", "-debug"]))
connect_nodes(self.nodes[0], 1)
self.is_network_split = False
self.sync_all()
# Build a transaction that spends parent_txid:vout
# Return amount sent
def chain_transaction(self, node, parent_txid, vout, value, fee, num_outputs):
send_value = satoshi_round((value - fee)/num_outputs)
inputs = [ {'txid' : parent_txid, 'vout' : vout} ]
outputs = {}
for i in xrange(num_outputs):
outputs[node.getnewaddress()] = send_value
rawtx = node.createrawtransaction(inputs, outputs)
signedtx = node.signrawtransaction(rawtx)
txid = node.sendrawtransaction(signedtx['hex'])
fulltx = node.getrawtransaction(txid, 1)
assert(len(fulltx['vout']) == num_outputs) # make sure we didn't generate a change output
return (txid, send_value)
def run_test(self):
''' Mine some blocks and have them mature. '''
self.nodes[0].generate(101)
utxo = self.nodes[0].listunspent(10)
txid = utxo[0]['txid']
vout = utxo[0]['vout']
value = utxo[0]['amount']
fee = Decimal("0.0001")
# 100 transactions off a confirmed tx should be fine
chain = []
for i in xrange(100):
(txid, sent_value) = self.chain_transaction(self.nodes[0], txid, 0, value, fee, 1)
value = sent_value
chain.append(txid)
# Check mempool has 100 transactions in it, and descendant
# count and fees should look correct
mempool = self.nodes[0].getrawmempool(True)
assert_equal(len(mempool), 100)
descendant_count = 1
descendant_fees = 0
descendant_size = 0
SATOSHIS = 100000000
for x in reversed(chain):
assert_equal(mempool[x]['descendantcount'], descendant_count)
descendant_fees += mempool[x]['fee']
assert_equal(mempool[x]['descendantfees'], SATOSHIS*descendant_fees)
descendant_size += mempool[x]['size']
assert_equal(mempool[x]['descendantsize'], descendant_size)
descendant_count += 1
# Adding one more transaction on to the chain should fail.
try:
self.chain_transaction(self.nodes[0], txid, vout, value, fee, 1)
except JSONRPCException as e:
print "too-long-ancestor-chain successfully rejected"
# TODO: check that node1's mempool is as expected
# TODO: test ancestor size limits
# Now test descendant chain limits
txid = utxo[1]['txid']
value = utxo[1]['amount']
vout = utxo[1]['vout']
transaction_package = []
# First create one parent tx with 10 children
(txid, sent_value) = self.chain_transaction(self.nodes[0], txid, vout, value, fee, 10)
parent_transaction = txid
for i in xrange(10):
transaction_package.append({'txid': txid, 'vout': i, 'amount': sent_value})
for i in xrange(1000):
utxo = transaction_package.pop(0)
try:
(txid, sent_value) = self.chain_transaction(self.nodes[0], utxo['txid'], utxo['vout'], utxo['amount'], fee, 10)
for j in xrange(10):
transaction_package.append({'txid': txid, 'vout': j, 'amount': sent_value})
if i == 998:
mempool = self.nodes[0].getrawmempool(True)
assert_equal(mempool[parent_transaction]['descendantcount'], 1000)
except JSONRPCException as e:
print e.error['message']
assert_equal(i, 999)
print "tx that would create too large descendant package successfully rejected"
# TODO: check that node1's mempool is as expected
# TODO: test descendant size limits
# Test reorg handling
# First, the basics:
self.nodes[0].generate(1)
sync_blocks(self.nodes)
self.nodes[1].invalidateblock(self.nodes[0].getbestblockhash())
self.nodes[1].reconsiderblock(self.nodes[0].getbestblockhash())
# Now test the case where node1 has a transaction T in its mempool that
# depends on transactions A and B which are in a mined block, and the
# block containing A and B is disconnected, AND B is not accepted back
# into node1's mempool because its ancestor count is too high.
# Create 8 transactions, like so:
# Tx0 -> Tx1 (vout0)
# \--> Tx2 (vout1) -> Tx3 -> Tx4 -> Tx5 -> Tx6 -> Tx7
#
# Mine them in the next block, then generate a new tx8 that spends
# Tx1 and Tx7, and add to node1's mempool, then disconnect the
# last block.
# Create tx0 with 2 outputs
utxo = self.nodes[0].listunspent()
txid = utxo[0]['txid']
value = utxo[0]['amount']
vout = utxo[0]['vout']
send_value = satoshi_round((value - fee)/2)
inputs = [ {'txid' : txid, 'vout' : vout} ]
outputs = {}
for i in xrange(2):
outputs[self.nodes[0].getnewaddress()] = send_value
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
signedtx = self.nodes[0].signrawtransaction(rawtx)
txid = self.nodes[0].sendrawtransaction(signedtx['hex'])
tx0_id = txid
value = send_value
# Create tx1
(tx1_id, tx1_value) = self.chain_transaction(self.nodes[0], tx0_id, 0, value, fee, 1)
# Create tx2-7
vout = 1
txid = tx0_id
for i in xrange(6):
(txid, sent_value) = self.chain_transaction(self.nodes[0], txid, vout, value, fee, 1)
vout = 0
value = sent_value
# Mine these in a block
self.nodes[0].generate(1)
self.sync_all()
# Now generate tx8, with a big fee
inputs = [ {'txid' : tx1_id, 'vout': 0}, {'txid' : txid, 'vout': 0} ]
outputs = { self.nodes[0].getnewaddress() : send_value + value - 4*fee }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
signedtx = self.nodes[0].signrawtransaction(rawtx)
txid = self.nodes[0].sendrawtransaction(signedtx['hex'])
sync_mempools(self.nodes)
# Now try to disconnect the tip on each node...
self.nodes[1].invalidateblock(self.nodes[1].getbestblockhash())
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
sync_blocks(self.nodes)
if __name__ == '__main__':
MempoolPackagesTest().main()
| mit | 9,267,213,072,915,682 | 40.247191 | 139 | 0.602833 | false |
alex-march/micropython | esp8266/modules/flashbdev.py | 7 | 2025 | import esp
class FlashBdev:
SEC_SIZE = 4096
START_SEC = esp.flash_user_start() // SEC_SIZE
NUM_BLK = 0x6b
def __init__(self, blocks=NUM_BLK):
self.blocks = blocks
def readblocks(self, n, buf):
#print("readblocks(%s, %x(%d))" % (n, id(buf), len(buf)))
esp.flash_read((n + self.START_SEC) * self.SEC_SIZE, buf)
def writeblocks(self, n, buf):
#print("writeblocks(%s, %x(%d))" % (n, id(buf), len(buf)))
#assert len(buf) <= self.SEC_SIZE, len(buf)
esp.flash_erase(n + self.START_SEC)
esp.flash_write((n + self.START_SEC) * self.SEC_SIZE, buf)
def ioctl(self, op, arg):
#print("ioctl(%d, %r)" % (op, arg))
if op == 4: # BP_IOCTL_SEC_COUNT
return self.blocks
if op == 5: # BP_IOCTL_SEC_SIZE
return self.SEC_SIZE
def set_bl_flash_size(real_size):
if real_size == 256*1024:
code = 1
elif real_size == 512*1024:
code = 0
elif real_size == 1024*1024:
code = 2
elif real_size == 2048*1024:
code = 3
elif real_size == 4096*1024:
code = 4
else:
code = 2
buf = bytearray(4096)
esp.flash_read(0, buf)
buf[3] = (buf[3] & 0xf) | (code << 4)
esp.flash_erase(0)
esp.flash_write(0, buf)
# If bootloader size ID doesn't correspond to real Flash size,
# fix bootloader value and reboot.
size = esp.flash_id() >> 16
# Check that it looks like a realistic power of 2 for flash sizes
# commonly used with esp8266
if 22 >= size >= 18:
size = 1 << size
if size != esp.flash_size():
import machine
import time
print("Bootloader Flash size appear to have been set incorrectly, trying to fix")
set_bl_flash_size(size)
machine.reset()
while 1: time.sleep(1)
size = esp.flash_size()
if size < 1024*1024:
bdev = None
else:
# 20K at the flash end is reserved for SDK params storage
bdev = FlashBdev((size - 20480) // FlashBdev.SEC_SIZE - FlashBdev.START_SEC)
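# Illustrative sketch (not executed here): boot code typically mounts a FAT
# filesystem on this block device, roughly along these lines. The exact
# module and VFS class names vary between MicroPython releases, so treat
# the names below as assumptions.
#
#     import uos
#     if bdev:
#         uos.mount(uos.VfsFat(bdev), '/')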
| mit | -2,381,036,306,828,812,300 | 28.779412 | 89 | 0.578272 | false |
SayCV/tools-OpenOCD | tools/xsvf_tools/svf2xsvf.py | 101 | 26710 | #!/usr/bin/python3.0
# Copyright 2008, SoftPLC Corporation http://softplc.com
# Dick Hollenbeck [email protected]
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, you may find one here:
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# or you may search the http://www.gnu.org website for the version 2 license,
# or you may write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
# A python program to convert an SVF file to an XSVF file. There is an
# option to include comments containing the source file line number from the original
# SVF file before each outputted XSVF statement.
#
# We deviate from the XSVF spec in that we introduce a new command called
# XWAITSTATE which directly flows from the SVF RUNTEST command. Unfortunately
# XRUNTEST was ill-conceived and is not used here. We also add support for the
# three Lattice extensions to SVF: LCOUNT, LDELAY, and LSDR. The xsvf file
# generated from this program is suitable for use with the xsvf player in
# OpenOCD with my modifications to xsvf.c.
#
# This program is written for python 3.0, and it is not easy to change this
# back to 2.x. You may find it easier to use python 3.x even if that means
# building it.
import re
import sys
import struct
# There are both ---<Lexer>--- and ---<Parser>--- sections to this program
if len( sys.argv ) < 3:
print("usage %s <svf_filename> <xsvf_filename>" % sys.argv[0])
exit(1)
inputFilename = sys.argv[1]
outputFilename = sys.argv[2]
doCOMMENTs = True # Save XCOMMENTs in the output xsvf file
#doCOMMENTs = False # Save XCOMMENTs in the output xsvf file
# pick your file encoding
file_encoding = 'ISO-8859-1'
#file_encoding = 'utf-8'
xrepeat = 0 # argument to XREPEAT, gives retry count for masked compares
#-----< Lexer >---------------------------------------------------------------
StateBin = (RESET,IDLE,
DRSELECT,DRCAPTURE,DRSHIFT,DREXIT1,DRPAUSE,DREXIT2,DRUPDATE,
IRSELECT,IRCAPTURE,IRSHIFT,IREXIT1,IRPAUSE,IREXIT2,IRUPDATE) = range(16)
# Any integer index into this tuple will be equal to its corresponding StateBin value
StateTxt = ("RESET","IDLE",
"DRSELECT","DRCAPTURE","DRSHIFT","DREXIT1","DRPAUSE","DREXIT2","DRUPDATE",
"IRSELECT","IRCAPTURE","IRSHIFT","IREXIT1","IRPAUSE","IREXIT2","IRUPDATE")
(XCOMPLETE,XTDOMASK,XSIR,XSDR,XRUNTEST,hole0,hole1,XREPEAT,XSDRSIZE,XSDRTDO,
XSETSDRMASKS,XSDRINC,XSDRB,XSDRC,XSDRE,XSDRTDOB,XSDRTDOC,
XSDRTDOE,XSTATE,XENDIR,XENDDR,XSIR2,XCOMMENT,XWAIT,XWAITSTATE,
LCOUNT,LDELAY,LSDR,XTRST) = range(29)
#Note: LCOUNT, LDELAY, and LSDR are Lattice extensions to SVF and provide a way to loop back
# and check a completion status, essentially waiting on a part until it signals that it is done.
# For example below: loop 25 times, each time through the loop do a LDELAY (same as a true RUNTEST)
# and exit the loop when the LSDR compare matches.
"""
LCOUNT 25;
! Step to DRPAUSE give 5 clocks and wait for 1.00e+000 SEC.
LDELAY DRPAUSE 5 TCK 1.00E-003 SEC;
! Test for the completed status. Match means pass.
! Loop back to LDELAY line if not match and loop count less than 25.
LSDR 1 TDI (0)
TDO (1);
"""
#XTRST is an opcode Xilinx seemed to have missed and it comes from the SVF TRST statement.
LineNumber = 1
def s_ident(scanner, token): return ("ident", token.upper(), LineNumber)
def s_hex(scanner, token):
global LineNumber
LineNumber = LineNumber + token.count('\n')
token = ''.join(token.split())
return ("hex", token[1:-1], LineNumber)
def s_int(scanner, token): return ("int", int(token), LineNumber)
def s_float(scanner, token): return ("float", float(token), LineNumber)
#def s_comment(scanner, token): return ("comment", token, LineNumber)
def s_semicolon(scanner, token): return ("semi", token, LineNumber)
def s_nl(scanner,token):
global LineNumber
LineNumber = LineNumber + 1
#print( 'LineNumber=', LineNumber, file=sys.stderr )
return None
#2.00E-002
scanner = re.Scanner([
(r"[a-zA-Z]\w*", s_ident),
# (r"[-+]?[0-9]+[.]?[0-9]*([eE][-+]?[0-9]+)?", s_float),
(r"[-+]?[0-9]+(([.][0-9eE+-]*)|([eE]+[-+]?[0-9]+))", s_float),
(r"\d+", s_int),
(r"\(([0-9a-fA-F]|\s)*\)", s_hex),
(r"(!|//).*$", None),
(r";", s_semicolon),
(r"\n",s_nl),
(r"\s*", None),
],
re.MULTILINE
)
# open the file using the given encoding
file = open( sys.argv[1], encoding=file_encoding )
# read all svf file input into string "input"
input = file.read()
file.close()
# Lexer:
# create a list of tuples containing (tokenType, tokenValue, LineNumber)
tokens = scanner.scan( input )[0]
input = None # allow gc to reclaim memory holding file
#for tokenType, tokenValue, ln in tokens: print( "line %d: %s" % (ln, tokenType), tokenValue )
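# For example, the SVF line 'SIR 8 TDI (0F);' lexes to tuples like
# ('ident', 'SIR', n), ('int', 8, n), ('ident', 'TDI', n), ('hex', '0F', n),
# ('semi', ';', n), where n is the source line number.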
#-----<parser>-----------------------------------------------------------------
tokVal = tokType = tokLn = None
tup = iter( tokens )
def nextTok():
"""
Function to read the next token from tup into tokType, tokVal, tokLn (linenumber)
which are globals.
"""
global tokType, tokVal, tokLn, tup
tokType, tokVal, tokLn = tup.__next__()
class ParseError(Exception):
"""A class to hold a parsing error message"""
def __init__(self, linenumber, token, message):
self.linenumber = linenumber
self.token = token
self.message = message
def __str__(self):
global inputFilename
return "Error in file \'%s\' at line %d near token %s\n %s" % (
inputFilename, self.linenumber, repr(self.token), self.message)
class MASKSET(object):
"""
Class MASKSET holds a set of bit vectors, all of which are related, will all
have the same length, and are associated with one of the seven shiftOps:
HIR, HDR, TIR, TDR, SIR, SDR, LSDR. One of these holds a mask, smask, tdi, tdo, and a
size.
"""
def __init__(self, name):
self.empty()
self.name = name
def empty(self):
self.mask = bytearray()
self.smask = bytearray()
self.tdi = bytearray()
self.tdo = bytearray()
self.size = 0
def syncLengths( self, sawTDI, sawTDO, sawMASK, sawSMASK, newSize ):
"""
Set all the lengths equal in the event some of the masks were
not seen as part of the last change set.
"""
if self.size == newSize:
return
if newSize == 0:
self.empty()
return
# If an SIR was given without a MASK(), then use a mask of all zeros.
# this is not consistent with the SVF spec, but it makes sense because
# it would be odd to be testing an instruction register read out of a
# tap without giving a mask for it. Also, lattice seems to agree and is
# generating SVF files that comply with this philosophy.
if self.name == 'SIR' and not sawMASK:
self.mask = bytearray( newSize )
if newSize != len(self.mask):
self.mask = bytearray( newSize )
if self.name == 'SDR': # leave mask for HIR,HDR,TIR,TDR,SIR zeros
for i in range( newSize ):
self.mask[i] = 1
if newSize != len(self.tdo):
self.tdo = bytearray( newSize )
if newSize != len(self.tdi):
self.tdi = bytearray( newSize )
if newSize != len(self.smask):
self.smask = bytearray( newSize )
self.size = newSize
#-----</MASKSET>-----
def makeBitArray( hexString, bitCount ):
"""
Converts a packed sequence of hex ascii characters into a bytearray where
each element in the array holds exactly one bit. Only "bitCount" bits are
scanned and these must be the least significant bits in the hex number. That
    is, it is legal to have some unused bits in the most significant hex nibble
    of the input "hexString". The string is scanned starting from the back end,
then just before returning we reverse the array. This way the append()
method can be used, which I assume is faster than an insert.
"""
global tokLn
a = bytearray()
length = bitCount
hexString = list(hexString)
hexString.reverse()
#print(hexString)
for c in hexString:
if length <= 0:
            break
c = int(c, 16)
for mask in [1,2,4,8]:
if length <= 0:
                break
length = length - 1
a.append( (c & mask) != 0 )
if length > 0:
raise ParseError( tokLn, hexString, "Insufficient hex characters for given length of %d" % bitCount )
a.reverse()
#print(a)
return a
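# For example, makeBitArray('0A', 4) returns bytearray([1, 0, 1, 0]):
# the four least significant bits of 0x0A, most significant bit first.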
def makeXSVFbytes( bitarray ):
"""
Make a bytearray which is contains the XSVF bits which will be written
directly to disk. The number of bytes needed is calculated from the size
of the argument bitarray.
"""
bitCount = len(bitarray)
byteCount = (bitCount+7)//8
ba = bytearray( byteCount )
firstBit = (bitCount % 8) - 1
if firstBit == -1:
firstBit = 7
bitNdx = 0
for byteNdx in range(byteCount):
mask = 1<<firstBit
byte = 0
while mask:
if bitarray[bitNdx]:
                byte |= mask
mask = mask >> 1
bitNdx = bitNdx + 1
ba[byteNdx] = byte
firstBit = 7
return ba
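# For example, makeXSVFbytes(bytearray([1, 0, 1, 0])) packs the four bits
# back into a single byte, giving bytearray(b'\x0a').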
def writeComment( outputFile, shiftOp_linenum, shiftOp ):
"""
Write an XCOMMENT record to outputFile
"""
comment = "%s @%d\0" % (shiftOp, shiftOp_linenum) # \0 is terminating nul
ba = bytearray(1)
ba[0] = XCOMMENT
ba += comment.encode()
outputFile.write( ba )
def combineBitVectors( trailer, meat, header ):
"""
    Combine the 3 bit vectors comprising a transmission. Since the least
significant bits are sent first, the header is put onto the list last so
they are sent first from that least significant position.
"""
ret = bytearray()
ret.extend( trailer )
ret.extend( meat )
ret.extend( header )
return ret
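# For example, combineBitVectors(bytearray([1]), bytearray([0, 0]), bytearray([1]))
# returns bytearray([1, 0, 0, 1]); the header bits sit at the end of the list,
# so they are shifted out first.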
def writeRUNTEST( outputFile, run_state, end_state, run_count, min_time, tokenTxt ):
"""
Write the output for the SVF RUNTEST command.
run_count - the number of clocks
min_time - the number of seconds
tokenTxt - either RUNTEST or LDELAY
"""
# convert from secs to usecs
min_time = int( min_time * 1000000)
# the SVF RUNTEST command does NOT map to the XSVF XRUNTEST command. Check the SVF spec, then
# read the XSVF command. They are not the same. Use an XSVF XWAITSTATE to
# implement the required behavior of the SVF RUNTEST command.
if doCOMMENTs:
writeComment( output, tokLn, tokenTxt )
if tokenTxt == 'RUNTEST':
obuf = bytearray(11)
obuf[0] = XWAITSTATE
obuf[1] = run_state
obuf[2] = end_state
struct.pack_into(">i", obuf, 3, run_count ) # big endian 4 byte int to obuf
struct.pack_into(">i", obuf, 7, min_time ) # big endian 4 byte int to obuf
outputFile.write( obuf )
else: # == 'LDELAY'
obuf = bytearray(10)
obuf[0] = LDELAY
obuf[1] = run_state
# LDELAY has no end_state
struct.pack_into(">i", obuf, 2, run_count ) # big endian 4 byte int to obuf
struct.pack_into(">i", obuf, 6, min_time ) # big endian 4 byte int to obuf
outputFile.write( obuf )
output = open( outputFilename, mode='wb' )
hir = MASKSET('HIR')
hdr = MASKSET('HDR')
tir = MASKSET('TIR')
tdr = MASKSET('TDR')
sir = MASKSET('SIR')
sdr = MASKSET('SDR')
expecting_eof = True
# one of the commands that take the shiftParts after the length, the parse
# template for all of these commands is identical
shiftOps = ('SDR', 'SIR', 'LSDR', 'HDR', 'HIR', 'TDR', 'TIR')
# the order must correspond to shiftOps, this holds the MASKSETS. 'LSDR' shares sdr with 'SDR'
shiftSets = (sdr, sir, sdr, hdr, hir, tdr, tir )
# what to expect as parameters to a shiftOp, i.e. after a SDR length or SIR length
shiftParts = ('TDI', 'TDO', 'MASK', 'SMASK')
# the set of legal states which can trail the RUNTEST command
run_state_allowed = ('IRPAUSE', 'DRPAUSE', 'RESET', 'IDLE')
enddr_state_allowed = ('DRPAUSE', 'IDLE')
endir_state_allowed = ('IRPAUSE', 'IDLE')
trst_mode_allowed = ('ON', 'OFF', 'Z', 'ABSENT')
enddr_state = IDLE
endir_state = IDLE
frequency = 1.00e+006 # HZ;
# change detection for xsdrsize and xtdomask
xsdrsize = -1 # the last one sent, send only on change
xtdomask = bytearray() # the last one sent, send only on change
# we use a number of single byte writes for the XSVF command below
cmdbuf = bytearray(1)
# Save the XREPEAT setting into the file as first thing.
obuf = bytearray(2)
obuf[0] = XREPEAT
obuf[1] = xrepeat
output.write( obuf )
try:
while 1:
expecting_eof = True
nextTok()
expecting_eof = False
# print( tokType, tokVal, tokLn )
if tokVal in shiftOps:
shiftOp_linenum = tokLn
shiftOp = tokVal
set = shiftSets[shiftOps.index(shiftOp)]
# set flags false, if we see one later, set that one true later
sawTDI = sawTDO = sawMASK = sawSMASK = False
nextTok()
if tokType != 'int':
raise ParseError( tokLn, tokVal, "Expecting 'int' giving %s length, got '%s'" % (shiftOp, tokType) )
length = tokVal
nextTok()
while tokVal != ';':
if tokVal not in shiftParts:
raise ParseError( tokLn, tokVal, "Expecting TDI, TDO, MASK, SMASK, or ';'")
shiftPart = tokVal
nextTok()
if tokType != 'hex':
raise ParseError( tokLn, tokVal, "Expecting hex bits" )
bits = makeBitArray( tokVal, length )
if shiftPart == 'TDI':
sawTDI = True
set.tdi = bits
elif shiftPart == 'TDO':
sawTDO = True
set.tdo = bits
elif shiftPart == 'MASK':
sawMASK = True
set.mask = bits
elif shiftPart == 'SMASK':
sawSMASK = True
set.smask = bits
nextTok()
set.syncLengths( sawTDI, sawTDO, sawMASK, sawSMASK, length )
# process all the gathered parameters and generate outputs here
if shiftOp == 'SIR':
if doCOMMENTs:
writeComment( output, shiftOp_linenum, 'SIR' )
tdi = combineBitVectors( tir.tdi, sir.tdi, hir.tdi )
if len(tdi) > 255:
obuf = bytearray(3)
obuf[0] = XSIR2
struct.pack_into( ">h", obuf, 1, len(tdi) )
else:
obuf = bytearray(2)
obuf[0] = XSIR
obuf[1] = len(tdi)
output.write( obuf )
obuf = makeXSVFbytes( tdi )
output.write( obuf )
elif shiftOp == 'SDR':
if doCOMMENTs:
writeComment( output, shiftOp_linenum, shiftOp )
if not sawTDO:
# pass a zero filled bit vector for the sdr.mask
mask = combineBitVectors( tdr.mask, bytearray(sdr.size), hdr.mask )
tdi = combineBitVectors( tdr.tdi, sdr.tdi, hdr.tdi )
if xsdrsize != len(tdi):
xsdrsize = len(tdi)
cmdbuf[0] = XSDRSIZE
output.write( cmdbuf )
obuf = bytearray(4)
struct.pack_into( ">i", obuf, 0, xsdrsize ) # big endian 4 byte int to obuf
output.write( obuf )
if xtdomask != mask:
xtdomask = mask
cmdbuf[0] = XTDOMASK
output.write( cmdbuf )
obuf = makeXSVFbytes( mask )
output.write( obuf )
cmdbuf[0] = XSDR
output.write( cmdbuf )
obuf = makeXSVFbytes( tdi )
output.write( obuf )
else:
mask = combineBitVectors( tdr.mask, sdr.mask, hdr.mask )
tdi = combineBitVectors( tdr.tdi, sdr.tdi, hdr.tdi )
tdo = combineBitVectors( tdr.tdo, sdr.tdo, hdr.tdo )
if xsdrsize != len(tdi):
xsdrsize = len(tdi)
cmdbuf[0] = XSDRSIZE
output.write( cmdbuf )
obuf = bytearray(4)
struct.pack_into(">i", obuf, 0, xsdrsize ) # big endian 4 byte int to obuf
output.write( obuf )
if xtdomask != mask:
xtdomask = mask
cmdbuf[0] = XTDOMASK
output.write( cmdbuf )
obuf = makeXSVFbytes( mask )
output.write( obuf )
cmdbuf[0] = XSDRTDO
output.write( cmdbuf )
obuf = makeXSVFbytes( tdi )
output.write( obuf )
obuf = makeXSVFbytes( tdo )
output.write( obuf )
#print( "len(tdo)=", len(tdo), "len(tdr.tdo)=", len(tdr.tdo), "len(sdr.tdo)=", len(sdr.tdo), "len(hdr.tdo)=", len(hdr.tdo) )
elif shiftOp == 'LSDR':
if doCOMMENTs:
writeComment( output, shiftOp_linenum, shiftOp )
mask = combineBitVectors( tdr.mask, sdr.mask, hdr.mask )
tdi = combineBitVectors( tdr.tdi, sdr.tdi, hdr.tdi )
tdo = combineBitVectors( tdr.tdo, sdr.tdo, hdr.tdo )
if xsdrsize != len(tdi):
xsdrsize = len(tdi)
cmdbuf[0] = XSDRSIZE
output.write( cmdbuf )
obuf = bytearray(4)
struct.pack_into(">i", obuf, 0, xsdrsize ) # big endian 4 byte int to obuf
output.write( obuf )
if xtdomask != mask:
xtdomask = mask
cmdbuf[0] = XTDOMASK
output.write( cmdbuf )
obuf = makeXSVFbytes( mask )
output.write( obuf )
cmdbuf[0] = LSDR
output.write( cmdbuf )
obuf = makeXSVFbytes( tdi )
output.write( obuf )
obuf = makeXSVFbytes( tdo )
output.write( obuf )
#print( "len(tdo)=", len(tdo), "len(tdr.tdo)=", len(tdr.tdo), "len(sdr.tdo)=", len(sdr.tdo), "len(hdr.tdo)=", len(hdr.tdo) )
elif tokVal == 'RUNTEST' or tokVal == 'LDELAY':
# e.g. from lattice tools:
# "RUNTEST IDLE 5 TCK 1.00E-003 SEC;"
saveTok = tokVal
nextTok()
min_time = 0
run_count = 0
max_time = 600 # ten minutes
if tokVal in run_state_allowed:
run_state = StateTxt.index(tokVal)
end_state = run_state # bottom of page 17 of SVF spec
nextTok()
if tokType != 'int' and tokType != 'float':
raise ParseError( tokLn, tokVal, "Expecting 'int' or 'float' after RUNTEST [run_state]")
            timeval = tokVal
nextTok()
if tokVal != 'TCK' and tokVal != 'SEC' and tokVal != 'SCK':
raise ParseError( tokLn, tokVal, "Expecting 'TCK' or 'SEC' or 'SCK' after RUNTEST [run_state] (run_count|min_time)")
if tokVal == 'TCK' or tokVal == 'SCK':
run_count = int( timeval )
else:
min_time = timeval
nextTok()
if tokType == 'int' or tokType == 'float':
min_time = tokVal
nextTok()
if tokVal != 'SEC':
raise ParseError( tokLn, tokVal, "Expecting 'SEC' after RUNTEST [run_state] run_count min_time")
nextTok()
if tokVal == 'MAXIMUM':
nextTok()
if tokType != 'int' and tokType != 'float':
raise ParseError( tokLn, tokVal, "Expecting 'max_time' after RUNTEST [run_state] min_time SEC MAXIMUM")
max_time = tokVal
nextTok()
if tokVal != 'SEC':
raise ParseError( tokLn, tokVal, "Expecting 'max_time' after RUNTEST [run_state] min_time SEC MAXIMUM max_time")
nextTok()
if tokVal == 'ENDSTATE':
nextTok()
if tokVal not in run_state_allowed:
raise ParseError( tokLn, tokVal, "Expecting 'run_state' after RUNTEST .... ENDSTATE")
end_state = StateTxt.index(tokVal)
nextTok()
if tokVal != ';':
raise ParseError( tokLn, tokVal, "Expecting ';' after RUNTEST ....")
# print( "run_count=", run_count, "min_time=", min_time,
# "max_time=", max_time, "run_state=", State[run_state], "end_state=", State[end_state] )
writeRUNTEST( output, run_state, end_state, run_count, min_time, saveTok )
elif tokVal == 'LCOUNT':
nextTok()
if tokType != 'int':
raise ParseError( tokLn, tokVal, "Expecting integer 'count' after LCOUNT")
loopCount = tokVal
nextTok()
if tokVal != ';':
raise ParseError( tokLn, tokVal, "Expecting ';' after LCOUNT count")
if doCOMMENTs:
writeComment( output, tokLn, 'LCOUNT' )
obuf = bytearray(5)
obuf[0] = LCOUNT
struct.pack_into(">i", obuf, 1, loopCount ) # big endian 4 byte int to obuf
output.write( obuf )
elif tokVal == 'ENDDR':
nextTok()
if tokVal not in enddr_state_allowed:
raise ParseError( tokLn, tokVal, "Expecting 'stable_state' after ENDDR. (one of: DRPAUSE, IDLE)")
enddr_state = StateTxt.index(tokVal)
nextTok()
if tokVal != ';':
raise ParseError( tokLn, tokVal, "Expecting ';' after ENDDR stable_state")
if doCOMMENTs:
writeComment( output, tokLn, 'ENDDR' )
obuf = bytearray(2)
obuf[0] = XENDDR
# Page 10 of the March 1999 SVF spec shows that RESET is also allowed here.
# Yet the XSVF spec has no provision for that, and uses a non-standard, i.e.
# boolean argument to XENDDR which only handles two of the 3 intended states.
obuf[1] = 1 if enddr_state == DRPAUSE else 0
output.write( obuf )
elif tokVal == 'ENDIR':
nextTok()
if tokVal not in endir_state_allowed:
raise ParseError( tokLn, tokVal, "Expecting 'stable_state' after ENDIR. (one of: IRPAUSE, IDLE)")
endir_state = StateTxt.index(tokVal)
nextTok()
if tokVal != ';':
raise ParseError( tokLn, tokVal, "Expecting ';' after ENDIR stable_state")
if doCOMMENTs:
writeComment( output, tokLn, 'ENDIR' )
obuf = bytearray(2)
obuf[0] = XENDIR
# Page 10 of the March 1999 SVF spec shows that RESET is also allowed here.
# Yet the XSVF spec has no provision for that, and uses a non-standard, i.e.
# boolean argument to XENDDR which only handles two of the 3 intended states.
obuf[1] = 1 if endir_state == IRPAUSE else 0
output.write( obuf )
elif tokVal == 'STATE':
nextTok()
ln = tokLn
while tokVal != ';':
if tokVal not in StateTxt:
raise ParseError( tokLn, tokVal, "Expecting 'stable_state' after STATE")
stable_state = StateTxt.index( tokVal )
if doCOMMENTs and ln != -1:
writeComment( output, ln, 'STATE' )
ln = -1 # save comment only once
obuf = bytearray(2)
obuf[0] = XSTATE
obuf[1] = stable_state
output.write( obuf )
nextTok()
elif tokVal == 'FREQUENCY':
nextTok()
if tokVal != ';':
if tokType != 'int' and tokType != 'float':
raise ParseError( tokLn, tokVal, "Expecting 'cycles HZ' after FREQUENCY")
frequency = tokVal
nextTok()
if tokVal != 'HZ':
raise ParseError( tokLn, tokVal, "Expecting 'HZ' after FREQUENCY cycles")
nextTok()
if tokVal != ';':
raise ParseError( tokLn, tokVal, "Expecting ';' after FREQUENCY cycles HZ")
elif tokVal == 'TRST':
nextTok()
if tokVal not in trst_mode_allowed:
raise ParseError( tokLn, tokVal, "Expecting 'ON|OFF|Z|ABSENT' after TRST")
trst_mode = tokVal
nextTok()
if tokVal != ';':
raise ParseError( tokLn, tokVal, "Expecting ';' after TRST trst_mode")
if doCOMMENTs:
writeComment( output, tokLn, 'TRST %s' % trst_mode )
obuf = bytearray( 2 )
obuf[0] = XTRST
obuf[1] = trst_mode_allowed.index( trst_mode ) # use the index as the binary argument to XTRST opcode
output.write( obuf )
else:
raise ParseError( tokLn, tokVal, "Unknown token '%s'" % tokVal)
except StopIteration:
if not expecting_eof:
print( "Unexpected End of File at line ", tokLn )
except ParseError as pe:
print( "\n", pe )
finally:
# print( "closing file" )
cmdbuf[0] = XCOMPLETE
output.write( cmdbuf )
output.close()
| gpl-2.0 | -4,119,163,166,708,930,000 | 35.639232 | 144 | 0.558854 | false |
lexus24/w16b_test | static/Brython3.1.1-20150328-091302/Lib/xml/etree/ElementTree.py | 730 | 61800 | #
# ElementTree
# $Id: ElementTree.py 3440 2008-07-18 14:45:01Z fredrik $
#
# light-weight XML support for Python 2.3 and later.
#
# history (since 1.2.6):
# 2005-11-12 fl added tostringlist/fromstringlist helpers
# 2006-07-05 fl merged in selected changes from the 1.3 sandbox
# 2006-07-05 fl removed support for 2.1 and earlier
# 2007-06-21 fl added deprecation/future warnings
# 2007-08-25 fl added doctype hook, added parser version attribute etc
# 2007-08-26 fl added new serializer code (better namespace handling, etc)
# 2007-08-27 fl warn for broken /tag searches on tree level
# 2007-09-02 fl added html/text methods to serializer (experimental)
# 2007-09-05 fl added method argument to tostring/tostringlist
# 2007-09-06 fl improved error handling
# 2007-09-13 fl added itertext, iterfind; assorted cleanups
# 2007-12-15 fl added C14N hooks, copy method (experimental)
#
# Copyright (c) 1999-2008 by Fredrik Lundh. All rights reserved.
#
# [email protected]
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2008 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/psf/license for licensing details.
__all__ = [
# public symbols
"Comment",
"dump",
"Element", "ElementTree",
"fromstring", "fromstringlist",
"iselement", "iterparse",
"parse", "ParseError",
"PI", "ProcessingInstruction",
"QName",
"SubElement",
"tostring", "tostringlist",
"TreeBuilder",
"VERSION",
"XML", "XMLID",
"XMLParser", "XMLTreeBuilder",
"register_namespace",
]
VERSION = "1.3.0"
##
# The <b>Element</b> type is a flexible container object, designed to
# store hierarchical data structures in memory. The type can be
# described as a cross between a list and a dictionary.
# <p>
# Each element has a number of properties associated with it:
# <ul>
# <li>a <i>tag</i>. This is a string identifying what kind of data
# this element represents (the element type, in other words).</li>
# <li>a number of <i>attributes</i>, stored in a Python dictionary.</li>
# <li>a <i>text</i> string.</li>
# <li>an optional <i>tail</i> string.</li>
# <li>a number of <i>child elements</i>, stored in a Python sequence</li>
# </ul>
#
# To create an element instance, use the {@link #Element} constructor
# or the {@link #SubElement} factory function.
# <p>
# The {@link #ElementTree} class can be used to wrap an element
# structure, and convert it from and to XML.
##
import sys
import re
import warnings
import io
import contextlib
from . import ElementPath
##
# Parser error. This is a subclass of <b>SyntaxError</b>.
# <p>
# In addition to the exception value, an exception instance contains a
# specific exception code in the <b>code</b> attribute, and the line and
# column of the error in the <b>position</b> attribute.
class ParseError(SyntaxError):
pass
# --------------------------------------------------------------------
##
# Checks if an object appears to be a valid element object.
#
# @param An element instance.
# @return A true value if this is an element object.
# @defreturn flag
def iselement(element):
# FIXME: not sure about this;
# isinstance(element, Element) or look for tag/attrib/text attributes
return hasattr(element, 'tag')
##
# Element class. This class defines the Element interface, and
# provides a reference implementation of this interface.
# <p>
# The element name, attribute names, and attribute values can be
# either ASCII strings (ordinary Python strings containing only 7-bit
# ASCII characters) or Unicode strings.
#
# @param tag The element name.
# @param attrib An optional dictionary, containing element attributes.
# @param **extra Additional attributes, given as keyword arguments.
# @see Element
# @see SubElement
# @see Comment
# @see ProcessingInstruction
class Element:
# <tag attrib>text<child/>...</tag>tail
##
# (Attribute) Element tag.
tag = None
##
# (Attribute) Element attribute dictionary. Where possible, use
# {@link #Element.get},
# {@link #Element.set},
# {@link #Element.keys}, and
# {@link #Element.items} to access
# element attributes.
attrib = None
##
# (Attribute) Text before first subelement. This is either a
# string or the value None. Note that if there was no text, this
# attribute may be either None or an empty string, depending on
# the parser.
text = None
##
# (Attribute) Text after this element's end tag, but before the
# next sibling element's start tag. This is either a string or
# the value None. Note that if there was no text, this attribute
# may be either None or an empty string, depending on the parser.
tail = None # text after end tag, if any
# constructor
def __init__(self, tag, attrib={}, **extra):
if not isinstance(attrib, dict):
raise TypeError("attrib must be dict, not %s" % (
attrib.__class__.__name__,))
attrib = attrib.copy()
attrib.update(extra)
self.tag = tag
self.attrib = attrib
self._children = []
def __repr__(self):
return "<Element %s at 0x%x>" % (repr(self.tag), id(self))
##
# Creates a new element object of the same type as this element.
#
# @param tag Element tag.
# @param attrib Element attributes, given as a dictionary.
# @return A new element instance.
def makeelement(self, tag, attrib):
return self.__class__(tag, attrib)
##
# (Experimental) Copies the current element. This creates a
# shallow copy; subelements will be shared with the original tree.
#
# @return A new element instance.
def copy(self):
elem = self.makeelement(self.tag, self.attrib)
elem.text = self.text
elem.tail = self.tail
elem[:] = self
return elem
##
# Returns the number of subelements. Note that this only counts
# full elements; to check if there's any content in an element, you
# have to check both the length and the <b>text</b> attribute.
#
# @return The number of subelements.
def __len__(self):
return len(self._children)
def __bool__(self):
warnings.warn(
"The behavior of this method will change in future versions. "
"Use specific 'len(elem)' or 'elem is not None' test instead.",
FutureWarning, stacklevel=2
)
return len(self._children) != 0 # emulate old behaviour, for now
##
# Returns the given subelement, by index.
#
# @param index What subelement to return.
# @return The given subelement.
# @exception IndexError If the given element does not exist.
def __getitem__(self, index):
return self._children[index]
##
# Replaces the given subelement, by index.
#
# @param index What subelement to replace.
# @param element The new element value.
# @exception IndexError If the given element does not exist.
def __setitem__(self, index, element):
# if isinstance(index, slice):
# for elt in element:
# assert iselement(elt)
# else:
# assert iselement(element)
self._children[index] = element
##
# Deletes the given subelement, by index.
#
# @param index What subelement to delete.
# @exception IndexError If the given element does not exist.
def __delitem__(self, index):
del self._children[index]
##
# Adds a subelement to the end of this element. In document order,
# the new element will appear after the last existing subelement (or
# directly after the text, if it's the first subelement), but before
# the end tag for this element.
#
# @param element The element to add.
def append(self, element):
self._assert_is_element(element)
self._children.append(element)
##
# Appends subelements from a sequence.
#
# @param elements A sequence object with zero or more elements.
# @since 1.3
def extend(self, elements):
for element in elements:
self._assert_is_element(element)
self._children.extend(elements)
##
# Inserts a subelement at the given position in this element.
#
# @param index Where to insert the new subelement.
def insert(self, index, element):
self._assert_is_element(element)
self._children.insert(index, element)
def _assert_is_element(self, e):
# Need to refer to the actual Python implementation, not the
# shadowing C implementation.
if not isinstance(e, _Element):
raise TypeError('expected an Element, not %s' % type(e).__name__)
##
# Removes a matching subelement. Unlike the <b>find</b> methods,
# this method compares elements based on identity, not on tag
# value or contents. To remove subelements by other means, the
# easiest way is often to use a list comprehension to select what
# elements to keep, and use slice assignment to update the parent
# element.
#
# @param element What element to remove.
# @exception ValueError If a matching element could not be found.
def remove(self, element):
# assert iselement(element)
self._children.remove(element)
##
# (Deprecated) Returns all subelements. The elements are returned
# in document order.
#
# @return A list of subelements.
# @defreturn list of Element instances
def getchildren(self):
warnings.warn(
"This method will be removed in future versions. "
"Use 'list(elem)' or iteration over elem instead.",
DeprecationWarning, stacklevel=2
)
return self._children
##
# Finds the first matching subelement, by tag name or path.
#
# @param path What element to look for.
# @keyparam namespaces Optional namespace prefix map.
# @return The first matching element, or None if no element was found.
# @defreturn Element or None
def find(self, path, namespaces=None):
return ElementPath.find(self, path, namespaces)
##
# Finds text for the first matching subelement, by tag name or path.
#
# @param path What element to look for.
# @param default What to return if the element was not found.
# @keyparam namespaces Optional namespace prefix map.
# @return The text content of the first matching element, or the
    #     default value if no element was found.  Note that if the element
# is found, but has no text content, this method returns an
# empty string.
# @defreturn string
def findtext(self, path, default=None, namespaces=None):
return ElementPath.findtext(self, path, default, namespaces)
##
# Finds all matching subelements, by tag name or path.
#
# @param path What element to look for.
# @keyparam namespaces Optional namespace prefix map.
# @return A list or other sequence containing all matching elements,
# in document order.
# @defreturn list of Element instances
def findall(self, path, namespaces=None):
return ElementPath.findall(self, path, namespaces)
##
# Finds all matching subelements, by tag name or path.
#
# @param path What element to look for.
# @keyparam namespaces Optional namespace prefix map.
# @return An iterator or sequence containing all matching elements,
# in document order.
# @defreturn a generated sequence of Element instances
def iterfind(self, path, namespaces=None):
return ElementPath.iterfind(self, path, namespaces)
##
# Resets an element. This function removes all subelements, clears
# all attributes, and sets the <b>text</b> and <b>tail</b> attributes
# to None.
def clear(self):
self.attrib.clear()
self._children = []
self.text = self.tail = None
##
# Gets an element attribute. Equivalent to <b>attrib.get</b>, but
# some implementations may handle this a bit more efficiently.
#
# @param key What attribute to look for.
# @param default What to return if the attribute was not found.
# @return The attribute value, or the default value, if the
# attribute was not found.
# @defreturn string or None
def get(self, key, default=None):
return self.attrib.get(key, default)
##
# Sets an element attribute. Equivalent to <b>attrib[key] = value</b>,
# but some implementations may handle this a bit more efficiently.
#
# @param key What attribute to set.
# @param value The attribute value.
def set(self, key, value):
self.attrib[key] = value
##
# Gets a list of attribute names. The names are returned in an
# arbitrary order (just like for an ordinary Python dictionary).
# Equivalent to <b>attrib.keys()</b>.
#
# @return A list of element attribute names.
# @defreturn list of strings
def keys(self):
return self.attrib.keys()
##
# Gets element attributes, as a sequence. The attributes are
# returned in an arbitrary order. Equivalent to <b>attrib.items()</b>.
#
# @return A list of (name, value) tuples for all attributes.
# @defreturn list of (string, string) tuples
def items(self):
return self.attrib.items()
##
# Creates a tree iterator. The iterator loops over this element
# and all subelements, in document order, and returns all elements
# with a matching tag.
# <p>
# If the tree structure is modified during iteration, new or removed
# elements may or may not be included. To get a stable set, use the
# list() function on the iterator, and loop over the resulting list.
#
# @param tag What tags to look for (default is to return all elements).
# @return An iterator containing all the matching elements.
# @defreturn iterator
def iter(self, tag=None):
if tag == "*":
tag = None
if tag is None or self.tag == tag:
yield self
for e in self._children:
for e in e.iter(tag):
yield e
# compatibility
def getiterator(self, tag=None):
# Change for a DeprecationWarning in 1.4
warnings.warn(
"This method will be removed in future versions. "
"Use 'elem.iter()' or 'list(elem.iter())' instead.",
PendingDeprecationWarning, stacklevel=2
)
return list(self.iter(tag))
##
# Creates a text iterator. The iterator loops over this element
# and all subelements, in document order, and returns all inner
# text.
#
# @return An iterator containing all inner text.
# @defreturn iterator
def itertext(self):
tag = self.tag
if not isinstance(tag, str) and tag is not None:
return
if self.text:
yield self.text
for e in self:
for s in e.itertext():
yield s
if e.tail:
yield e.tail
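    # For example, for <a>1<b>2</b>3</a>, ''.join(a.itertext()) gives '123'.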
# compatibility
_Element = _ElementInterface = Element
##
# Subelement factory. This function creates an element instance, and
# appends it to an existing element.
# <p>
# The element name, attribute names, and attribute values can be
# either 8-bit ASCII strings or Unicode strings.
#
# @param parent The parent element.
# @param tag The subelement name.
# @param attrib An optional dictionary, containing element attributes.
# @param **extra Additional attributes, given as keyword arguments.
# @return An element instance.
# @defreturn Element
def SubElement(parent, tag, attrib={}, **extra):
attrib = attrib.copy()
attrib.update(extra)
element = parent.makeelement(tag, attrib)
parent.append(element)
return element
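# For example (tag and attribute names are arbitrary):
#
#     root = Element("root")
#     child = SubElement(root, "child", {"id": "1"})
#     assert len(root) == 1 and root[0] is child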
##
# Comment element factory. This factory function creates a special
# element that will be serialized as an XML comment by the standard
# serializer.
# <p>
# The comment string can be either an 8-bit ASCII string or a Unicode
# string.
#
# @param text A string containing the comment string.
# @return An element instance, representing a comment.
# @defreturn Element
def Comment(text=None):
element = Element(Comment)
element.text = text
return element
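# For example, Comment("generated") serializes as <!--generated-->.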
##
# PI element factory. This factory function creates a special element
# that will be serialized as an XML processing instruction by the standard
# serializer.
#
# @param target A string containing the PI target.
# @param text A string containing the PI contents, if any.
# @return An element instance, representing a PI.
# @defreturn Element
def ProcessingInstruction(target, text=None):
element = Element(ProcessingInstruction)
element.text = target
if text:
element.text = element.text + " " + text
return element
PI = ProcessingInstruction
##
# QName wrapper. This can be used to wrap a QName attribute value, in
# order to get proper namespace handling on output.
#
# @param text A string containing the QName value, in the form {uri}local,
# or, if the tag argument is given, the URI part of a QName.
# @param tag Optional tag. If given, the first argument is interpreted as
# an URI, and this argument is interpreted as a local name.
# @return An opaque object, representing the QName.
class QName:
def __init__(self, text_or_uri, tag=None):
if tag:
text_or_uri = "{%s}%s" % (text_or_uri, tag)
self.text = text_or_uri
def __str__(self):
return self.text
def __repr__(self):
return '<QName %r>' % (self.text,)
def __hash__(self):
return hash(self.text)
def __le__(self, other):
if isinstance(other, QName):
return self.text <= other.text
return self.text <= other
def __lt__(self, other):
if isinstance(other, QName):
return self.text < other.text
return self.text < other
def __ge__(self, other):
if isinstance(other, QName):
return self.text >= other.text
return self.text >= other
def __gt__(self, other):
if isinstance(other, QName):
return self.text > other.text
return self.text > other
def __eq__(self, other):
if isinstance(other, QName):
return self.text == other.text
return self.text == other
def __ne__(self, other):
if isinstance(other, QName):
return self.text != other.text
return self.text != other
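# For example (illustrative URI): QName("http://ns.example/x", "tag") wraps
# "{http://ns.example/x}tag", which the serializer emits as a prefixed name
# such as ns0:tag together with a matching xmlns declaration.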
# --------------------------------------------------------------------
##
# ElementTree wrapper class. This class represents an entire element
# hierarchy, and adds some extra support for serialization to and from
# standard XML.
#
# @param element Optional root element.
# @keyparam file Optional file handle or file name. If given, the
# tree is initialized with the contents of this XML file.
class ElementTree:
def __init__(self, element=None, file=None):
# assert element is None or iselement(element)
self._root = element # first node
if file:
self.parse(file)
##
# Gets the root element for this tree.
#
# @return An element instance.
# @defreturn Element
def getroot(self):
return self._root
##
# Replaces the root element for this tree. This discards the
# current contents of the tree, and replaces it with the given
# element. Use with care.
#
# @param element An element instance.
def _setroot(self, element):
# assert iselement(element)
self._root = element
##
# Loads an external XML document into this element tree.
#
# @param source A file name or file object. If a file object is
# given, it only has to implement a <b>read(n)</b> method.
# @keyparam parser An optional parser instance. If not given, the
# standard {@link XMLParser} parser is used.
# @return The document root element.
# @defreturn Element
# @exception ParseError If the parser fails to parse the document.
def parse(self, source, parser=None):
close_source = False
if not hasattr(source, "read"):
source = open(source, "rb")
close_source = True
try:
if not parser:
parser = XMLParser(target=TreeBuilder())
while 1:
data = source.read(65536)
if not data:
break
parser.feed(data)
self._root = parser.close()
return self._root
finally:
if close_source:
source.close()
##
# Creates a tree iterator for the root element. The iterator loops
# over all elements in this tree, in document order.
#
# @param tag What tags to look for (default is to return all elements)
# @return An iterator.
# @defreturn iterator
def iter(self, tag=None):
# assert self._root is not None
return self._root.iter(tag)
# compatibility
def getiterator(self, tag=None):
# Change for a DeprecationWarning in 1.4
warnings.warn(
"This method will be removed in future versions. "
"Use 'tree.iter()' or 'list(tree.iter())' instead.",
PendingDeprecationWarning, stacklevel=2
)
return list(self.iter(tag))
##
# Same as getroot().find(path), starting at the root of the tree.
#
# @param path What element to look for.
# @keyparam namespaces Optional namespace prefix map.
# @return The first matching element, or None if no element was found.
# @defreturn Element or None
def find(self, path, namespaces=None):
# assert self._root is not None
if path[:1] == "/":
path = "." + path
warnings.warn(
"This search is broken in 1.3 and earlier, and will be "
"fixed in a future version. If you rely on the current "
"behaviour, change it to %r" % path,
FutureWarning, stacklevel=2
)
return self._root.find(path, namespaces)
##
# Same as getroot().findtext(path), starting at the root of the tree.
#
# @param path What element to look for.
# @param default What to return if the element was not found.
# @keyparam namespaces Optional namespace prefix map.
# @return The text content of the first matching element, or the
    #     default value if no element was found.  Note that if the element
# is found, but has no text content, this method returns an
# empty string.
# @defreturn string
def findtext(self, path, default=None, namespaces=None):
# assert self._root is not None
if path[:1] == "/":
path = "." + path
warnings.warn(
"This search is broken in 1.3 and earlier, and will be "
"fixed in a future version. If you rely on the current "
"behaviour, change it to %r" % path,
FutureWarning, stacklevel=2
)
return self._root.findtext(path, default, namespaces)
##
# Same as getroot().findall(path), starting at the root of the tree.
#
# @param path What element to look for.
# @keyparam namespaces Optional namespace prefix map.
# @return A list or iterator containing all matching elements,
# in document order.
# @defreturn list of Element instances
def findall(self, path, namespaces=None):
# assert self._root is not None
if path[:1] == "/":
path = "." + path
warnings.warn(
"This search is broken in 1.3 and earlier, and will be "
"fixed in a future version. If you rely on the current "
"behaviour, change it to %r" % path,
FutureWarning, stacklevel=2
)
return self._root.findall(path, namespaces)
##
# Finds all matching subelements, by tag name or path.
# Same as getroot().iterfind(path).
#
# @param path What element to look for.
# @keyparam namespaces Optional namespace prefix map.
# @return An iterator or sequence containing all matching elements,
# in document order.
# @defreturn a generated sequence of Element instances
def iterfind(self, path, namespaces=None):
# assert self._root is not None
if path[:1] == "/":
path = "." + path
warnings.warn(
"This search is broken in 1.3 and earlier, and will be "
"fixed in a future version. If you rely on the current "
"behaviour, change it to %r" % path,
FutureWarning, stacklevel=2
)
return self._root.iterfind(path, namespaces)
##
# Writes the element tree to a file, as XML.
#
# @def write(file, **options)
# @param file A file name, or a file object opened for writing.
# @param **options Options, given as keyword arguments.
# @keyparam encoding Optional output encoding (default is US-ASCII).
# Use "unicode" to return a Unicode string.
# @keyparam xml_declaration Controls if an XML declaration should
# be added to the file. Use False for never, True for always,
# None for only if not US-ASCII or UTF-8 or Unicode. None is default.
# @keyparam default_namespace Sets the default XML namespace (for "xmlns").
# @keyparam method Optional output method ("xml", "html", "text" or
# "c14n"; default is "xml").
def write(self, file_or_filename,
encoding=None,
xml_declaration=None,
default_namespace=None,
method=None):
if not method:
method = "xml"
elif method not in _serialize:
raise ValueError("unknown method %r" % method)
if not encoding:
if method == "c14n":
encoding = "utf-8"
else:
encoding = "us-ascii"
else:
encoding = encoding.lower()
with _get_writer(file_or_filename, encoding) as write:
if method == "xml" and (xml_declaration or
(xml_declaration is None and
encoding not in ("utf-8", "us-ascii", "unicode"))):
declared_encoding = encoding
if encoding == "unicode":
# Retrieve the default encoding for the xml declaration
import locale
declared_encoding = locale.getpreferredencoding()
write("<?xml version='1.0' encoding='%s'?>\n" % (
declared_encoding,))
if method == "text":
_serialize_text(write, self._root)
else:
qnames, namespaces = _namespaces(self._root, default_namespace)
serialize = _serialize[method]
serialize(write, self._root, qnames, namespaces)
def write_c14n(self, file):
# lxml.etree compatibility. use output method instead
return self.write(file, method="c14n")
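# Illustrative usage sketch (not part of the original module): serializing a
# small tree with ElementTree.write(). The element names and the output file
# name are made up for the example.
#
#     from xml.etree.ElementTree import Element, SubElement, ElementTree
#     root = Element("root")
#     SubElement(root, "child").text = "hello"
#     ElementTree(root).write("out.xml", encoding="utf-8",
#                             xml_declaration=True)
#     # out.xml now contains:
#     # <?xml version='1.0' encoding='utf-8'?>
#     # <root><child>hello</child></root>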
# --------------------------------------------------------------------
# serialization support
@contextlib.contextmanager
def _get_writer(file_or_filename, encoding):
# returns a text write method and releases all resources after use
try:
write = file_or_filename.write
except AttributeError:
# file_or_filename is a file name
if encoding == "unicode":
file = open(file_or_filename, "w")
else:
file = open(file_or_filename, "w", encoding=encoding,
errors="xmlcharrefreplace")
with file:
yield file.write
else:
# file_or_filename is a file-like object
# encoding determines if it is a text or binary writer
if encoding == "unicode":
# use a text writer as is
yield write
else:
# wrap a binary writer with TextIOWrapper
with contextlib.ExitStack() as stack:
if isinstance(file_or_filename, io.BufferedIOBase):
file = file_or_filename
elif isinstance(file_or_filename, io.RawIOBase):
file = io.BufferedWriter(file_or_filename)
# Keep the original file open when the BufferedWriter is
# destroyed
stack.callback(file.detach)
else:
# This is to handle passed objects that aren't in the
# IOBase hierarchy, but just have a write method
file = io.BufferedIOBase()
file.writable = lambda: True
file.write = write
try:
# TextIOWrapper uses these methods to determine
# if BOM (for UTF-16, etc) should be added
file.seekable = file_or_filename.seekable
file.tell = file_or_filename.tell
except AttributeError:
pass
file = io.TextIOWrapper(file,
encoding=encoding,
errors="xmlcharrefreplace",
newline="\n")
# Keep the original file open when the TextIOWrapper is
# destroyed
stack.callback(file.detach)
yield file.write
def _namespaces(elem, default_namespace=None):
# identify namespaces used in this tree
# maps qnames to *encoded* prefix:local names
qnames = {None: None}
# maps uri:s to prefixes
namespaces = {}
if default_namespace:
namespaces[default_namespace] = ""
def add_qname(qname):
# calculate serialized qname representation
try:
if qname[:1] == "{":
uri, tag = qname[1:].rsplit("}", 1)
prefix = namespaces.get(uri)
if prefix is None:
prefix = _namespace_map.get(uri)
if prefix is None:
prefix = "ns%d" % len(namespaces)
if prefix != "xml":
namespaces[uri] = prefix
if prefix:
qnames[qname] = "%s:%s" % (prefix, tag)
else:
qnames[qname] = tag # default element
else:
if default_namespace:
# FIXME: can this be handled in XML 1.0?
raise ValueError(
"cannot use non-qualified names with "
"default_namespace option"
)
qnames[qname] = qname
except TypeError:
_raise_serialization_error(qname)
# populate qname and namespaces table
for elem in elem.iter():
tag = elem.tag
if isinstance(tag, QName):
if tag.text not in qnames:
add_qname(tag.text)
elif isinstance(tag, str):
if tag not in qnames:
add_qname(tag)
elif tag is not None and tag is not Comment and tag is not PI:
_raise_serialization_error(tag)
for key, value in elem.items():
if isinstance(key, QName):
key = key.text
if key not in qnames:
add_qname(key)
if isinstance(value, QName) and value.text not in qnames:
add_qname(value.text)
text = elem.text
if isinstance(text, QName) and text.text not in qnames:
add_qname(text.text)
return qnames, namespaces
def _serialize_xml(write, elem, qnames, namespaces):
tag = elem.tag
text = elem.text
if tag is Comment:
write("<!--%s-->" % text)
elif tag is ProcessingInstruction:
write("<?%s?>" % text)
else:
tag = qnames[tag]
if tag is None:
if text:
write(_escape_cdata(text))
for e in elem:
_serialize_xml(write, e, qnames, None)
else:
write("<" + tag)
items = list(elem.items())
if items or namespaces:
if namespaces:
for v, k in sorted(namespaces.items(),
key=lambda x: x[1]): # sort on prefix
if k:
k = ":" + k
write(" xmlns%s=\"%s\"" % (
k,
_escape_attrib(v)
))
for k, v in sorted(items): # lexical order
if isinstance(k, QName):
k = k.text
if isinstance(v, QName):
v = qnames[v.text]
else:
v = _escape_attrib(v)
write(" %s=\"%s\"" % (qnames[k], v))
if text or len(elem):
write(">")
if text:
write(_escape_cdata(text))
for e in elem:
_serialize_xml(write, e, qnames, None)
write("</" + tag + ">")
else:
write(" />")
if elem.tail:
write(_escape_cdata(elem.tail))
HTML_EMPTY = ("area", "base", "basefont", "br", "col", "frame", "hr",
"img", "input", "isindex", "link", "meta", "param")
try:
HTML_EMPTY = set(HTML_EMPTY)
except NameError:
pass
def _serialize_html(write, elem, qnames, namespaces):
tag = elem.tag
text = elem.text
if tag is Comment:
write("<!--%s-->" % _escape_cdata(text))
elif tag is ProcessingInstruction:
write("<?%s?>" % _escape_cdata(text))
else:
tag = qnames[tag]
if tag is None:
if text:
write(_escape_cdata(text))
for e in elem:
_serialize_html(write, e, qnames, None)
else:
write("<" + tag)
items = list(elem.items())
if items or namespaces:
if namespaces:
for v, k in sorted(namespaces.items(),
key=lambda x: x[1]): # sort on prefix
if k:
k = ":" + k
write(" xmlns%s=\"%s\"" % (
k,
_escape_attrib(v)
))
for k, v in sorted(items): # lexical order
if isinstance(k, QName):
k = k.text
if isinstance(v, QName):
v = qnames[v.text]
else:
v = _escape_attrib_html(v)
# FIXME: handle boolean attributes
write(" %s=\"%s\"" % (qnames[k], v))
write(">")
tag = tag.lower()
if text:
if tag == "script" or tag == "style":
write(text)
else:
write(_escape_cdata(text))
for e in elem:
_serialize_html(write, e, qnames, None)
if tag not in HTML_EMPTY:
write("</" + tag + ">")
if elem.tail:
write(_escape_cdata(elem.tail))
def _serialize_text(write, elem):
for part in elem.itertext():
write(part)
if elem.tail:
write(elem.tail)
_serialize = {
"xml": _serialize_xml,
"html": _serialize_html,
"text": _serialize_text,
# this optional method is imported at the end of the module
# "c14n": _serialize_c14n,
}
##
# Registers a namespace prefix. The registry is global, and any
# existing mapping for either the given prefix or the namespace URI
# will be removed.
#
# @param prefix Namespace prefix.
# @param uri Namespace uri. Tags and attributes in this namespace
# will be serialized with the given prefix, if at all possible.
# @exception ValueError If the prefix is reserved, or is otherwise
# invalid.
def register_namespace(prefix, uri):
if re.match("ns\d+$", prefix):
raise ValueError("Prefix format reserved for internal use")
for k, v in list(_namespace_map.items()):
if k == uri or v == prefix:
del _namespace_map[k]
_namespace_map[uri] = prefix
_namespace_map = {
# "well-known" namespace prefixes
"http://www.w3.org/XML/1998/namespace": "xml",
"http://www.w3.org/1999/xhtml": "html",
"http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
"http://schemas.xmlsoap.org/wsdl/": "wsdl",
# xml schema
"http://www.w3.org/2001/XMLSchema": "xs",
"http://www.w3.org/2001/XMLSchema-instance": "xsi",
# dublin core
"http://purl.org/dc/elements/1.1/": "dc",
}
# For tests and troubleshooting
register_namespace._namespace_map = _namespace_map
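# Illustrative example (assumption: the module is imported as ET). Registering
# a prefix makes the serializer emit it instead of a generated "ns0":
#
#     import xml.etree.ElementTree as ET
#     ET.register_namespace("atom", "http://www.w3.org/2005/Atom")
#     elem = ET.Element("{http://www.w3.org/2005/Atom}feed")
#     ET.tostring(elem, encoding="unicode")
#     # -> '<atom:feed xmlns:atom="http://www.w3.org/2005/Atom" />'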
def _raise_serialization_error(text):
raise TypeError(
"cannot serialize %r (type %s)" % (text, type(text).__name__)
)
def _escape_cdata(text):
# escape character data
try:
# it's worth avoiding do-nothing calls for strings that are
# shorter than 500 characters, or so. assume that's, by far,
# the most common case in most applications.
if "&" in text:
text = text.replace("&", "&")
if "<" in text:
text = text.replace("<", "<")
if ">" in text:
text = text.replace(">", ">")
return text
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_attrib(text):
# escape attribute value
try:
if "&" in text:
text = text.replace("&", "&")
if "<" in text:
text = text.replace("<", "<")
if ">" in text:
text = text.replace(">", ">")
if "\"" in text:
text = text.replace("\"", """)
if "\n" in text:
text = text.replace("\n", " ")
return text
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_attrib_html(text):
# escape attribute value
try:
if "&" in text:
text = text.replace("&", "&")
if ">" in text:
text = text.replace(">", ">")
if "\"" in text:
text = text.replace("\"", """)
return text
except (TypeError, AttributeError):
_raise_serialization_error(text)
# --------------------------------------------------------------------
##
# Generates a string representation of an XML element, including all
# subelements. If encoding is "unicode", the return type is a string;
# otherwise it is a bytes array.
#
# @param element An Element instance.
# @keyparam encoding Optional output encoding (default is US-ASCII).
# Use "unicode" to return a Unicode string.
# @keyparam method Optional output method ("xml", "html", "text" or
# "c14n"; default is "xml").
# @return An (optionally) encoded string containing the XML data.
# @defreturn string
def tostring(element, encoding=None, method=None):
stream = io.StringIO() if encoding == 'unicode' else io.BytesIO()
ElementTree(element).write(stream, encoding, method=method)
return stream.getvalue()
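# Quick sketch of tostring() behaviour (illustrative, not from the original
# source): the encoding argument controls the return type.
#
#     elem = Element("greeting")
#     elem.text = "hi"
#     tostring(elem)                      # bytes: b'<greeting>hi</greeting>'
#     tostring(elem, encoding="unicode")  # str:   '<greeting>hi</greeting>'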
##
# Generates a string representation of an XML element, including all
# subelements.
#
# @param element An Element instance.
# @keyparam encoding Optional output encoding (default is US-ASCII).
# Use "unicode" to return a Unicode string.
# @keyparam method Optional output method ("xml", "html", "text" or
# "c14n"; default is "xml").
# @return A sequence object containing the XML data.
# @defreturn sequence
# @since 1.3
class _ListDataStream(io.BufferedIOBase):
""" An auxiliary stream accumulating into a list reference
"""
def __init__(self, lst):
self.lst = lst
def writable(self):
return True
def seekable(self):
return True
def write(self, b):
self.lst.append(b)
def tell(self):
return len(self.lst)
def tostringlist(element, encoding=None, method=None):
lst = []
stream = _ListDataStream(lst)
ElementTree(element).write(stream, encoding, method=method)
return lst
##
# Writes an element tree or element structure to sys.stdout. This
# function should be used for debugging only.
# <p>
# The exact output format is implementation dependent. In this
# version, it's written as an ordinary XML file.
#
# @param elem An element tree or an individual element.
def dump(elem):
# debugging
if not isinstance(elem, ElementTree):
elem = ElementTree(elem)
elem.write(sys.stdout, encoding="unicode")
tail = elem.getroot().tail
if not tail or tail[-1] != "\n":
sys.stdout.write("\n")
# --------------------------------------------------------------------
# parsing
##
# Parses an XML document into an element tree.
#
# @param source A filename or file object containing XML data.
# @param parser An optional parser instance. If not given, the
# standard {@link XMLParser} parser is used.
# @return An ElementTree instance
def parse(source, parser=None):
tree = ElementTree()
tree.parse(source, parser)
return tree
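# Illustrative sketch (the file name is made up): parse() is the usual entry
# point for reading a whole document at once.
#
#     tree = parse("feed.xml")
#     root = tree.getroot()
#     for child in root:
#         print(child.tag)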
##
# Parses an XML document into an element tree incrementally, and reports
# what's going on to the user.
#
# @param source A filename or file object containing XML data.
# @param events A list of events to report back. If omitted, only "end"
# events are reported.
# @param parser An optional parser instance. If not given, the
# standard {@link XMLParser} parser is used.
# @return A (event, elem) iterator.
def iterparse(source, events=None, parser=None):
close_source = False
if not hasattr(source, "read"):
source = open(source, "rb")
close_source = True
if not parser:
parser = XMLParser(target=TreeBuilder())
return _IterParseIterator(source, events, parser, close_source)
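# Illustrative sketch (not part of the original module): iterparse() lets
# large files be processed incrementally; clearing handled elements keeps
# memory bounded. "bigfile.xml", the "record" tag and process() are
# assumptions for the example.
#
#     for event, elem in iterparse("bigfile.xml", events=("end",)):
#         if elem.tag == "record":
#             process(elem)   # hypothetical handler
#             elem.clear()    # free children that are no longer needed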
class _IterParseIterator:
def __init__(self, source, events, parser, close_source=False):
self._file = source
self._close_file = close_source
self._events = []
self._index = 0
self._error = None
self.root = self._root = None
self._parser = parser
# wire up the parser for event reporting
parser = self._parser._parser
append = self._events.append
if events is None:
events = ["end"]
for event in events:
if event == "start":
try:
parser.ordered_attributes = 1
parser.specified_attributes = 1
def handler(tag, attrib_in, event=event, append=append,
start=self._parser._start_list):
append((event, start(tag, attrib_in)))
parser.StartElementHandler = handler
except AttributeError:
def handler(tag, attrib_in, event=event, append=append,
start=self._parser._start):
append((event, start(tag, attrib_in)))
parser.StartElementHandler = handler
elif event == "end":
def handler(tag, event=event, append=append,
end=self._parser._end):
append((event, end(tag)))
parser.EndElementHandler = handler
elif event == "start-ns":
def handler(prefix, uri, event=event, append=append):
append((event, (prefix or "", uri or "")))
parser.StartNamespaceDeclHandler = handler
elif event == "end-ns":
def handler(prefix, event=event, append=append):
append((event, None))
parser.EndNamespaceDeclHandler = handler
else:
raise ValueError("unknown event %r" % event)
def __next__(self):
while 1:
try:
item = self._events[self._index]
self._index += 1
return item
except IndexError:
pass
if self._error:
e = self._error
self._error = None
raise e
if self._parser is None:
self.root = self._root
if self._close_file:
self._file.close()
raise StopIteration
# load event buffer
del self._events[:]
self._index = 0
data = self._file.read(16384)
if data:
try:
self._parser.feed(data)
except SyntaxError as exc:
self._error = exc
else:
self._root = self._parser.close()
self._parser = None
def __iter__(self):
return self
##
# Parses an XML document from a string constant. This function can
# be used to embed "XML literals" in Python code.
#
# @param source A string containing XML data.
# @param parser An optional parser instance. If not given, the
# standard {@link XMLParser} parser is used.
# @return An Element instance.
# @defreturn Element
def XML(text, parser=None):
if not parser:
parser = XMLParser(target=TreeBuilder())
parser.feed(text)
return parser.close()
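# Illustrative example of embedding an "XML literal" (not from the original
# source):
#
#     elem = XML("<body><p>text</p></body>")
#     elem.tag       # -> 'body'
#     elem[0].text   # -> 'text'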
##
# Parses an XML document from a string constant, and also returns
# a dictionary which maps from element id:s to elements.
#
# @param source A string containing XML data.
# @param parser An optional parser instance. If not given, the
# standard {@link XMLParser} parser is used.
# @return A tuple containing an Element instance and a dictionary.
# @defreturn (Element, dictionary)
def XMLID(text, parser=None):
if not parser:
parser = XMLParser(target=TreeBuilder())
parser.feed(text)
tree = parser.close()
ids = {}
for elem in tree.iter():
id = elem.get("id")
if id:
ids[id] = elem
return tree, ids
##
# Parses an XML document from a string constant. Same as {@link #XML}.
#
# @def fromstring(text)
# @param source A string containing XML data.
# @return An Element instance.
# @defreturn Element
fromstring = XML
##
# Parses an XML document from a sequence of string fragments.
#
# @param sequence A list or other sequence containing XML data fragments.
# @param parser An optional parser instance. If not given, the
# standard {@link XMLParser} parser is used.
# @return An Element instance.
# @defreturn Element
# @since 1.3
def fromstringlist(sequence, parser=None):
if not parser:
parser = XMLParser(target=TreeBuilder())
for text in sequence:
parser.feed(text)
return parser.close()
# --------------------------------------------------------------------
##
# Generic element structure builder. This builder converts a sequence
# of {@link #TreeBuilder.start}, {@link #TreeBuilder.data}, and {@link
# #TreeBuilder.end} method calls to a well-formed element structure.
# <p>
# You can use this class to build an element structure using a custom XML
# parser, or a parser for some other XML-like format.
#
# @param element_factory Optional element factory. This factory
# is called to create new Element instances, as necessary.
class TreeBuilder:
def __init__(self, element_factory=None):
self._data = [] # data collector
self._elem = [] # element stack
self._last = None # last element
self._tail = None # true if we're after an end tag
if element_factory is None:
element_factory = Element
self._factory = element_factory
##
# Flushes the builder buffers, and returns the toplevel document
# element.
#
# @return An Element instance.
# @defreturn Element
def close(self):
assert len(self._elem) == 0, "missing end tags"
assert self._last is not None, "missing toplevel element"
return self._last
def _flush(self):
if self._data:
if self._last is not None:
text = "".join(self._data)
if self._tail:
assert self._last.tail is None, "internal error (tail)"
self._last.tail = text
else:
assert self._last.text is None, "internal error (text)"
self._last.text = text
self._data = []
##
# Adds text to the current element.
#
# @param data A string. This should be either an 8-bit string
# containing ASCII text, or a Unicode string.
def data(self, data):
self._data.append(data)
##
# Opens a new element.
#
# @param tag The element name.
# @param attrib A dictionary containing element attributes.
# @return The opened element.
# @defreturn Element
def start(self, tag, attrs):
self._flush()
self._last = elem = self._factory(tag, attrs)
if self._elem:
self._elem[-1].append(elem)
self._elem.append(elem)
self._tail = 0
return elem
##
# Closes the current element.
#
# @param tag The element name.
# @return The closed element.
# @defreturn Element
def end(self, tag):
self._flush()
self._last = self._elem.pop()
assert self._last.tag == tag,\
"end tag mismatch (expected %s, got %s)" % (
self._last.tag, tag)
self._tail = 1
return self._last
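# Illustrative sketch: driving TreeBuilder by hand, the way a custom parser
# would (not part of the original module).
#
#     builder = TreeBuilder()
#     builder.start("root", {})
#     builder.start("item", {"id": "1"})
#     builder.data("text")
#     builder.end("item")
#     builder.end("root")
#     root = builder.close()   # -> <root><item id="1">text</item></root>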
##
# Element structure builder for XML source data, based on the
# <b>expat</b> parser.
#
# @keyparam target Target object. If omitted, the builder uses an
# instance of the standard {@link #TreeBuilder} class.
# @keyparam html Predefine HTML entities. This flag is not supported
# by the current implementation.
# @keyparam encoding Optional encoding. If given, the value overrides
# the encoding specified in the XML file.
# @see #ElementTree
# @see #TreeBuilder
class XMLParser:
def __init__(self, html=0, target=None, encoding=None):
try:
from xml.parsers import expat
except ImportError:
try:
import pyexpat as expat
except ImportError:
raise ImportError(
"No module named expat; use SimpleXMLTreeBuilder instead"
)
parser = expat.ParserCreate(encoding, "}")
if target is None:
target = TreeBuilder()
# underscored names are provided for compatibility only
self.parser = self._parser = parser
self.target = self._target = target
self._error = expat.error
self._names = {} # name memo cache
# main callbacks
parser.DefaultHandlerExpand = self._default
if hasattr(target, 'start'):
parser.StartElementHandler = self._start
if hasattr(target, 'end'):
parser.EndElementHandler = self._end
if hasattr(target, 'data'):
parser.CharacterDataHandler = target.data
# miscellaneous callbacks
if hasattr(target, 'comment'):
parser.CommentHandler = target.comment
if hasattr(target, 'pi'):
parser.ProcessingInstructionHandler = target.pi
# let expat do the buffering, if supported
try:
parser.buffer_text = 1
except AttributeError:
pass
# use new-style attribute handling, if supported
try:
parser.ordered_attributes = 1
parser.specified_attributes = 1
if hasattr(target, 'start'):
parser.StartElementHandler = self._start_list
except AttributeError:
pass
self._doctype = None
self.entity = {}
try:
self.version = "Expat %d.%d.%d" % expat.version_info
except AttributeError:
pass # unknown
def _raiseerror(self, value):
err = ParseError(value)
err.code = value.code
err.position = value.lineno, value.offset
raise err
def _fixname(self, key):
# expand qname, and convert name string to ascii, if possible
try:
name = self._names[key]
except KeyError:
name = key
if "}" in name:
name = "{" + name
self._names[key] = name
return name
def _start(self, tag, attrib_in):
fixname = self._fixname
tag = fixname(tag)
attrib = {}
for key, value in attrib_in.items():
attrib[fixname(key)] = value
return self.target.start(tag, attrib)
def _start_list(self, tag, attrib_in):
fixname = self._fixname
tag = fixname(tag)
attrib = {}
if attrib_in:
for i in range(0, len(attrib_in), 2):
attrib[fixname(attrib_in[i])] = attrib_in[i+1]
return self.target.start(tag, attrib)
def _end(self, tag):
return self.target.end(self._fixname(tag))
def _default(self, text):
prefix = text[:1]
if prefix == "&":
# deal with undefined entities
try:
data_handler = self.target.data
except AttributeError:
return
try:
data_handler(self.entity[text[1:-1]])
except KeyError:
from xml.parsers import expat
err = expat.error(
"undefined entity %s: line %d, column %d" %
(text, self.parser.ErrorLineNumber,
self.parser.ErrorColumnNumber)
)
err.code = 11 # XML_ERROR_UNDEFINED_ENTITY
err.lineno = self.parser.ErrorLineNumber
err.offset = self.parser.ErrorColumnNumber
raise err
elif prefix == "<" and text[:9] == "<!DOCTYPE":
self._doctype = [] # inside a doctype declaration
elif self._doctype is not None:
# parse doctype contents
if prefix == ">":
self._doctype = None
return
text = text.strip()
if not text:
return
self._doctype.append(text)
n = len(self._doctype)
if n > 2:
type = self._doctype[1]
if type == "PUBLIC" and n == 4:
name, type, pubid, system = self._doctype
if pubid:
pubid = pubid[1:-1]
elif type == "SYSTEM" and n == 3:
name, type, system = self._doctype
pubid = None
else:
return
if hasattr(self.target, "doctype"):
self.target.doctype(name, pubid, system[1:-1])
elif self.doctype != self._XMLParser__doctype:
# warn about deprecated call
self._XMLParser__doctype(name, pubid, system[1:-1])
self.doctype(name, pubid, system[1:-1])
self._doctype = None
##
# (Deprecated) Handles a doctype declaration.
#
# @param name Doctype name.
# @param pubid Public identifier.
# @param system System identifier.
def doctype(self, name, pubid, system):
"""This method of XMLParser is deprecated."""
warnings.warn(
"This method of XMLParser is deprecated. Define doctype() "
"method on the TreeBuilder target.",
DeprecationWarning,
)
# sentinel, if doctype is redefined in a subclass
__doctype = doctype
##
# Feeds data to the parser.
#
# @param data Encoded data.
def feed(self, data):
try:
self.parser.Parse(data, 0)
except self._error as v:
self._raiseerror(v)
##
# Finishes feeding data to the parser.
#
# @return An element structure.
# @defreturn Element
def close(self):
try:
self.parser.Parse("", 1) # end of data
except self._error as v:
self._raiseerror(v)
try:
close_handler = self.target.close
except AttributeError:
pass
else:
return close_handler()
finally:
# get rid of circular references
del self.parser, self._parser
del self.target, self._target
# Import the C accelerators
try:
# Element, SubElement, ParseError, TreeBuilder, XMLParser
from _elementtree import *
except ImportError:
pass
else:
# Overwrite 'ElementTree.parse' and 'iterparse' to use the C XMLParser
class ElementTree(ElementTree):
def parse(self, source, parser=None):
close_source = False
if not hasattr(source, 'read'):
source = open(source, 'rb')
close_source = True
try:
if parser is not None:
while True:
data = source.read(65536)
if not data:
break
parser.feed(data)
self._root = parser.close()
else:
parser = XMLParser()
self._root = parser._parse(source)
return self._root
finally:
if close_source:
source.close()
class iterparse:
"""Parses an XML section into an element tree incrementally.
Reports what's going on to the user. 'source' is a filename or file
object containing XML data. 'events' is a list of events to report back.
The supported events are the strings "start", "end", "start-ns" and
"end-ns" (the "ns" events are used to get detailed namespace
information). If 'events' is omitted, only "end" events are reported.
'parser' is an optional parser instance. If not given, the standard
XMLParser parser is used. Returns an iterator providing
(event, elem) pairs.
"""
root = None
def __init__(self, file, events=None, parser=None):
self._close_file = False
if not hasattr(file, 'read'):
file = open(file, 'rb')
self._close_file = True
self._file = file
self._events = []
self._index = 0
self._error = None
self.root = self._root = None
if parser is None:
parser = XMLParser(target=TreeBuilder())
self._parser = parser
self._parser._setevents(self._events, events)
def __next__(self):
while True:
try:
item = self._events[self._index]
self._index += 1
return item
except IndexError:
pass
if self._error:
e = self._error
self._error = None
raise e
if self._parser is None:
self.root = self._root
if self._close_file:
self._file.close()
raise StopIteration
# load event buffer
del self._events[:]
self._index = 0
data = self._file.read(16384)
if data:
try:
self._parser.feed(data)
except SyntaxError as exc:
self._error = exc
else:
self._root = self._parser.close()
self._parser = None
def __iter__(self):
return self
# compatibility
XMLTreeBuilder = XMLParser
# workaround circular import.
try:
from ElementC14N import _serialize_c14n
_serialize["c14n"] = _serialize_c14n
except ImportError:
pass
| agpl-3.0 | 6,363,018,151,209,591,000 | 33.218162 | 80 | 0.576168 | false |
unclev/vk.unclev.ru | library/xmpp/protocol.py | 1 | 54681 | ## protocol.py
##
## Copyright (C) 2003-2005 Alexey "Snake" Nezhdanov
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2, or (at your option)
## any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
# $Id: protocol.py, v1.64 2014/01/10 alkorgun Exp $
"""
Protocol module contains tools needed for processing of
xmpp-related data structures.
"""
import time
from .simplexml import Node, XML_ls, XMLescape, ustr
NS_ACTIVITY = "http://jabber.org/protocol/activity" # XEP-0108
NS_ADDRESS = "http://jabber.org/protocol/address" # XEP-0033
NS_ADMIN = "http://jabber.org/protocol/admin" # XEP-0133
NS_ADMIN_ADD_USER = NS_ADMIN + "#add-user" # XEP-0133
NS_ADMIN_DELETE_USER = NS_ADMIN + "#delete-user" # XEP-0133
NS_ADMIN_DISABLE_USER = NS_ADMIN + "#disable-user" # XEP-0133
NS_ADMIN_REENABLE_USER = NS_ADMIN + "#reenable-user" # XEP-0133
NS_ADMIN_END_USER_SESSION = NS_ADMIN + "#end-user-session" # XEP-0133
NS_ADMIN_GET_USER_PASSWORD = NS_ADMIN + "#get-user-password" # XEP-0133
NS_ADMIN_CHANGE_USER_PASSWORD = NS_ADMIN + "#change-user-password" # XEP-0133
NS_ADMIN_GET_USER_ROSTER = NS_ADMIN + "#get-user-roster" # XEP-0133
NS_ADMIN_GET_USER_LASTLOGIN = NS_ADMIN + "#get-user-lastlogin" # XEP-0133
NS_ADMIN_USER_STATS = NS_ADMIN + "#user-stats" # XEP-0133
NS_ADMIN_EDIT_BLACKLIST = NS_ADMIN + "#edit-blacklist" # XEP-0133
NS_ADMIN_EDIT_WHITELIST = NS_ADMIN + "#edit-whitelist" # XEP-0133
NS_ADMIN_REGISTERED_USERS_NUM = NS_ADMIN + "#get-registered-users-num" # XEP-0133
NS_ADMIN_DISABLED_USERS_NUM = NS_ADMIN + "#get-disabled-users-num" # XEP-0133
NS_ADMIN_ONLINE_USERS_NUM = NS_ADMIN + "#get-online-users-num" # XEP-0133
NS_ADMIN_ACTIVE_USERS_NUM = NS_ADMIN + "#get-active-users-num" # XEP-0133
NS_ADMIN_IDLE_USERS_NUM = NS_ADMIN + "#get-idle-users-num" # XEP-0133
NS_ADMIN_REGISTERED_USERS_LIST = NS_ADMIN + "#get-registered-users-list" # XEP-0133
NS_ADMIN_DISABLED_USERS_LIST = NS_ADMIN + "#get-disabled-users-list" # XEP-0133
NS_ADMIN_ONLINE_USERS_LIST = NS_ADMIN + "#get-online-users-list" # XEP-0133
NS_ADMIN_ACTIVE_USERS_LIST = NS_ADMIN + "#get-active-users-list" # XEP-0133
NS_ADMIN_IDLE_USERS_LIST = NS_ADMIN + "#get-idle-users-list" # XEP-0133
NS_ADMIN_ANNOUNCE = NS_ADMIN + "#announce" # XEP-0133
NS_ADMIN_SET_MOTD = NS_ADMIN + "#set-motd" # XEP-0133
NS_ADMIN_EDIT_MOTD = NS_ADMIN + "#edit-motd" # XEP-0133
NS_ADMIN_DELETE_MOTD = NS_ADMIN + "#delete-motd" # XEP-0133
NS_ADMIN_SET_WELCOME = NS_ADMIN + "#set-welcome" # XEP-0133
NS_ADMIN_DELETE_WELCOME = NS_ADMIN + "#delete-welcome" # XEP-0133
NS_ADMIN_EDIT_ADMIN = NS_ADMIN + "#edit-admin" # XEP-0133
NS_ADMIN_RESTART = NS_ADMIN + "#restart" # XEP-0133
NS_ADMIN_SHUTDOWN = NS_ADMIN + "#shutdown" # XEP-0133
NS_AGENTS = "jabber:iq:agents" # XEP-0094 (historical)
NS_AMP = "http://jabber.org/protocol/amp" # XEP-0079
NS_AMP_ERRORS = NS_AMP + "#errors" # XEP-0079
NS_AUTH = "jabber:iq:auth" # XEP-0078
NS_AVATAR = "jabber:iq:avatar" # XEP-0008 (historical)
NS_BIND = "urn:ietf:params:xml:ns:xmpp-bind" # RFC 3920
NS_BROWSE = "jabber:iq:browse" # XEP-0011 (historical)
NS_BYTESTREAM = "http://jabber.org/protocol/bytestreams" # XEP-0065
NS_CAPS = "http://jabber.org/protocol/caps" # XEP-0115
NS_CAPTCHA = "urn:xmpp:captcha" # XEP-0158
NS_CHATSTATES = "http://jabber.org/protocol/chatstates" # XEP-0085
NS_CLIENT = "jabber:client" # RFC 3921
NS_COMMANDS = "http://jabber.org/protocol/commands" # XEP-0050
NS_COMPONENT_ACCEPT = "jabber:component:accept" # XEP-0114
NS_COMPONENT_1 = "http://jabberd.jabberstudio.org/ns/component/1.0" # Jabberd2
NS_COMPRESS = "http://jabber.org/protocol/compress" # XEP-0138
NS_DATA = "jabber:x:data" # XEP-0004
NS_DATA_LAYOUT = "http://jabber.org/protocol/xdata-layout" # XEP-0141
NS_DATA_VALIDATE = "http://jabber.org/protocol/xdata-validate" # XEP-0122
NS_DELAY = "jabber:x:delay" # XEP-0091 (deprecated)
NS_DIALBACK = "jabber:server:dialback" # RFC 3921
NS_DISCO = "http://jabber.org/protocol/disco" # XEP-0030
NS_DISCO_INFO = NS_DISCO + "#info" # XEP-0030
NS_DISCO_ITEMS = NS_DISCO + "#items" # XEP-0030
NS_ENCRYPTED = "jabber:x:encrypted" # XEP-0027
NS_EVENT = "jabber:x:event" # XEP-0022 (deprecated)
NS_FEATURE = "http://jabber.org/protocol/feature-neg" # XEP-0020
NS_FILE = "http://jabber.org/protocol/si/profile/file-transfer" # XEP-0096
NS_GATEWAY = "jabber:iq:gateway" # XEP-0100
NS_GEOLOC = "http://jabber.org/protocol/geoloc" # XEP-0080
NS_GROUPCHAT = "gc-1.0" # XEP-0045
NS_HTTP_BIND = "http://jabber.org/protocol/httpbind" # XEP-0124
NS_IBB = "http://jabber.org/protocol/ibb" # XEP-0047
NS_INVISIBLE = "presence-invisible" # Jabberd2
NS_IQ = "iq" # Jabberd2
NS_LAST = "jabber:iq:last" # XEP-0012
NS_MEDIA = "urn:xmpp:media-element" # XEP-0158
NS_MESSAGE = "message" # Jabberd2
NS_MOOD = "http://jabber.org/protocol/mood" # XEP-0107
NS_MUC = "http://jabber.org/protocol/muc" # XEP-0045
NS_MUC_ADMIN = NS_MUC + "#admin" # XEP-0045
NS_MUC_OWNER = NS_MUC + "#owner" # XEP-0045
NS_MUC_UNIQUE = NS_MUC + "#unique" # XEP-0045
NS_MUC_USER = NS_MUC + "#user" # XEP-0045
NS_MUC_REGISTER = NS_MUC + "#register" # XEP-0045
NS_MUC_REQUEST = NS_MUC + "#request" # XEP-0045
NS_MUC_ROOMCONFIG = NS_MUC + "#roomconfig" # XEP-0045
NS_MUC_ROOMINFO = NS_MUC + "#roominfo" # XEP-0045
NS_MUC_ROOMS = NS_MUC + "#rooms" # XEP-0045
NS_MUC_TRAFIC = NS_MUC + "#traffic" # XEP-0045
NS_NICK = "http://jabber.org/protocol/nick" # XEP-0172
NS_OFFLINE = "http://jabber.org/protocol/offline" # XEP-0013
NS_OOB = "jabber:x:oob" # XEP-0066
NS_PHYSLOC = "http://jabber.org/protocol/physloc" # XEP-0112
NS_PRESENCE = "presence" # Jabberd2
NS_PRIVACY = "jabber:iq:privacy" # RFC 3921
NS_PRIVATE = "jabber:iq:private" # XEP-0049
NS_PUBSUB = "http://jabber.org/protocol/pubsub" # XEP-0060
NS_RC = "http://jabber.org/protocol/rc" # XEP-0146
NS_REGISTER = "jabber:iq:register" # XEP-0077
NS_RECEIPTS = "urn:xmpp:receipts" # XEP-0184
NS_ROSTER = "jabber:iq:roster" # RFC 3921
NS_ROSTERX = "http://jabber.org/protocol/rosterx" # XEP-0144
NS_RPC = "jabber:iq:rpc" # XEP-0009
NS_SASL = "urn:ietf:params:xml:ns:xmpp-sasl" # RFC 3920
NS_SEARCH = "jabber:iq:search" # XEP-0055
NS_SERVER = "jabber:server" # RFC 3921
NS_SESSION = "urn:ietf:params:xml:ns:xmpp-session" # RFC 3921
NS_SI = "http://jabber.org/protocol/si" # XEP-0096
NS_SI_PUB = "http://jabber.org/protocol/sipub" # XEP-0137
NS_SIGNED = "jabber:x:signed" # XEP-0027
NS_SOFTWAREINFO = "urn:xmpp:dataforms:softwareinfo" # XEP-0232
NS_STANZAS = "urn:ietf:params:xml:ns:xmpp-stanzas" # RFC 3920
NS_STATS = "http://jabber.org/protocol/stats" # XEP-0039
NS_STREAMS = "http://etherx.jabber.org/streams" # RFC 3920
NS_TIME = "jabber:iq:time" # XEP-0090 (deprecated)
NS_TLS = "urn:ietf:params:xml:ns:xmpp-tls" # RFC 3920
NS_URN_ATTENTION = "urn:xmpp:attention:0" # XEP-0224
NS_URN_OOB = "urn:xmpp:bob" # XEP-0231
NS_URN_TIME = "urn:xmpp:time" # XEP-0202
NS_VACATION = "http://jabber.org/protocol/vacation" # XEP-0109
NS_VCARD = "vcard-temp" # XEP-0054
NS_VCARD_UPDATE = "vcard-temp:x:update" # XEP-0153
NS_VERSION = "jabber:iq:version" # XEP-0092
NS_WAITINGLIST = "http://jabber.org/protocol/waitinglist" # XEP-0130
NS_XHTML_IM = "http://jabber.org/protocol/xhtml-im" # XEP-0071
NS_XMPP_STREAMS = "urn:ietf:params:xml:ns:xmpp-streams" # RFC 3920
NS_PING = "urn:xmpp:ping" # XEP-0199
NS_MUC_FILTER = "http://jabber.ru/muc-filter"
STREAM_NOT_AUTHORIZED = NS_XMPP_STREAMS + " not-authorized"
STREAM_REMOTE_CONNECTION_FAILED = NS_XMPP_STREAMS + " remote-connection-failed"
SASL_MECHANISM_TOO_WEAK = NS_SASL + " mechanism-too-weak"
STREAM_XML_NOT_WELL_FORMED = NS_XMPP_STREAMS + " xml-not-well-formed"
ERR_JID_MALFORMED = NS_STANZAS + " jid-malformed"
STREAM_SEE_OTHER_HOST = NS_XMPP_STREAMS + " see-other-host"
STREAM_BAD_NAMESPACE_PREFIX = NS_XMPP_STREAMS + " bad-namespace-prefix"
ERR_SERVICE_UNAVAILABLE = NS_STANZAS + " service-unavailable"
STREAM_CONNECTION_TIMEOUT = NS_XMPP_STREAMS + " connection-timeout"
STREAM_UNSUPPORTED_VERSION = NS_XMPP_STREAMS + " unsupported-version"
STREAM_IMPROPER_ADDRESSING = NS_XMPP_STREAMS + " improper-addressing"
STREAM_UNDEFINED_CONDITION = NS_XMPP_STREAMS + " undefined-condition"
SASL_NOT_AUTHORIZED = NS_SASL + " not-authorized"
ERR_GONE = NS_STANZAS + " gone"
SASL_TEMPORARY_AUTH_FAILURE = NS_SASL + " temporary-auth-failure"
ERR_REMOTE_SERVER_NOT_FOUND = NS_STANZAS + " remote-server-not-found"
ERR_UNEXPECTED_REQUEST = NS_STANZAS + " unexpected-request"
ERR_RECIPIENT_UNAVAILABLE = NS_STANZAS + " recipient-unavailable"
ERR_CONFLICT = NS_STANZAS + " conflict"
STREAM_SYSTEM_SHUTDOWN = NS_XMPP_STREAMS + " system-shutdown"
STREAM_BAD_FORMAT = NS_XMPP_STREAMS + " bad-format"
ERR_SUBSCRIPTION_REQUIRED = NS_STANZAS + " subscription-required"
STREAM_INTERNAL_SERVER_ERROR = NS_XMPP_STREAMS + " internal-server-error"
ERR_NOT_AUTHORIZED = NS_STANZAS + " not-authorized"
SASL_ABORTED = NS_SASL + " aborted"
ERR_REGISTRATION_REQUIRED = NS_STANZAS + " registration-required"
ERR_INTERNAL_SERVER_ERROR = NS_STANZAS + " internal-server-error"
SASL_INCORRECT_ENCODING = NS_SASL + " incorrect-encoding"
STREAM_HOST_GONE = NS_XMPP_STREAMS + " host-gone"
STREAM_POLICY_VIOLATION = NS_XMPP_STREAMS + " policy-violation"
STREAM_INVALID_XML = NS_XMPP_STREAMS + " invalid-xml"
STREAM_CONFLICT = NS_XMPP_STREAMS + " conflict"
STREAM_RESOURCE_CONSTRAINT = NS_XMPP_STREAMS + " resource-constraint"
STREAM_UNSUPPORTED_ENCODING = NS_XMPP_STREAMS + " unsupported-encoding"
ERR_NOT_ALLOWED = NS_STANZAS + " not-allowed"
ERR_ITEM_NOT_FOUND = NS_STANZAS + " item-not-found"
ERR_NOT_ACCEPTABLE = NS_STANZAS + " not-acceptable"
STREAM_INVALID_FROM = NS_XMPP_STREAMS + " invalid-from"
ERR_FEATURE_NOT_IMPLEMENTED = NS_STANZAS + " feature-not-implemented"
ERR_BAD_REQUEST = NS_STANZAS + " bad-request"
STREAM_INVALID_ID = NS_XMPP_STREAMS + " invalid-id"
STREAM_HOST_UNKNOWN = NS_XMPP_STREAMS + " host-unknown"
ERR_UNDEFINED_CONDITION = NS_STANZAS + " undefined-condition"
SASL_INVALID_MECHANISM = NS_SASL + " invalid-mechanism"
STREAM_RESTRICTED_XML = NS_XMPP_STREAMS + " restricted-xml"
ERR_RESOURCE_CONSTRAINT = NS_STANZAS + " resource-constraint"
ERR_REMOTE_SERVER_TIMEOUT = NS_STANZAS + " remote-server-timeout"
SASL_INVALID_AUTHZID = NS_SASL + " invalid-authzid"
ERR_PAYMENT_REQUIRED = NS_STANZAS + " payment-required"
STREAM_INVALID_NAMESPACE = NS_XMPP_STREAMS + " invalid-namespace"
ERR_REDIRECT = NS_STANZAS + " redirect"
STREAM_UNSUPPORTED_STANZA_TYPE = NS_XMPP_STREAMS + " unsupported-stanza-type"
ERR_FORBIDDEN = NS_STANZAS + " forbidden"
ERRORS = {
"urn:ietf:params:xml:ns:xmpp-sasl not-authorized": ["", "", "The authentication failed because the initiating entity did not provide valid credentials (this includes but is not limited to the case of an unknown username); sent in reply to a <response/> element or an <auth/> element with initial response data."],
"urn:ietf:params:xml:ns:xmpp-stanzas payment-required": ["402", "auth", "The requesting entity is not authorized to access the requested service because payment is required."],
"urn:ietf:params:xml:ns:xmpp-sasl mechanism-too-weak": ["", "", "The mechanism requested by the initiating entity is weaker than server policy permits for that initiating entity; sent in reply to a <response/> element or an <auth/> element with initial response data."],
"urn:ietf:params:xml:ns:xmpp-streams unsupported-encoding": ["", "", "The initiating entity has encoded the stream in an encoding that is not supported by the server."],
"urn:ietf:params:xml:ns:xmpp-stanzas remote-server-timeout": ["504", "wait", "A remote server or service specified as part or all of the JID of the intended recipient could not be contacted within a reasonable amount of time."],
"urn:ietf:params:xml:ns:xmpp-streams remote-connection-failed": ["", "", "The server is unable to properly connect to a remote resource that is required for authentication or authorization."],
"urn:ietf:params:xml:ns:xmpp-streams restricted-xml": ["", "", "The entity has attempted to send restricted XML features such as a comment, processing instruction, DTD, entity reference, or unescaped character."],
"urn:ietf:params:xml:ns:xmpp-streams see-other-host": ["", "", "The server will not provide service to the initiating entity but is redirecting traffic to another host."],
"urn:ietf:params:xml:ns:xmpp-streams xml-not-well-formed": ["", "", "The initiating entity has sent XML that is not well-formed."],
"urn:ietf:params:xml:ns:xmpp-stanzas subscription-required": ["407", "auth", "The requesting entity is not authorized to access the requested service because a subscription is required."],
"urn:ietf:params:xml:ns:xmpp-streams internal-server-error": ["", "", "The server has experienced a misconfiguration or an otherwise-undefined internal error that prevents it from servicing the stream."],
"urn:ietf:params:xml:ns:xmpp-sasl invalid-mechanism": ["", "", "The initiating entity did not provide a mechanism or requested a mechanism that is not supported by the receiving entity; sent in reply to an <auth/> element."],
"urn:ietf:params:xml:ns:xmpp-streams policy-violation": ["", "", "The entity has violated some local service policy."],
"urn:ietf:params:xml:ns:xmpp-stanzas conflict": ["409", "cancel", "Access cannot be granted because an existing resource or session exists with the same name or address."],
"urn:ietf:params:xml:ns:xmpp-streams unsupported-stanza-type": ["", "", "The initiating entity has sent a first-level child of the stream that is not supported by the server."],
"urn:ietf:params:xml:ns:xmpp-sasl incorrect-encoding": ["", "", "The data provided by the initiating entity could not be processed because the [BASE64]Josefsson, S., The Base16, Base32, and Base64 Data Encodings, July 2003. encoding is incorrect (e.g., because the encoding does not adhere to the definition in Section 3 of [BASE64]Josefsson, S., The Base16, Base32, and Base64 Data Encodings, July 2003.); sent in reply to a <response/> element or an <auth/> element with initial response data."],
"urn:ietf:params:xml:ns:xmpp-stanzas registration-required": ["407", "auth", "The requesting entity is not authorized to access the requested service because registration is required."],
"urn:ietf:params:xml:ns:xmpp-streams invalid-id": ["", "", "The stream ID or dialback ID is invalid or does not match an ID previously provided."],
"urn:ietf:params:xml:ns:xmpp-sasl invalid-authzid": ["", "", "The authzid provided by the initiating entity is invalid, either because it is incorrectly formatted or because the initiating entity does not have permissions to authorize that ID; sent in reply to a <response/> element or an <auth/> element with initial response data."],
"urn:ietf:params:xml:ns:xmpp-stanzas bad-request": ["400", "modify", "The sender has sent XML that is malformed or that cannot be processed."],
"urn:ietf:params:xml:ns:xmpp-streams not-authorized": ["", "", "The entity has attempted to send data before the stream has been authenticated, or otherwise is not authorized to perform an action related to stream negotiation."],
"urn:ietf:params:xml:ns:xmpp-stanzas forbidden": ["403", "auth", "The requesting entity does not possess the required permissions to perform the action."],
"urn:ietf:params:xml:ns:xmpp-sasl temporary-auth-failure": ["", "", "The authentication failed because of a temporary error condition within the receiving entity; sent in reply to an <auth/> element or <response/> element."],
"urn:ietf:params:xml:ns:xmpp-streams invalid-namespace": ["", "", "The streams namespace name is something other than \http://etherx.jabber.org/streams\" or the dialback namespace name is something other than \"jabber:server:dialback\"."],
"urn:ietf:params:xml:ns:xmpp-stanzas feature-not-implemented": ["501", "cancel", "The feature requested is not implemented by the recipient or server and therefore cannot be processed."],
"urn:ietf:params:xml:ns:xmpp-streams invalid-xml": ["", "", "The entity has sent invalid XML over the stream to a server that performs validation."],
"urn:ietf:params:xml:ns:xmpp-stanzas item-not-found": ["404", "cancel", "The addressed JID or item requested cannot be found."],
"urn:ietf:params:xml:ns:xmpp-streams host-gone": ["", "", "The value of the \"to\" attribute provided by the initiating entity in the stream header corresponds to a hostname that is no longer hosted by the server."],
"urn:ietf:params:xml:ns:xmpp-stanzas recipient-unavailable": ["404", "wait", "The intended recipient is temporarily unavailable."],
"urn:ietf:params:xml:ns:xmpp-stanzas not-acceptable": ["406", "cancel", "The recipient or server understands the request but is refusing to process it because it does not meet criteria defined by the recipient or server."],
"urn:ietf:params:xml:ns:xmpp-streams invalid-from": ["cancel", "", "The JID or hostname provided in a \"from\" address does not match an authorized JID or validated domain negotiated between servers via SASL or dialback, or between a client and a server via authentication and resource authorization."],
"urn:ietf:params:xml:ns:xmpp-streams bad-format": ["", "", "The entity has sent XML that cannot be processed."],
"urn:ietf:params:xml:ns:xmpp-streams resource-constraint": ["", "", "The server lacks the system resources necessary to service the stream."],
"urn:ietf:params:xml:ns:xmpp-stanzas undefined-condition": ["500", "", "The condition is undefined."],
"urn:ietf:params:xml:ns:xmpp-stanzas redirect": ["302", "modify", "The recipient or server is redirecting requests for this information to another entity."],
"urn:ietf:params:xml:ns:xmpp-streams bad-namespace-prefix": ["", "", "The entity has sent a namespace prefix that is unsupported, or has sent no namespace prefix on an element that requires such a prefix."],
"urn:ietf:params:xml:ns:xmpp-streams system-shutdown": ["", "", "The server is being shut down and all active streams are being closed."],
"urn:ietf:params:xml:ns:xmpp-streams conflict": ["", "", "The server is closing the active stream for this entity because a new stream has been initiated that conflicts with the existing stream."],
"urn:ietf:params:xml:ns:xmpp-streams connection-timeout": ["", "", "The entity has not generated any traffic over the stream for some period of time."],
"urn:ietf:params:xml:ns:xmpp-stanzas jid-malformed": ["400", "modify", "The value of the \"to\" attribute in the sender's stanza does not adhere to the syntax defined in Addressing Scheme."],
"urn:ietf:params:xml:ns:xmpp-stanzas resource-constraint": ["500", "wait", "The server or recipient lacks the system resources necessary to service the request."],
"urn:ietf:params:xml:ns:xmpp-stanzas remote-server-not-found": ["404", "cancel", "A remote server or service specified as part or all of the JID of the intended recipient does not exist."],
"urn:ietf:params:xml:ns:xmpp-streams unsupported-version": ["", "", "The value of the \"version\" attribute provided by the initiating entity in the stream header specifies a version of XMPP that is not supported by the server."],
"urn:ietf:params:xml:ns:xmpp-streams host-unknown": ["", "", "The value of the \"to\" attribute provided by the initiating entity in the stream header does not correspond to a hostname that is hosted by the server."],
"urn:ietf:params:xml:ns:xmpp-stanzas unexpected-request": ["400", "wait", "The recipient or server understood the request but was not expecting it at this time (e.g., the request was out of order)."],
"urn:ietf:params:xml:ns:xmpp-streams improper-addressing": ["", "", "A stanza sent between two servers lacks a \"to\" or \"from\" attribute (or the attribute has no value)."],
"urn:ietf:params:xml:ns:xmpp-stanzas not-allowed": ["405", "cancel", "The recipient or server does not allow any entity to perform the action."],
"urn:ietf:params:xml:ns:xmpp-stanzas internal-server-error": ["500", "wait", "The server could not process the stanza because of a misconfiguration or an otherwise-undefined internal server error."],
"urn:ietf:params:xml:ns:xmpp-stanzas gone": ["302", "modify", "The recipient or server can no longer be contacted at this address."],
"urn:ietf:params:xml:ns:xmpp-streams undefined-condition": ["", "", "The error condition is not one of those defined by the other conditions in this list."],
"urn:ietf:params:xml:ns:xmpp-stanzas service-unavailable": ["503", "cancel", "The server or recipient does not currently provide the requested service."],
"urn:ietf:params:xml:ns:xmpp-stanzas not-authorized": ["401", "auth", "The sender must provide proper credentials before being allowed to perform the action, or has provided improper credentials."],
"urn:ietf:params:xml:ns:xmpp-sasl aborted": ["", "", "The receiving entity acknowledges an <abort/> element sent by the initiating entity; sent in reply to the <abort/> element."]
}
_errorcodes = {
"302": "redirect",
"400": "unexpected-request",
"401": "not-authorized",
"402": "payment-required",
"403": "forbidden",
"404": "remote-server-not-found",
"405": "not-allowed",
"406": "not-acceptable",
"407": "subscription-required",
"409": "conflict",
"500": "undefined-condition",
"501": "feature-not-implemented",
"503": "service-unavailable",
"504": "remote-server-timeout"
}
def isResultNode(node):
"""
Returns true if the node is a positive reply.
"""
return (node and node.getType() == "result")
def isGetNode(node):
"""
Returns true if the node is a get request.
"""
return (node and node.getType() == "get")
def isSetNode(node):
"""
Returns true if the node is a set request.
"""
return (node and node.getType() == "set")
def isErrorNode(node):
"""
Returns true if the node is a negative reply.
"""
return (node and node.getType() == "error")
class NodeProcessed(Exception):
"""
Exception that should be raised by a handler when the handling should be stopped.
"""
class StreamError(Exception):
"""
Base exception class for stream errors.
"""
class BadFormat(StreamError): pass
class BadNamespacePrefix(StreamError): pass
class Conflict(StreamError): pass
class ConnectionTimeout(StreamError): pass
class HostGone(StreamError): pass
class HostUnknown(StreamError): pass
class ImproperAddressing(StreamError): pass
class InternalServerError(StreamError): pass
class InvalidFrom(StreamError): pass
class InvalidID(StreamError): pass
class InvalidNamespace(StreamError): pass
class InvalidXML(StreamError): pass
class NotAuthorized(StreamError): pass
class PolicyViolation(StreamError): pass
class RemoteConnectionFailed(StreamError): pass
class ResourceConstraint(StreamError): pass
class RestrictedXML(StreamError): pass
class SeeOtherHost(StreamError): pass
class SystemShutdown(StreamError): pass
class UndefinedCondition(StreamError): pass
class UnsupportedEncoding(StreamError): pass
class UnsupportedStanzaType(StreamError): pass
class UnsupportedVersion(StreamError): pass
class XMLNotWellFormed(StreamError): pass
stream_exceptions = {
"bad-format": BadFormat,
"bad-namespace-prefix": BadNamespacePrefix,
"conflict": Conflict,
"connection-timeout": ConnectionTimeout,
"host-gone": HostGone,
"host-unknown": HostUnknown,
"improper-addressing": ImproperAddressing,
"internal-server-error": InternalServerError,
"invalid-from": InvalidFrom,
"invalid-id": InvalidID,
"invalid-namespace": InvalidNamespace,
"invalid-xml": InvalidXML,
"not-authorized": NotAuthorized,
"policy-violation": PolicyViolation,
"remote-connection-failed": RemoteConnectionFailed,
"resource-constraint": ResourceConstraint,
"restricted-xml": RestrictedXML,
"see-other-host": SeeOtherHost,
"system-shutdown": SystemShutdown,
"undefined-condition": UndefinedCondition,
"unsupported-encoding": UnsupportedEncoding,
"unsupported-stanza-type": UnsupportedStanzaType,
"unsupported-version": UnsupportedVersion,
"xml-not-well-formed": XMLNotWellFormed
}
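# Illustrative sketch (not part of the original module): this table is a
# plain condition-name -> exception-class lookup, so stream-level errors can
# be raised generically. The "condition" value is an assumption.
#
#     condition = "host-unknown"   # e.g. taken from a received stream error
#     raise stream_exceptions.get(condition, StreamError)(condition)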
class JID:
"""
JID object. JID can be built from string, modified, compared, serialized into string.
"""
def __init__(self, jid=None, node="", domain="", resource=""):
"""
Constructor. JID can be specified as string (jid argument) or as separate parts.
Examples:
JID("node@domain/resource")
JID(node="node", domain="domain.org")
"""
if not jid and not domain:
raise ValueError("JID must contain at least domain name")
elif isinstance(jid, self.__class__):
self.node, self.domain, self.resource = jid.node, jid.domain, jid.resource
elif domain:
self.node, self.domain, self.resource = node, domain, resource
else:
if jid.find("@") + 1:
self.node, jid = jid.split("@", 1)
else:
self.node = ""
if jid.find("/") + 1:
self.domain, self.resource = jid.split("/", 1)
else:
self.domain, self.resource = jid, ""
def getNode(self):
"""
Return the node part of the JID.
"""
return self.node
def setNode(self, node):
"""
Set the node part of the JID to new value. Specify None to remove the node part.
"""
self.node = node.lower() if node else ""
def getDomain(self):
"""
Return the domain part of the JID.
"""
return self.domain
def setDomain(self, domain):
"""
Set the domain part of the JID to new value.
"""
self.domain = domain.lower()
def getResource(self):
"""
Return the resource part of the JID.
"""
return self.resource
def setResource(self, resource):
"""
Set the resource part of the JID to new value. Specify None to remove the resource part.
"""
self.resource = resource
def getStripped(self):
"""
Return the bare representation of JID. I.e. string value w/o resource.
"""
return self.__str__(0)
def __eq__(self, other):
"""
Compare the JID to another instance or to string for equality.
"""
try:
other = JID(other)
except ValueError:
return False
return self.resource == other.resource and self.__str__(0) == other.__str__(0)
def __ne__(self, other):
"""
Compare the JID to another instance or to string for non-equality.
"""
return not self.__eq__(other)
def bareMatch(self, other):
"""
Compare the node and domain parts of the JID's for equality.
"""
return self.__str__(0) == JID(other).__str__(0)
def __str__(self, wresource=1):
"""
Serialize JID into string.
"""
jid = "@".join((self.node, self.domain)) if self.node else self.domain
if wresource and self.resource:
jid = "/".join((jid, self.resource))
return jid
def __hash__(self):
"""
Produce a hash of the JID. Allows JID objects to be used as dictionary keys.
"""
return hash(self.__str__())
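# Illustrative usage of the JID class (not part of the original module):
#
#     jid = JID("node@example.org/Work")
#     jid.getNode()        # -> 'node'
#     jid.getDomain()      # -> 'example.org'
#     jid.getStripped()    # -> 'node@example.org'
#     str(jid)             # -> 'node@example.org/Work'
#     jid == "node@example.org/Work"   # -> True (string comparison works)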
class Protocol(Node):
"""
A "stanza" object class. Contains methods that are common for presences, iqs and messages.
"""
def __init__(self, name=None, to=None, typ=None, frm=None, attrs={}, payload=[], timestamp=None, xmlns=None, node=None):
"""
Constructor, name is the name of the stanza i.e. "message" or "presence" or "iq".
to is the value of "to" attribure, "typ" - "type" attribute
frn - from attribure, attrs - other attributes mapping, payload - same meaning as for simplexml payload definition
timestamp - the time value that needs to be stamped over stanza
xmlns - namespace of top stanza node
node - parsed or unparsed stana to be taken as prototype.
"""
if not attrs:
attrs = {}
if to:
attrs["to"] = to
if frm:
attrs["from"] = frm
if typ:
attrs["type"] = typ
Node.__init__(self, tag=name, attrs=attrs, payload=payload, node=node)
if not node and xmlns:
self.setNamespace(xmlns)
if self["to"]:
self.setTo(self["to"])
if self["from"]:
self.setFrom(self["from"])
if node and isinstance(node, self.__class__) and self.__class__ == node.__class__ and "id" in self.attrs:
del self.attrs["id"]
self.timestamp = None
for x in self.getTags("x", namespace=NS_DELAY):
try:
if not self.getTimestamp() or x.getAttr("stamp") < self.getTimestamp():
self.setTimestamp(x.getAttr("stamp"))
except Exception:
pass
if timestamp is not None:
self.setTimestamp(timestamp) # To auto-timestamp stanza just pass timestamp=""
def getTo(self):
"""
Return value of the "to" attribute.
"""
try:
to = self["to"]
except Exception:
to = None
return to
def getFrom(self):
"""
Return value of the "from" attribute.
"""
try:
frm = self["from"]
except Exception:
frm = None
return frm
def getTimestamp(self):
"""
Return the timestamp in the "yyyymmddThhmmss" format.
"""
return self.timestamp
def getID(self):
"""
Return the value of the "id" attribute.
"""
return self.getAttr("id")
def setTo(self, val):
"""
Set the value of the "to" attribute.
"""
self.setAttr("to", JID(val))
def getType(self):
"""
Return the value of the "type" attribute.
"""
return self.getAttr("type")
def setFrom(self, val):
"""
Set the value of the "from" attribute.
"""
self.setAttr("from", JID(val))
def setType(self, val):
"""
Set the value of the "type" attribute.
"""
self.setAttr("type", val)
def setID(self, val):
"""
Set the value of the "id" attribute.
"""
self.setAttr("id", val)
def getError(self):
"""
Return the error-condition (if present) or the textual description of the error (otherwise).
"""
errtag = self.getTag("error")
if errtag:
for tag in errtag.getChildren():
if tag.getName() != "text":
return tag.getName()
return errtag.getData()
def getErrorCode(self):
"""
Return the error code. Obsolete.
"""
return self.getTagAttr("error", "code")
def setError(self, error, code=None):
"""
Set the error code. Obsolete. Use error-conditions instead.
"""
if code:
if str(code) in _errorcodes:
error = ErrorNode(_errorcodes[str(code)], text=error)
else:
error = ErrorNode(ERR_UNDEFINED_CONDITION, code=code, typ="cancel", text=error)
elif isinstance(error, basestring):
error = ErrorNode(error)
self.setType("error")
self.addChild(node=error)
def setTimestamp(self, val=None):
"""
Set the timestamp. timestamp should be a string in the yyyymmddThh:mm:ss format.
"""
if not val:
val = time.strftime("%Y%m%dT%H:%M:%S", time.gmtime())
self.timestamp = val
self.setTag("x", {"stamp": self.timestamp}, namespace=NS_DELAY)
def getProperties(self):
"""
Return the list of namespaces to which the direct children of the element belong.
"""
props = []
for child in self.getChildren():
prop = child.getNamespace()
if prop not in props:
props.append(prop)
return props
def __setitem__(self, item, val):
"""
Set the item "item" to the value "val".
"""
if item in ["to", "from"]:
val = JID(val)
return self.setAttr(item, val)
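# Illustrative error-handling sketch (not part of the original module):
# setError()/getError() round-trip on a stanza. The incoming "iq" object is
# an assumption; ErrorNode is defined further down in this module.
#
#     reply = Iq(typ="result", to=iq.getFrom())
#     reply.setID(iq.getID())
#     reply.setError(ERR_ITEM_NOT_FOUND)   # flips type to "error", adds child
#     reply.getError()                     # -> 'item-not-found'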
class Message(Protocol):
"""
XMPP Message stanza - "push" mechanism.
"""
def __init__(self, to=None, body=None, typ=None, subject=None, attrs={}, frm=None, payload=[], timestamp=None, xmlns=NS_CLIENT, node=None):
"""
Create message object. You can specify recipient, text of message, type of message,
any additional attributes, sender of the message, any additional payload (f.e. jabber:x:delay element) and namespace in one go.
Alternatively you can pass in another XML object as the "node" parameter to replicate it as a message.
"""
Protocol.__init__(self, "message", to=to, typ=typ, attrs=attrs, frm=frm, payload=payload, timestamp=timestamp, xmlns=xmlns, node=node)
if body:
self.setBody(body)
if subject:
self.setSubject(subject)
def getBody(self):
"""
Returns text of the message.
"""
return self.getTagData("body")
def getSubject(self):
"""
Returns subject of the message.
"""
return self.getTagData("subject")
def getThread(self):
"""
Returns thread of the message.
"""
return self.getTagData("thread")
def setBody(self, val):
"""
Sets the text of the message.
"""
self.setTagData("body", val)
def setSubject(self, val):
"""
Sets the subject of the message.
"""
self.setTagData("subject", val)
def setThread(self, val):
"""
Sets the thread of the message.
"""
self.setTagData("thread", val)
def buildReply(self, text=None):
"""
Builds and returns another message object with specified text.
The to, from and thread properties of the new message are pre-set as a reply to this message.
"""
msg = Message(to=self.getFrom(), frm=self.getTo(), body=text)
thr = self.getThread()
if thr:
msg.setThread(thr)
return msg
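# A minimal usage sketch (not part of the original module): replying to a
# received message. "incoming" and "connection" are assumed to be provided by
# the surrounding dispatcher code.
#
#     reply = incoming.buildReply('Got it, thanks')
#     reply.setType('chat')
#     connection.send(reply)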
class Presence(Protocol):
"""
XMPP Presence object.
"""
def __init__(self, to=None, typ=None, priority=None, show=None, status=None, attrs={}, frm=None, timestamp=None, payload=[], xmlns=NS_CLIENT, node=None):
"""
        Create presence object. You can specify recipient, type of presence, priority, show and status values,
        any additional attributes, sender of the presence, timestamp, any additional payload (e.g. a jabber:x:delay element) and namespace in one go.
        Alternatively you can pass in another XML object as the "node" parameter to replicate it as a presence.
"""
Protocol.__init__(self, "presence", to=to, typ=typ, attrs=attrs, frm=frm, payload=payload, timestamp=timestamp, xmlns=xmlns, node=node)
if priority:
self.setPriority(priority)
if show:
self.setShow(show)
if status:
self.setStatus(status)
def getPriority(self):
"""
Returns the priority of the message.
"""
return self.getTagData("priority")
def getShow(self):
"""
Returns the show value of the message.
"""
return self.getTagData("show")
def getStatus(self):
"""
Returns the status string of the message.
"""
return self.getTagData("status")
def setPriority(self, val):
"""
Sets the priority of the message.
"""
self.setTagData("priority", val)
def setShow(self, val):
"""
Sets the show value of the message.
"""
self.setTagData("show", val)
def setStatus(self, val):
"""
Sets the status string of the message.
"""
self.setTagData("status", val)
def _muc_getItemAttr(self, tag, attr):
for xtag in self.getTags("x", namespace=NS_MUC_USER):
for child in xtag.getTags(tag):
return child.getAttr(attr)
def _muc_getSubTagDataAttr(self, tag, attr):
for xtag in self.getTags("x", namespace=NS_MUC_USER):
for child in xtag.getTags("item"):
for cchild in child.getTags(tag):
return cchild.getData(), cchild.getAttr(attr)
return None, None
def getRole(self):
"""
Returns the presence role (for groupchat).
"""
return self._muc_getItemAttr("item", "role")
def getAffiliation(self):
"""Returns the presence affiliation (for groupchat).
"""
return self._muc_getItemAttr("item", "affiliation")
def getNick(self):
"""
Returns the nick value (for nick change in groupchat).
"""
return self._muc_getItemAttr("item", "nick")
def getJid(self):
"""
Returns the presence jid (for groupchat).
"""
return self._muc_getItemAttr("item", "jid")
def getReason(self):
"""
Returns the reason of the presence (for groupchat).
"""
return self._muc_getSubTagDataAttr("reason", "")[0]
def getActor(self):
"""
        Returns the actor of the presence (for groupchat).
"""
return self._muc_getSubTagDataAttr("actor", "jid")[1]
def getStatusCode(self):
"""
Returns the status code of the presence (for groupchat).
"""
return self._muc_getItemAttr("status", "code")
class Iq(Protocol):
"""
XMPP Iq object - get/set dialog mechanism.
"""
def __init__(self, typ=None, queryNS=None, attrs={}, to=None, frm=None, payload=[], xmlns=NS_CLIENT, node=None):
"""
        Create Iq object. You can specify type, query namespace,
        any additional attributes, recipient of the iq, sender of the iq, any additional payload (e.g. a jabber:x:data node) and namespace in one go.
        Alternatively you can pass in another XML object as the "node" parameter to replicate it as an iq.
"""
Protocol.__init__(self, "iq", to=to, typ=typ, attrs=attrs, frm=frm, xmlns=xmlns, node=node)
if payload:
self.setQueryPayload(payload)
if queryNS:
self.setQueryNS(queryNS)
def getQuery(self):
"""
Returns the query node.
"""
return self.getTag("query")
def getQueryNS(self):
"""
Returns the namespace of the "query" child element.
"""
tag = self.getTag("query")
if tag:
return tag.getNamespace()
def getQuerynode(self):
"""
Returns the "node" attribute value of the "query" child element.
"""
return self.getTagAttr("query", "node")
def getQueryPayload(self):
"""
Returns the "query" child element payload.
"""
tag = self.getTag("query")
if tag:
return tag.getPayload()
def getQueryChildren(self):
"""
Returns the "query" child element child nodes.
"""
tag = self.getTag("query")
if tag:
return tag.getChildren()
def setQuery(self, name=None):
"""
        Changes the name of the query node, creating it if needed.
        If no name is given, the existing name is kept ("query" for a newly created node).
        Returns the query node.
"""
query = self.getQuery()
if query is None:
query = self.addChild("query")
if name is not None:
query.setName(name)
return query
def setQueryNS(self, namespace):
"""
Set the namespace of the "query" child element.
"""
self.setTag("query").setNamespace(namespace)
def setQueryPayload(self, payload):
"""
Set the "query" child element payload.
"""
self.setTag("query").setPayload(payload)
def setQuerynode(self, node):
"""
Set the "node" attribute value of the "query" child element.
"""
self.setTagAttr("query", "node", node)
def buildReply(self, typ):
"""
Builds and returns another Iq object of specified type.
The to, from and query child node of new Iq are pre-set as reply to this Iq.
"""
iq = Iq(typ, to=self.getFrom(), frm=self.getTo(), attrs={"id": self.getID()})
if self.getTag("query"):
iq.setQueryNS(self.getQueryNS())
if self.getTagAttr("query", "node"):
iq.setQuerynode(self.getQuerynode())
return iq
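# A minimal usage sketch (not part of the original module): answering a
# received iq of type "get". "iq" and "connection" are assumed to be provided
# by the surrounding handler code.
#
#     reply = iq.buildReply('result')
#     reply.setQueryPayload([Node('identity', {'category': 'client', 'type': 'pc'})])
#     connection.send(reply)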
class ErrorNode(Node):
"""
XMPP-style error element.
In the case of stanza error should be attached to XMPP stanza.
In the case of stream-level errors should be used separately.
"""
def __init__(self, name, code=None, typ=None, text=None):
"""
Create new error node object.
Mandatory parameter: name - name of error condition.
        Optional parameters: code, typ, text. Used for backwards compatibility with the older jabber protocol.
"""
if name in ERRORS:
cod, type, txt = ERRORS[name]
ns = name.split()[0]
else:
cod, ns, type, txt = "500", NS_STANZAS, "cancel", ""
if typ:
type = typ
if code:
cod = code
if text:
txt = text
Node.__init__(self, "error", {}, [Node(name)])
if type:
self.setAttr("type", type)
if not cod:
self.setName("stream:error")
if txt:
self.addChild(node=Node(ns + " text", {}, [txt]))
if cod:
self.setAttr("code", cod)
class Error(Protocol):
"""
Used to quickly transform received stanza into error reply.
"""
def __init__(self, node, error, reply=1):
"""
        Create an error reply based on the received "node" stanza and the "error" error condition.
        If the "node" is not the received stanza but one created locally (so the "to" and "from" fields need no swapping),
        specify the "reply" argument as false.
"""
if reply:
Protocol.__init__(self, to=node.getFrom(), frm=node.getTo(), node=node)
else:
Protocol.__init__(self, node=node)
self.setError(error)
if node.getType() == "error":
self.__str__ = self.__dupstr__
def __dupstr__(self, dup1=None, dup2=None):
"""
        Dummy function that prevents building an error reply to an error stanza.
        I.e. you will not be able to serialize a "double" error into a string.
"""
return ""
class DataField(Node):
"""
This class is used in the DataForm class to describe the single data item.
If you are working with jabber:x:data (XEP-0004, XEP-0068, XEP-0122)
then you will need to work with instances of this class.
"""
def __init__(self, name=None, value=None, typ=None, required=0, label=None, desc=None, options=[], node=None):
"""
        Create new data field of specified name, value and type. The "required", "desc" and "options" fields can also be set.
        Alternatively another XML object can be passed in as the "node" parameter to replicate it as a new datafield.
"""
Node.__init__(self, "field", node=node)
if name:
self.setVar(name)
if isinstance(value, (list, tuple)):
self.setValues(value)
elif value:
self.setValue(value)
if typ:
self.setType(typ)
# elif not typ and not node:
# self.setType("text-single")
if required:
self.setRequired(required)
if label:
self.setLabel(label)
if desc:
self.setDesc(desc)
if options:
self.setOptions(options)
def setRequired(self, req=1):
"""
Change the state of the "required" flag.
"""
if req:
self.setTag("required")
else:
try:
self.delChild("required")
except ValueError:
return None
def isRequired(self):
"""
        Returns whether this field is a required one.
"""
return self.getTag("required")
def setLabel(self, label):
"""
Set the label of this field.
"""
self.setAttr("label", label)
def getLabel(self):
"""
Return the label of this field.
"""
return self.getAttr("label")
def setDesc(self, desc):
"""
Set the description of this field.
"""
self.setTagData("desc", desc)
def getDesc(self):
"""
Return the description of this field.
"""
return self.getTagData("desc")
def setValue(self, val):
"""
Set the value of this field.
"""
self.setTagData("value", val)
def getValue(self):
return self.getTagData("value")
def setValues(self, ls):
"""
Set the values of this field as values-list.
        Replaces all previous field values! If you need to just add a value, use the addValue method.
"""
while self.getTag("value"):
self.delChild("value")
for val in ls:
self.addValue(val)
def addValue(self, val):
"""
Add one more value to this field. Used in "get" iq's or such.
"""
self.addChild("value", {}, [val])
def getValues(self):
"""
Return the list of values associated with this field.
"""
ret = []
for tag in self.getTags("value"):
ret.append(tag.getData())
return ret
def getOptions(self):
"""
Return label-option pairs list associated with this field.
"""
ret = []
for tag in self.getTags("option"):
ret.append([tag.getAttr("label"), tag.getTagData("value")])
return ret
def setOptions(self, ls):
"""
Set label-option pairs list associated with this field.
"""
while self.getTag("option"):
self.delChild("option")
for opt in ls:
self.addOption(opt)
def addOption(self, opt):
"""
Add one more label-option pair to this field.
"""
if isinstance(opt, basestring):
self.addChild("option").setTagData("value", opt)
else:
self.addChild("option", {"label": opt[0]}).setTagData("value", opt[1])
def getType(self):
"""
Get type of this field.
"""
return self.getAttr("type")
def setType(self, val):
"""
Set type of this field.
"""
return self.setAttr("type", val)
def getVar(self):
"""
Get "var" attribute value of this field.
"""
return self.getAttr("var")
def setVar(self, val):
"""
Set "var" attribute value of this field.
"""
return self.setAttr("var", val)
class DataReported(Node):
"""
This class is used in the DataForm class to describe the "reported data field" data items which are used in
"multiple item form results" (as described in XEP-0004).
Represents the fields that will be returned from a search. This information is useful when
you try to use the jabber:iq:search namespace to return dynamic form information.
"""
def __init__(self, node=None):
"""
Create new empty "reported data" field. However, note that, according XEP-0004:
* It MUST contain one or more DataFields.
* Contained DataFields SHOULD possess a "type" and "label" attribute in addition to "var" attribute
* Contained DataFields SHOULD NOT contain a <value/> element.
        Alternatively another XML object can be passed in as the "node" parameter to replicate it as a new
dataitem.
"""
Node.__init__(self, "reported", node=node)
if node:
newkids = []
for n in self.getChildren():
if n.getName() == "field":
newkids.append(DataField(node=n))
else:
newkids.append(n)
self.kids = newkids
def getField(self, name):
"""
Return the datafield object with name "name" (if exists).
"""
return self.getTag("field", attrs={"var": name})
def setField(self, name, typ=None, label=None, desc=None, options=[]):
"""
        Create if necessary or get the existing datafield object with name "name" and return it.
If created, attributes "type" and "label" are applied to new datafield.
"""
field = self.getField(name)
if not field:
field = self.addChild(node=DataField(name, None, typ, 0, label, desc=desc, options=options))
return field
def asDict(self):
"""
Represent dataitem as simple dictionary mapping of datafield names to their values.
"""
ret = {}
for field in self.getTags("field"):
name = field.getAttr("var")
typ = field.getType()
if isinstance(typ, basestring) and typ.endswith("-multi"):
val = []
for i in field.getTags("value"):
val.append(i.getData())
else:
val = field.getTagData("value")
ret[name] = val
if self.getTag("instructions"):
ret["instructions"] = self.getInstructions()
return ret
def __getitem__(self, name):
"""
Simple dictionary interface for getting datafields values by their names.
"""
item = self.getField(name)
if item:
return item.getValue()
raise IndexError("No such field")
def __setitem__(self, name, val):
"""
Simple dictionary interface for setting datafields values by their names.
"""
return self.setField(name).setValue(val)
class DataItem(Node):
"""
This class is used in the DataForm class to describe data items which are used in "multiple
item form results" (as described in XEP-0004).
"""
def __init__(self, node=None):
"""
        Create new empty data item. However, note that, according to XEP-0004, DataItem MUST contain ALL
DataFields described in DataReported.
        Alternatively another XML object can be passed in as the "node" parameter to replicate it as a new
dataitem.
"""
Node.__init__(self, "item", node=node)
if node:
newkids = []
for n in self.getChildren():
if n.getName() == "field":
newkids.append(DataField(node=n))
else:
newkids.append(n)
self.kids = newkids
def getField(self, name):
"""
Return the datafield object with name "name" (if exists).
"""
return self.getTag("field", attrs={"var": name})
def setField(self, name, value=None, typ=None, desc=None, options=[]):
"""
        Create if necessary or get the existing datafield object with name "name" and return it.
"""
field = self.getField(name)
if not field:
field = self.addChild(node=DataField(name, value, typ, desc=desc, options=options))
return field
def asDict(self):
"""
Represent dataitem as simple dictionary mapping of datafield names to their values.
"""
ret = {}
for field in self.getTags("field"):
name = field.getAttr("var")
typ = field.getType()
if isinstance(typ, basestring) and typ.endswith("-multi"):
val = []
for i in field.getTags("value"):
val.append(i.getData())
else:
val = field.getTagData("value")
ret[name] = val
if self.getTag("instructions"):
ret["instructions"] = self.getInstructions()
return ret
def __getitem__(self, name):
"""
Simple dictionary interface for getting datafields values by their names.
"""
item = self.getField(name)
if item:
return item.getValue()
raise IndexError("No such field")
def __setitem__(self, name, val):
"""
Simple dictionary interface for setting datafields values by their names.
"""
return self.setField(name).setValue(val)
class DataForm(Node):
"""
DataForm class. Used for manipulating dataforms in XMPP.
Relevant XEPs: 0004, 0068, 0122.
Can be used in disco, pub-sub and many other applications.
"""
def __init__(self, typ=None, data=[], title=None, node=None):
"""
Create new dataform of type "typ"; "data" is the list of DataReported,
DataItem and DataField instances that this dataform contains; "title"
is the title string.
You can specify the "node" argument as the other node to be used as
base for constructing this dataform.
        Title and instructions are optional and SHOULD NOT contain newlines.
        Several instructions MAY be present.
        "typ" can be one of ("form" | "submit" | "cancel" | "result").
        The "typ" of the reply iq is ("result" | "set" | "set" | "result") respectively.
        A "cancel" form can not contain any fields. All other forms contain AT LEAST one field.
"title" MAY be included in forms of type "form" and "result".
"""
Node.__init__(self, "x", node=node)
if node:
newkids = []
for n in self.getChildren():
if n.getName() == "field":
newkids.append(DataField(node=n))
elif n.getName() == "item":
newkids.append(DataItem(node=n))
elif n.getName() == "reported":
newkids.append(DataReported(node=n))
else:
newkids.append(n)
self.kids = newkids
if typ:
self.setType(typ)
self.setNamespace(NS_DATA)
if title:
self.setTitle(title)
if isinstance(data, dict):
newdata = []
for name in data.keys():
newdata.append(DataField(name, data[name]))
data = newdata
for child in data:
if isinstance(child, basestring):
self.addInstructions(child)
elif isinstance(child, DataField):
self.kids.append(child)
elif isinstance(child, DataItem):
self.kids.append(child)
elif isinstance(child, DataReported):
self.kids.append(child)
else:
self.kids.append(DataField(node=child))
def getType(self):
"""
Return the type of dataform.
"""
return self.getAttr("type")
def setType(self, typ):
"""
Set the type of dataform.
"""
self.setAttr("type", typ)
def getTitle(self):
"""
Return the title of dataform.
"""
return self.getTagData("title")
def setTitle(self, text):
"""
Set the title of dataform.
"""
self.setTagData("title", text)
def getInstructions(self):
"""
Return the instructions of dataform.
"""
return self.getTagData("instructions")
def setInstructions(self, text):
"""
Set the instructions of dataform.
"""
self.setTagData("instructions", text)
def addInstructions(self, text):
"""
Add one more instruction to the dataform.
"""
self.addChild("instructions", {}, [text])
def getField(self, name):
"""
Return the datafield object with name "name" (if exists).
"""
return self.getTag("field", attrs={"var": name})
def setField(self, name, value=None, typ=None, desc=None, options=[]):
"""
        Create if necessary or get the existing datafield object with name "name" and return it.
"""
field = self.getField(name)
if not field:
field = self.addChild(node=DataField(name, value, typ, desc=desc, options=options))
return field
def asDict(self):
"""
Represent dataform as simple dictionary mapping of datafield names to their values.
"""
ret = {}
for field in self.getTags("field"):
name = field.getAttr("var")
typ = field.getType()
if isinstance(typ, basestring) and typ.endswith("-multi"):
val = []
for i in field.getTags("value"):
val.append(i.getData())
else:
val = field.getTagData("value")
ret[name] = val
if self.getTag("instructions"):
ret["instructions"] = self.getInstructions()
return ret
def __getitem__(self, name):
"""
Simple dictionary interface for getting datafields values by their names.
"""
item = self.getField(name)
if item:
return item.getValue()
raise IndexError("No such field")
def __setitem__(self, name, val):
"""
Simple dictionary interface for setting datafields values by their names.
"""
return self.setField(name).setValue(val)
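# A minimal usage sketch (not part of the original module): filling and
# submitting a two-field form. The field names are made up for illustration.
#
#     form = DataForm(typ='submit')
#     form['username'] = 'alice'
#     form['nickname'] = 'Alice'
#     iq = Iq('set', NS_REGISTER, payload=[form])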
| mit | 7,747,326,246,037,135,000 | 36.318851 | 499 | 0.66246 | false |
Zen-CODE/kivy | kivy/uix/bubble.py | 42 | 12590 | '''
Bubble
======
.. versionadded:: 1.1.0
.. image:: images/bubble.jpg
:align: right
The Bubble widget is a form of menu or a small popup where the menu options
are stacked either vertically or horizontally.
The :class:`Bubble` contains an arrow pointing in the direction you
choose.
Simple example
--------------
.. include:: ../../examples/widgets/bubble_test.py
:literal:
Customize the Bubble
--------------------
You can choose the direction in which the arrow points::
Bubble(arrow_pos='top_mid')
The widgets added to the Bubble are ordered horizontally by default, like a
BoxLayout. You can change that by::
orientation = 'vertical'
To add items to the bubble::
bubble = Bubble(orientation = 'vertical')
bubble.add_widget(your_widget_instance)
To remove items::
bubble.remove_widget(widget)
or
bubble.clear_widgets()
To access the list of children, use content.children::
bubble.content.children
.. warning::
This is important! Do not use bubble.children
To change the appearance of the bubble::
bubble.background_color = (1, 0, 0, .5) #50% translucent red
bubble.border = [0, 0, 0, 0]
background_image = 'path/to/background/image'
arrow_image = 'path/to/arrow/image'
'''
__all__ = ('Bubble', 'BubbleButton', 'BubbleContent')
from kivy.uix.image import Image
from kivy.uix.widget import Widget
from kivy.uix.scatter import Scatter
from kivy.uix.gridlayout import GridLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from kivy.properties import ObjectProperty, StringProperty, OptionProperty, \
ListProperty, BooleanProperty
from kivy.clock import Clock
from kivy.base import EventLoop
from kivy.metrics import dp
class BubbleButton(Button):
'''A button intended for use in a Bubble widget.
You can use a "normal" button class, but it will not look good unless
the background is changed.
    Rather, use this BubbleButton widget, which is already defined and provides
    a suitable background for you.
'''
pass
class BubbleContent(GridLayout):
pass
class Bubble(GridLayout):
'''Bubble class. See module documentation for more information.
'''
background_color = ListProperty([1, 1, 1, 1])
'''Background color, in the format (r, g, b, a).
:attr:`background_color` is a :class:`~kivy.properties.ListProperty` and
defaults to [1, 1, 1, 1].
'''
border = ListProperty([16, 16, 16, 16])
'''Border used for :class:`~kivy.graphics.vertex_instructions.BorderImage`
graphics instruction. Used with the :attr:`background_image`.
It should be used when using custom backgrounds.
It must be a list of 4 values: (top, right, bottom, left). Read the
BorderImage instructions for more information about how to use it.
:attr:`border` is a :class:`~kivy.properties.ListProperty` and defaults to
(16, 16, 16, 16)
'''
background_image = StringProperty(
'atlas://data/images/defaulttheme/bubble')
'''Background image of the bubble.
:attr:`background_image` is a :class:`~kivy.properties.StringProperty` and
defaults to 'atlas://data/images/defaulttheme/bubble'.
'''
arrow_image = StringProperty(
'atlas://data/images/defaulttheme/bubble_arrow')
''' Image of the arrow pointing to the bubble.
:attr:`arrow_image` is a :class:`~kivy.properties.StringProperty` and
defaults to 'atlas://data/images/defaulttheme/bubble_arrow'.
'''
show_arrow = BooleanProperty(True)
    ''' Indicates whether to show the arrow.
.. versionadded:: 1.8.0
:attr:`show_arrow` is a :class:`~kivy.properties.BooleanProperty` and
defaults to `True`.
'''
arrow_pos = OptionProperty('bottom_mid', options=(
'left_top', 'left_mid', 'left_bottom', 'top_left', 'top_mid',
'top_right', 'right_top', 'right_mid', 'right_bottom',
'bottom_left', 'bottom_mid', 'bottom_right'))
'''Specifies the position of the arrow relative to the bubble.
    Can be one of: left_top, left_mid, left_bottom, top_left, top_mid,
    top_right, right_top, right_mid, right_bottom, bottom_left, bottom_mid,
    bottom_right.
:attr:`arrow_pos` is a :class:`~kivy.properties.OptionProperty` and
defaults to 'bottom_mid'.
'''
content = ObjectProperty(None)
'''This is the object where the main content of the bubble is held.
:attr:`content` is a :class:`~kivy.properties.ObjectProperty` and
defaults to 'None'.
'''
orientation = OptionProperty('horizontal',
options=('horizontal', 'vertical'))
    '''This specifies the manner in which the children inside the bubble
are arranged. Can be one of 'vertical' or 'horizontal'.
:attr:`orientation` is a :class:`~kivy.properties.OptionProperty` and
defaults to 'horizontal'.
'''
limit_to = ObjectProperty(None, allownone=True)
    '''Specifies the widget to which the bubble's position is restricted.
.. versionadded:: 1.6.0
:attr:`limit_to` is a :class:`~kivy.properties.ObjectProperty` and
defaults to 'None'.
'''
def __init__(self, **kwargs):
self._prev_arrow_pos = None
self._arrow_layout = BoxLayout()
self._bk_img = Image(
source=self.background_image, allow_stretch=True,
keep_ratio=False, color=self.background_color)
self.background_texture = self._bk_img.texture
self._arrow_img = Image(source=self.arrow_image,
allow_stretch=True,
color=self.background_color)
self.content = content = BubbleContent(parent=self)
super(Bubble, self).__init__(**kwargs)
content.parent = None
self.add_widget(content)
self.on_arrow_pos()
def add_widget(self, *l):
content = self.content
if content is None:
return
if l[0] == content or l[0] == self._arrow_img\
or l[0] == self._arrow_layout:
super(Bubble, self).add_widget(*l)
else:
content.add_widget(*l)
def remove_widget(self, *l):
content = self.content
if not content:
return
if l[0] == content or l[0] == self._arrow_img\
or l[0] == self._arrow_layout:
super(Bubble, self).remove_widget(*l)
else:
content.remove_widget(l[0])
def clear_widgets(self, **kwargs):
content = self.content
if not content:
return
if kwargs.get('do_super', False):
super(Bubble, self).clear_widgets()
else:
content.clear_widgets()
def on_show_arrow(self, instance, value):
self._arrow_img.opacity = int(value)
def on_parent(self, instance, value):
Clock.schedule_once(self._update_arrow)
def on_pos(self, instance, pos):
lt = self.limit_to
if lt:
self.limit_to = None
if lt is EventLoop.window:
x = y = 0
top = lt.height
right = lt.width
else:
x, y = lt.x, lt.y
top, right = lt.top, lt.right
self.x = max(self.x, x)
self.right = min(self.right, right)
self.top = min(self.top, top)
self.y = max(self.y, y)
self.limit_to = lt
def on_background_image(self, *l):
self._bk_img.source = self.background_image
def on_background_color(self, *l):
if self.content is None:
return
self._arrow_img.color = self._bk_img.color = self.background_color
def on_orientation(self, *l):
content = self.content
if not content:
return
if self.orientation[0] == 'v':
content.cols = 1
content.rows = 99
else:
content.cols = 99
content.rows = 1
def on_arrow_image(self, *l):
self._arrow_img.source = self.arrow_image
def on_arrow_pos(self, *l):
self_content = self.content
if not self_content:
Clock.schedule_once(self.on_arrow_pos)
return
if self_content not in self.children:
Clock.schedule_once(self.on_arrow_pos)
return
self_arrow_pos = self.arrow_pos
if self._prev_arrow_pos == self_arrow_pos:
return
self._prev_arrow_pos = self_arrow_pos
self_arrow_layout = self._arrow_layout
self_arrow_layout.clear_widgets()
self_arrow_img = self._arrow_img
self._sctr = self._arrow_img
self.clear_widgets(do_super=True)
self_content.parent = None
self_arrow_img.size_hint = (1, None)
self_arrow_img.height = dp(self_arrow_img.texture_size[1])
self_arrow_img.pos = 0, 0
widget_list = []
arrow_list = []
parent = self_arrow_img.parent
if parent:
parent.remove_widget(self_arrow_img)
if self_arrow_pos[0] == 'b' or self_arrow_pos[0] == 't':
self.cols = 1
self.rows = 3
self_arrow_layout.orientation = 'horizontal'
self_arrow_img.width = self.width / 3
self_arrow_layout.size_hint = (1, None)
self_arrow_layout.height = self_arrow_img.height
if self_arrow_pos[0] == 'b':
if self_arrow_pos == 'bottom_mid':
widget_list = (self_content, self_arrow_img)
else:
if self_arrow_pos == 'bottom_left':
arrow_list = (self_arrow_img, Widget(), Widget())
elif self_arrow_pos == 'bottom_right':
#add two dummy widgets
arrow_list = (Widget(), Widget(), self_arrow_img)
widget_list = (self_content, self_arrow_layout)
else:
sctr = Scatter(do_translation=False,
rotation=180,
do_rotation=False,
do_scale=False,
size_hint=(None, None),
size=self_arrow_img.size)
sctr.add_widget(self_arrow_img)
if self_arrow_pos == 'top_mid':
#add two dummy widgets
arrow_list = (Widget(), sctr, Widget())
elif self_arrow_pos == 'top_left':
arrow_list = (sctr, Widget(), Widget())
elif self_arrow_pos == 'top_right':
arrow_list = (Widget(), Widget(), sctr)
widget_list = (self_arrow_layout, self_content)
elif self_arrow_pos[0] == 'l' or self_arrow_pos[0] == 'r':
self.cols = 3
self.rows = 1
self_arrow_img.width = self.height / 3
self_arrow_layout.orientation = 'vertical'
self_arrow_layout.cols = 1
self_arrow_layout.size_hint = (None, 1)
self_arrow_layout.width = self_arrow_img.height
rotation = -90 if self_arrow_pos[0] == 'l' else 90
self._sctr = sctr = Scatter(do_translation=False,
rotation=rotation,
do_rotation=False,
do_scale=False,
size_hint=(None, None),
size=(self_arrow_img.size))
sctr.add_widget(self_arrow_img)
if self_arrow_pos[-4:] == '_top':
arrow_list = (Widget(size_hint=(1, .07)),
sctr, Widget(size_hint=(1, .3)))
elif self_arrow_pos[-4:] == '_mid':
arrow_list = (Widget(), sctr, Widget())
Clock.schedule_once(self._update_arrow)
elif self_arrow_pos[-7:] == '_bottom':
arrow_list = (Widget(), Widget(), sctr)
if self_arrow_pos[0] == 'l':
widget_list = (self_arrow_layout, self_content)
else:
widget_list = (self_content, self_arrow_layout)
# add widgets to arrow_layout
add = self_arrow_layout.add_widget
for widg in arrow_list:
add(widg)
# add widgets to self
add = self.add_widget
for widg in widget_list:
add(widg)
def _update_arrow(self, *dt):
if self.arrow_pos in ('left_mid', 'right_mid'):
self._sctr.center_y = self._arrow_layout.center_y
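# A minimal usage sketch (not part of this module): a two-button bubble menu.
# The button texts are made up, and "root_widget" is assumed to be an existing
# parent widget.
#
#     bubble = Bubble(arrow_pos='top_mid', size_hint=(None, None),
#                     size=(160, 60))
#     bubble.add_widget(BubbleButton(text='Copy'))
#     bubble.add_widget(BubbleButton(text='Paste'))
#     root_widget.add_widget(bubble)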
| mit | -530,803,376,703,513,150 | 32.753351 | 79 | 0.570056 | false |
virtualopensystems/neutron | neutron/tests/unit/test_config.py | 2 | 2478 | # Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import mock
from oslo.config import cfg
from neutron.common import config # noqa
from neutron.tests import base
class ConfigurationTest(base.BaseTestCase):
def setup_config(self):
# don't use default config
pass
def test_defaults(self):
self.assertEqual('0.0.0.0', cfg.CONF.bind_host)
self.assertEqual(9696, cfg.CONF.bind_port)
self.assertEqual('api-paste.ini', cfg.CONF.api_paste_config)
self.assertEqual('', cfg.CONF.api_extensions_path)
self.assertEqual('policy.json', cfg.CONF.policy_file)
self.assertEqual('keystone', cfg.CONF.auth_strategy)
self.assertIsNone(cfg.CONF.core_plugin)
self.assertEqual(0, len(cfg.CONF.service_plugins))
self.assertEqual('fa:16:3e:00:00:00', cfg.CONF.base_mac)
self.assertEqual(16, cfg.CONF.mac_generation_retries)
self.assertTrue(cfg.CONF.allow_bulk)
self.assertEqual(5, cfg.CONF.max_dns_nameservers)
self.assertEqual(20, cfg.CONF.max_subnet_host_routes)
relative_dir = os.path.join(os.path.dirname(__file__),
'..', '..', '..')
absolute_dir = os.path.abspath(relative_dir)
self.assertEqual(absolute_dir, cfg.CONF.state_path)
self.assertEqual(86400, cfg.CONF.dhcp_lease_duration)
self.assertFalse(cfg.CONF.allow_overlapping_ips)
self.assertEqual('neutron', cfg.CONF.control_exchange)
def test_load_paste_app_not_found(self):
self.config(api_paste_config='no_such_file.conf')
with mock.patch.object(cfg.CONF, 'find_file', return_value=None) as ff:
e = self.assertRaises(cfg.ConfigFilesNotFoundError,
config.load_paste_app, 'app')
ff.assert_called_once_with('no_such_file.conf')
self.assertEqual(['no_such_file.conf'], e.config_files)
| apache-2.0 | -3,926,090,285,083,533,000 | 41 | 79 | 0.671913 | false |
janelia-idf/hybridizer | tests/adc_to_volume.py | 4 | 5112 | # -*- coding: utf-8 -*-
from __future__ import print_function, division
import matplotlib.pyplot as plot
import numpy
from numpy.polynomial.polynomial import polyfit,polyadd,Polynomial
import yaml
INCHES_PER_ML = 0.078
VOLTS_PER_ADC_UNIT = 0.0049
def load_numpy_data(path):
with open(path,'r') as fid:
header = fid.readline().rstrip().split(',')
dt = numpy.dtype({'names':header,'formats':['S25']*len(header)})
numpy_data = numpy.loadtxt(path,dtype=dt,delimiter=",",skiprows=1)
return numpy_data
# -----------------------------------------------------------------------------------------
if __name__ == '__main__':
# Load VA data
data_file = 'hall_effect_data_va.csv'
hall_effect_data_va = load_numpy_data(data_file)
distances_va = numpy.float64(hall_effect_data_va['distance'])
A1_VA = numpy.float64(hall_effect_data_va['A1'])
A9_VA = numpy.float64(hall_effect_data_va['A9'])
A4_VA = numpy.float64(hall_effect_data_va['A4'])
A12_VA = numpy.float64(hall_effect_data_va['A12'])
A2_VA = numpy.float64(hall_effect_data_va['A2'])
A10_VA = numpy.float64(hall_effect_data_va['A10'])
A5_VA = numpy.float64(hall_effect_data_va['A5'])
A13_VA = numpy.float64(hall_effect_data_va['A13'])
# Massage VA data
volumes_va = distances_va/INCHES_PER_ML
A1_VA = numpy.reshape(A1_VA,(-1,1))
A9_VA = numpy.reshape(A9_VA,(-1,1))
A4_VA = numpy.reshape(A4_VA,(-1,1))
A12_VA = numpy.reshape(A12_VA,(-1,1))
A2_VA = numpy.reshape(A2_VA,(-1,1))
A10_VA = numpy.reshape(A10_VA,(-1,1))
A5_VA = numpy.reshape(A5_VA,(-1,1))
A13_VA = numpy.reshape(A13_VA,(-1,1))
data_va = numpy.hstack((A1_VA,A9_VA,A4_VA,A12_VA,A2_VA,A10_VA,A5_VA,A13_VA))
data_va = data_va/VOLTS_PER_ADC_UNIT
# Load OA data
data_file = 'hall_effect_data_oa.csv'
hall_effect_data_oa = load_numpy_data(data_file)
distances_oa = numpy.float64(hall_effect_data_oa['distance'])
A9_OA = numpy.float64(hall_effect_data_oa['A9'])
A10_OA = numpy.float64(hall_effect_data_oa['A10'])
A11_OA = numpy.float64(hall_effect_data_oa['A11'])
A12_OA = numpy.float64(hall_effect_data_oa['A12'])
# Massage OA data
volumes_oa = distances_oa/INCHES_PER_ML
A9_OA = numpy.reshape(A9_OA,(-1,1))
A10_OA = numpy.reshape(A10_OA,(-1,1))
A11_OA = numpy.reshape(A11_OA,(-1,1))
A12_OA = numpy.reshape(A12_OA,(-1,1))
data_oa = numpy.hstack((A9_OA,A10_OA,A11_OA,A12_OA))
data_oa = data_oa/VOLTS_PER_ADC_UNIT
# Create figure
fig = plot.figure()
fig.suptitle('hall effect sensors',fontsize=14,fontweight='bold')
fig.subplots_adjust(top=0.85)
colors = ['b','g','r','c','m','y','k','b']
markers = ['o','o','o','o','o','o','o','^']
# Axis 1
ax1 = fig.add_subplot(121)
for column_index in range(0,data_va.shape[1]):
color = colors[column_index]
marker = markers[column_index]
ax1.plot(data_va[:,column_index],volumes_va,marker=marker,linestyle='--',color=color)
# for column_index in range(0,data_oa.shape[1]):
# color = colors[column_index]
# marker = markers[column_index]
# ax1.plot(data_oa[:,column_index],volumes_oa,marker=marker,linestyle='--',color=color)
ax1.set_xlabel('mean signals (ADC units)')
ax1.set_ylabel('volume (ml)')
ax1.grid(True)
# Axis 2
for column_index in range(0,data_va.shape[1]):
data_va[:,column_index] -= data_va[:,column_index].min()
MAX_VA = 120
data_va = data_va[numpy.all(data_va<MAX_VA,axis=1)]
length = data_va.shape[0]
volumes_va = volumes_va[-length:]
# for column_index in range(0,data_oa.shape[1]):
# data_oa[:,column_index] -= data_oa[:,column_index].max()
ax2 = fig.add_subplot(122)
for column_index in range(0,data_va.shape[1]):
color = colors[column_index]
marker = markers[column_index]
ax2.plot(data_va[:,column_index],volumes_va,marker=marker,linestyle='--',color=color)
# for column_index in range(0,data_oa.shape[1]):
# color = colors[column_index]
# marker = markers[column_index]
# ax2.plot(data_oa[:,column_index],volumes_oa,marker=marker,linestyle='--',color=color)
ax2.set_xlabel('offset mean signals (ADC units)')
ax2.set_ylabel('volume (ml)')
ax2.grid(True)
order = 3
sum_va = None
for column_index in range(0,data_va.shape[1]):
coefficients_va = polyfit(data_va[:,column_index],volumes_va,order)
if sum_va is None:
sum_va = coefficients_va
else:
sum_va = polyadd(sum_va,coefficients_va)
average_va = sum_va/data_va.shape[1]
    with open('adc_to_volume_va.yaml', 'w') as f:
        # tolist() is needed here because PyYAML cannot represent numpy arrays
        yaml.dump(average_va.tolist(), f, default_flow_style=False)
round_digits = 8
average_va = [round(i,round_digits) for i in average_va]
poly = Polynomial(average_va)
ys_va = poly(data_va[:,-1])
ax2.plot(data_va[:,-1],ys_va,'r',linewidth=3)
ax2.text(5,7.5,r'$v = c_0 + c_1s + c_2s^2 + c_3s^3$',fontsize=20)
ax2.text(5,6.5,str(average_va),fontsize=18,color='r')
plot.show()
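    # A usage sketch (not part of this script): converting a raw ADC reading
    # back to a volume with the calibration coefficients saved above.
    #
    #     coefficients = yaml.load(open('adc_to_volume_va.yaml'))
    #     volume_ml = Polynomial(coefficients)(adc_reading)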
| bsd-3-clause | 3,132,821,074,629,454,000 | 35.776978 | 95 | 0.609155 | false |
BootstrapHeroes/django-shopify | django_shopify/shopify_app/services/plan_config_service.py | 1 | 2510 | from base import BaseService
from shopify_app.models import PlanConfig
from django.conf import settings
from shopify_app.config import DEFAULTS
from datetime import datetime
from shopify_api import APIWrapper
class PlanConfigService(BaseService):
entity = PlanConfig
def _get_charge_common_data(self, shop, plan_config):
"""
Returns the common data for the charge API call
"""
data = {
"name": plan_config.name if plan_config.name else "Default",
"price": plan_config.billing_amount if plan_config.billing_amount else 10.0,
"return_url": "http:%s/shop/billing/?shop=%s&plan_config=%s" % (getattr(settings, "HOST", DEFAULTS["HOST"]), shop.id, plan_config.id),
}
if getattr(settings, "TEST", True):
data["test"] = True
return data
def _create_charge(self, shop_model, api_entity, data):
return APIWrapper(shop_model, log=True).create(api_entity, data)
def one_time_charge(self, shop, plan_config):
"""
        Generates a one-time charge for this app
"""
data = self._get_charge_common_data(shop, plan_config)
return self._create_charge(shop, "application_charge", data)
def recurring_charge(self, shop, plan_config):
"""
Generates a recurring charge for this app
"""
data = self._get_charge_common_data(shop, plan_config)
default_trial_days = plan_config.trial_period_days if plan_config.trial_period_days else 15
        #trial days start counting from the first install
current_trial_days = (datetime.utcnow().replace(tzinfo=None) - shop.created_at.replace(tzinfo=None)).days
if not current_trial_days >= default_trial_days:
data["trial_days"] = default_trial_days - current_trial_days
return self._create_charge(shop, "recurring_application_charge", data)
def confirm_data(self, shop, plan_config):
"""
        Makes the request to generate either a one-time charge or a recurring charge
        and returns the response results.
        If there are errors in the request response, it raises an exception.
"""
if plan_config.billing_type == "O":
response = self.one_time_charge(shop, plan_config)
else:
response = self.recurring_charge(shop, plan_config)
if "errors" in response:
raise Exception(str(response["errors"]))
return response
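# A usage sketch (not part of this module): "shop" and "plan_config" are
# assumed to be model instances loaded by the calling view; the response
# layout follows the Shopify charge API.
#
#     response = PlanConfigService().confirm_data(shop, plan_config)
#     charge = (response.get('recurring_application_charge') or
#               response.get('application_charge'))
#     confirmation_url = charge['confirmation_url']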
| gpl-3.0 | -6,890,294,562,392,808,000 | 33.861111 | 146 | 0.635458 | false |
darjus-amzn/boto | boto/mturk/__init__.py | 782 | 1108 | # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
| mit | 8,103,445,817,469,408,000 | 47.173913 | 74 | 0.768051 | false |
ChanduERP/odoo | addons/l10n_tr/__openerp__.py | 259 | 2056 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Turkey - Accounting',
'version': '1.beta',
'category': 'Localization/Account Charts',
'description': """
Uniform Chart of Accounts template module for Turkey (OpenERP).
===============================================================
After this module is installed, the Accounting configuration wizard runs.
    * The wizard will ask you for the chart of accounts template, the company the chart
      will be installed for, your bank account details, the related currency and similar information.
""",
'author': 'Ahmet Altınışık',
'maintainer':'https://launchpad.net/~openerp-turkey',
'website':'https://launchpad.net/openerp-turkey',
'depends': [
'account',
'base_vat',
'account_chart',
],
'data': [
'account_code_template.xml',
'account_tdhp_turkey.xml',
'account_tax_code_template.xml',
'account_chart_template.xml',
'account_tax_template.xml',
'l10n_tr_wizard.xml',
],
'demo': [],
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -5,001,856,044,827,126,000 | 38.057692 | 80 | 0.597735 | false |
tdtrask/ansible | test/units/modules/network/nxos/test_nxos_command.py | 57 | 4175 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.compat.tests.mock import patch
from ansible.modules.network.nxos import nxos_command
from .nxos_module import TestNxosModule, load_fixture, set_module_args
class TestNxosCommandModule(TestNxosModule):
module = nxos_command
def setUp(self):
super(TestNxosCommandModule, self).setUp()
self.mock_run_commands = patch('ansible.modules.network.nxos.nxos_command.run_commands')
self.run_commands = self.mock_run_commands.start()
def tearDown(self):
super(TestNxosCommandModule, self).tearDown()
self.mock_run_commands.stop()
def load_fixtures(self, commands=None, device=''):
def load_from_file(*args, **kwargs):
module, commands = args
output = list()
for item in commands:
try:
obj = json.loads(item['command'])
command = obj['command']
except ValueError:
command = item['command']
filename = '%s.txt' % str(command).replace(' ', '_')
output.append(load_fixture('nxos_command', filename))
return output
self.run_commands.side_effect = load_from_file
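        # e.g. the command "show version" resolves to the fixture file
        # nxos_command/show_version.txt via load_fixture() above.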
def test_nxos_command_simple(self):
set_module_args(dict(commands=['show version']))
result = self.execute_module()
self.assertEqual(len(result['stdout']), 1)
self.assertTrue(result['stdout'][0].startswith('Cisco'))
def test_nxos_command_multiple(self):
set_module_args(dict(commands=['show version', 'show version']))
result = self.execute_module()
self.assertEqual(len(result['stdout']), 2)
self.assertTrue(result['stdout'][0].startswith('Cisco'))
def test_nxos_command_wait_for(self):
wait_for = 'result[0] contains "NX-OS"'
set_module_args(dict(commands=['show version'], wait_for=wait_for))
self.execute_module()
def test_nxos_command_wait_for_fails(self):
wait_for = 'result[0] contains "test string"'
set_module_args(dict(commands=['show version'], wait_for=wait_for))
self.execute_module(failed=True)
self.assertEqual(self.run_commands.call_count, 10)
def test_nxos_command_retries(self):
wait_for = 'result[0] contains "test string"'
set_module_args(dict(commands=['show version'], wait_for=wait_for, retries=2))
self.execute_module(failed=True)
self.assertEqual(self.run_commands.call_count, 2)
def test_nxos_command_match_any(self):
wait_for = ['result[0] contains "Cisco"',
'result[0] contains "test string"']
set_module_args(dict(commands=['show version'], wait_for=wait_for, match='any'))
self.execute_module()
def test_nxos_command_match_all(self):
wait_for = ['result[0] contains "Cisco"',
'result[0] contains "image file"']
set_module_args(dict(commands=['show version'], wait_for=wait_for, match='all'))
self.execute_module()
def test_nxos_command_match_all_failure(self):
wait_for = ['result[0] contains "Cisco"',
'result[0] contains "test string"']
commands = ['show version', 'show version']
set_module_args(dict(commands=commands, wait_for=wait_for, match='all'))
self.execute_module(failed=True)
| gpl-3.0 | -4,251,694,360,580,350,500 | 38.386792 | 96 | 0.642635 | false |
nickweinberg/werewolf-slackbot | rtmbot.py | 1 | 6723 | #!/usr/bin/env python
import sys
sys.dont_write_bytecode = True
import glob
import yaml
import json
import os
import sys
import time
import logging
from argparse import ArgumentParser
from slackclient import SlackClient
def dbg(debug_string):
if debug:
logging.info(debug_string)
USER_DICT = {}
class RtmBot(object):
def __init__(self, token):
self.last_ping = 0
self.token = token
self.bot_plugins = []
self.slack_client = None
self.channel = None # only want bot in one channel
def connect(self):
"""Convenience method that creates Server instance"""
self.slack_client = SlackClient(self.token)
self.slack_client.rtm_connect()
def start(self):
self.connect()
self.load_plugins()
while True:
for reply in self.slack_client.rtm_read():
self.input(reply)
self.crons()
self.output()
self.autoping()
time.sleep(.1)
def get_users_in_channel(self):
print(self.channel)
channel_info = self.slack_client.api_call("channels.info", channel=self.channel)
info = json.loads(channel_info)
members = info['channel']['members']
print(members)
self.create_user_dict(members)
def autoping(self):
#hardcode the interval to 3 seconds
now = int(time.time())
if now > self.last_ping + 3:
self.slack_client.server.ping()
self.last_ping = now
def input(self, data):
if "type" in data:
function_name = "process_" + data["type"]
dbg("got {}".format(function_name))
for plugin in self.bot_plugins:
plugin.register_jobs()
plugin.do(function_name, data)
def output(self):
for plugin in self.bot_plugins:
limiter = False
for output in plugin.do_output():
channel = self.slack_client.server.channels.find(output[0])
if channel != None and output[1] != None:
if limiter == True:
time.sleep(.1)
limiter = False
message = output[1].encode('ascii','ignore')
channel.send_message("{}".format(message))
limiter = True
def crons(self):
for plugin in self.bot_plugins:
plugin.do_jobs()
def load_plugins(self):
for plugin in glob.glob(directory+'/plugins/*'):
sys.path.insert(0, plugin)
sys.path.insert(0, directory+'/plugins/')
for plugin in glob.glob(directory+'/plugins/*.py') + glob.glob(directory+'/plugins/*/*.py'):
logging.info(plugin)
name = plugin.split('/')[-1][:-3]
# try:
self.bot_plugins.append(Plugin(name))
# except:
# print "error loading plugin %s" % name
class Plugin(object):
def __init__(self, name, plugin_config={}):
self.name = name
self.jobs = []
self.module = __import__(name)
self.register_jobs()
self.outputs = []
if name in config:
logging.info("config found for: " + name)
self.module.config = config[name]
if 'setup' in dir(self.module):
self.module.setup()
def register_jobs(self):
if 'crontable' in dir(self.module):
for interval, function in self.module.crontable:
self.jobs.append(Job(interval, eval("self.module."+function)))
logging.info(self.module.crontable)
self.module.crontable = []
else:
self.module.crontable = []
def do(self, function_name, data):
if function_name in dir(self.module):
#this makes the plugin fail with stack trace in debug mode
if not debug:
try:
eval("self.module."+function_name)(data)
except:
dbg("problem in module {} {}".format(function_name, data))
else:
eval("self.module."+function_name)(data)
if "catch_all" in dir(self.module):
try:
self.module.catch_all(data)
except:
dbg("problem in catch all")
def do_jobs(self):
for job in self.jobs:
job.check()
def do_output(self):
output = []
while True:
if 'outputs' in dir(self.module):
if len(self.module.outputs) > 0:
logging.info("output from {}".format(self.module))
output.append(self.module.outputs.pop(0))
else:
break
else:
self.module.outputs = []
return output
class Job(object):
def __init__(self, interval, function):
self.function = function
self.interval = interval
self.lastrun = 0
def __str__(self):
return "{} {} {}".format(self.function, self.interval, self.lastrun)
def __repr__(self):
return self.__str__()
def check(self):
if self.lastrun + self.interval < time.time():
if not debug:
try:
self.function()
except:
dbg("problem")
else:
self.function()
self.lastrun = time.time()
pass
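# A plugin-side sketch (not part of this file) of the crontable/outputs
# contract consumed above: a module in plugins/ exports "crontable" entries of
# the form [interval_seconds, function_name] and appends [channel, message]
# pairs to "outputs". All names below are made up.
#
#     crontable = [[10, 'heartbeat']]   # run heartbeat() roughly every 10 s
#     outputs = []
#
#     def heartbeat():
#         outputs.append(['C0XXXXXXX', 'still alive'])  # channel ID is made up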
class UnknownChannel(Exception):
pass
def main_loop():
if "LOGFILE" in config:
logging.basicConfig(filename=config["LOGFILE"], level=logging.INFO, format='%(asctime)s %(message)s')
logging.info(directory)
try:
bot.start()
except KeyboardInterrupt:
sys.exit(0)
except:
logging.exception('OOPS')
def parse_args():
parser = ArgumentParser()
parser.add_argument(
'-c',
'--config',
help='Full path to config file.',
metavar='path'
)
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
directory = os.path.dirname(sys.argv[0])
if not directory.startswith('/'):
directory = os.path.abspath("{}/{}".format(os.getcwd(),
directory
))
config = yaml.load(file(args.config or 'rtmbot.conf', 'r'))
debug = config["DEBUG"]
bot = RtmBot(config["SLACK_TOKEN"])
bot.channel = config["CHANNEL"]
site_plugins = []
files_currently_downloading = []
job_hash = {}
if config.has_key("DAEMON"):
if config["DAEMON"]:
import daemon
with daemon.DaemonContext():
main_loop()
main_loop()
| mit | 3,092,971,304,012,391,400 | 30.269767 | 109 | 0.532352 | false |
nitramkaroh/OOFEM | tools/unv2oofem/unv2oofem.py | 1 | 12763 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from unv2x import *
from abaqus2x import *
from oofemctrlreader import *
import time
from numpy.core.defchararray import splitlines
if __name__=='__main__':
helpmsg="""
Usage: unv2oofem.py unvfile ctrlfile oofemfile
What it does: read unvfile, create an internal FEM object structure
in memory and writes the oofem native input file
The ctrlfile specifies additional properties required by oofem
See http://www.oofem.org/wiki/doku.php?id=unv2oofem:unv2oofem for more info.
    The format of the ctrl file is the following (lines beginning with '#' are comments):
Output file record
Job description record
Analysis record
Domain record
Output manager record
ncrosssect # nmat # nbc # nic # nltf # nset # nxfemman #
cross section records
material records
boundary condition records
initial condition records
load time function records
extractor records
set records
    Assignment of properties to nodes and elements is based on association with some unv group. The same mechanism
    is used for the assignment of boundary condition (edge, surface) loads. The syntax is the following:
group name1 [name2] [name3] ...
nodeprop "nodal_attributes_appended_to_nodal_records" [set INT]
elemprop "element_attributes_appended_to_element_records" [set INT]
etype[unv_etype] oofem_etype #provides mapping between unv and oofem element types
    By default, all nodes will be exported;
    elements are exported only when associated with some group
    that has a valid element mapping.
Enjoy.
"""
print """
UNV2OOFEM: Converts UNV file from Salome to OOFEM native file format
(C) 2009 Borek Patzak
"""
t1 = time.time()
if len(sys.argv)==4:
unvfile=sys.argv[1]
ctrlfile=sys.argv[2]
oofemfile=sys.argv[3]
of=open(oofemfile,'w')
# read file in FEM object structure
fileExtension = unvfile.split('.')
if (fileExtension[-1].lower()=='unv'): # Salome output file
Parser=UNVParser(unvfile)
elif (fileExtension[-1].lower()=='inp'): # Abaqus output file
Parser=AbaqusParser(unvfile)
else:
print "Unknown extension of input file %s" % fileExtension[-1].lower()
exit(0)
print 'Parsing mesh file %s' % sys.argv[1],
FEM=Parser.parse()
print "done"
print "Detected node groups:",
for i in FEM.nodesets:
print i.name.strip(),
print
print "Detected element groups:",
for i in FEM.elemsets:
print i.name.strip(),
print
# read oofem ctrl file
CTRL=CTRLParser(ctrlfile, Parser.mapping())
print 'Parsing ctrl file %s' % sys.argv[2]
CTRL.parse(FEM)
print "done"
# write files in native oofem format
print 'Writing oofem file %s' % sys.argv[3]
# write oofem header
of.write(CTRL.header)
#store elements in meshElements list. Reason: need to assign boundaryLoad to elements, which may be read after elements
meshElements = []
#create auxiliary array of element numbers to be searched for boundaryLoads
elemNotBoundary = []
# List for sets containing boundaries
boundarySets=[];
for elem in FEM.elems:#loop through all unv elements
#resolve element properties
properties=""
for igroup in elem.oofem_groups:
#print igroup.name
properties+=igroup.oofem_properties
#Do output if oofem_elemtype resolved and not BoundaryLoads
if ( elem.oofem_elemtype):
if(CTRL.oofem_elemProp[elem.oofem_elemtype].name != 'RepresentsBoundaryLoad'):
#Check if unv element and OOFEM element have the same amount of nodes
if (elem.nnodes != len(CTRL.oofem_elemProp[elem.oofem_elemtype].nodeMask)):
print "\nUnv element #%d has %d nodes, which should be mapped on OOFEM element \"%s\" with %d nodes" % \
(elem.id, elem.nnodes,CTRL.oofem_elemProp[elem.oofem_elemtype].name, len(CTRL.oofem_elemProp[elem.oofem_elemtype].nodeMask))
exit(0)
elemNotBoundary.append(elem)
dat = elem.oofem_outputData
dat.append(CTRL.oofem_elemProp[elem.oofem_elemtype].name)
dat.append("%-5d" % elem.id)
dat.append("nodes")
dat.append("%-3d" % elem.nnodes)
for n in range(elem.nnodes):
mask = CTRL.oofem_elemProp[elem.oofem_elemtype].nodeMask[n]
try:
dat.append("%-3d" % elem.cntvt[mask])
except:
print "Exception in mapping nodes in unv element number %d, nodes %s" % (elem.id, elem.cntvt)
exit(0)
#dat.extend(["%-3d" % x for x in elem.cntvt])
dat.append(properties)
meshElements.append([])
#Assign BoundaryLoads to elements (corresponds to edge and face loads).
#We need to loop over all elements and to check whether they have assigned loads. This is quite time consuming but robust algorithm.
for belem in FEM.elems:#loop over all elements from unv file
#resolve element properties
#for igroup in elem.oofem_groups:#unv element with boundary load is assigned to some ctrl element group
#print belem.id, belem.oofem_elemtype, CTRL.oofem_elemProp[belem.oofem_elemtype].name
if CTRL.oofem_elemProp[belem.oofem_elemtype].name == 'RepresentsBoundaryLoad':#found element, which represents boundary load
nodesOnBoundary = belem.cntvt
nodesOnBoundary.sort()
for elem in elemNotBoundary: #loop over, e.g. triangular elements, in order to find which element belem is a boundary to
cnt=0
for n in range(len(nodesOnBoundary)):
if(elem.cntvt.count(int(nodesOnBoundary[n]))):
cnt = cnt+1
                if (cnt==len(nodesOnBoundary)):#found an eligible element to which to assign the b.c. Now find which edge/face it is.
success = 0
if(belem.type==11 or belem.type==22):#elements representing EDGE loads
mask = CTRL.oofem_elemProp[elem.oofem_elemtype].edgeMask
else:#face loads
mask = CTRL.oofem_elemProp[elem.oofem_elemtype].faceMask
for i in range(len(mask)):
nodesInMask = []#list of nodes which are extracted according to mask
for x in mask[i]:
nodesInMask.append(elem.cntvt[x])
#We need to compare both arrays nodesInMask and nodesOnBoundary. If they contain the same node numbers, we found edge/face.
nodesInMask.sort()
if(nodesInMask==nodesOnBoundary):#both lists are sorted so they can be compared
success = 1
#since boundary element may be in more unv groups, we need to find corresponding ctrl group
for bel in belem.oofem_groups:
#print "%d '%s' '%s'" % (len(belem.oofem_groups), bel.name.rstrip(), bel.oofem_groupNameForLoads)
if (bel.name.rstrip() == bel.oofem_groupNameForLoads):
#continue
#build a new int list, which reflects load numbers and edges/faces
if (len(bel.oofem_boundaryLoadsNum) > 0):
loadNum = bel.oofem_boundaryLoadsNum
newList=[-1]*(2*len(loadNum))
for j in range(len(loadNum)):
newList[2*j] = loadNum[j]
newList[2*j+1] = i+1
#print newList
elem.oofem_bLoads+=newList
print "Boundary load \"%s\" found for element %d " % (bel.name.rstrip('\n'), elem.id)
#print bel.name, elem.id, elem.oofem_bLoads
if (bel.oofem_sets):
print "Set \"%s\" found for boundary of element %d " % (bel.name.rstrip('\n'), elem.id)
setNum = bel.oofem_sets;
# setID, element id, element side
for thisSet in setNum:
boundarySets.append([thisSet, elem.id, i+1])
if(success==0):
print "Can not assign edge/face load \"%s\" to unv element %d" % (bel.name, elem.id)
#write component record
of.write('ndofman %d nelem %d ncrosssect %d nmat %d nbc %d nic %d nltf %d nset %d nxfemman %d\n' % (FEM.nnodes, len(elemNotBoundary), CTRL.ncrosssect, CTRL.nmat, CTRL.nbc, CTRL.nic, CTRL.nltf, CTRL.nset, CTRL.nxfemman))
#write nodes
for node in FEM.nodes:
#resolve nodal properties
outputLine="node %-5d coords %-2d" % (node.id, len(node.coords))
for coord in node.coords:
outputLine+= "% -8g " % coord
properties=""
for igroup in node.oofem_groups:
if(len(properties)>0 and properties[-1]!=" "):#insert white space if necessary
properties+=" "
properties+=igroup.oofem_properties
outputLine+=properties
# write nodal record
of.write(('%s\n') % (outputLine))
for elem in elemNotBoundary:
            line = ' '.join(elem.oofem_outputData)
            #Add the list of boundaryLoads if it exists
            if(elem.oofem_bLoads):
                line += " BoundaryLoads %d " % len(elem.oofem_bLoads)
                line += ' '.join(["%d" % el for el in elem.oofem_bLoads])
            of.write('%s\n' % line)
# write final sections
sl=CTRL.footer.splitlines()
for s in sl:
words=s.split()
            if len(words)==0:#skip empty lines
                continue
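            #CTRL footer "set" records are parsed as:
            #  set <id> nodes|elements|elementboundaries <...>
            #any other footer line is copied to the output verbatim.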
if (words[0].lower()=='set'):
setID=int(words[1])
if (words[2].lower()=='nodes'):
                    nodelist=[]
for nodeset in FEM.nodesets:
for oofemset in nodeset.oofem_sets:
if (setID==oofemset):
nodelist.extend(nodeset.items)
setElements=list(set(nodelist))
elif (words[2].lower()=='elements'):
ellist=[]
for elemset in FEM.elemsets:
#print elemset.id
if setID == elemset.id:
ellist.extend(elemset.items)
for oofemset in elemset.oofem_sets:
if (setID==oofemset):
ellist.extend(elemset.items)
setElements=list(set(ellist))
elif (words[2].lower()=='elementboundaries'):
setElements=[]
for thisSet in boundarySets:
if (thisSet[0]==int(words[1])):
setElements.extend([thisSet[1], thisSet[2]])
of.write('%s %s %s %u ' % ( words[0], words[1], words[2], len(setElements)) )
for setElement in setElements:
of.write('%u ' % setElement)
of.write('\n')
else:
of.write('%s\n' % s)
of.close()
#
t2 = time.time()
#
print "done ( %d nodes %d elements)" % (FEM.nnodes, len(elemNotBoundary))
print "Finished in %0.2f [s]" % ((t2-t1))
else:
print(helpmsg)
| lgpl-2.1 | 871,635,011,210,214,900 | 46.095941 | 227 | 0.517433 | false |
lashwang/pyspider | pyspider/database/sqlalchemy/taskdb.py | 4 | 6205 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<[email protected]>
# http://binux.me
# Created on 2014-12-04 22:33:43
import re
import six
import time
import json
import sqlalchemy.exc
from sqlalchemy import (create_engine, MetaData, Table, Column, Index,
Integer, String, Float, LargeBinary, func)
from sqlalchemy.engine.url import make_url
from pyspider.libs import utils
from pyspider.database.base.taskdb import TaskDB as BaseTaskDB
from .sqlalchemybase import SplitTableMixin, result2dict
class TaskDB(SplitTableMixin, BaseTaskDB):
__tablename__ = ''
def __init__(self, url):
self.table = Table('__tablename__', MetaData(),
Column('taskid', String(64), primary_key=True, nullable=False),
Column('project', String(64)),
Column('url', String(1024)),
Column('status', Integer),
Column('schedule', LargeBinary),
Column('fetch', LargeBinary),
Column('process', LargeBinary),
Column('track', LargeBinary),
Column('lastcrawltime', Float(32)),
Column('updatetime', Float(32)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
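        # A single Table object is reused for every project: its .name is
        # reassigned (via self._tablename(project)) right before each query,
        # since tasks are stored in one table per project.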
self.url = make_url(url)
if self.url.database:
database = self.url.database
self.url.database = None
try:
engine = create_engine(self.url, convert_unicode=True, pool_recycle=3600)
conn = engine.connect()
conn.execute("commit")
conn.execute("CREATE DATABASE %s" % database)
except sqlalchemy.exc.SQLAlchemyError:
pass
self.url.database = database
self.engine = create_engine(url, convert_unicode=True, pool_recycle=3600)
self._list_project()
def _create_project(self, project):
assert re.match(r'^\w+$', project) is not None
if project in self.projects:
return
self.table.name = self._tablename(project)
Index('status_%s_index' % self.table.name, self.table.c.status)
self.table.create(self.engine, checkfirst=True)
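        # Clear the Index from the shared Table object so that creating the
        # next project's table does not re-use a stale per-project index.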
self.table.indexes.clear()
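    # 'schedule', 'fetch', 'process' and 'track' are stored as JSON blobs;
    # _parse()/_stringify() below convert them between dicts and raw bytes.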
@staticmethod
def _parse(data):
for key, value in list(six.iteritems(data)):
if isinstance(value, six.binary_type):
data[key] = utils.text(value)
for each in ('schedule', 'fetch', 'process', 'track'):
if each in data:
if data[each]:
if isinstance(data[each], bytearray):
data[each] = str(data[each])
data[each] = json.loads(data[each])
else:
data[each] = {}
return data
@staticmethod
def _stringify(data):
for each in ('schedule', 'fetch', 'process', 'track'):
if each in data:
data[each] = utils.utf8(json.dumps(data[each]))
return data
def load_tasks(self, status, project=None, fields=None):
if project and project not in self.projects:
return
if project:
projects = [project, ]
else:
projects = self.projects
columns = [getattr(self.table.c, f, f) for f in fields] if fields else self.table.c
for project in projects:
self.table.name = self._tablename(project)
for task in self.engine.execute(self.table.select()
.with_only_columns(columns)
.where(self.table.c.status == status)):
yield self._parse(result2dict(columns, task))
def get_task(self, project, taskid, fields=None):
if project not in self.projects:
self._list_project()
if project not in self.projects:
return None
self.table.name = self._tablename(project)
columns = [getattr(self.table.c, f, f) for f in fields] if fields else self.table.c
for each in self.engine.execute(self.table.select()
.with_only_columns(columns)
.limit(1)
.where(self.table.c.taskid == taskid)):
return self._parse(result2dict(columns, each))
def status_count(self, project):
result = dict()
if project not in self.projects:
self._list_project()
if project not in self.projects:
return result
self.table.name = self._tablename(project)
for status, count in self.engine.execute(
self.table.select()
.with_only_columns((self.table.c.status, func.count(1)))
.group_by(self.table.c.status)):
result[status] = count
return result
def insert(self, project, taskid, obj={}):
if project not in self.projects:
self._list_project()
if project not in self.projects:
self._create_project(project)
self._list_project()
obj = dict(obj)
obj['taskid'] = taskid
obj['project'] = project
obj['updatetime'] = time.time()
self.table.name = self._tablename(project)
return self.engine.execute(self.table.insert()
.values(**self._stringify(obj)))
def update(self, project, taskid, obj={}, **kwargs):
if project not in self.projects:
self._list_project()
if project not in self.projects:
            raise LookupError('unknown project: %s' % project)
self.table.name = self._tablename(project)
obj = dict(obj)
obj.update(kwargs)
obj['updatetime'] = time.time()
return self.engine.execute(self.table.update()
.where(self.table.c.taskid == taskid)
.values(**self._stringify(obj)))
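
# Minimal usage sketch (assumptions: a reachable MySQL server and pyspider's
# status constants, where e.g. 2 means SUCCESS; the URL and names below are
# hypothetical):
#
#   taskdb = TaskDB('mysql+mysqlconnector://root@localhost/taskdb')
#   taskdb.insert('demo_project', 'task_1', {'url': 'http://example.com/'})
#   taskdb.update('demo_project', 'task_1', status=2)
#   print(taskdb.status_count('demo_project'))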
| apache-2.0 | -1,496,998,859,426,300,700 | 38.025157 | 91 | 0.534085 | false |