repo_name (string, 5-100 chars) | path (string, 4-299 chars) | copies (string, 990 classes) | size (string, 4-7 chars) | content (string, 666-1.03M chars) | license (string, 15 classes) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
aliyun/aliyun-oss-python-sdk | tests/test_bucket_worm.py | 1 | 2199 |
from .common import *
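# The tests below exercise the bucket WORM (write-once-read-many) lifecycle:
# init_bucket_worm creates a policy in the 'InProgress' state,
# complete_bucket_worm locks it, extend_bucket_worm lengthens the retention
# period, and abort_bucket_worm only succeeds while the policy is not locked.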
class TestBucketWorm(OssTestCase):
    def test_bucket_worm_normal(self):
init_result = self.bucket.init_bucket_worm(1)
worm_id = init_result.worm_id
self.assertIsNotNone(init_result.request_id)
get_result = self.bucket.get_bucket_worm()
self.assertIsNotNone(get_result.request_id)
self.assertEqual(worm_id, get_result.worm_id)
self.assertEqual('InProgress', get_result.state)
self.assertEqual(1, get_result.retention_period_days)
self.assertIsNotNone(get_result.creation_date)
        complete_result = self.bucket.complete_bucket_worm(worm_id)
        self.assertIsNotNone(complete_result.request_id)
get_result = self.bucket.get_bucket_worm()
self.assertEqual(worm_id, get_result.worm_id)
self.assertEqual('Locked', get_result.state)
self.assertEqual(1, get_result.retention_period_days)
self.assertIsNotNone(get_result.creation_date)
extend_result = self.bucket.extend_bucket_worm(worm_id, 2)
self.assertIsNotNone(extend_result.request_id)
get_result = self.bucket.get_bucket_worm()
self.assertEqual(worm_id, get_result.worm_id)
self.assertEqual('Locked', get_result.state)
self.assertEqual(2, get_result.retention_period_days)
self.assertIsNotNone(get_result.creation_date)
def test_abort_bucket_worm(self):
self.bucket.init_bucket_worm(1)
abort_result = self.bucket.abort_bucket_worm()
self.assertIsNotNone(abort_result.request_id)
init_result = self.bucket.init_bucket_worm(1)
worm_id = init_result.worm_id
self.bucket.complete_bucket_worm(worm_id)
self.assertRaises(oss2.exceptions.WORMConfigurationLocked, self.bucket.abort_bucket_worm)
def test_bucket_worm_illegal(self):
self.assertRaises(oss2.exceptions.NoSuchWORMConfiguration, self.bucket.get_bucket_worm)
init_result = self.bucket.init_bucket_worm(1)
worm_id = init_result.worm_id
self.assertRaises(oss2.exceptions.InvalidWORMConfiguration, self.bucket.extend_bucket_worm, worm_id, 2)
if __name__ == '__main__':
unittest.main()
| mit | -6,984,264,657,638,178,000 | 39.722222 | 111 | 0.686221 | false |
9seconds/streams | streams/executors/executors.py | 1 | 1711 |
# -*- coding: utf-8 -*-
"""
This module has implementations of executors wrapped by
:py:class:`streams.executors.mixins.PoolOfPoolsMixin` and applicable to work
with :py:class:`streams.poolofpools.PoolOfPools`.
Basically all of them are thin extensions of classes from
:py:mod:`concurrent.futures`.
"""
###############################################################################
from concurrent.futures import Executor, Future, \
ThreadPoolExecutor as BaseThreadPoolExecutor, \
ProcessPoolExecutor as BaseProcessPoolExecutor
from .mixins import PoolOfPoolsMixin
###############################################################################
class SequentalExecutor(PoolOfPoolsMixin, Executor):
"""
    Debug executor. No concurrency: each submitted callable is executed
    synchronously and its result (or exception) is stored in the returned
    Future.
"""
# noinspection PyUnusedLocal
def __init__(self, *args, **kwargs):
super(SequentalExecutor, self).__init__()
self._max_workers = 1
def submit(self, fn, *args, **kwargs):
future = Future()
try:
result = fn(*args, **kwargs)
except Exception as exc:
future.set_exception(exc)
else:
future.set_result(result)
return future
class ThreadPoolExecutor(PoolOfPoolsMixin, BaseThreadPoolExecutor):
"""
Implementation of :py:class:`concurrent.futures.ThreadPoolExecutor`
applicable to work with :py:class:`streams.poolofpools.PoolOfPools`.
"""
pass
class ProcessPoolExecutor(PoolOfPoolsMixin, BaseProcessPoolExecutor):
"""
Implementation of :py:class:`concurrent.futures.ProcessPoolExecutor`
applicable to work with :py:class:`streams.poolofpools.PoolOfPools`.
"""
pass
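# Minimal usage sketch; it assumes the package is importable (e.g. run as
# `python -m streams.executors.executors`) and that PoolOfPoolsMixin leaves
# the Executor.submit() contract unchanged.
if __name__ == '__main__':
    executor = SequentalExecutor()
    future = executor.submit(sum, [1, 2, 3])
    print(future.result())  # prints 6; the call already ran synchronously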
| mit | -2,665,244,102,855,407,600 | 28 | 79 | 0.630041 | false |
docmeth02/CouchPotatoServer | libs/rtorrent/lib/xmlrpc/basic_auth.py | 83 | 3291 |
#
# Copyright (c) 2013 Dean Gardiner, <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from base64 import b64encode
import httplib
import xmlrpclib
class BasicAuthTransport(xmlrpclib.Transport):
def __init__(self, secure=False, username=None, password=None):
xmlrpclib.Transport.__init__(self)
self.secure = secure
self.username = username
self.password = password
def send_auth(self, h):
if not self.username or not self.password:
return
auth = b64encode("%s:%s" % (self.username, self.password))
h.putheader('Authorization', "Basic %s" % auth)
def make_connection(self, host):
if self._connection and host == self._connection[0]:
return self._connection[1]
chost, self._extra_headers, x509 = self.get_host_info(host)
if self.secure:
try:
self._connection = host, httplib.HTTPSConnection(chost, None, **(x509 or {}))
except AttributeError:
raise NotImplementedError(
"your version of httplib doesn't support HTTPS"
)
else:
self._connection = host, httplib.HTTPConnection(chost)
return self._connection[1]
def single_request(self, host, handler, request_body, verbose=0):
# issue XML-RPC request
h = self.make_connection(host)
if verbose:
h.set_debuglevel(1)
try:
self.send_request(h, handler, request_body)
self.send_host(h, host)
self.send_user_agent(h)
self.send_auth(h)
self.send_content(h, request_body)
response = h.getresponse(buffering=True)
if response.status == 200:
self.verbose = verbose
return self.parse_response(response)
except xmlrpclib.Fault:
raise
except Exception:
self.close()
raise
#discard any response data and raise exception
if response.getheader("content-length", 0):
response.read()
raise xmlrpclib.ProtocolError(
host + handler,
response.status, response.reason,
response.msg,
)
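# Minimal usage sketch; the endpoint URL and credentials below are
# placeholders (not taken from the original file), and no request is sent
# until a method is actually called on the proxy.
if __name__ == '__main__':
    transport = BasicAuthTransport(secure=False, username='user', password='secret')
    proxy = xmlrpclib.ServerProxy('http://localhost:8000/RPC2', transport=transport)
    # e.g. proxy.system.listMethods() would issue an authenticated XML-RPC call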
| gpl-3.0 | -2,555,419,410,897,884,000 | 33.642105 | 93 | 0.64175 | false |
mhugent/Quantum-GIS | python/plugins/processing/algs/qgis/Polygonize.py | 2 | 4932 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
Polygonize.py
---------------------
Date : March 2013
Copyright : (C) 2013 by Piotr Pociask
Email : ppociask at o2 dot pl
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Piotr Pociask'
__date__ = 'March 2013'
__copyright__ = '(C) 2013, Piotr Pociask'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4.QtCore import QVariant
from qgis.core import *
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.GeoAlgorithmExecutionException import \
GeoAlgorithmExecutionException
from processing.parameters.ParameterVector import ParameterVector
from processing.parameters.ParameterBoolean import ParameterBoolean
from processing.outputs.OutputVector import OutputVector
from processing.tools import dataobjects, vector
class Polygonize(GeoAlgorithm):
INPUT = 'INPUT'
OUTPUT = 'OUTPUT'
FIELDS = 'FIELDS'
GEOMETRY = 'GEOMETRY'
def processAlgorithm(self, progress):
try:
from shapely.ops import polygonize
from shapely.geometry import Point, MultiLineString
except ImportError:
raise GeoAlgorithmExecutionException(
'Polygonize algorithm requires shapely module!')
vlayer = dataobjects.getObjectFromUri(
self.getParameterValue(self.INPUT))
output = self.getOutputFromName(self.OUTPUT)
vprovider = vlayer.dataProvider()
if self.getParameterValue(self.FIELDS):
fields = vprovider.fields()
else:
fields = QgsFields()
if self.getParameterValue(self.GEOMETRY):
fieldsCount = fields.count()
fields.append(QgsField('area', QVariant.Double, 'double', 16, 2))
fields.append(QgsField('perimeter', QVariant.Double,
'double', 16, 2))
allLinesList = []
features = vector.features(vlayer)
current = 0
progress.setInfo('Processing lines...')
total = 40.0 / float(len(features))
for inFeat in features:
inGeom = inFeat.geometry()
if inGeom.isMultipart():
allLinesList.extend(inGeom.asMultiPolyline())
else:
allLinesList.append(inGeom.asPolyline())
current += 1
progress.setPercentage(int(current * total))
progress.setPercentage(40)
allLines = MultiLineString(allLinesList)
progress.setInfo('Noding lines...')
try:
from shapely.ops import unary_union
allLines = unary_union(allLines)
except ImportError:
allLines = allLines.union(Point(0, 0))
progress.setPercentage(45)
progress.setInfo('Polygonizing...')
polygons = list(polygonize([allLines]))
if not polygons:
raise GeoAlgorithmExecutionException('No polygons were created!')
progress.setPercentage(50)
progress.setInfo('Saving polygons...')
writer = output.getVectorWriter(fields, QGis.WKBPolygon, vlayer.crs())
outFeat = QgsFeature()
current = 0
total = 50.0 / float(len(polygons))
for polygon in polygons:
outFeat.setGeometry(QgsGeometry.fromWkt(polygon.wkt))
if self.getParameterValue(self.GEOMETRY):
outFeat.setAttributes([None] * fieldsCount + [polygon.area,
polygon.length])
writer.addFeature(outFeat)
current += 1
progress.setPercentage(50 + int(current * total))
progress.setInfo('Finished')
del writer
def defineCharacteristics(self):
self.name = 'Polygonize'
self.group = 'Vector geometry tools'
self.addParameter(ParameterVector(self.INPUT, 'Input layer',
[ParameterVector.VECTOR_TYPE_LINE]))
self.addParameter(ParameterBoolean(self.FIELDS,
'Keep table structure of line layer', False))
self.addParameter(ParameterBoolean(self.GEOMETRY,
'Create geometry columns', True))
self.addOutput(OutputVector(self.OUTPUT, 'Output layer'))
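# Stand-alone illustration of the shapely polygonize step used in
# processAlgorithm above; a sketch that assumes only that shapely is
# installed (the geometry values are made up for the example):
#
#   from shapely.ops import polygonize
#   from shapely.geometry import MultiLineString
#   edges = MultiLineString([[(0, 0), (1, 0)], [(1, 0), (1, 1)],
#                            [(1, 1), (0, 1)], [(0, 1), (0, 0)]])
#   list(polygonize([edges]))  # -> a single unit-square Polygon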
| gpl-2.0 | 1,521,028,042,596,485,000 | 40.79661 | 78 | 0.569343 | false |
ramanajee/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/system/profiler_unittest.py | 124 | 5111 |
# Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.common.system.platforminfo_mock import MockPlatformInfo
from webkitpy.common.system.systemhost_mock import MockSystemHost
from .profiler import ProfilerFactory, GooglePProf
class ProfilerFactoryTest(unittest.TestCase):
def _assert_default_profiler_name(self, os_name, expected_profiler_name):
profiler_name = ProfilerFactory.default_profiler_name(MockPlatformInfo(os_name))
self.assertEqual(profiler_name, expected_profiler_name)
def test_default_profilers(self):
self._assert_default_profiler_name('mac', 'iprofiler')
self._assert_default_profiler_name('linux', 'perf')
self._assert_default_profiler_name('win32', None)
self._assert_default_profiler_name('freebsd', None)
def test_default_profiler_output(self):
host = MockSystemHost()
self.assertFalse(host.filesystem.exists("/tmp/output"))
# Default mocks are Mac, so iprofile should be default.
profiler = ProfilerFactory.create_profiler(host, '/bin/executable', '/tmp/output')
self.assertTrue(host.filesystem.exists("/tmp/output"))
self.assertEqual(profiler._output_path, "/tmp/output/test.dtps")
# Linux defaults to perf.
host.platform.os_name = 'linux'
profiler = ProfilerFactory.create_profiler(host, '/bin/executable', '/tmp/output')
self.assertEqual(profiler._output_path, "/tmp/output/test.data")
class GooglePProfTest(unittest.TestCase):
def test_pprof_output_regexp(self):
pprof_output = """
sometimes
there
is
junk before the total line
Total: 3770 samples
76 2.0% 2.0% 104 2.8% lookup (inline)
60 1.6% 3.6% 60 1.6% FL_SetPrevious (inline)
56 1.5% 5.1% 56 1.5% MaskPtr (inline)
51 1.4% 6.4% 222 5.9% WebCore::HTMLTokenizer::nextToken
42 1.1% 7.6% 47 1.2% WTF::Vector::shrinkCapacity
35 0.9% 8.5% 35 0.9% WTF::RefPtr::get (inline)
33 0.9% 9.4% 43 1.1% append (inline)
29 0.8% 10.1% 67 1.8% WTF::StringImpl::deref (inline)
29 0.8% 10.9% 100 2.7% add (inline)
28 0.7% 11.6% 28 0.7% WebCore::QualifiedName::localName (inline)
25 0.7% 12.3% 27 0.7% WebCore::Private::addChildNodesToDeletionQueue
24 0.6% 12.9% 24 0.6% __memcpy_ssse3_back
23 0.6% 13.6% 23 0.6% intHash (inline)
23 0.6% 14.2% 76 2.0% tcmalloc::FL_Next
23 0.6% 14.8% 95 2.5% tcmalloc::FL_Push
22 0.6% 15.4% 22 0.6% WebCore::MarkupTokenizerBase::InputStreamPreprocessor::peek (inline)
"""
expected_first_ten_lines = """ 76 2.0% 2.0% 104 2.8% lookup (inline)
60 1.6% 3.6% 60 1.6% FL_SetPrevious (inline)
56 1.5% 5.1% 56 1.5% MaskPtr (inline)
51 1.4% 6.4% 222 5.9% WebCore::HTMLTokenizer::nextToken
42 1.1% 7.6% 47 1.2% WTF::Vector::shrinkCapacity
35 0.9% 8.5% 35 0.9% WTF::RefPtr::get (inline)
33 0.9% 9.4% 43 1.1% append (inline)
29 0.8% 10.1% 67 1.8% WTF::StringImpl::deref (inline)
29 0.8% 10.9% 100 2.7% add (inline)
28 0.7% 11.6% 28 0.7% WebCore::QualifiedName::localName (inline)
"""
host = MockSystemHost()
profiler = GooglePProf(host, '/bin/executable', '/tmp/output')
self.assertEqual(profiler._first_ten_lines_of_profile(pprof_output), expected_first_ten_lines)
| bsd-3-clause | -6,338,322,550,823,810,000 | 48.621359 | 107 | 0.654275 | false |
wiltonlazary/arangodb | 3rdParty/V8/V8-5.0.71.39/tools/swarming_client/swarming.py | 4 | 49821 |
#!/usr/bin/env python
# Copyright 2013 The Swarming Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0 that
# can be found in the LICENSE file.
"""Client tool to trigger tasks or retrieve results from a Swarming server."""
__version__ = '0.8.4'
import collections
import datetime
import json
import logging
import optparse
import os
import subprocess
import sys
import tempfile
import threading
import time
import urllib
from third_party import colorama
from third_party.depot_tools import fix_encoding
from third_party.depot_tools import subcommand
from utils import file_path
from utils import fs
from utils import logging_utils
from third_party.chromium import natsort
from utils import net
from utils import on_error
from utils import threading_utils
from utils import tools
import auth
import isolated_format
import isolateserver
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
class Failure(Exception):
"""Generic failure."""
pass
### Isolated file handling.
def isolated_to_hash(arg, algo):
"""Archives a .isolated file if needed.
Returns the file hash to trigger and a bool specifying if it was a file (True)
or a hash (False).
"""
if arg.endswith('.isolated'):
arg = unicode(os.path.abspath(arg))
file_hash = isolated_format.hash_file(arg, algo)
if not file_hash:
on_error.report('Archival failure %s' % arg)
return None, True
return file_hash, True
elif isolated_format.is_valid_hash(arg, algo):
return arg, False
else:
on_error.report('Invalid hash %s' % arg)
return None, False
def isolated_handle_options(options, args):
"""Handles '--isolated <isolated>', '<isolated>' and '-- <args...>' arguments.
Returns:
tuple(command, inputs_ref).
"""
isolated_cmd_args = []
is_file = False
if not options.isolated:
if '--' in args:
index = args.index('--')
isolated_cmd_args = args[index+1:]
args = args[:index]
else:
# optparse eats '--' sometimes.
isolated_cmd_args = args[1:]
args = args[:1]
if len(args) != 1:
raise ValueError(
'Use --isolated, --raw-cmd or \'--\' to pass arguments to the called '
'process.')
# Old code. To be removed eventually.
options.isolated, is_file = isolated_to_hash(
args[0], isolated_format.get_hash_algo(options.namespace))
if not options.isolated:
raise ValueError('Invalid argument %s' % args[0])
elif args:
if '--' in args:
index = args.index('--')
isolated_cmd_args = args[index+1:]
if index != 0:
raise ValueError('Unexpected arguments.')
else:
# optparse eats '--' sometimes.
isolated_cmd_args = args
  # If a file name was passed, use its base name instead of the isolated hash.
# Otherwise, use user name as an approximation of a task name.
if not options.task_name:
if is_file:
key = os.path.splitext(os.path.basename(args[0]))[0]
else:
key = options.user
options.task_name = u'%s/%s/%s' % (
key,
'_'.join(
'%s=%s' % (k, v)
for k, v in sorted(options.dimensions.iteritems())),
options.isolated)
inputs_ref = FilesRef(
isolated=options.isolated,
isolatedserver=options.isolate_server,
namespace=options.namespace)
return isolated_cmd_args, inputs_ref
### Triggering.
# See ../appengine/swarming/swarming_rpcs.py.
FilesRef = collections.namedtuple(
'FilesRef',
[
'isolated',
'isolatedserver',
'namespace',
])
# See ../appengine/swarming/swarming_rpcs.py.
TaskProperties = collections.namedtuple(
'TaskProperties',
[
'command',
'dimensions',
'env',
'execution_timeout_secs',
'extra_args',
'grace_period_secs',
'idempotent',
'inputs_ref',
'io_timeout_secs',
])
# See ../appengine/swarming/swarming_rpcs.py.
NewTaskRequest = collections.namedtuple(
'NewTaskRequest',
[
'expiration_secs',
'name',
'parent_task_id',
'priority',
'properties',
'tags',
'user',
])
def namedtuple_to_dict(value):
"""Recursively converts a namedtuple to a dict."""
out = dict(value._asdict())
for k, v in out.iteritems():
if hasattr(v, '_asdict'):
out[k] = namedtuple_to_dict(v)
return out
def task_request_to_raw_request(task_request):
"""Returns the json dict expected by the Swarming server for new request.
This is for the v1 client Swarming API.
"""
out = namedtuple_to_dict(task_request)
# Maps are not supported until protobuf v3.
out['properties']['dimensions'] = [
{'key': k, 'value': v}
for k, v in out['properties']['dimensions'].iteritems()
]
out['properties']['dimensions'].sort(key=lambda x: x['key'])
out['properties']['env'] = [
{'key': k, 'value': v}
for k, v in out['properties']['env'].iteritems()
]
out['properties']['env'].sort(key=lambda x: x['key'])
return out
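# For illustration, a dimensions dict such as {'os': 'Linux', 'pool': 'default'}
# is serialized above as a sorted list of key/value pairs:
# [{'key': 'os', 'value': 'Linux'}, {'key': 'pool', 'value': 'default'}].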
def swarming_trigger(swarming, raw_request):
"""Triggers a request on the Swarming server and returns the json data.
It's the low-level function.
Returns:
{
'request': {
'created_ts': u'2010-01-02 03:04:05',
'name': ..
},
'task_id': '12300',
}
"""
logging.info('Triggering: %s', raw_request['name'])
result = net.url_read_json(
swarming + '/_ah/api/swarming/v1/tasks/new', data=raw_request)
if not result:
on_error.report('Failed to trigger task %s' % raw_request['name'])
return None
if result.get('error'):
# The reply is an error.
msg = 'Failed to trigger task %s' % raw_request['name']
if result['error'].get('errors'):
for err in result['error']['errors']:
if err.get('message'):
msg += '\nMessage: %s' % err['message']
if err.get('debugInfo'):
msg += '\nDebug info:\n%s' % err['debugInfo']
elif result['error'].get('message'):
msg += '\nMessage: %s' % result['error']['message']
on_error.report(msg)
return None
return result
def setup_googletest(env, shards, index):
"""Sets googletest specific environment variables."""
if shards > 1:
assert not any(i['key'] == 'GTEST_SHARD_INDEX' for i in env), env
assert not any(i['key'] == 'GTEST_TOTAL_SHARDS' for i in env), env
env = env[:]
env.append({'key': 'GTEST_SHARD_INDEX', 'value': str(index)})
env.append({'key': 'GTEST_TOTAL_SHARDS', 'value': str(shards)})
return env
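# For example, with shards=3 and index=1 the entries appended above are
# {'key': 'GTEST_SHARD_INDEX', 'value': '1'} and
# {'key': 'GTEST_TOTAL_SHARDS', 'value': '3'}.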
def trigger_task_shards(swarming, task_request, shards):
"""Triggers one or many subtasks of a sharded task.
Returns:
Dict with task details, returned to caller as part of --dump-json output.
None in case of failure.
"""
def convert(index):
req = task_request_to_raw_request(task_request)
if shards > 1:
req['properties']['env'] = setup_googletest(
req['properties']['env'], shards, index)
req['name'] += ':%s:%s' % (index, shards)
return req
requests = [convert(index) for index in xrange(shards)]
tasks = {}
priority_warning = False
for index, request in enumerate(requests):
task = swarming_trigger(swarming, request)
if not task:
break
logging.info('Request result: %s', task)
if (not priority_warning and
task['request']['priority'] != task_request.priority):
priority_warning = True
print >> sys.stderr, (
'Priority was reset to %s' % task['request']['priority'])
tasks[request['name']] = {
'shard_index': index,
'task_id': task['task_id'],
'view_url': '%s/user/task/%s' % (swarming, task['task_id']),
}
# Some shards weren't triggered. Abort everything.
if len(tasks) != len(requests):
if tasks:
print >> sys.stderr, 'Only %d shard(s) out of %d were triggered' % (
len(tasks), len(requests))
for task_dict in tasks.itervalues():
abort_task(swarming, task_dict['task_id'])
return None
return tasks
### Collection.
# How often to print status updates to stdout in 'collect'.
STATUS_UPDATE_INTERVAL = 15 * 60.
class State(object):
"""States in which a task can be.
WARNING: Copy-pasted from appengine/swarming/server/task_result.py. These
values are part of the API so if they change, the API changed.
It's in fact an enum. Values should be in decreasing order of importance.
"""
RUNNING = 0x10
PENDING = 0x20
EXPIRED = 0x30
TIMED_OUT = 0x40
BOT_DIED = 0x50
CANCELED = 0x60
COMPLETED = 0x70
STATES = (
'RUNNING', 'PENDING', 'EXPIRED', 'TIMED_OUT', 'BOT_DIED', 'CANCELED',
'COMPLETED')
STATES_RUNNING = ('RUNNING', 'PENDING')
STATES_NOT_RUNNING = (
'EXPIRED', 'TIMED_OUT', 'BOT_DIED', 'CANCELED', 'COMPLETED')
STATES_DONE = ('TIMED_OUT', 'COMPLETED')
STATES_ABANDONED = ('EXPIRED', 'BOT_DIED', 'CANCELED')
_NAMES = {
RUNNING: 'Running',
PENDING: 'Pending',
EXPIRED: 'Expired',
TIMED_OUT: 'Execution timed out',
BOT_DIED: 'Bot died',
CANCELED: 'User canceled',
COMPLETED: 'Completed',
}
_ENUMS = {
'RUNNING': RUNNING,
'PENDING': PENDING,
'EXPIRED': EXPIRED,
'TIMED_OUT': TIMED_OUT,
'BOT_DIED': BOT_DIED,
'CANCELED': CANCELED,
'COMPLETED': COMPLETED,
}
@classmethod
def to_string(cls, state):
"""Returns a user-readable string representing a State."""
if state not in cls._NAMES:
raise ValueError('Invalid state %s' % state)
return cls._NAMES[state]
@classmethod
def from_enum(cls, state):
"""Returns int value based on the string."""
if state not in cls._ENUMS:
raise ValueError('Invalid state %s' % state)
return cls._ENUMS[state]
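# For example, State.to_string(State.COMPLETED) == 'Completed' and
# State.from_enum('COMPLETED') == State.COMPLETED.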
class TaskOutputCollector(object):
"""Assembles task execution summary (for --task-summary-json output).
Optionally fetches task outputs from isolate server to local disk (used when
--task-output-dir is passed).
  This object is shared among multiple threads running the 'retrieve_results'
  function; in particular, they call the 'process_shard_result' method in
  parallel.
"""
def __init__(self, task_output_dir, shard_count):
"""Initializes TaskOutputCollector, ensures |task_output_dir| exists.
Args:
task_output_dir: (optional) local directory to put fetched files to.
shard_count: expected number of task shards.
"""
self.task_output_dir = (
unicode(os.path.abspath(task_output_dir))
if task_output_dir else task_output_dir)
self.shard_count = shard_count
self._lock = threading.Lock()
self._per_shard_results = {}
self._storage = None
if self.task_output_dir and not fs.isdir(self.task_output_dir):
fs.makedirs(self.task_output_dir)
def process_shard_result(self, shard_index, result):
"""Stores results of a single task shard, fetches output files if necessary.
Modifies |result| in place.
shard_index is 0-based.
Called concurrently from multiple threads.
"""
# Sanity check index is in expected range.
assert isinstance(shard_index, int)
if shard_index < 0 or shard_index >= self.shard_count:
logging.warning(
'Shard index %d is outside of expected range: [0; %d]',
shard_index, self.shard_count - 1)
return
if result.get('outputs_ref'):
ref = result['outputs_ref']
result['outputs_ref']['view_url'] = '%s/browse?%s' % (
ref['isolatedserver'],
urllib.urlencode(
[('namespace', ref['namespace']), ('hash', ref['isolated'])]))
# Store result dict of that shard, ignore results we've already seen.
with self._lock:
if shard_index in self._per_shard_results:
logging.warning('Ignoring duplicate shard index %d', shard_index)
return
self._per_shard_results[shard_index] = result
# Fetch output files if necessary.
if self.task_output_dir and result.get('outputs_ref'):
storage = self._get_storage(
result['outputs_ref']['isolatedserver'],
result['outputs_ref']['namespace'])
if storage:
# Output files are supposed to be small and they are not reused across
# tasks. So use MemoryCache for them instead of on-disk cache. Make
# files writable, so that calling script can delete them.
isolateserver.fetch_isolated(
result['outputs_ref']['isolated'],
storage,
isolateserver.MemoryCache(file_mode_mask=0700),
os.path.join(self.task_output_dir, str(shard_index)),
False)
def finalize(self):
"""Assembles and returns task summary JSON, shutdowns underlying Storage."""
with self._lock:
# Write an array of shard results with None for missing shards.
summary = {
'shards': [
self._per_shard_results.get(i) for i in xrange(self.shard_count)
],
}
# Write summary.json to task_output_dir as well.
if self.task_output_dir:
tools.write_json(
os.path.join(self.task_output_dir, u'summary.json'),
summary,
False)
if self._storage:
self._storage.close()
self._storage = None
return summary
def _get_storage(self, isolate_server, namespace):
"""Returns isolateserver.Storage to use to fetch files."""
assert self.task_output_dir
with self._lock:
if not self._storage:
self._storage = isolateserver.get_storage(isolate_server, namespace)
else:
# Shards must all use exact same isolate server and namespace.
if self._storage.location != isolate_server:
logging.error(
'Task shards are using multiple isolate servers: %s and %s',
self._storage.location, isolate_server)
return None
if self._storage.namespace != namespace:
logging.error(
'Task shards are using multiple namespaces: %s and %s',
self._storage.namespace, namespace)
return None
return self._storage
def now():
"""Exists so it can be mocked easily."""
return time.time()
def parse_time(value):
"""Converts serialized time from the API to datetime.datetime."""
# When microseconds are 0, the '.123456' suffix is elided. This means the
# serialized format is not consistent, which confuses the hell out of python.
for fmt in ('%Y-%m-%dT%H:%M:%S.%f', '%Y-%m-%dT%H:%M:%S'):
try:
return datetime.datetime.strptime(value, fmt)
except ValueError:
pass
raise ValueError('Failed to parse %s' % value)
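# Both serialized forms are accepted, e.g. '2010-01-02T03:04:05.123456' and
# '2010-01-02T03:04:05' (the latter appears when microseconds are 0).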
def retrieve_results(
base_url, shard_index, task_id, timeout, should_stop, output_collector):
"""Retrieves results for a single task ID.
Returns:
<result dict> on success.
None on failure.
"""
assert isinstance(timeout, float), timeout
result_url = '%s/_ah/api/swarming/v1/task/%s/result' % (base_url, task_id)
output_url = '%s/_ah/api/swarming/v1/task/%s/stdout' % (base_url, task_id)
started = now()
deadline = started + timeout if timeout else None
attempt = 0
while not should_stop.is_set():
attempt += 1
# Waiting for too long -> give up.
current_time = now()
if deadline and current_time >= deadline:
logging.error('retrieve_results(%s) timed out on attempt %d',
base_url, attempt)
return None
# Do not spin too fast. Spin faster at the beginning though.
# Start with 1 sec delay and for each 30 sec of waiting add another second
# of delay, until hitting 15 sec ceiling.
if attempt > 1:
max_delay = min(15, 1 + (current_time - started) / 30.0)
delay = min(max_delay, deadline - current_time) if deadline else max_delay
if delay > 0:
logging.debug('Waiting %.1f sec before retrying', delay)
should_stop.wait(delay)
if should_stop.is_set():
return None
# Disable internal retries in net.url_read_json, since we are doing retries
# ourselves.
# TODO(maruel): We'd need to know if it's a 404 and not retry at all.
# TODO(maruel): Sadly, we currently have to poll here. Use hanging HTTP
# request on GAE v2.
result = net.url_read_json(result_url, retry_50x=False)
if not result:
continue
if result.get('error'):
# An error occurred.
if result['error'].get('errors'):
for err in result['error']['errors']:
logging.warning(
'Error while reading task: %s; %s',
err.get('message'), err.get('debugInfo'))
elif result['error'].get('message'):
logging.warning(
'Error while reading task: %s', result['error']['message'])
continue
if result['state'] in State.STATES_NOT_RUNNING:
# TODO(maruel): Not always fetch stdout?
out = net.url_read_json(output_url)
result['output'] = out.get('output') if out else out
# Record the result, try to fetch attached output files (if any).
if output_collector:
# TODO(vadimsh): Respect |should_stop| and |deadline| when fetching.
output_collector.process_shard_result(shard_index, result)
if result.get('internal_failure'):
logging.error('Internal error!')
elif result['state'] == 'BOT_DIED':
logging.error('Bot died!')
return result
def convert_to_old_format(result):
"""Converts the task result data from Endpoints API format to old API format
for compatibility.
This goes into the file generated as --task-summary-json.
"""
# Sets default.
result.setdefault('abandoned_ts', None)
result.setdefault('bot_id', None)
result.setdefault('bot_version', None)
result.setdefault('children_task_ids', [])
result.setdefault('completed_ts', None)
result.setdefault('cost_saved_usd', None)
result.setdefault('costs_usd', None)
result.setdefault('deduped_from', None)
result.setdefault('name', None)
result.setdefault('outputs_ref', None)
result.setdefault('properties_hash', None)
result.setdefault('server_versions', None)
result.setdefault('started_ts', None)
result.setdefault('tags', None)
result.setdefault('user', None)
  # Conversion back to old API.
duration = result.pop('duration', None)
result['durations'] = [duration] if duration else []
exit_code = result.pop('exit_code', None)
result['exit_codes'] = [int(exit_code)] if exit_code else []
result['id'] = result.pop('task_id')
result['isolated_out'] = result.get('outputs_ref', None)
output = result.pop('output', None)
result['outputs'] = [output] if output else []
# properties_hash
# server_version
  # Endpoints returns 'state' as a string. For compatibility with old code,
  # convert it to an int.
result['state'] = State.from_enum(result['state'])
result['try_number'] = (
int(result['try_number']) if result.get('try_number') else None)
if 'bot_dimensions' in result:
result['bot_dimensions'] = {
i['key']: i['value'] for i in result['bot_dimensions']
}
else:
result['bot_dimensions'] = None
def yield_results(
swarm_base_url, task_ids, timeout, max_threads, print_status_updates,
output_collector):
"""Yields swarming task results from the swarming server as (index, result).
Duplicate shards are ignored. Shards are yielded in order of completion.
Timed out shards are NOT yielded at all. Caller can compare number of yielded
shards with len(task_keys) to verify all shards completed.
  max_threads is optional and is used to limit the number of parallel fetches
  done. Since in general the number of task_keys is in the range <=10, it is
  normally not worth limiting the number of threads. Mostly used for testing
  purposes.
output_collector is an optional instance of TaskOutputCollector that will be
used to fetch files produced by a task from isolate server to the local disk.
Yields:
(index, result). In particular, 'result' is defined as the
GetRunnerResults() function in services/swarming/server/test_runner.py.
"""
number_threads = (
min(max_threads, len(task_ids)) if max_threads else len(task_ids))
should_stop = threading.Event()
results_channel = threading_utils.TaskChannel()
with threading_utils.ThreadPool(number_threads, number_threads, 0) as pool:
try:
# Adds a task to the thread pool to call 'retrieve_results' and return
# the results together with shard_index that produced them (as a tuple).
def enqueue_retrieve_results(shard_index, task_id):
task_fn = lambda *args: (shard_index, retrieve_results(*args))
pool.add_task(
0, results_channel.wrap_task(task_fn), swarm_base_url, shard_index,
task_id, timeout, should_stop, output_collector)
# Enqueue 'retrieve_results' calls for each shard key to run in parallel.
for shard_index, task_id in enumerate(task_ids):
enqueue_retrieve_results(shard_index, task_id)
# Wait for all of them to finish.
shards_remaining = range(len(task_ids))
active_task_count = len(task_ids)
while active_task_count:
shard_index, result = None, None
try:
shard_index, result = results_channel.pull(
timeout=STATUS_UPDATE_INTERVAL)
except threading_utils.TaskChannel.Timeout:
if print_status_updates:
print(
'Waiting for results from the following shards: %s' %
', '.join(map(str, shards_remaining)))
sys.stdout.flush()
continue
except Exception:
logging.exception('Unexpected exception in retrieve_results')
# A call to 'retrieve_results' finished (successfully or not).
active_task_count -= 1
if not result:
logging.error('Failed to retrieve the results for a swarming key')
continue
# Yield back results to the caller.
assert shard_index in shards_remaining
shards_remaining.remove(shard_index)
yield shard_index, result
finally:
# Done or aborted with Ctrl+C, kill the remaining threads.
should_stop.set()
def decorate_shard_output(swarming, shard_index, metadata):
"""Returns wrapped output for swarming task shard."""
if metadata.get('started_ts') and not metadata.get('deduped_from'):
pending = '%.1fs' % (
parse_time(metadata['started_ts']) - parse_time(metadata['created_ts'])
).total_seconds()
else:
pending = 'N/A'
if metadata.get('duration') is not None:
duration = '%.1fs' % metadata['duration']
else:
duration = 'N/A'
if metadata.get('exit_code') is not None:
    # Integers are encoded as strings to not lose precision.
exit_code = '%s' % metadata['exit_code']
else:
exit_code = 'N/A'
bot_id = metadata.get('bot_id') or 'N/A'
url = '%s/user/task/%s' % (swarming, metadata['task_id'])
tag_header = 'Shard %d %s' % (shard_index, url)
tag_footer = (
'End of shard %d Pending: %s Duration: %s Bot: %s Exit: %s' % (
shard_index, pending, duration, bot_id, exit_code))
tag_len = max(len(tag_header), len(tag_footer))
dash_pad = '+-%s-+\n' % ('-' * tag_len)
tag_header = '| %s |\n' % tag_header.ljust(tag_len)
tag_footer = '| %s |\n' % tag_footer.ljust(tag_len)
header = dash_pad + tag_header + dash_pad
footer = dash_pad + tag_footer + dash_pad[:-1]
output = (metadata.get('output') or '').rstrip() + '\n'
return header + output + footer
def collect(
swarming, task_ids, timeout, decorate, print_status_updates,
task_summary_json, task_output_dir):
"""Retrieves results of a Swarming task.
Returns:
process exit code that should be returned to the user.
"""
# Collect summary JSON and output files (if task_output_dir is not None).
output_collector = TaskOutputCollector(task_output_dir, len(task_ids))
seen_shards = set()
exit_code = None
total_duration = 0
try:
for index, metadata in yield_results(
swarming, task_ids, timeout, None, print_status_updates,
output_collector):
seen_shards.add(index)
# Default to failure if there was no process that even started.
shard_exit_code = metadata.get('exit_code')
if shard_exit_code:
# It's encoded as a string, so bool('0') is True.
shard_exit_code = int(shard_exit_code)
if shard_exit_code or exit_code is None:
exit_code = shard_exit_code
total_duration += metadata.get('duration', 0)
if decorate:
print(decorate_shard_output(swarming, index, metadata))
if len(seen_shards) < len(task_ids):
print('')
else:
print('%s: %s %s' % (
metadata.get('bot_id', 'N/A'),
metadata['task_id'],
shard_exit_code))
if metadata['output']:
output = metadata['output'].rstrip()
if output:
print(''.join(' %s\n' % l for l in output.splitlines()))
finally:
summary = output_collector.finalize()
if task_summary_json:
# TODO(maruel): Make this optional.
for i in summary['shards']:
if i:
convert_to_old_format(i)
tools.write_json(task_summary_json, summary, False)
if decorate and total_duration:
print('Total duration: %.1fs' % total_duration)
if len(seen_shards) != len(task_ids):
missing_shards = [x for x in range(len(task_ids)) if x not in seen_shards]
print >> sys.stderr, ('Results from some shards are missing: %s' %
', '.join(map(str, missing_shards)))
return 1
return exit_code if exit_code is not None else 1
### API management.
class APIError(Exception):
pass
def endpoints_api_discovery_apis(host):
"""Uses Cloud Endpoints' API Discovery Service to returns metadata about all
the APIs exposed by a host.
https://developers.google.com/discovery/v1/reference/apis/list
"""
data = net.url_read_json(host + '/_ah/api/discovery/v1/apis')
if data is None:
raise APIError('Failed to discover APIs on %s' % host)
out = {}
for api in data['items']:
if api['id'] == 'discovery:v1':
continue
# URL is of the following form:
# url = host + (
# '/_ah/api/discovery/v1/apis/%s/%s/rest' % (api['id'], api['version'])
api_data = net.url_read_json(api['discoveryRestUrl'])
if api_data is None:
raise APIError('Failed to discover %s on %s' % (api['id'], host))
out[api['id']] = api_data
return out
### Commands.
def abort_task(_swarming, _manifest):
"""Given a task manifest that was triggered, aborts its execution."""
  # TODO(vadimsh): Not supported by the server yet.
def add_filter_options(parser):
parser.filter_group = optparse.OptionGroup(parser, 'Filtering slaves')
parser.filter_group.add_option(
'-d', '--dimension', default=[], action='append', nargs=2,
dest='dimensions', metavar='FOO bar',
help='dimension to filter on')
parser.add_option_group(parser.filter_group)
def add_sharding_options(parser):
parser.sharding_group = optparse.OptionGroup(parser, 'Sharding options')
parser.sharding_group.add_option(
'--shards', type='int', default=1,
help='Number of shards to trigger and collect.')
parser.add_option_group(parser.sharding_group)
def add_trigger_options(parser):
"""Adds all options to trigger a task on Swarming."""
isolateserver.add_isolate_server_options(parser)
add_filter_options(parser)
parser.task_group = optparse.OptionGroup(parser, 'Task properties')
parser.task_group.add_option(
'-s', '--isolated',
help='Hash of the .isolated to grab from the isolate server')
parser.task_group.add_option(
'-e', '--env', default=[], action='append', nargs=2, metavar='FOO bar',
help='Environment variables to set')
parser.task_group.add_option(
'--priority', type='int', default=100,
help='The lower value, the more important the task is')
parser.task_group.add_option(
'-T', '--task-name',
help='Display name of the task. Defaults to '
'<base_name>/<dimensions>/<isolated hash>/<timestamp> if an '
'isolated file is provided, if a hash is provided, it defaults to '
'<user>/<dimensions>/<isolated hash>/<timestamp>')
parser.task_group.add_option(
'--tags', action='append', default=[],
help='Tags to assign to the task.')
parser.task_group.add_option(
'--user', default='',
help='User associated with the task. Defaults to authenticated user on '
'the server.')
parser.task_group.add_option(
'--idempotent', action='store_true', default=False,
help='When set, the server will actively try to find a previous task '
'with the same parameter and return this result instead if possible')
parser.task_group.add_option(
'--expiration', type='int', default=6*60*60,
help='Seconds to allow the task to be pending for a bot to run before '
'this task request expires.')
parser.task_group.add_option(
'--deadline', type='int', dest='expiration',
help=optparse.SUPPRESS_HELP)
parser.task_group.add_option(
'--hard-timeout', type='int', default=60*60,
help='Seconds to allow the task to complete.')
parser.task_group.add_option(
'--io-timeout', type='int', default=20*60,
help='Seconds to allow the task to be silent.')
parser.task_group.add_option(
'--raw-cmd', action='store_true', default=False,
help='When set, the command after -- is used as-is without run_isolated. '
'In this case, no .isolated file is expected.')
parser.add_option_group(parser.task_group)
def process_trigger_options(parser, options, args):
"""Processes trigger options and uploads files to isolate server if necessary.
"""
options.dimensions = dict(options.dimensions)
options.env = dict(options.env)
if not options.dimensions:
parser.error('Please at least specify one --dimension')
if options.raw_cmd:
if not args:
parser.error(
'Arguments with --raw-cmd should be passed after -- as command '
'delimiter.')
if options.isolate_server:
parser.error('Can\'t use both --raw-cmd and --isolate-server.')
command = args
if not options.task_name:
options.task_name = u'%s/%s' % (
options.user,
'_'.join(
'%s=%s' % (k, v)
for k, v in sorted(options.dimensions.iteritems())))
inputs_ref = None
else:
isolateserver.process_isolate_server_options(parser, options, False)
try:
command, inputs_ref = isolated_handle_options(options, args)
except ValueError as e:
parser.error(str(e))
# If inputs_ref is used, command is actually extra_args. Otherwise it's an
# actual command to run.
properties = TaskProperties(
command=None if inputs_ref else command,
dimensions=options.dimensions,
env=options.env,
execution_timeout_secs=options.hard_timeout,
extra_args=command if inputs_ref else None,
grace_period_secs=30,
idempotent=options.idempotent,
inputs_ref=inputs_ref,
io_timeout_secs=options.io_timeout)
if not all(len(t.split(':', 1)) == 2 for t in options.tags):
parser.error('--tags must be in the format key:value')
return NewTaskRequest(
expiration_secs=options.expiration,
name=options.task_name,
parent_task_id=os.environ.get('SWARMING_TASK_ID', ''),
priority=options.priority,
properties=properties,
tags=options.tags,
user=options.user)
def add_collect_options(parser):
parser.server_group.add_option(
'-t', '--timeout',
type='float',
default=80*60.,
help='Timeout to wait for result, set to 0 for no timeout; default: '
'%default s')
parser.group_logging.add_option(
'--decorate', action='store_true', help='Decorate output')
parser.group_logging.add_option(
'--print-status-updates', action='store_true',
help='Print periodic status updates')
parser.task_output_group = optparse.OptionGroup(parser, 'Task output')
parser.task_output_group.add_option(
'--task-summary-json',
metavar='FILE',
help='Dump a summary of task results to this file as json. It contains '
           'only shard statuses as known to the server directly. Any output files '
'emitted by the task can be collected by using --task-output-dir')
parser.task_output_group.add_option(
'--task-output-dir',
metavar='DIR',
help='Directory to put task results into. When the task finishes, this '
'directory contains per-shard directory with output files produced '
'by shards: <task-output-dir>/<zero-based-shard-index>/.')
parser.add_option_group(parser.task_output_group)
@subcommand.usage('bots...')
def CMDbot_delete(parser, args):
"""Forcibly deletes bots from the Swarming server."""
parser.add_option(
'-f', '--force', action='store_true',
help='Do not prompt for confirmation')
options, args = parser.parse_args(args)
if not args:
    parser.error('Please specify bots to delete')
bots = sorted(args)
if not options.force:
print('Delete the following bots?')
for bot in bots:
print(' %s' % bot)
if raw_input('Continue? [y/N] ') not in ('y', 'Y'):
print('Goodbye.')
return 1
result = 0
for bot in bots:
url = '%s/_ah/api/swarming/v1/bot/%s/delete' % (options.swarming, bot)
if net.url_read_json(url, data={}, method='POST') is None:
print('Deleting %s failed. Probably already gone' % bot)
result = 1
return result
def CMDbots(parser, args):
"""Returns information about the bots connected to the Swarming server."""
add_filter_options(parser)
parser.filter_group.add_option(
'--dead-only', action='store_true',
help='Only print dead bots, useful to reap them and reimage broken bots')
parser.filter_group.add_option(
'-k', '--keep-dead', action='store_true',
help='Do not filter out dead bots')
parser.filter_group.add_option(
'-b', '--bare', action='store_true',
help='Do not print out dimensions')
options, args = parser.parse_args(args)
if options.keep_dead and options.dead_only:
parser.error('Use only one of --keep-dead and --dead-only')
bots = []
cursor = None
limit = 250
# Iterate via cursors.
base_url = (
options.swarming + '/_ah/api/swarming/v1/bots/list?limit=%d' % limit)
while True:
url = base_url
if cursor:
url += '&cursor=%s' % urllib.quote(cursor)
data = net.url_read_json(url)
if data is None:
print >> sys.stderr, 'Failed to access %s' % options.swarming
return 1
bots.extend(data['items'])
cursor = data.get('cursor')
if not cursor:
break
for bot in natsort.natsorted(bots, key=lambda x: x['bot_id']):
if options.dead_only:
if not bot.get('is_dead'):
continue
elif not options.keep_dead and bot.get('is_dead'):
continue
# If the user requested to filter on dimensions, ensure the bot has all the
# dimensions requested.
dimensions = {i['key']: i['value'] for i in bot['dimensions']}
for key, value in options.dimensions:
if key not in dimensions:
break
      # A bot can have multiple values for a key, for example,
# {'os': ['Windows', 'Windows-6.1']}, so that --dimension os=Windows will
# be accepted.
if isinstance(dimensions[key], list):
if value not in dimensions[key]:
break
else:
if value != dimensions[key]:
break
else:
print bot['bot_id']
if not options.bare:
print ' %s' % json.dumps(dimensions, sort_keys=True)
if bot.get('task_id'):
print ' task: %s' % bot['task_id']
return 0
@subcommand.usage('--json file | task_id...')
def CMDcollect(parser, args):
"""Retrieves results of one or multiple Swarming task by its ID.
The result can be in multiple part if the execution was sharded. It can
potentially have retries.
"""
add_collect_options(parser)
parser.add_option(
'-j', '--json',
help='Load the task ids from .json as saved by trigger --dump-json')
options, args = parser.parse_args(args)
if not args and not options.json:
parser.error('Must specify at least one task id or --json.')
if args and options.json:
parser.error('Only use one of task id or --json.')
if options.json:
options.json = unicode(os.path.abspath(options.json))
try:
with fs.open(options.json, 'rb') as f:
tasks = sorted(
json.load(f)['tasks'].itervalues(), key=lambda x: x['shard_index'])
args = [t['task_id'] for t in tasks]
except (KeyError, IOError, TypeError, ValueError):
parser.error('Failed to parse %s' % options.json)
else:
valid = frozenset('0123456789abcdef')
if any(not valid.issuperset(task_id) for task_id in args):
parser.error('Task ids are 0-9a-f.')
try:
return collect(
options.swarming,
args,
options.timeout,
options.decorate,
options.print_status_updates,
options.task_summary_json,
options.task_output_dir)
except Failure:
on_error.report(None)
return 1
@subcommand.usage('[filename]')
def CMDput_bootstrap(parser, args):
"""Uploads a new version of bootstrap.py."""
options, args = parser.parse_args(args)
if len(args) != 1:
parser.error('Must specify file to upload')
url = options.swarming + '/_ah/api/swarming/v1/server/put_bootstrap'
path = unicode(os.path.abspath(args[0]))
with fs.open(path, 'rb') as f:
content = f.read().decode('utf-8')
data = net.url_read_json(url, data={'content': content})
print data
return 0
@subcommand.usage('[filename]')
def CMDput_bot_config(parser, args):
"""Uploads a new version of bot_config.py."""
options, args = parser.parse_args(args)
if len(args) != 1:
parser.error('Must specify file to upload')
url = options.swarming + '/_ah/api/swarming/v1/server/put_bot_config'
path = unicode(os.path.abspath(args[0]))
with fs.open(path, 'rb') as f:
content = f.read().decode('utf-8')
data = net.url_read_json(url, data={'content': content})
print data
return 0
@subcommand.usage('[method name]')
def CMDquery(parser, args):
"""Returns raw JSON information via an URL endpoint. Use 'query-list' to
gather the list of API methods from the server.
Examples:
Listing all bots:
swarming.py query -S server-url.com bots/list
Listing last 10 tasks on a specific bot named 'swarm1':
swarming.py query -S server-url.com --limit 10 bot/swarm1/tasks
Listing last 10 tasks with tags os:Ubuntu-12.04 and pool:Chrome. Note that
quoting is important!:
swarming.py query -S server-url.com --limit 10 \\
'tasks/list?tags=os:Ubuntu-12.04&tags=pool:Chrome'
"""
CHUNK_SIZE = 250
parser.add_option(
'-L', '--limit', type='int', default=200,
help='Limit to enforce on limitless items (like number of tasks); '
'default=%default')
parser.add_option(
'--json', help='Path to JSON output file (otherwise prints to stdout)')
parser.add_option(
'--progress', action='store_true',
help='Prints a dot at each request to show progress')
options, args = parser.parse_args(args)
if len(args) != 1:
parser.error(
'Must specify only method name and optionally query args properly '
'escaped.')
base_url = options.swarming + '/_ah/api/swarming/v1/' + args[0]
url = base_url
if options.limit:
    # Sanity check; adjust if this does not work out.
merge_char = '&' if '?' in url else '?'
url += '%slimit=%d' % (merge_char, min(CHUNK_SIZE, options.limit))
data = net.url_read_json(url)
if data is None:
# TODO(maruel): Do basic diagnostic.
print >> sys.stderr, 'Failed to access %s' % url
return 1
  # Some items support cursors. Fetch subsequent pages automatically by
  # looking at the 'cursor' item in each response.
while (
data.get('cursor') and
(not options.limit or len(data['items']) < options.limit)):
merge_char = '&' if '?' in base_url else '?'
url = base_url + '%scursor=%s' % (merge_char, urllib.quote(data['cursor']))
if options.limit:
url += '&limit=%d' % min(CHUNK_SIZE, options.limit - len(data['items']))
if options.progress:
sys.stdout.write('.')
sys.stdout.flush()
new = net.url_read_json(url)
if new is None:
if options.progress:
print('')
print >> sys.stderr, 'Failed to access %s' % options.swarming
return 1
data['items'].extend(new.get('items', []))
data['cursor'] = new.get('cursor')
if options.progress:
print('')
if options.limit and len(data.get('items', [])) > options.limit:
data['items'] = data['items'][:options.limit]
data.pop('cursor', None)
if options.json:
options.json = unicode(os.path.abspath(options.json))
tools.write_json(options.json, data, True)
else:
try:
tools.write_json(sys.stdout, data, False)
sys.stdout.write('\n')
except IOError:
pass
return 0
def CMDquery_list(parser, args):
"""Returns list of all the Swarming APIs that can be used with command
'query'.
"""
parser.add_option(
'--json', help='Path to JSON output file (otherwise prints to stdout)')
options, args = parser.parse_args(args)
if args:
parser.error('No argument allowed.')
try:
apis = endpoints_api_discovery_apis(options.swarming)
except APIError as e:
parser.error(str(e))
if options.json:
options.json = unicode(os.path.abspath(options.json))
with fs.open(options.json, 'wb') as f:
json.dump(apis, f)
else:
help_url = (
'https://apis-explorer.appspot.com/apis-explorer/?base=%s/_ah/api#p/' %
options.swarming)
for api_id, api in sorted(apis.iteritems()):
print api_id
print ' ' + api['description']
for resource_name, resource in sorted(api['resources'].iteritems()):
print ''
for method_name, method in sorted(resource['methods'].iteritems()):
# Only list the GET ones.
if method['httpMethod'] != 'GET':
continue
print '- %s.%s: %s' % (
resource_name, method_name, method['path'])
print ' ' + method['description']
print ' %s%s%s' % (help_url, api['servicePath'], method['id'])
return 0
@subcommand.usage('(hash|isolated) [-- extra_args]')
def CMDrun(parser, args):
"""Triggers a task and wait for the results.
Basically, does everything to run a command remotely.
"""
add_trigger_options(parser)
add_collect_options(parser)
add_sharding_options(parser)
options, args = parser.parse_args(args)
task_request = process_trigger_options(parser, options, args)
try:
tasks = trigger_task_shards(
options.swarming, task_request, options.shards)
except Failure as e:
on_error.report(
'Failed to trigger %s(%s): %s' %
(options.task_name, args[0], e.args[0]))
return 1
if not tasks:
on_error.report('Failed to trigger the task.')
return 1
print('Triggered task: %s' % options.task_name)
task_ids = [
t['task_id']
for t in sorted(tasks.itervalues(), key=lambda x: x['shard_index'])
]
try:
return collect(
options.swarming,
task_ids,
options.timeout,
options.decorate,
options.print_status_updates,
options.task_summary_json,
options.task_output_dir)
except Failure:
on_error.report(None)
return 1
@subcommand.usage('task_id -- <extra_args>')
def CMDreproduce(parser, args):
"""Runs a task locally that was triggered on the server.
  This runs locally the same commands that have been run on the bot. The data
downloaded will be in a subdirectory named 'work' of the current working
directory.
You can pass further additional arguments to the target command by passing
them after --.
"""
options, args = parser.parse_args(args)
extra_args = []
if not args:
parser.error('Must specify exactly one task id.')
if len(args) > 1:
if args[1] == '--':
if len(args) > 2:
extra_args = args[2:]
else:
extra_args = args[1:]
url = options.swarming + '/_ah/api/swarming/v1/task/%s/request' % args[0]
request = net.url_read_json(url)
if not request:
print >> sys.stderr, 'Failed to retrieve request data for the task'
return 1
workdir = unicode(os.path.abspath('work'))
if not fs.isdir(workdir):
fs.mkdir(workdir)
properties = request['properties']
env = None
if properties.get('env'):
env = os.environ.copy()
logging.info('env: %r', properties['env'])
for i in properties['env']:
key = i['key'].encode('utf-8')
if not i['value']:
env.pop(key, None)
else:
env[key] = i['value'].encode('utf-8')
if properties.get('inputs_ref'):
# Create the tree.
with isolateserver.get_storage(
properties['inputs_ref']['isolatedserver'],
properties['inputs_ref']['namespace']) as storage:
bundle = isolateserver.fetch_isolated(
properties['inputs_ref']['isolated'],
storage,
isolateserver.MemoryCache(file_mode_mask=0700),
workdir,
False)
command = bundle.command
if bundle.relative_cwd:
workdir = os.path.join(workdir, bundle.relative_cwd)
else:
command = properties['command']
try:
return subprocess.call(command + extra_args, env=env, cwd=workdir)
except OSError as e:
print >> sys.stderr, 'Failed to run: %s' % ' '.join(command)
print >> sys.stderr, str(e)
return 1
@subcommand.usage('bot_id')
def CMDterminate(parser, args):
"""Tells a bot to gracefully shut itself down as soon as it can.
This is done by completing whatever current task there is then exiting the bot
process.
"""
parser.add_option(
'--wait', action='store_true', help='Wait for the bot to terminate')
options, args = parser.parse_args(args)
if len(args) != 1:
parser.error('Please provide the bot id')
url = options.swarming + '/_ah/api/swarming/v1/bot/%s/terminate' % args[0]
request = net.url_read_json(url, data={})
if not request:
print >> sys.stderr, 'Failed to ask for termination'
return 1
if options.wait:
return collect(
options.swarming, [request['task_id']], 0., False, False, None, None)
return 0
@subcommand.usage("(hash|isolated) [-- extra_args|raw command]")
def CMDtrigger(parser, args):
"""Triggers a Swarming task.
Accepts either the hash (sha1) of a .isolated file already uploaded or the
path to an .isolated file to archive.
  If an .isolated file is specified instead of a hash, it is first archived.
Passes all extra arguments provided after '--' as additional command line
arguments for an isolated command specified in *.isolate file.
"""
add_trigger_options(parser)
add_sharding_options(parser)
parser.add_option(
'--dump-json',
metavar='FILE',
help='Dump details about the triggered task(s) to this file as json')
options, args = parser.parse_args(args)
task_request = process_trigger_options(parser, options, args)
try:
tasks = trigger_task_shards(
options.swarming, task_request, options.shards)
if tasks:
print('Triggered task: %s' % options.task_name)
tasks_sorted = sorted(
tasks.itervalues(), key=lambda x: x['shard_index'])
if options.dump_json:
data = {
'base_task_name': options.task_name,
'tasks': tasks,
}
tools.write_json(unicode(options.dump_json), data, True)
print('To collect results, use:')
print(' swarming.py collect -S %s --json %s' %
(options.swarming, options.dump_json))
else:
print('To collect results, use:')
print(' swarming.py collect -S %s %s' %
(options.swarming, ' '.join(t['task_id'] for t in tasks_sorted)))
print('Or visit:')
for t in tasks_sorted:
print(' ' + t['view_url'])
return int(not tasks)
except Failure:
on_error.report(None)
return 1
class OptionParserSwarming(logging_utils.OptionParserWithLogging):
def __init__(self, **kwargs):
logging_utils.OptionParserWithLogging.__init__(
self, prog='swarming.py', **kwargs)
self.server_group = optparse.OptionGroup(self, 'Server')
self.server_group.add_option(
'-S', '--swarming',
metavar='URL', default=os.environ.get('SWARMING_SERVER', ''),
help='Swarming server to use')
self.add_option_group(self.server_group)
auth.add_auth_options(self)
def parse_args(self, *args, **kwargs):
options, args = logging_utils.OptionParserWithLogging.parse_args(
self, *args, **kwargs)
auth.process_auth_options(self, options)
user = self._process_swarming(options)
if hasattr(options, 'user') and not options.user:
options.user = user
return options, args
def _process_swarming(self, options):
"""Processes the --swarming option and aborts if not specified.
Returns the identity as determined by the server.
"""
if not options.swarming:
self.error('--swarming is required.')
try:
options.swarming = net.fix_url(options.swarming)
except ValueError as e:
self.error('--swarming %s' % e)
on_error.report_on_exception_exit(options.swarming)
try:
user = auth.ensure_logged_in(options.swarming)
except ValueError as e:
self.error(str(e))
return user
def main(args):
dispatcher = subcommand.CommandDispatcher(__name__)
return dispatcher.execute(OptionParserSwarming(version=__version__), args)
if __name__ == '__main__':
fix_encoding.fix_encoding()
tools.disable_buffering()
colorama.init()
sys.exit(main(sys.argv[1:]))
| apache-2.0 | 4,930,416,379,992,762,000 | 32.125665 | 80 | 0.640272 | false |
Ali-aqrabawi/ezclinic | lib/django/middleware/common.py | 39 | 7843 | import logging
import re
from django import http
from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.core.mail import mail_managers
from django.urls import is_valid_path
from django.utils.cache import get_conditional_response, set_response_etag
from django.utils.deprecation import MiddlewareMixin
from django.utils.encoding import force_text
from django.utils.http import unquote_etag
from django.utils.six.moves.urllib.parse import urlparse
logger = logging.getLogger('django.request')
class CommonMiddleware(MiddlewareMixin):
"""
"Common" middleware for taking care of some basic operations:
- Forbids access to User-Agents in settings.DISALLOWED_USER_AGENTS
- URL rewriting: Based on the APPEND_SLASH and PREPEND_WWW settings,
this middleware appends missing slashes and/or prepends missing
"www."s.
- If APPEND_SLASH is set and the initial URL doesn't end with a
slash, and it is not found in urlpatterns, a new URL is formed by
appending a slash at the end. If this new URL is found in
urlpatterns, then an HTTP-redirect is returned to this new URL;
otherwise the initial URL is processed as usual.
This behavior can be customized by subclassing CommonMiddleware and
overriding the response_redirect_class attribute.
- ETags: If the USE_ETAGS setting is set, ETags will be calculated from
the entire page content and Not Modified responses will be returned
appropriately.
"""
response_redirect_class = http.HttpResponsePermanentRedirect
def process_request(self, request):
"""
Check for denied User-Agents and rewrite the URL based on
settings.APPEND_SLASH and settings.PREPEND_WWW
"""
# Check for denied User-Agents
if 'HTTP_USER_AGENT' in request.META:
for user_agent_regex in settings.DISALLOWED_USER_AGENTS:
if user_agent_regex.search(request.META['HTTP_USER_AGENT']):
raise PermissionDenied('Forbidden user agent')
# Check for a redirect based on settings.PREPEND_WWW
host = request.get_host()
must_prepend = settings.PREPEND_WWW and host and not host.startswith('www.')
redirect_url = ('%s://www.%s' % (request.scheme, host)) if must_prepend else ''
# Check if a slash should be appended
if self.should_redirect_with_slash(request):
path = self.get_full_path_with_slash(request)
else:
path = request.get_full_path()
# Return a redirect if necessary
if redirect_url or path != request.get_full_path():
redirect_url += path
return self.response_redirect_class(redirect_url)
def should_redirect_with_slash(self, request):
"""
Return True if settings.APPEND_SLASH is True and appending a slash to
the request path turns an invalid path into a valid one.
"""
if settings.APPEND_SLASH and not request.get_full_path().endswith('/'):
urlconf = getattr(request, 'urlconf', None)
return (
not is_valid_path(request.path_info, urlconf) and
is_valid_path('%s/' % request.path_info, urlconf)
)
return False
def get_full_path_with_slash(self, request):
"""
Return the full path of the request with a trailing slash appended.
Raise a RuntimeError if settings.DEBUG is True and request.method is
POST, PUT, or PATCH.
"""
new_path = request.get_full_path(force_append_slash=True)
if settings.DEBUG and request.method in ('POST', 'PUT', 'PATCH'):
raise RuntimeError(
"You called this URL via %(method)s, but the URL doesn't end "
"in a slash and you have APPEND_SLASH set. Django can't "
"redirect to the slash URL while maintaining %(method)s data. "
"Change your form to point to %(url)s (note the trailing "
"slash), or set APPEND_SLASH=False in your Django settings." % {
'method': request.method,
'url': request.get_host() + new_path,
}
)
return new_path
def process_response(self, request, response):
"""
Calculate the ETag, if needed.
When the status code of the response is 404, it may redirect to a path
with an appended slash if should_redirect_with_slash() returns True.
"""
# If the given URL is "Not Found", then check if we should redirect to
# a path with a slash appended.
if response.status_code == 404:
if self.should_redirect_with_slash(request):
return self.response_redirect_class(self.get_full_path_with_slash(request))
if settings.USE_ETAGS:
if not response.has_header('ETag'):
set_response_etag(response)
if response.has_header('ETag'):
return get_conditional_response(
request,
etag=unquote_etag(response['ETag']),
response=response,
)
return response
class BrokenLinkEmailsMiddleware(MiddlewareMixin):
def process_response(self, request, response):
"""
Send broken link emails for relevant 404 NOT FOUND responses.
"""
if response.status_code == 404 and not settings.DEBUG:
domain = request.get_host()
path = request.get_full_path()
referer = force_text(request.META.get('HTTP_REFERER', ''), errors='replace')
if not self.is_ignorable_request(request, path, domain, referer):
ua = force_text(request.META.get('HTTP_USER_AGENT', '<none>'), errors='replace')
ip = request.META.get('REMOTE_ADDR', '<none>')
mail_managers(
"Broken %slink on %s" % (
('INTERNAL ' if self.is_internal_request(domain, referer) else ''),
domain
),
"Referrer: %s\nRequested URL: %s\nUser agent: %s\n"
"IP address: %s\n" % (referer, path, ua, ip),
fail_silently=True)
return response
def is_internal_request(self, domain, referer):
"""
Returns True if the referring URL is the same domain as the current request.
"""
# Different subdomains are treated as different domains.
return bool(re.match("^https?://%s/" % re.escape(domain), referer))
def is_ignorable_request(self, request, uri, domain, referer):
"""
Return True if the given request *shouldn't* notify the site managers
according to project settings or in situations outlined by the inline
comments.
"""
# The referer is empty.
if not referer:
return True
# APPEND_SLASH is enabled and the referer is equal to the current URL
# without a trailing slash indicating an internal redirect.
if settings.APPEND_SLASH and uri.endswith('/') and referer == uri[:-1]:
return True
# A '?' in referer is identified as a search engine source.
if not self.is_internal_request(domain, referer) and '?' in referer:
return True
# The referer is equal to the current URL, ignoring the scheme (assumed
# to be a poorly implemented bot).
parsed_referer = urlparse(referer)
if parsed_referer.netloc in ['', domain] and parsed_referer.path == uri:
return True
return any(pattern.search(uri) for pattern in settings.IGNORABLE_404_URLS)
| mit | 7,563,688,845,073,268,000 | 40.497354 | 96 | 0.614178 | false |
Eric-Zhong/odoo | addons/l10n_bo/__openerp__.py | 259 | 1698 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012 Cubic ERP - Teradata SAC (<http://cubicerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Bolivia Localization Chart Account",
"version": "1.0",
"description": """
Bolivian accounting chart and tax localization.
Plan contable boliviano e impuestos de acuerdo a disposiciones vigentes
""",
"author": "Cubic ERP",
"website": "http://cubicERP.com",
"category": "Localization/Account Charts",
"depends": [
"account_chart",
],
"data":[
"account_tax_code.xml",
"l10n_bo_chart.xml",
"account_tax.xml",
"l10n_bo_wizard.xml",
],
"demo_xml": [
],
"data": [
],
"active": False,
"installable": True,
"certificate" : "",
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -1,501,651,276,036,674,000 | 32.294118 | 78 | 0.594817 | false |
btallman/incubator-airflow | tests/ti_deps/deps/dag_ti_slots_available_dep.py | 20 | 1458 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from airflow.ti_deps.deps.dag_ti_slots_available_dep import DagTISlotsAvailableDep
from fake_models import FakeDag, FakeTask, FakeTI
class DagTISlotsAvailableDepTest(unittest.TestCase):
def test_concurrency_reached(self):
"""
Test concurrency reached should fail dep
"""
dag = FakeDag(concurrency=1, concurrency_reached=True)
task = FakeTask(dag=dag)
ti = FakeTI(task=task, dag_id="fake_dag")
self.assertFalse(DagTISlotsAvailableDep().is_met(ti=ti, dep_context=None))
def test_all_conditions_met(self):
"""
Test all conditions met should pass dep
"""
dag = FakeDag(concurrency=1, concurrency_reached=False)
task = FakeTask(dag=dag)
ti = FakeTI(task=task, dag_id="fake_dag")
self.assertTrue(DagTISlotsAvailableDep().is_met(ti=ti, dep_context=None))
| apache-2.0 | 8,014,650,312,974,675,000 | 34.560976 | 82 | 0.701646 | false |
bank-netforce/netforce | netforce_hr/netforce_hr/models/hr_department.py | 4 | 1500 | # Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from netforce.model import Model, fields, get_model
class Department(Model):
_name = "hr.department"
_string = "Department"
_key = ["name"]
_fields = {
"name": fields.Char("Name", required=True, search=True),
"code": fields.Char("Code"),
"comments": fields.One2Many("message", "related_id", "Comments"),
}
_order = "name"
Department.register()
| mit | 804,413,903,270,836,400 | 41.857143 | 80 | 0.73 | false |
RaghavPro/Runescape-Hiscores | hiscores/templatetags/hiscores_tags.py | 1 | 1077 | from django import template
register = template.Library()
@register.filter
def get_rank(page, loop_counter):
"""
Calculates the player rank from current page and loop index.
    :param page: Current Page object (from Django's paginator)
:param loop_counter: Loop index
:return: rank
"""
rank = page.start_index() + loop_counter
return "{:,}".format(rank)
@register.filter
def display_skill(d, skill):
"""
Display skill in template
:param d: Dictionary
:param skill: key
:return: Grabs the key from the dictionary and formats it.
"""
return "{:,}".format(d[skill])
@register.filter
def display_exp(d, skill):
"""
Display exp of the specified skill in template
:param d: Dictionary
:param skill: skill
:return: formatted exp value
"""
skill_exp = skill + "_exp"
return "{:,}".format(int(d[skill_exp]))
@register.filter
def get_exp(l, index):
"""
Gets the 'exp' key from a list of dictionary
:param l: The list
:param index: List index
:return: 'exp' key
"""
return l[index]['exp'] | gpl-2.0 | 3,746,209,754,372,066,300 | 21 | 64 | 0.631383 | false |
lukaspetr/FEniCSopt | fenicsopt/exports/gnuplot.py | 1 | 1501 | from __future__ import division, print_function
from dolfin import *
import numpy
import pprint
# Gnuplot related functions
################################################################################
# discontinuous piecewise linear output
def gnuplot_dg1(file, mesh, fun):
file = open(file, 'w+')
i = 0
for myCell in cells(mesh):
i += 1
vs = []
for v in vertices(myCell):
vs.append(v.midpoint())
print('%e %e %e' % (vs[0].x(),vs[0].y(),fun(vs[0].x(),vs[0].y())), file=file)
print('%e %e %e' % (vs[1].x(),vs[1].y(),fun(vs[1].x(),vs[1].y())), file=file)
print('%e %e %e' % (vs[2].x(),vs[2].y(),fun(vs[2].x(),vs[2].y())), file=file)
print('%e %e %e' % (vs[0].x(),vs[0].y(),fun(vs[0].x(),vs[0].y())), file=file)
if (i == 1):
print('%e %e %e' % (vs[0].x(),vs[0].y(),fun(vs[0].x(),vs[0].y())), file=file)
print('', file=file)
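# The per-cell blocks written above can be rendered in gnuplot with something
# like (file name illustrative):  splot 'solution_dg1.dat' with lines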
################################################################################
# for printing with e.g. pm3d map
def gnuplot_square_equidistant(file, N, fun):
file = open(file, 'w+')
for i in range(0, N+1):
for j in range(0, N+1):
x = i/N
y = j/N
p = Point(x,y)
f = fun(p)
print('%e %e %e' % (x,y,f), file=file)
print('', file=file)
################################################################################
# graph output
def gnuplot_graph(file, data):
file = open(file, 'w+')
for point in data:
pprint.pprint(point)
print('%e %e' % (point['position'], point['phi']), file=file)
print('', file=file)
| mit | 8,669,523,056,949,258,000 | 28.431373 | 82 | 0.459694 | false |
macobo/python-grader | tasks/MTAT.03.100/2013/Midterm_1/KT2_R8_mood_tester.py | 1 | 1312 | """
Task description (translated from Estonian):

3. Mode of numbers (5p)
Write a function mood which takes a list of integers as its argument and
returns the number that occurs most often in the list (i.e. the mode). If
several numbers are tied for the most occurrences, the smallest of them must
be returned.
Example: mood([-10, 17, 13, 17, -10, 21]) must return -10.
"""
from grader import *
from KT2_util import make_checker
from random import *
def mood(lst):
parim = 0
parim_count = 0
for el in lst:
count = 0
for a in lst:
if a == el:
count += 1
if count > parim_count or (count == parim_count and el < parim):
parim = el
parim_count = count
return parim
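# Quick sanity checks (the second example comes from the task statement above):
#   mood([1, 2, 2, 3]) == 2
#   mood([-10, 17, 13, 17, -10, 21]) == -10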
def rand_count(length, _seed=None, N = None):
seed(_seed)
if N is None: N = max(int(length / 5), 2)
return [randint(-N, N) for _ in range(length)]
checker = make_checker(mood)
checker([1])
checker([1, 1, 1, 3])
checker([1, 2, 2, 3, 2, 1, -2, 3])
checker([-10, 17, 13, 17, -10, 21],
description="Erijuht, võrdsete esinemiste arvu korral tagasta vähim - {function}({args}) == {expected}")
checker([17, -10, 13, -10, 17, 21],
description="Erijuht, võrdsete esinemiste arvu korral tagasta vähim - {function}({args}) == {expected}")
for i in range(5):
checker(rand_count(20, i)) | mit | -6,235,041,246,223,998,000 | 27.304348 | 108 | 0.620292 | false |
donkirkby/django | tests/select_related/models.py | 276 | 3480 | """
Tests for select_related()
``select_related()`` follows all relationships and pre-caches any foreign key
values so that complex trees can be fetched in a single query. However, this
isn't always a good idea, so the ``depth`` argument controls how many "levels"
the select-related behavior will traverse.
"""
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
# Who remembers high school biology?
@python_2_unicode_compatible
class Domain(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Kingdom(models.Model):
name = models.CharField(max_length=50)
domain = models.ForeignKey(Domain, models.CASCADE)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Phylum(models.Model):
name = models.CharField(max_length=50)
kingdom = models.ForeignKey(Kingdom, models.CASCADE)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Klass(models.Model):
name = models.CharField(max_length=50)
phylum = models.ForeignKey(Phylum, models.CASCADE)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Order(models.Model):
name = models.CharField(max_length=50)
klass = models.ForeignKey(Klass, models.CASCADE)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Family(models.Model):
name = models.CharField(max_length=50)
order = models.ForeignKey(Order, models.CASCADE)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Genus(models.Model):
name = models.CharField(max_length=50)
family = models.ForeignKey(Family, models.CASCADE)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Species(models.Model):
name = models.CharField(max_length=50)
genus = models.ForeignKey(Genus, models.CASCADE)
def __str__(self):
return self.name
# and we'll invent a new thing so we have a model with two foreign keys
@python_2_unicode_compatible
class HybridSpecies(models.Model):
name = models.CharField(max_length=50)
parent_1 = models.ForeignKey(Species, models.CASCADE, related_name='child_1')
parent_2 = models.ForeignKey(Species, models.CASCADE, related_name='child_2')
def __str__(self):
return self.name
@python_2_unicode_compatible
class Topping(models.Model):
name = models.CharField(max_length=30)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Pizza(models.Model):
name = models.CharField(max_length=100)
toppings = models.ManyToManyField(Topping)
def __str__(self):
return self.name
@python_2_unicode_compatible
class TaggedItem(models.Model):
tag = models.CharField(max_length=30)
content_type = models.ForeignKey(ContentType, models.CASCADE, related_name='select_related_tagged_items')
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
def __str__(self):
return self.tag
@python_2_unicode_compatible
class Bookmark(models.Model):
url = models.URLField()
tags = GenericRelation(TaggedItem)
def __str__(self):
return self.url
| bsd-3-clause | -2,237,320,953,492,527,600 | 24.035971 | 109 | 0.70546 | false |
PaulKinlan/cli-caniuse | site/app/scripts/bower_components/jsrepl-build/extern/python/reloop-closured/lib/python2.7/distutils/cygwinccompiler.py | 132 | 17270 | """distutils.cygwinccompiler
Provides the CygwinCCompiler class, a subclass of UnixCCompiler that
handles the Cygwin port of the GNU C compiler to Windows. It also contains
the Mingw32CCompiler class which handles the mingw32 port of GCC (same as
cygwin in no-cygwin mode).
"""
# problems:
#
# * if you use a msvc compiled python version (1.5.2)
# 1. you have to insert a __GNUC__ section in its config.h
# 2. you have to generate a import library for its dll
# - create a def-file for python??.dll
# - create a import library using
# dlltool --dllname python15.dll --def python15.def \
# --output-lib libpython15.a
#
# see also http://starship.python.net/crew/kernr/mingw32/Notes.html
#
# * We put export_symbols in a def-file, and don't use
#   --export-all-symbols because it didn't work reliably in some
#   tested configurations. And because other Windows compilers also
#   need their symbols specified, this is no serious problem.
#
# tested configurations:
#
# * cygwin gcc 2.91.57/ld 2.9.4/dllwrap 0.2.4 works
# (after patching python's config.h and for C++ some other include files)
# see also http://starship.python.net/crew/kernr/mingw32/Notes.html
# * mingw32 gcc 2.95.2/ld 2.9.4/dllwrap 0.2.4 works
# (ld doesn't support -shared, so we use dllwrap)
# * cygwin gcc 2.95.2/ld 2.10.90/dllwrap 2.10.90 works now
# - its dllwrap doesn't work, there is a bug in binutils 2.10.90
# see also http://sources.redhat.com/ml/cygwin/2000-06/msg01274.html
# - using gcc -mdll instead dllwrap doesn't work without -static because
# it tries to link against dlls instead their import libraries. (If
# it finds the dll first.)
# By specifying -static we force ld to link against the import libraries,
# this is windows standard and there are normally not the necessary symbols
# in the dlls.
# *** only the version of June 2000 shows these problems
# * cygwin gcc 3.2/ld 2.13.90 works
# (ld supports -shared)
# * mingw gcc 3.2/ld 2.13 works
# (ld supports -shared)
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id$"
import os,sys,copy
from distutils.ccompiler import gen_preprocess_options, gen_lib_options
from distutils.unixccompiler import UnixCCompiler
from distutils.file_util import write_file
from distutils.errors import DistutilsExecError, CompileError, UnknownFileError
from distutils import log
def get_msvcr():
"""Include the appropriate MSVC runtime library if Python was built
with MSVC 7.0 or later.
"""
msc_pos = sys.version.find('MSC v.')
if msc_pos != -1:
msc_ver = sys.version[msc_pos+6:msc_pos+10]
if msc_ver == '1300':
# MSVC 7.0
return ['msvcr70']
elif msc_ver == '1310':
# MSVC 7.1
return ['msvcr71']
elif msc_ver == '1400':
# VS2005 / MSVC 8.0
return ['msvcr80']
elif msc_ver == '1500':
# VS2008 / MSVC 9.0
return ['msvcr90']
else:
raise ValueError("Unknown MS Compiler version %s " % msc_ver)
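# For example, a Python whose sys.version reports "MSC v.1500" (VS2008) makes
# get_msvcr() return ['msvcr90'], so extensions are linked against the same C
# runtime as the interpreter; unknown MSC versions raise ValueError above.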
class CygwinCCompiler (UnixCCompiler):
compiler_type = 'cygwin'
obj_extension = ".o"
static_lib_extension = ".a"
shared_lib_extension = ".dll"
static_lib_format = "lib%s%s"
shared_lib_format = "%s%s"
exe_extension = ".exe"
def __init__ (self, verbose=0, dry_run=0, force=0):
UnixCCompiler.__init__ (self, verbose, dry_run, force)
(status, details) = check_config_h()
self.debug_print("Python's GCC status: %s (details: %s)" %
(status, details))
if status is not CONFIG_H_OK:
self.warn(
"Python's pyconfig.h doesn't seem to support your compiler. "
"Reason: %s. "
"Compiling may fail because of undefined preprocessor macros."
% details)
self.gcc_version, self.ld_version, self.dllwrap_version = \
get_versions()
self.debug_print(self.compiler_type + ": gcc %s, ld %s, dllwrap %s\n" %
(self.gcc_version,
self.ld_version,
self.dllwrap_version) )
# ld_version >= "2.10.90" and < "2.13" should also be able to use
# gcc -mdll instead of dllwrap
# Older dllwraps had own version numbers, newer ones use the
# same as the rest of binutils ( also ld )
# dllwrap 2.10.90 is buggy
if self.ld_version >= "2.10.90":
self.linker_dll = "gcc"
else:
self.linker_dll = "dllwrap"
# ld_version >= "2.13" support -shared so use it instead of
# -mdll -static
if self.ld_version >= "2.13":
shared_option = "-shared"
else:
shared_option = "-mdll -static"
# Hard-code GCC because that's what this is all about.
# XXX optimization, warnings etc. should be customizable.
self.set_executables(compiler='gcc -mcygwin -O -Wall',
compiler_so='gcc -mcygwin -mdll -O -Wall',
compiler_cxx='g++ -mcygwin -O -Wall',
linker_exe='gcc -mcygwin',
linker_so=('%s -mcygwin %s' %
(self.linker_dll, shared_option)))
# cygwin and mingw32 need different sets of libraries
if self.gcc_version == "2.91.57":
# cygwin shouldn't need msvcrt, but without the dlls will crash
# (gcc version 2.91.57) -- perhaps something about initialization
self.dll_libraries=["msvcrt"]
self.warn(
"Consider upgrading to a newer version of gcc")
else:
# Include the appropriate MSVC runtime library if Python was built
# with MSVC 7.0 or later.
self.dll_libraries = get_msvcr()
# __init__ ()
def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
if ext == '.rc' or ext == '.res':
# gcc needs '.res' and '.rc' compiled to object files !!!
try:
self.spawn(["windres", "-i", src, "-o", obj])
except DistutilsExecError, msg:
raise CompileError, msg
else: # for other files use the C-compiler
try:
self.spawn(self.compiler_so + cc_args + [src, '-o', obj] +
extra_postargs)
except DistutilsExecError, msg:
raise CompileError, msg
def link (self,
target_desc,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
# use separate copies, so we can modify the lists
extra_preargs = copy.copy(extra_preargs or [])
libraries = copy.copy(libraries or [])
objects = copy.copy(objects or [])
# Additional libraries
libraries.extend(self.dll_libraries)
# handle export symbols by creating a def-file
# with executables this only works with gcc/ld as linker
if ((export_symbols is not None) and
(target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):
# (The linker doesn't do anything if output is up-to-date.
# So it would probably better to check if we really need this,
# but for this we had to insert some unchanged parts of
# UnixCCompiler, and this is not what we want.)
# we want to put some files in the same directory as the
# object files are, build_temp doesn't help much
# where are the object files
temp_dir = os.path.dirname(objects[0])
# name of dll to give the helper files the same base name
(dll_name, dll_extension) = os.path.splitext(
os.path.basename(output_filename))
# generate the filenames for these files
def_file = os.path.join(temp_dir, dll_name + ".def")
lib_file = os.path.join(temp_dir, 'lib' + dll_name + ".a")
# Generate .def file
contents = [
"LIBRARY %s" % os.path.basename(output_filename),
"EXPORTS"]
for sym in export_symbols:
contents.append(sym)
self.execute(write_file, (def_file, contents),
"writing %s" % def_file)
# next add options for def-file and to creating import libraries
# dllwrap uses different options than gcc/ld
if self.linker_dll == "dllwrap":
extra_preargs.extend(["--output-lib", lib_file])
# for dllwrap we have to use a special option
extra_preargs.extend(["--def", def_file])
# we use gcc/ld here and can be sure ld is >= 2.9.10
else:
# doesn't work: bfd_close build\...\libfoo.a: Invalid operation
#extra_preargs.extend(["-Wl,--out-implib,%s" % lib_file])
# for gcc/ld the def-file is specified as any object files
objects.append(def_file)
#end: if ((export_symbols is not None) and
# (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):
# who wants symbols and a many times larger output file
# should explicitly switch the debug mode on
# otherwise we let dllwrap/ld strip the output file
# (On my machine: 10KB < stripped_file < ??100KB
# unstripped_file = stripped_file + XXX KB
# ( XXX=254 for a typical python extension))
if not debug:
extra_preargs.append("-s")
UnixCCompiler.link(self,
target_desc,
objects,
output_filename,
output_dir,
libraries,
library_dirs,
runtime_library_dirs,
None, # export_symbols, we do this in our def-file
debug,
extra_preargs,
extra_postargs,
build_temp,
target_lang)
# link ()
# -- Miscellaneous methods -----------------------------------------
# overwrite the one from CCompiler to support rc and res-files
def object_filenames (self,
source_filenames,
strip_dir=0,
output_dir=''):
if output_dir is None: output_dir = ''
obj_names = []
for src_name in source_filenames:
# use normcase to make sure '.rc' is really '.rc' and not '.RC'
(base, ext) = os.path.splitext (os.path.normcase(src_name))
if ext not in (self.src_extensions + ['.rc','.res']):
raise UnknownFileError, \
"unknown file type '%s' (from '%s')" % \
(ext, src_name)
if strip_dir:
base = os.path.basename (base)
if ext == '.res' or ext == '.rc':
# these need to be compiled to object files
obj_names.append (os.path.join (output_dir,
base + ext + self.obj_extension))
else:
obj_names.append (os.path.join (output_dir,
base + self.obj_extension))
return obj_names
# object_filenames ()
# class CygwinCCompiler
# the same as cygwin plus some additional parameters
class Mingw32CCompiler (CygwinCCompiler):
compiler_type = 'mingw32'
def __init__ (self,
verbose=0,
dry_run=0,
force=0):
CygwinCCompiler.__init__ (self, verbose, dry_run, force)
# ld_version >= "2.13" support -shared so use it instead of
# -mdll -static
if self.ld_version >= "2.13":
shared_option = "-shared"
else:
shared_option = "-mdll -static"
# A real mingw32 doesn't need to specify a different entry point,
# but cygwin 2.91.57 in no-cygwin-mode needs it.
if self.gcc_version <= "2.91.57":
entry_point = '--entry _DllMain@12'
else:
entry_point = ''
self.set_executables(compiler='gcc -mno-cygwin -O -Wall',
compiler_so='gcc -mno-cygwin -mdll -O -Wall',
compiler_cxx='g++ -mno-cygwin -O -Wall',
linker_exe='gcc -mno-cygwin',
linker_so='%s -mno-cygwin %s %s'
% (self.linker_dll, shared_option,
entry_point))
# Maybe we should also append -mthreads, but then the finished
# dlls need another dll (mingwm10.dll see Mingw32 docs)
# (-mthreads: Support thread-safe exception handling on `Mingw32')
# no additional libraries needed
self.dll_libraries=[]
# Include the appropriate MSVC runtime library if Python was built
# with MSVC 7.0 or later.
self.dll_libraries = get_msvcr()
# __init__ ()
# class Mingw32CCompiler
# Because these compilers aren't configured in Python's pyconfig.h file by
# default, we should at least warn the user if he is using a unmodified
# version.
CONFIG_H_OK = "ok"
CONFIG_H_NOTOK = "not ok"
CONFIG_H_UNCERTAIN = "uncertain"
def check_config_h():
"""Check if the current Python installation (specifically, pyconfig.h)
appears amenable to building extensions with GCC. Returns a tuple
(status, details), where 'status' is one of the following constants:
CONFIG_H_OK
all is well, go ahead and compile
CONFIG_H_NOTOK
doesn't look good
CONFIG_H_UNCERTAIN
not sure -- unable to read pyconfig.h
'details' is a human-readable string explaining the situation.
Note there are two ways to conclude "OK": either 'sys.version' contains
the string "GCC" (implying that this Python was built with GCC), or the
installed "pyconfig.h" contains the string "__GNUC__".
"""
# XXX since this function also checks sys.version, it's not strictly a
# "pyconfig.h" check -- should probably be renamed...
from distutils import sysconfig
import string
# if sys.version contains GCC then python was compiled with
# GCC, and the pyconfig.h file should be OK
if string.find(sys.version,"GCC") >= 0:
return (CONFIG_H_OK, "sys.version mentions 'GCC'")
fn = sysconfig.get_config_h_filename()
try:
# It would probably better to read single lines to search.
# But we do this only once, and it is fast enough
f = open(fn)
try:
s = f.read()
finally:
f.close()
except IOError, exc:
# if we can't read this file, we cannot say it is wrong
# the compiler will complain later about this file as missing
return (CONFIG_H_UNCERTAIN,
"couldn't read '%s': %s" % (fn, exc.strerror))
else:
# "pyconfig.h" contains an "#ifdef __GNUC__" or something similar
if string.find(s,"__GNUC__") >= 0:
return (CONFIG_H_OK, "'%s' mentions '__GNUC__'" % fn)
else:
return (CONFIG_H_NOTOK, "'%s' does not mention '__GNUC__'" % fn)
def get_versions():
""" Try to find out the versions of gcc, ld and dllwrap.
If not possible it returns None for it.
"""
from distutils.version import LooseVersion
from distutils.spawn import find_executable
import re
gcc_exe = find_executable('gcc')
if gcc_exe:
out = os.popen(gcc_exe + ' -dumpversion','r')
out_string = out.read()
out.close()
result = re.search('(\d+\.\d+(\.\d+)*)',out_string)
if result:
gcc_version = LooseVersion(result.group(1))
else:
gcc_version = None
else:
gcc_version = None
ld_exe = find_executable('ld')
if ld_exe:
out = os.popen(ld_exe + ' -v','r')
out_string = out.read()
out.close()
result = re.search('(\d+\.\d+(\.\d+)*)',out_string)
if result:
ld_version = LooseVersion(result.group(1))
else:
ld_version = None
else:
ld_version = None
dllwrap_exe = find_executable('dllwrap')
if dllwrap_exe:
out = os.popen(dllwrap_exe + ' --version','r')
out_string = out.read()
out.close()
result = re.search(' (\d+\.\d+(\.\d+)*)',out_string)
if result:
dllwrap_version = LooseVersion(result.group(1))
else:
dllwrap_version = None
else:
dllwrap_version = None
return (gcc_version, ld_version, dllwrap_version)
| apache-2.0 | 8,135,514,040,425,359,000 | 37.463252 | 79 | 0.557267 | false |
lseyesl/phantomjs | src/qt/qtwebkit/Tools/QueueStatusServer/model/queues.py | 120 | 3658 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
from config.queues import all_queue_names
from model.activeworkitems import ActiveWorkItems
from model.workitems import WorkItems
class Queue(object):
def __init__(self, name):
assert(name in all_queue_names)
self._name = name
@classmethod
def queue_with_name(cls, queue_name):
if queue_name not in all_queue_names:
return None
return Queue(queue_name)
@classmethod
def all(cls):
return [Queue(name) for name in all_queue_names]
@classmethod
def all_ews(cls):
return [queue for queue in cls.all() if queue.is_ews()]
def name(self):
return self._name
def work_items(self):
return WorkItems.lookup_by_queue(self._name)
# FIXME: active_work_items is a bad name for this lock-table.
def active_work_items(self):
return ActiveWorkItems.lookup_by_queue(self._name)
def _caplitalize_after_dash(self, string):
return "-".join([word[0].upper() + word[1:] for word in string.split("-")])
# For use in status bubbles or table headers
def short_name(self):
short_name = self._name.replace("-ews", "")
short_name = short_name.replace("-queue", "")
return self._caplitalize_after_dash(short_name.capitalize())
def display_name(self):
display_name = self._name.replace("-", " ")
display_name = display_name.title()
display_name = display_name.replace("Wk2", "WK2")
display_name = display_name.replace("Ews", "EWS")
return display_name
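    # For example, assuming a queue named "mac-wk2-ews" exists in config.queues,
    # short_name() returns "Mac-Wk2" and display_name() returns "Mac WK2 EWS".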
_dash_regexp = re.compile("-")
def name_with_underscores(self):
return self._dash_regexp.sub("_", self._name)
def is_ews(self):
# Note: The style-queue is just like an EWS in that it has an EWS
# bubble, and it works off of the r? patches. If at some later
# point code wants to not treat the style-queue as an EWS
# (e.g. expecting is_ews() queues to have build results?)
# then we should fix all callers and change this check.
return self._name.endswith("-ews") or self._name == "style-queue"
| bsd-3-clause | 7,550,555,497,073,756,000 | 38.333333 | 83 | 0.693275 | false |
gtlzxt/synergy | ext/toolchain/generators.py | 33 | 2582 | # synergy -- mouse and keyboard sharing utility
# Copyright (C) 2012 Synergy Si Ltd.
# Copyright (C) 2009 Nick Bolton
#
# This package is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# found in the file LICENSE that should have accompanied this file.
#
# This package is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
class Generator(object):
def __init__(self, cmakeName, buildDir='build', sourceDir='..', binDir='bin'):
self.cmakeName = cmakeName
self.buildDir = buildDir
self.sourceDir = sourceDir
self.binDir = binDir
def getBuildDir(self, target):
return self.buildDir
def getBinDir(self, target=''):
return self.binDir
def getSourceDir(self):
return self.sourceDir
class VisualStudioGenerator(Generator):
def __init__(self, version):
super(VisualStudioGenerator, self).__init__('Visual Studio ' + version)
def getBinDir(self, target=''):
return super(VisualStudioGenerator, self).getBinDir(target) + '/' + target
class MakefilesGenerator(Generator):
def __init__(self):
super(MakefilesGenerator, self).__init__('Unix Makefiles')
def getBuildDir(self, target):
return super(MakefilesGenerator, self).getBuildDir(target) + '/' + target
def getBinDir(self, target=''):
workingDir = super(MakefilesGenerator, self).getBinDir(target)
# only put debug files in separate bin dir
if target == 'debug':
workingDir += '/debug'
return workingDir
def getSourceDir(self):
return super(MakefilesGenerator, self).getSourceDir() + '/..'
class XcodeGenerator(Generator):
def __init__(self):
super(XcodeGenerator, self).__init__('Xcode')
def getBinDir(self, target=''):
if target == "":
return super(XcodeGenerator, self).getBinDir(target)
xcodeTarget = target[0].upper() + target[1:]
return super(XcodeGenerator, self).getBinDir(target) + '/' + xcodeTarget
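# A quick illustration of the generators above: XcodeGenerator().getBinDir('debug')
# yields 'bin/Debug', while MakefilesGenerator().getBinDir('debug') keeps debug
# output separate in 'bin/debug'.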
class EclipseGenerator(Generator):
def __init__(self):
super(EclipseGenerator, self).__init__('Eclipse CDT4 - Unix Makefiles', '', '')
def getBuildDir(self, target):
# eclipse only works with in-source build.
return ''
def getBinDir(self, target=''):
# eclipse only works with in-source build.
return 'bin'
def getSourceDir(self):
return ''
| gpl-2.0 | -8,262,608,071,379,534,000 | 29.738095 | 81 | 0.71495 | false |
pombredanne/logbook | tests/test_logging_api.py | 6 | 2823 | import pickle
import sys
import logbook
from logbook.helpers import iteritems, xrange, u
import pytest
def test_basic_logging(active_handler, logger):
logger.warn('This is a warning. Nice hah?')
assert active_handler.has_warning('This is a warning. Nice hah?')
assert active_handler.formatted_records == [
'[WARNING] testlogger: This is a warning. Nice hah?']
def test_exception_catching(active_handler, logger):
assert not active_handler.has_error()
try:
1 / 0
except Exception:
logger.exception()
try:
1 / 0
except Exception:
logger.exception('Awesome')
assert active_handler.has_error('Uncaught exception occurred')
assert active_handler.has_error('Awesome')
assert active_handler.records[0].exc_info is not None
assert '1 / 0' in active_handler.records[0].formatted_exception
def test_exception_catching_with_unicode():
""" See https://github.com/getlogbook/logbook/issues/104
"""
try:
raise Exception(u('\u202a test \u202c'))
except:
r = logbook.LogRecord('channel', 'DEBUG', 'test',
exc_info=sys.exc_info())
r.exception_message
@pytest.mark.parametrize('as_tuple', [True, False])
def test_exc_info(as_tuple, logger, active_handler):
try:
1 / 0
except Exception:
exc_info = sys.exc_info()
logger.info("Exception caught",
exc_info=exc_info if as_tuple else True)
assert active_handler.records[0].exc_info is not None
assert active_handler.records[0].exc_info == exc_info
def test_to_dict(logger, active_handler):
try:
1 / 0
except Exception:
logger.exception()
record = active_handler.records[0]
exported = record.to_dict()
record.close()
imported = logbook.LogRecord.from_dict(exported)
for key, value in iteritems(record.__dict__):
if key[0] == '_':
continue
assert value == getattr(imported, key)
def test_pickle(active_handler, logger):
try:
1 / 0
except Exception:
logger.exception()
record = active_handler.records[0]
record.pull_information()
record.close()
for p in xrange(pickle.HIGHEST_PROTOCOL):
exported = pickle.dumps(record, p)
imported = pickle.loads(exported)
for key, value in iteritems(record.__dict__):
if key[0] == '_':
continue
imported_value = getattr(imported, key)
if isinstance(value, ZeroDivisionError):
# in Python 3.2, ZeroDivisionError(x) != ZeroDivisionError(x)
assert type(value) is type(imported_value)
assert value.args == imported_value.args
else:
assert value == imported_value
| bsd-3-clause | 8,424,146,303,829,561,000 | 29.031915 | 77 | 0.618491 | false |
google-code-export/django-hotclub | libs/external_libs/dateutil/dateutil/parser.py | 13 | 32492 | # -*- coding:iso-8859-1 -*-
"""
Copyright (c) 2003-2005 Gustavo Niemeyer <[email protected]>
This module offers extensions to the standard python 2.3+
datetime module.
"""
__author__ = "Gustavo Niemeyer <[email protected]>"
__license__ = "PSF License"
import os.path
import string
import sys
import time
import datetime
import relativedelta
import tz
__all__ = ["parse", "parserinfo"]
# Some pointers:
#
# http://www.cl.cam.ac.uk/~mgk25/iso-time.html
# http://www.iso.ch/iso/en/prods-services/popstds/datesandtime.html
# http://www.w3.org/TR/NOTE-datetime
# http://ringmaster.arc.nasa.gov/tools/time_formats.html
# http://search.cpan.org/author/MUIR/Time-modules-2003.0211/lib/Time/ParseDate.pm
# http://stein.cshl.org/jade/distrib/docs/java.text.SimpleDateFormat.html
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
class _timelex:
def __init__(self, instream):
if isinstance(instream, basestring):
instream = StringIO(instream)
self.instream = instream
self.wordchars = ('abcdfeghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ_'
'ßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ'
'ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞ')
self.numchars = '0123456789'
self.whitespace = ' \t\r\n'
self.charstack = []
self.tokenstack = []
self.eof = False
def get_token(self):
if self.tokenstack:
return self.tokenstack.pop(0)
seenletters = False
token = None
state = None
wordchars = self.wordchars
numchars = self.numchars
whitespace = self.whitespace
while not self.eof:
if self.charstack:
nextchar = self.charstack.pop(0)
else:
nextchar = self.instream.read(1)
while nextchar == '\x00':
nextchar = self.instream.read(1)
if not nextchar:
self.eof = True
break
elif not state:
token = nextchar
if nextchar in wordchars:
state = 'a'
elif nextchar in numchars:
state = '0'
elif nextchar in whitespace:
token = ' '
break # emit token
else:
break # emit token
elif state == 'a':
seenletters = True
if nextchar in wordchars:
token += nextchar
elif nextchar == '.':
token += nextchar
state = 'a.'
else:
self.charstack.append(nextchar)
break # emit token
elif state == '0':
if nextchar in numchars:
token += nextchar
elif nextchar == '.':
token += nextchar
state = '0.'
else:
self.charstack.append(nextchar)
break # emit token
elif state == 'a.':
seenletters = True
if nextchar == '.' or nextchar in wordchars:
token += nextchar
elif nextchar in numchars and token[-1] == '.':
token += nextchar
state = '0.'
else:
self.charstack.append(nextchar)
break # emit token
elif state == '0.':
if nextchar == '.' or nextchar in numchars:
token += nextchar
elif nextchar in wordchars and token[-1] == '.':
token += nextchar
state = 'a.'
else:
self.charstack.append(nextchar)
break # emit token
if (state in ('a.', '0.') and
(seenletters or token.count('.') > 1 or token[-1] == '.')):
l = token.split('.')
token = l[0]
for tok in l[1:]:
self.tokenstack.append('.')
if tok:
self.tokenstack.append(tok)
return token
def __iter__(self):
return self
def next(self):
token = self.get_token()
if token is None:
raise StopIteration
return token
def split(cls, s):
return list(cls(s))
split = classmethod(split)
class _resultbase(object):
def __init__(self):
for attr in self.__slots__:
setattr(self, attr, None)
def _repr(self, classname):
l = []
for attr in self.__slots__:
value = getattr(self, attr)
if value is not None:
l.append("%s=%s" % (attr, `value`))
return "%s(%s)" % (classname, ", ".join(l))
def __repr__(self):
return self._repr(self.__class__.__name__)
class parserinfo:
# m from a.m/p.m, t from ISO T separator
JUMP = [" ", ".", ",", ";", "-", "/", "'",
"at", "on", "and", "ad", "m", "t", "of",
"st", "nd", "rd", "th"]
WEEKDAYS = [("Mon", "Monday"),
("Tue", "Tuesday"),
("Wed", "Wednesday"),
("Thu", "Thursday"),
("Fri", "Friday"),
("Sat", "Saturday"),
("Sun", "Sunday")]
MONTHS = [("Jan", "January"),
("Feb", "February"),
("Mar", "March"),
("Apr", "April"),
("May", "May"),
("Jun", "June"),
("Jul", "July"),
("Aug", "August"),
("Sep", "September"),
("Oct", "October"),
("Nov", "November"),
("Dec", "December")]
HMS = [("h", "hour", "hours"),
("m", "minute", "minutes"),
("s", "second", "seconds")]
AMPM = [("am", "a"),
("pm", "p")]
UTCZONE = ["UTC", "GMT", "Z"]
PERTAIN = ["of"]
TZOFFSET = {}
def __init__(self, dayfirst=False, yearfirst=False):
self._jump = self._convert(self.JUMP)
self._weekdays = self._convert(self.WEEKDAYS)
self._months = self._convert(self.MONTHS)
self._hms = self._convert(self.HMS)
self._ampm = self._convert(self.AMPM)
self._utczone = self._convert(self.UTCZONE)
self._pertain = self._convert(self.PERTAIN)
self.dayfirst = dayfirst
self.yearfirst = yearfirst
self._year = time.localtime().tm_year
self._century = self._year/100*100
def _convert(self, lst):
dct = {}
for i in range(len(lst)):
v = lst[i]
if isinstance(v, tuple):
for v in v:
dct[v.lower()] = i
else:
dct[v.lower()] = i
return dct
def jump(self, name):
return name.lower() in self._jump
def weekday(self, name):
if len(name) >= 3:
try:
return self._weekdays[name.lower()]
except KeyError:
pass
return None
def month(self, name):
if len(name) >= 3:
try:
return self._months[name.lower()]+1
except KeyError:
pass
return None
def hms(self, name):
try:
return self._hms[name.lower()]
except KeyError:
return None
def ampm(self, name):
try:
return self._ampm[name.lower()]
except KeyError:
return None
def pertain(self, name):
return name.lower() in self._pertain
def utczone(self, name):
return name.lower() in self._utczone
def tzoffset(self, name):
if name in self._utczone:
return 0
return self.TZOFFSET.get(name)
def convertyear(self, year):
if year < 100:
year += self._century
if abs(year-self._year) >= 50:
if year < self._year:
year += 100
else:
year -= 100
return year
def validate(self, res):
# move to info
if res.year is not None:
res.year = self.convertyear(res.year)
if res.tzoffset == 0 and not res.tzname or res.tzname == 'Z':
res.tzname = "UTC"
res.tzoffset = 0
elif res.tzoffset != 0 and res.tzname and self.utczone(res.tzname):
res.tzoffset = 0
return True
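# The class-level vocabularies above can be extended by subclassing; a purely
# illustrative parserinfo that knows one extra timezone abbreviation:
#
#   class BRSTParserInfo(parserinfo):
#       TZOFFSET = {"BRST": -10800}   # offset in seconds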
class parser:
def __init__(self, info=parserinfo):
if issubclass(info, parserinfo):
self.info = parserinfo()
elif isinstance(info, parserinfo):
self.info = info
else:
raise TypeError, "Unsupported parserinfo type"
def parse(self, timestr, default=None,
ignoretz=False, tzinfos=None,
**kwargs):
if not default:
default = datetime.datetime.now().replace(hour=0, minute=0,
second=0, microsecond=0)
res = self._parse(timestr, **kwargs)
if res is None:
raise ValueError, "unknown string format"
repl = {}
for attr in ["year", "month", "day", "hour",
"minute", "second", "microsecond"]:
value = getattr(res, attr)
if value is not None:
repl[attr] = value
ret = default.replace(**repl)
if res.weekday is not None and not res.day:
ret = ret+relativedelta.relativedelta(weekday=res.weekday)
if not ignoretz:
if callable(tzinfos) or tzinfos and res.tzname in tzinfos:
if callable(tzinfos):
tzdata = tzinfos(res.tzname, res.tzoffset)
else:
tzdata = tzinfos.get(res.tzname)
if isinstance(tzdata, datetime.tzinfo):
tzinfo = tzdata
elif isinstance(tzdata, basestring):
tzinfo = tz.tzstr(tzdata)
elif isinstance(tzdata, int):
tzinfo = tz.tzoffset(res.tzname, tzdata)
else:
raise ValueError, "offset must be tzinfo subclass, " \
"tz string, or int offset"
ret = ret.replace(tzinfo=tzinfo)
elif res.tzname and res.tzname in time.tzname:
ret = ret.replace(tzinfo=tz.tzlocal())
elif res.tzoffset == 0:
ret = ret.replace(tzinfo=tz.tzutc())
elif res.tzoffset:
ret = ret.replace(tzinfo=tz.tzoffset(res.tzname, res.tzoffset))
return ret
class _result(_resultbase):
__slots__ = ["year", "month", "day", "weekday",
"hour", "minute", "second", "microsecond",
"tzname", "tzoffset"]
def _parse(self, timestr, dayfirst=None, yearfirst=None, fuzzy=False):
info = self.info
if dayfirst is None:
dayfirst = info.dayfirst
if yearfirst is None:
yearfirst = info.yearfirst
res = self._result()
l = _timelex.split(timestr)
try:
# year/month/day list
ymd = []
# Index of the month string in ymd
mstridx = -1
len_l = len(l)
i = 0
while i < len_l:
# Check if it's a number
try:
value = float(l[i])
except ValueError:
value = None
if value is not None:
# Token is a number
len_li = len(l[i])
i += 1
if (len(ymd) == 3 and len_li in (2, 4)
and (i >= len_l or (l[i] != ':' and
info.hms(l[i]) is None))):
# 19990101T23[59]
s = l[i-1]
res.hour = int(s[:2])
if len_li == 4:
res.minute = int(s[2:])
elif len_li == 6 or (len_li > 6 and l[i-1].find('.') == 6):
# YYMMDD or HHMMSS[.ss]
s = l[i-1]
if not ymd and l[i-1].find('.') == -1:
ymd.append(info.convertyear(int(s[:2])))
ymd.append(int(s[2:4]))
ymd.append(int(s[4:]))
else:
# 19990101T235959[.59]
res.hour = int(s[:2])
res.minute = int(s[2:4])
value = float(s[4:])
res.second = int(value)
if value%1:
res.microsecond = int(1000000*(value%1))
elif len_li == 8:
# YYYYMMDD
s = l[i-1]
ymd.append(int(s[:4]))
ymd.append(int(s[4:6]))
ymd.append(int(s[6:]))
elif len_li in (12, 14):
# YYYYMMDDhhmm[ss]
s = l[i-1]
ymd.append(int(s[:4]))
ymd.append(int(s[4:6]))
ymd.append(int(s[6:8]))
res.hour = int(s[8:10])
res.minute = int(s[10:12])
if len_li == 14:
res.second = int(s[12:])
elif ((i < len_l and info.hms(l[i]) is not None) or
(i+1 < len_l and l[i] == ' ' and
info.hms(l[i+1]) is not None)):
# HH[ ]h or MM[ ]m or SS[.ss][ ]s
if l[i] == ' ':
i += 1
idx = info.hms(l[i])
while True:
if idx == 0:
res.hour = int(value)
if value%1:
res.minute = int(60*(value%1))
elif idx == 1:
res.minute = int(value)
if value%1:
res.second = int(60*(value%1))
elif idx == 2:
res.second = int(value)
if value%1:
res.microsecond = int(1000000*(value%1))
i += 1
if i >= len_l or idx == 2:
break
# 12h00
try:
value = float(l[i])
except ValueError:
break
else:
i += 1
idx += 1
if i < len_l:
newidx = info.hms(l[i])
if newidx is not None:
idx = newidx
elif i+1 < len_l and l[i] == ':':
# HH:MM[:SS[.ss]]
res.hour = int(value)
i += 1
value = float(l[i])
res.minute = int(value)
if value%1:
res.second = int(60*(value%1))
i += 1
if i < len_l and l[i] == ':':
value = float(l[i+1])
res.second = int(value)
if value%1:
res.microsecond = int(1000000*(value%1))
i += 2
elif i < len_l and l[i] in ('-', '/', '.'):
sep = l[i]
ymd.append(int(value))
i += 1
if i < len_l and not info.jump(l[i]):
try:
# 01-01[-01]
ymd.append(int(l[i]))
except ValueError:
# 01-Jan[-01]
value = info.month(l[i])
if value is not None:
ymd.append(value)
assert mstridx == -1
mstridx = len(ymd)-1
else:
return None
i += 1
if i < len_l and l[i] == sep:
# We have three members
i += 1
value = info.month(l[i])
if value is not None:
ymd.append(value)
                                    assert mstridx == -1
                                    mstridx = len(ymd)-1
else:
ymd.append(int(l[i]))
i += 1
elif i >= len_l or info.jump(l[i]):
if i+1 < len_l and info.ampm(l[i+1]) is not None:
# 12 am
res.hour = int(value)
if res.hour < 12 and info.ampm(l[i+1]) == 1:
res.hour += 12
elif res.hour == 12 and info.ampm(l[i+1]) == 0:
res.hour = 0
i += 1
else:
# Year, month or day
ymd.append(int(value))
i += 1
elif info.ampm(l[i]) is not None:
# 12am
res.hour = int(value)
if res.hour < 12 and info.ampm(l[i]) == 1:
res.hour += 12
elif res.hour == 12 and info.ampm(l[i]) == 0:
res.hour = 0
i += 1
elif not fuzzy:
return None
else:
i += 1
continue
# Check weekday
value = info.weekday(l[i])
if value is not None:
res.weekday = value
i += 1
continue
# Check month name
value = info.month(l[i])
if value is not None:
ymd.append(value)
assert mstridx == -1
mstridx = len(ymd)-1
i += 1
if i < len_l:
if l[i] in ('-', '/'):
# Jan-01[-99]
sep = l[i]
i += 1
ymd.append(int(l[i]))
i += 1
if i < len_l and l[i] == sep:
# Jan-01-99
i += 1
ymd.append(int(l[i]))
i += 1
elif (i+3 < len_l and l[i] == l[i+2] == ' '
and info.pertain(l[i+1])):
# Jan of 01
# In this case, 01 is clearly year
try:
value = int(l[i+3])
except ValueError:
# Wrong guess
pass
else:
# Convert it here to become unambiguous
ymd.append(info.convertyear(value))
i += 4
continue
# Check am/pm
value = info.ampm(l[i])
if value is not None:
if value == 1 and res.hour < 12:
res.hour += 12
elif value == 0 and res.hour == 12:
res.hour = 0
i += 1
continue
# Check for a timezone name
if (res.hour is not None and len(l[i]) <= 5 and
res.tzname is None and res.tzoffset is None and
not [x for x in l[i] if x not in string.ascii_uppercase]):
res.tzname = l[i]
res.tzoffset = info.tzoffset(res.tzname)
i += 1
# Check for something like GMT+3, or BRST+3. Notice
# that it doesn't mean "I am 3 hours after GMT", but
# "my time +3 is GMT". If found, we reverse the
# logic so that timezone parsing code will get it
# right.
if i < len_l and l[i] in ('+', '-'):
l[i] = ('+', '-')[l[i] == '+']
res.tzoffset = None
if info.utczone(res.tzname):
# With something like GMT+3, the timezone
# is *not* GMT.
res.tzname = None
continue
# Check for a numbered timezone
if res.hour is not None and l[i] in ('+', '-'):
signal = (-1,1)[l[i] == '+']
i += 1
len_li = len(l[i])
if len_li == 4:
# -0300
res.tzoffset = int(l[i][:2])*3600+int(l[i][2:])*60
elif i+1 < len_l and l[i+1] == ':':
# -03:00
res.tzoffset = int(l[i])*3600+int(l[i+2])*60
i += 2
elif len_li <= 2:
# -[0]3
res.tzoffset = int(l[i][:2])*3600
else:
return None
i += 1
res.tzoffset *= signal
# Look for a timezone name between parenthesis
if (i+3 < len_l and
info.jump(l[i]) and l[i+1] == '(' and l[i+3] == ')' and
3 <= len(l[i+2]) <= 5 and
not [x for x in l[i+2]
if x not in string.ascii_uppercase]):
# -0300 (BRST)
res.tzname = l[i+2]
i += 4
continue
# Check jumps
if not (info.jump(l[i]) or fuzzy):
return None
i += 1
# Process year/month/day
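        # ymd collects the numeric candidates; mstridx (when != -1) is the
        # index of the member that came from a month name, so that member is
        # unambiguously the month.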
len_ymd = len(ymd)
if len_ymd > 3:
# More than three members!?
return None
elif len_ymd == 1 or (mstridx != -1 and len_ymd == 2):
# One member, or two members with a month string
if mstridx != -1:
res.month = ymd[mstridx]
del ymd[mstridx]
if len_ymd > 1 or mstridx == -1:
if ymd[0] > 31:
res.year = ymd[0]
else:
res.day = ymd[0]
elif len_ymd == 2:
# Two members with numbers
if ymd[0] > 31:
# 99-01
res.year, res.month = ymd
elif ymd[1] > 31:
# 01-99
res.month, res.year = ymd
elif dayfirst and ymd[1] <= 12:
# 13-01
res.day, res.month = ymd
else:
# 01-13
res.month, res.day = ymd
if len_ymd == 3:
# Three members
if mstridx == 0:
res.month, res.day, res.year = ymd
elif mstridx == 1:
if ymd[0] > 31 or (yearfirst and ymd[2] <= 31):
# 99-Jan-01
res.year, res.month, res.day = ymd
else:
# 01-Jan-01
                        # Give precedence to day-first, since
                        # two-digit years are usually hand-written.
res.day, res.month, res.year = ymd
elif mstridx == 2:
                # Month string is the last member (unusual ordering)
if ymd[1] > 31:
# 01-99-Jan
res.day, res.year, res.month = ymd
else:
# 99-01-Jan
res.year, res.day, res.month = ymd
else:
if ymd[0] > 31 or \
(yearfirst and ymd[1] <= 12 and ymd[2] <= 31):
# 99-01-01
res.year, res.month, res.day = ymd
elif ymd[0] > 12 or (dayfirst and ymd[1] <= 12):
# 13-01-01
res.day, res.month, res.year = ymd
else:
# 01-13-01
res.month, res.day, res.year = ymd
except (IndexError, ValueError, AssertionError):
return None
if not info.validate(res):
return None
return res
DEFAULTPARSER = parser()
def parse(timestr, parserinfo=None, **kwargs):
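    # Convenience wrapper around the module-level parser instance; e.g.
    # parse("2003-09-25 10:49:41") returns the matching datetime
    # (illustrative example, not part of the original source).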
if parserinfo:
return parser(parserinfo).parse(timestr, **kwargs)
else:
return DEFAULTPARSER.parse(timestr, **kwargs)
class _tzparser:
class _result(_resultbase):
__slots__ = ["stdabbr", "stdoffset", "dstabbr", "dstoffset",
"start", "end"]
class _attr(_resultbase):
__slots__ = ["month", "week", "weekday",
"yday", "jyday", "day", "time"]
def __repr__(self):
return self._repr("")
def __init__(self):
_resultbase.__init__(self)
self.start = self._attr()
self.end = self._attr()
def parse(self, tzstr):
res = self._result()
l = _timelex.split(tzstr)
try:
len_l = len(l)
i = 0
while i < len_l:
# BRST+3[BRDT[+2]]
j = i
while j < len_l and not [x for x in l[j]
if x in "0123456789:,-+"]:
j += 1
if j != i:
if not res.stdabbr:
offattr = "stdoffset"
res.stdabbr = "".join(l[i:j])
else:
offattr = "dstoffset"
res.dstabbr = "".join(l[i:j])
i = j
if (i < len_l and
(l[i] in ('+', '-') or l[i][0] in "0123456789")):
if l[i] in ('+', '-'):
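                        # POSIX TZ offsets count hours west of UTC, so an
                        # explicit '+' maps to a negative offset here.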
signal = (1,-1)[l[i] == '+']
i += 1
else:
signal = -1
len_li = len(l[i])
if len_li == 4:
# -0300
setattr(res, offattr,
(int(l[i][:2])*3600+int(l[i][2:])*60)*signal)
elif i+1 < len_l and l[i+1] == ':':
# -03:00
setattr(res, offattr,
(int(l[i])*3600+int(l[i+2])*60)*signal)
i += 2
elif len_li <= 2:
# -[0]3
setattr(res, offattr,
int(l[i][:2])*3600*signal)
else:
return None
i += 1
if res.dstabbr:
break
else:
break
if i < len_l:
for j in range(i, len_l):
                    if l[j] == ';':
                        l[j] = ','
assert l[i] == ','
i += 1
if i >= len_l:
pass
elif (8 <= l.count(',') <= 9 and
not [y for x in l[i:] if x != ','
for y in x if y not in "0123456789"]):
# GMT0BST,3,0,30,3600,10,0,26,7200[,3600]
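                # (for each of start and end: month, week (0 means a plain
                #  day-of-month), weekday or day, and time, optionally
                #  followed by the DST offset)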
for x in (res.start, res.end):
x.month = int(l[i])
i += 2
if l[i] == '-':
value = int(l[i+1])*-1
i += 1
else:
value = int(l[i])
i += 2
if value:
x.week = value
x.weekday = (int(l[i])-1)%7
else:
x.day = int(l[i])
i += 2
x.time = int(l[i])
i += 2
if i < len_l:
if l[i] in ('-','+'):
signal = (-1,1)[l[i] == "+"]
i += 1
else:
signal = 1
res.dstoffset = (res.stdoffset+int(l[i]))*signal
elif (l.count(',') == 2 and l[i:].count('/') <= 2 and
not [y for x in l[i:] if x not in (',','/','J','M',
'.','-',':')
for y in x if y not in "0123456789"]):
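                # POSIX-style rules: Jn (Julian day, leap day not counted),
                # Mm.w.d (month.week.weekday) or n (zero-based year day),
                # each optionally followed by /time.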
for x in (res.start, res.end):
if l[i] == 'J':
# non-leap year day (1 based)
i += 1
x.jyday = int(l[i])
elif l[i] == 'M':
# month[-.]week[-.]weekday
i += 1
x.month = int(l[i])
i += 1
assert l[i] in ('-', '.')
i += 1
x.week = int(l[i])
if x.week == 5:
x.week = -1
i += 1
assert l[i] in ('-', '.')
i += 1
x.weekday = (int(l[i])-1)%7
else:
# year day (zero based)
x.yday = int(l[i])+1
i += 1
if i < len_l and l[i] == '/':
i += 1
# start time
len_li = len(l[i])
if len_li == 4:
# -0300
x.time = (int(l[i][:2])*3600+int(l[i][2:])*60)
elif i+1 < len_l and l[i+1] == ':':
# -03:00
x.time = int(l[i])*3600+int(l[i+2])*60
i += 2
if i+1 < len_l and l[i+1] == ':':
i += 2
x.time += int(l[i])
elif len_li <= 2:
# -[0]3
x.time = (int(l[i][:2])*3600)
else:
return None
i += 1
assert i == len_l or l[i] == ','
i += 1
assert i >= len_l
except (IndexError, ValueError, AssertionError):
return None
return res
DEFAULTTZPARSER = _tzparser()
def _parsetz(tzstr):
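    # Illustrative example (not in the original source):
    #   _parsetz("EST5EDT,M4.1.0/02:00,M10.5.0/02:00") returns a _result
    #   holding the abbreviations, offsets in seconds and the DST rules.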
return DEFAULTTZPARSER.parse(tzstr)
# vim:ts=4:sw=4:et
| mit | -9,197,935,670,924,959,000 | 36.133714 | 81 | 0.356088 | false |
F483/ngcccbase | ngcccbase/tests/test_wallet_model.py | 5 | 4256 | #!/usr/bin/env python
import unittest
from coloredcoinlib import (ColorSet, ColorDataBuilderManager,
AidedColorDataBuilder, ThinColorData)
from ngcccbase.deterministic import DWalletAddressManager
from ngcccbase.pwallet import PersistentWallet
from ngcccbase.txcons import BasicTxSpec, InvalidTargetError
from ngcccbase.txdb import TxDb
from ngcccbase.utxodb import UTXOQuery
from ngcccbase.wallet_model import CoinQueryFactory
from ngcccbase.wallet_controller import WalletController
class TestWalletModel(unittest.TestCase):
def setUp(self):
self.path = ":memory:"
self.config = {
'hdw_master_key':
'91813223e97697c42f05e54b3a85bae601f04526c5c053ff0811747db77cfdf5f1accb50b3765377c379379cd5aa512c38bf24a57e4173ef592305d16314a0f4',
'testnet': True,
'ccc': {'colordb_path' : self.path},
}
self.pwallet = PersistentWallet(self.path, self.config)
self.pwallet.init_model()
self.model = self.pwallet.get_model()
self.colormap = self.model.get_color_map()
self.bcolorset = ColorSet(self.colormap, [''])
self.basset = self.model.get_asset_definition_manager(
).get_asset_by_moniker('bitcoin')
self.cqf = self.model.get_coin_query_factory()
def test_get_tx_db(self):
self.assertTrue(isinstance(self.model.get_tx_db(), TxDb))
def test_is_testnet(self):
self.assertTrue(self.model.is_testnet())
def test_get_coin_query_factory(self):
self.assertTrue(isinstance(self.cqf, CoinQueryFactory))
self.cqf.make_query({'color_set': self.bcolorset})
self.cqf.make_query({'color_id_set': self.bcolorset.color_id_set})
self.cqf.make_query({'asset': self.basset})
self.assertRaises(Exception, self.cqf.make_query, {})
def test_transform(self):
tx_spec = BasicTxSpec(self.model)
self.assertRaises(InvalidTargetError,
self.model.transform_tx_spec, tx_spec, 'signed')
def test_make_query(self):
q = self.model.make_coin_query({'color_set': self.bcolorset})
self.assertTrue(isinstance(q, UTXOQuery))
def test_get_address_manager(self):
m = self.model.get_address_manager()
self.assertTrue(issubclass(m.__class__, DWalletAddressManager))
def test_get_history(self):
self.config['asset_definitions'] = [
{"color_set": [""], "monikers": ["bitcoin"], "unit": 100000000},
{"color_set": ["obc:03524a4d6492e8d43cb6f3906a99be5a1bcd93916241f759812828b301f25a6c:0:153267"], "monikers": ['test'], "unit": 1},]
self.config['hdwam'] = {
"genesis_color_sets": [
["obc:03524a4d6492e8d43cb6f3906a99be5a1bcd93916241f759812828b301f25a6c:0:153267"],
],
"color_set_states": [
{"color_set": [""], "max_index": 1},
{"color_set": ["obc:03524a4d6492e8d43cb6f3906a99be5a1bcd93916241f759812828b301f25a6c:0:153267"], "max_index": 7},
]
}
self.config['bip0032'] = True
self.pwallet = PersistentWallet(self.path, self.config)
self.pwallet.init_model()
self.model = self.pwallet.get_model()
# modify model colored coin context, so test runs faster
ccc = self.model.ccc
cdbuilder = ColorDataBuilderManager(
ccc.colormap, ccc.blockchain_state, ccc.cdstore,
ccc.metastore, AidedColorDataBuilder)
ccc.colordata = ThinColorData(
cdbuilder, ccc.blockchain_state, ccc.cdstore, ccc.colormap)
wc = WalletController(self.model)
adm = self.model.get_asset_definition_manager()
asset = adm.get_asset_by_moniker('test')
self.model.utxo_man.update_all()
cq = self.model.make_coin_query({"asset": asset})
utxo_list = cq.get_result()
# send to the second address so the mempool has something
addrs = wc.get_all_addresses(asset)
wc.send_coins(asset, [addrs[1].get_color_address()], [1000])
history = self.model.get_history_for_asset(asset)
self.assertTrue(len(history) > 30)
if __name__ == '__main__':
unittest.main()
| mit | -2,543,721,033,331,689,500 | 39.533333 | 147 | 0.641917 | false |
marketdial/helloworld | accounts/forms.py | 2 | 5965 | # -*- coding: utf-8 -*-
from flask import render_template, current_app
from wtforms import *
from flask.ext.wtf import Form
from flask_mail import Message
from application import mail
from common.utils import get_signer
from accounts.models import User
class LoginForm(Form):
user = None
username = TextField(
label=u'Login',
validators=[
validators.required()
]
)
password = PasswordField(
label=u'Password',
validators=[
validators.required()
]
)
next = HiddenField()
def validate_username(form, field):
username = field.data
try:
form.user = User.objects.get(username=username)
except:
raise ValidationError(u'Login not found.')
def validate_password(form, field):
password = field.data
if form.user:
if not form.user.check_password(password):
raise ValidationError(u'Incorrect password.')
class SignupForm(Form):
email = TextField(
label=u'E-mail',
validators=[
validators.required(),
validators.length(max=100),
validators.Email()
]
)
def validate_email(form, field):
email = field.data
if User.objects.filter(email=email):
raise ValidationError(u'E-mail in use.')
def save(self):
email = self.email.data
site_name = current_app.config['PROJECT_SITE_NAME']
site_url = current_app.config['PROJECT_SITE_URL']
sender = current_app.config['MAIL_DEFAULT_SENDER']
# create signed data
s = get_signer()
data = {
'email': email,
'signup': True
}
signed_data = s.dumps(data)
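        # the signed token is embedded in the confirmation link e-mailed to
        # the user, so the address can be verified when the link is followed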
# set context to template render
context = dict(
site_name=site_name,
site_url=site_url,
email=email,
signed_data=signed_data
)
# load template
html = render_template(
'accounts/emails/signup.html', **context
)
# create and send message
msg = Message(
u'Confirm your account - {0}.'.format(site_name),
sender=sender,
recipients=[email]
)
msg.html = html
mail.send(msg)
class SignupConfirmForm(Form):
name = TextField(
label=u'Name',
validators=[
validators.required(),
validators.length(max=100)
]
)
username = TextField(
label=u'Login',
validators=[
validators.required(),
validators.length(min=3, max=30),
validators.Regexp(
regex=r'^[\w]+$',
message=u'Just letters and numbers.'
)
]
)
password = PasswordField(
label=u'Password',
validators=[
validators.required(),
validators.length(min=6, max=16)
]
)
password_confirm = PasswordField(
label=u'Password Confirm',
validators=[
validators.required()
]
)
next = HiddenField()
def validate_username(form, field):
username = field.data
if User.objects.filter(username=username):
raise ValidationError(u'Login in use.')
def validate_password_confirm(form, field):
password = form.password.data
password_confirm = field.data
if password != password_confirm:
            raise ValidationError(u'Passwords do not match.')
def save(self, email):
name = self.name.data
username = self.username.data
password = self.password.data
email = email
user = User(name=name, username=username, password=password, email=email)
user.save()
return user
class RecoverPasswordForm(Form):
email = TextField(
label=u'E-mail',
validators=[
validators.required(),
validators.length(max=100),
validators.Email()
]
)
def validate_email(form, field):
email = field.data
if not User.objects.filter(email=email):
raise ValidationError(u'E-mail not found.')
def save(self):
email = self.email.data
site_name = current_app.config['PROJECT_SITE_NAME']
site_url = current_app.config['PROJECT_SITE_URL']
sender = current_app.config['MAIL_DEFAULT_SENDER']
# create signed data
s = get_signer()
data = {
'email': email,
'recover-password': True
}
signed_data = s.dumps(data)
# set context to template render
context = dict(
site_name=site_name,
site_url=site_url,
email=email,
signed_data=signed_data
)
# load template
html = render_template(
'accounts/emails/recover_password.html', **context
)
# create and send message
msg = Message(
u'Recover your password - {0}.'.format(site_name),
sender=sender,
recipients=[email]
)
msg.html = html
mail.send(msg)
class RecoverPasswordConfirmForm(Form):
password = PasswordField(
label=u'Password',
validators=[
validators.required(),
validators.length(min=6, max=16)
]
)
password_confirm = PasswordField(
label=u'Password Confirm',
validators=[
validators.required()
]
)
def validate_password_confirm(form, field):
password = form.password.data
password_confirm = field.data
if password != password_confirm:
            raise ValidationError(u'Passwords do not match.')
def save(self):
password = self.password.data
self.user.set_password(password)
self.user.save()
return self.user
| mit | -3,401,161,158,409,010,700 | 23.854167 | 81 | 0.553562 | false |
daniaki/Enrich2 | enrich2/gui/seqlib_apply_dialog.py | 1 | 3571 | # Copyright 2016 Alan F Rubin
#
# This file is part of Enrich2.
#
# Enrich2 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Enrich2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Enrich2. If not, see <http://www.gnu.org/licenses/>.
import tkinter as tk
import tkinter.ttk
import tkinter.simpledialog
import tkinter.messagebox
import tkinter.filedialog
class SeqLibApplyDialog(tkinter.simpledialog.Dialog):
"""
Confirmation dialog box for applying FASTQ filtering
options to selected SeqLibs from the Treeview.
"""
def __init__(self, parent_window, tree, source_id,
title="Confirm Filtering Changes"):
self.tree = tree
self.source_id = source_id
sel = self.tree.treeview.selection()
tree_elem = self.tree.get_element(self.source_id)
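        # targets: every other selected element of the same type as the
        # source element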
self.target_ids = [
x for x in sel
if x != source_id
            and isinstance(self.tree.get_element(x), type(tree_elem))
]
tkinter.simpledialog.Dialog.__init__(self, parent_window, title)
def body(self, master):
"""
Generates the required text listing all SeqLibs that
will have their FASTQ options updated.
Displays the "OK" and "Cancel" buttons.
"""
if len(self.target_ids) == 0:
message_string = "No elegible SeqLibs selected."
elif len(self.target_ids) == 1:
message_string = \
'Apply FASTQ filtering options from "{}" to "{}"?' \
''.format(
self.tree.get_element(self.source_id).name,
self.tree.get_element(self.target_ids[0]).name
)
else:
bullet = " " + u"\u25C6"
message_string = \
                'Apply FASTQ filtering options from "{}" to the following?' \
'\n'.format(self.tree.get_element(self.source_id).name)
for x in self.target_ids:
message_string += u"{bullet} {name}\n".format(
bullet=bullet, name=self.tree.get_element(x).name)
message = tkinter.ttk.Label(
master, text=message_string, justify="left")
message.grid(row=0, sticky="w")
def buttonbox(self):
"""
Display only one button if there's no selection. Otherwise,
use the default method to display two buttons.
"""
if len(self.target_ids) == 0:
box = tk.Frame(self)
w = tk.Button(
box, text="OK", width=10,
command=self.cancel, default="active")
w.pack(side="left", padx=5, pady=5)
self.bind("<Return>", self.cancel)
box.pack()
else:
tkinter.simpledialog.Dialog.buttonbox(self)
def apply(self):
"""
Called when the user chooses "OK". Performs the FASTQ
filtering update.
"""
filter_cfg = self.tree.get_element(self.source_id).serialize_filters()
for x in self.target_ids:
self.tree.get_element(x).filters = filter_cfg
self.tree.refresh_treeview()
| gpl-3.0 | 2,738,175,926,527,774,000 | 36.197917 | 78 | 0.596752 | false |
shinexwang/Classy | Testing/TestMain/profParser.py | 1 | 4044 | """
Copyright 2013 Shine Wang
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import urllib
from webParser import CustomHTMLParser
class RateMyProfParser:
"""We will retrieve the attributes of the instructor"""
# call str.format(last name, school id, pageNum)
# OKAY, this search relies on the assumption that
# the last name is unique enough to narrow profs down to ~10
# which is the limit for a page
requestURL = "http://www.ratemyprofessors.com/SelectTeacher.jsp" \
"?searchName={}&search_submit1=Search&sid={}&pageNo={}"
cacheFile = "teacherCache.txt"
cache = {}
gotCache = False
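    # lookups are memoised in a class-level dict backed by teacherCache.txt,
    # so each professor name only hits the website once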
def __init__(self, name, schoolID="1490"): # professor's name
# 1490 = Waterloo
# TODO: look up some other schools' IDs?
self.name = name.strip()
self.schoolID = schoolID
self.webData = []
def getCache(self):
"""get values of teacher cache"""
if self.gotCache:
# we only want to read the file once
return
self.gotCache = True
try:
# file data stored in standard "key\nvalue\n" format
with open(self.cacheFile, "r") as f:
name = f.readline().strip()
while name:
self.cache[name] = eval(f.readline().strip())
name = f.readline().strip()
except:
return
def getInfo(self):
"""will return (avgRating, numRatings) if exists.
Else, return None"""
# get cache names (if they exist)
self.getCache()
if self.name in self.cache:
return self.cache[self.name]
if self.name == "":
# lecture/tutorial has no name
return
# start at page 1
pageNum = 1
while pageNum <= 3: # if there are 60 Wang's, for example, tough
# two possible errors (page out of range, or website down)
err = self.getWebData(pageNum)
if err:
return
ret = self.parseWebData()
if ret:
# form of: (# ratings, overall quality, easiness)
with open(self.cacheFile, "a") as f:
f.write(self.name + "\n")
f.write(str(ret) + "\n")
return ret
else:
self.webData = [] # clear the data
pageNum += 1
def getWebData(self, pageNum):
"""fetching data from the webpage"""
try:
URL = self.requestURL.format(self.name.split()[1],
self.schoolID, str(pageNum))
page = urllib.urlopen(URL)
parser = CustomHTMLParser(self.webData)
parser.feed(page.read().replace(" ", " "))
for data in self.webData:
if "Invalid page number" in data or \
"didn't return any results for professors" in data:
# essentially, page out of range
return "InvalidPageError"
except:
return "WebPageError"
def parseWebData(self):
"""parsing the webData list to get attrs"""
"""if we have the desirable attributes"""
firstName, lastName = self.name.split()
for i, data in enumerate(self.webData):
if firstName in data and lastName in data:
# we found it!
return (int(self.webData[i+4]), float(self.webData[i+6]),
float(self.webData[i+8]))
| apache-2.0 | 4,193,864,331,926,947,300 | 34.165217 | 75 | 0.566024 | false |
luhanhan/horizon | openstack_dashboard/test/test_data/heat_data.py | 50 | 11072 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heatclient.v1 import resource_types
from heatclient.v1 import services
from heatclient.v1 import stacks
from openstack_dashboard.test.test_data import utils
# A slightly hacked up copy of a sample cloudformation template for testing.
TEMPLATE = """
{
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "AWS CloudFormation Sample Template.",
"Parameters": {
"KeyName": {
"Description": "Name of an EC2 Key Pair to enable SSH access to the instances",
"Type": "String"
},
"InstanceType": {
"Description": "WebServer EC2 instance type",
"Type": "String",
"Default": "m1.small",
"AllowedValues": [
"m1.tiny",
"m1.small",
"m1.medium",
"m1.large",
"m1.xlarge"
],
"ConstraintDescription": "must be a valid EC2 instance type."
},
"DBName": {
"Default": "wordpress",
"Description": "The WordPress database name",
"Type": "String",
"MinLength": "1",
"MaxLength": "64",
"AllowedPattern": "[a-zA-Z][a-zA-Z0-9]*",
"ConstraintDescription": "must begin with a letter and..."
},
"DBUsername": {
"Default": "admin",
"NoEcho": "true",
"Description": "The WordPress database admin account username",
"Type": "String",
"MinLength": "1",
"MaxLength": "16",
"AllowedPattern": "[a-zA-Z][a-zA-Z0-9]*",
"ConstraintDescription": "must begin with a letter and..."
},
"DBPassword": {
"Default": "admin",
"NoEcho": "true",
"Description": "The WordPress database admin account password",
"Type": "String",
"MinLength": "1",
"MaxLength": "41",
"AllowedPattern": "[a-zA-Z0-9]*",
"ConstraintDescription": "must contain only alphanumeric characters."
},
"DBRootPassword": {
"Default": "admin",
"NoEcho": "true",
"Description": "Root password for MySQL",
"Type": "String",
"MinLength": "1",
"MaxLength": "41",
"AllowedPattern": "[a-zA-Z0-9]*",
"ConstraintDescription": "must contain only alphanumeric characters."
},
"LinuxDistribution": {
"Default": "F17",
"Description": "Distribution of choice",
"Type": "String",
"AllowedValues": [
"F18",
"F17",
"U10",
"RHEL-6.1",
"RHEL-6.2",
"RHEL-6.3"
]
},
"Network": {
"Type": "String",
"CustomConstraint": "neutron.network"
}
},
"Mappings": {
"AWSInstanceType2Arch": {
"m1.tiny": {
"Arch": "32"
},
"m1.small": {
"Arch": "64"
},
"m1.medium": {
"Arch": "64"
},
"m1.large": {
"Arch": "64"
},
"m1.xlarge": {
"Arch": "64"
}
},
"DistroArch2AMI": {
"F18": {
"32": "F18-i386-cfntools",
"64": "F18-x86_64-cfntools"
},
"F17": {
"32": "F17-i386-cfntools",
"64": "F17-x86_64-cfntools"
},
"U10": {
"32": "U10-i386-cfntools",
"64": "U10-x86_64-cfntools"
},
"RHEL-6.1": {
"32": "rhel61-i386-cfntools",
"64": "rhel61-x86_64-cfntools"
},
"RHEL-6.2": {
"32": "rhel62-i386-cfntools",
"64": "rhel62-x86_64-cfntools"
},
"RHEL-6.3": {
"32": "rhel63-i386-cfntools",
"64": "rhel63-x86_64-cfntools"
}
}
},
"Resources": {
"WikiDatabase": {
"Type": "AWS::EC2::Instance",
"Metadata": {
"AWS::CloudFormation::Init": {
"config": {
"packages": {
"yum": {
"mysql": [],
"mysql-server": [],
"httpd": [],
"wordpress": []
}
},
"services": {
"systemd": {
"mysqld": {
"enabled": "true",
"ensureRunning": "true"
},
"httpd": {
"enabled": "true",
"ensureRunning": "true"
}
}
}
}
}
},
"Properties": {
"ImageId": {
"Fn::FindInMap": [
"DistroArch2AMI",
{
"Ref": "LinuxDistribution"
},
{
"Fn::FindInMap": [
"AWSInstanceType2Arch",
{
"Ref": "InstanceType"
},
"Arch"
]
}
]
},
"InstanceType": {
"Ref": "InstanceType"
},
"KeyName": {
"Ref": "KeyName"
},
"UserData": {
"Fn::Base64": {
"Fn::Join": [
"",
[
"#!/bin/bash -v\n",
"/opt/aws/bin/cfn-init\n"
]
]
}
}
}
}
},
"Outputs": {
"WebsiteURL": {
"Value": {
"Fn::Join": [
"",
[
"http://",
{
"Fn::GetAtt": [
"WikiDatabase",
"PublicIp"
]
},
"/wordpress"
]
]
},
"Description": "URL for Wordpress wiki"
}
}
}
"""
VALIDATE = """
{
"Description": "AWS CloudFormation Sample Template.",
"Parameters": {
"DBUsername": {
"Type": "String",
"Description": "The WordPress database admin account username",
"Default": "admin",
"MinLength": "1",
"AllowedPattern": "[a-zA-Z][a-zA-Z0-9]*",
"NoEcho": "true",
"MaxLength": "16",
"ConstraintDescription": "must begin with a letter and..."
},
"LinuxDistribution": {
"Default": "F17",
"Type": "String",
"Description": "Distribution of choice",
"AllowedValues": [
"F18",
"F17",
"U10",
"RHEL-6.1",
"RHEL-6.2",
"RHEL-6.3"
]
},
"DBRootPassword": {
"Type": "String",
"Description": "Root password for MySQL",
"Default": "admin",
"MinLength": "1",
"AllowedPattern": "[a-zA-Z0-9]*",
"NoEcho": "true",
"MaxLength": "41",
"ConstraintDescription": "must contain only alphanumeric characters."
},
"KeyName": {
"Type": "String",
"Description": "Name of an EC2 Key Pair to enable SSH access to the instances"
},
"DBName": {
"Type": "String",
"Description": "The WordPress database name",
"Default": "wordpress",
"MinLength": "1",
"AllowedPattern": "[a-zA-Z][a-zA-Z0-9]*",
"MaxLength": "64",
"ConstraintDescription": "must begin with a letter and..."
},
"DBPassword": {
"Type": "String",
"Description": "The WordPress database admin account password",
"Default": "admin",
"MinLength": "1",
"AllowedPattern": "[a-zA-Z0-9]*",
"NoEcho": "true",
"MaxLength": "41",
"ConstraintDescription": "must contain only alphanumeric characters."
},
"InstanceType": {
"Default": "m1.small",
"Type": "String",
"ConstraintDescription": "must be a valid EC2 instance type.",
"Description": "WebServer EC2 instance type",
"AllowedValues": [
"m1.tiny",
"m1.small",
"m1.medium",
"m1.large",
"m1.xlarge"
]
},
"Network": {
"Type": "String",
"CustomConstraint": "neutron.network"
}
}
}
"""
ENVIRONMENT = """
parameters:
InstanceType: m1.xsmall
db_password: verybadpass
KeyName: heat_key
"""
class Environment(object):
def __init__(self, data):
self.data = data
class Template(object):
def __init__(self, data, validate):
self.data = data
self.validate = validate
def data(TEST):
TEST.stacks = utils.TestDataContainer()
TEST.stack_templates = utils.TestDataContainer()
TEST.stack_environments = utils.TestDataContainer()
TEST.resource_types = utils.TestDataContainer()
TEST.heat_services = utils.TestDataContainer()
# Services
service_1 = services.Service(services.ServiceManager(None), {
"status": "up",
"binary": "heat-engine",
"report_interval": 60,
"engine_id": "2f7b5a9b-c50b-4b01-8248-f89f5fb338d1",
"created_at": "2015-02-06T03:23:32.000000",
"hostname": "mrkanag",
"updated_at": "2015-02-20T09:49:52.000000",
"topic": "engine",
"host": "engine-1",
"deleted_at": None,
"id": "1efd7015-5016-4caa-b5c8-12438af7b100"
})
service_2 = services.Service(services.ServiceManager(None), {
"status": "up",
"binary": "heat-engine",
"report_interval": 60,
"engine_id": "2f7b5a9b-c50b-4b01-8248-f89f5fb338d2",
"created_at": "2015-02-06T03:23:32.000000",
"hostname": "mrkanag",
"updated_at": "2015-02-20T09:49:52.000000",
"topic": "engine",
"host": "engine-2",
"deleted_at": None,
"id": "1efd7015-5016-4caa-b5c8-12438af7b100"
})
TEST.heat_services.add(service_1)
TEST.heat_services.add(service_2)
# Data return by heatclient.
TEST.api_resource_types = utils.TestDataContainer()
for i in range(10):
stack_data = {
"description": "No description",
"links": [{
"href": "http://192.168.1.70:8004/v1/"
"051c727ee67040d6a7b7812708485a97/"
"stacks/stack-1211-38/"
"05b4f39f-ea96-4d91-910c-e758c078a089",
"rel": "self"
}],
"parameters": {
'DBUsername': '******',
'InstanceType': 'm1.small',
'AWS::StackId': (
'arn:openstack:heat::2ce287:stacks/teststack/88553ec'),
'DBRootPassword': '******',
'AWS::StackName': "teststack{0}".format(i),
'DBPassword': '******',
'AWS::Region': 'ap-southeast-1',
'DBName': u'wordpress'
},
"stack_status_reason": "Stack successfully created",
"stack_name": "stack-test{0}".format(i),
"creation_time": "2013-04-22T00:11:39Z",
"updated_time": "2013-04-22T00:11:39Z",
"stack_status": "CREATE_COMPLETE",
"id": "05b4f39f-ea96-4d91-910c-e758c078a089{0}".format(i)
}
stack = stacks.Stack(stacks.StackManager(None), stack_data)
TEST.stacks.add(stack)
TEST.stack_templates.add(Template(TEMPLATE, VALIDATE))
TEST.stack_environments.add(Environment(ENVIRONMENT))
# Resource types list
r_type_1 = {
"resource_type": "AWS::CloudFormation::Stack",
"attributes": {},
"properties": {
"Parameters": {
"description":
"The set of parameters passed to this nested stack.",
"immutable": False,
"required": False,
"type": "map",
"update_allowed": True},
"TemplateURL": {
"description": "The URL of a template that specifies"
" the stack to be created as a resource.",
"immutable": False,
"required": True,
"type": "string",
"update_allowed": True},
"TimeoutInMinutes": {
"description": "The length of time, in minutes,"
" to wait for the nested stack creation.",
"immutable": False,
"required": False,
"type": "number",
"update_allowed": True}
}
}
r_type_2 = {
"resource_type": "OS::Heat::CloudConfig",
"attributes": {
"config": {
"description": "The config value of the software config."}
},
"properties": {
"cloud_config": {
"description": "Map representing the cloud-config data"
" structure which will be formatted as YAML.",
"immutable": False,
"required": False,
"type": "map",
"update_allowed": False}
}
}
r_types_list = [r_type_1, r_type_2]
for rt in r_types_list:
r_type = resource_types.ResourceType(
resource_types.ResourceTypeManager(None), rt['resource_type'])
TEST.resource_types.add(r_type)
TEST.api_resource_types.add(rt)
| apache-2.0 | 8,469,637,906,561,849,000 | 22.913607 | 79 | 0.585621 | false |
detiber/lib_openshift | test/test_wrapper.py | 2 | 3097 | # coding: utf-8
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import pyfakefs.fake_filesystem_unittest as fake_fs_unittest
import json
from nose.tools import assert_raises, assert_is_instance
from lib_openshift import Wrapper,WrapperException
class TestWrapper(fake_fs_unittest.TestCase):
""" Wrapper unit test stubs """
def setUp(self):
self.setUpPyfakefs()
def tearDown(self):
pass
def test_wrapper_no_args_kubeconfig(self):
kubeconfig = """
apiVersion: v1
clusters:
- cluster:
server: http://localhost:8080
name: local-server
contexts:
- context:
cluster: local-server
namespace: the-right-prefix
user: myself
name: default-context
current-context: default-context
kind: Config
preferences: {}
users:
- name: myself
user:
password: secret
username: admin"""
self.fs.CreateFile(os.path.expanduser('~/.kube/config'), contents=kubeconfig)
self.assertTrue(os.path.exists(os.path.expanduser('~/.kube/config')))
#wrapper = Wrapper()
#TODO: finish this test
def test_wrapper_no_args_no_kubeconfig(self):
self.assertFalse(os.path.exists(os.path.expanduser('~/.kube/config')))
assert_raises(WrapperException, Wrapper)
try:
Wrapper()
except WrapperException as e:
assert_is_instance(json.loads(str(e)), dict)
def test_wrapper_invalid_auth_args(self):
self.assertFalse(os.path.exists(os.path.expanduser('~/.kube/config')))
assert_raises(WrapperException, Wrapper, username="blue")
assert_raises(WrapperException, Wrapper, password="green")
assert_raises(WrapperException, Wrapper, client_cert="here")
assert_raises(WrapperException, Wrapper, client_key="there")
assert_raises(WrapperException, Wrapper, username="blue", token="orange")
assert_raises(WrapperException, Wrapper, password="green", token="orange")
assert_raises(WrapperException, Wrapper, username="green", client_cert="here")
assert_raises(WrapperException, Wrapper, username="green", client_key="here")
assert_raises(WrapperException, Wrapper, password="green", client_cert="here")
assert_raises(WrapperException, Wrapper, password="green", client_key="here")
assert_raises(WrapperException, Wrapper, token="green", client_cert="here")
assert_raises(WrapperException, Wrapper, token="green", client_key="here")
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 3,484,075,424,574,969,000 | 33.411111 | 86 | 0.694866 | false |
cybertk/depot_tools | third_party/pylint/checkers/classes.py | 19 | 23939 | # Copyright (c) 2003-2011 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:[email protected]
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""classes checker for Python code
"""
from __future__ import generators
from logilab import astng
from logilab.astng import YES, Instance, are_exclusive
from pylint.interfaces import IASTNGChecker
from pylint.checkers import BaseChecker
from pylint.checkers.utils import PYMETHODS, overrides_a_method, check_messages
def class_is_abstract(node):
"""return true if the given class node should be considered as an abstract
class
"""
for method in node.methods():
if method.parent.frame() is node:
if method.is_abstract(pass_is_abstract=False):
return True
return False
MSGS = {
'F0202': ('Unable to check methods signature (%s / %s)',
'Used when PyLint has been unable to check methods signature \
compatibility for an unexpected reason. Please report this kind \
              of issue if you can\'t make sense of it.'),
    'E0202': ('An attribute defined in %s line %s hides this method',
'Used when a class defines a method which is hidden by an '
'instance attribute from an ancestor class or set by some '
'client code.'),
'E0203': ('Access to member %r before its definition line %s',
'Used when an instance member is accessed before it\'s actually\
assigned.'),
'W0201': ('Attribute %r defined outside __init__',
'Used when an instance attribute is defined outside the __init__\
method.'),
'W0212': ('Access to a protected member %s of a client class', # E0214
'Used when a protected member (i.e. class member with a name \
              beginning with an underscore) is accessed outside the class or a \
descendant of the class where it\'s defined.'),
'E0211': ('Method has no argument',
'Used when a method which should have the bound instance as \
first argument has no argument defined.'),
'E0213': ('Method should have "self" as first argument',
'Used when a method has an attribute different the "self" as\
first argument. This is considered as an error since this is\
a so common convention that you shouldn\'t break it!'),
'C0202': ('Class method should have %s as first argument', # E0212
'Used when a class method has an attribute different than "cls"\
as first argument, to easily differentiate them from regular \
instance methods.'),
'C0203': ('Metaclass method should have "mcs" as first argument', # E0214
'Used when a metaclass method has an attribute different the \
"mcs" as first argument.'),
'W0211': ('Static method with %r as first argument',
'Used when a static method has "self" or "cls" as first argument.'
),
'R0201': ('Method could be a function',
'Used when a method doesn\'t use its bound instance, and so could\
be written as a function.'
),
'E0221': ('Interface resolved to %s is not a class',
'Used when a class claims to implement an interface which is not \
a class.'),
'E0222': ('Missing method %r from %s interface',
'Used when a method declared in an interface is missing from a \
class implementing this interface'),
'W0221': ('Arguments number differs from %s method',
'Used when a method has a different number of arguments than in \
the implemented interface or in an overridden method.'),
'W0222': ('Signature differs from %s method',
'Used when a method signature is different than in the \
implemented interface or in an overridden method.'),
'W0223': ('Method %r is abstract in class %r but is not overridden',
'Used when an abstract method (i.e. raise NotImplementedError) is \
              not overridden in a concrete class.'
),
'F0220': ('failed to resolve interfaces implemented by %s (%s)', # W0224
              'Used when PyLint has failed to find interfaces implemented by \
a class'),
'W0231': ('__init__ method from base class %r is not called',
'Used when an ancestor class method has an __init__ method \
which is not called by a derived class.'),
'W0232': ('Class has no __init__ method',
'Used when a class has no __init__ method, neither its parent \
classes.'),
'W0233': ('__init__ method from a non direct base class %r is called',
'Used when an __init__ method is called on a class which is not \
in the direct ancestors for the analysed class.'),
}
class ClassChecker(BaseChecker):
"""checks for :
* methods without self as first argument
* overridden methods signature
* access only to existent members via self
* attributes not defined in the __init__ method
* supported interfaces implementation
* unreachable code
"""
__implements__ = (IASTNGChecker,)
# configuration section name
name = 'classes'
# messages
msgs = MSGS
priority = -2
# configuration options
options = (('ignore-iface-methods',
{'default' : (#zope interface
'isImplementedBy', 'deferred', 'extends', 'names',
'namesAndDescriptions', 'queryDescriptionFor', 'getBases',
'getDescriptionFor', 'getDoc', 'getName', 'getTaggedValue',
'getTaggedValueTags', 'isEqualOrExtendedBy', 'setTaggedValue',
'isImplementedByInstancesOf',
# twisted
'adaptWith',
# logilab.common interface
'is_implemented_by'),
'type' : 'csv',
'metavar' : '<method names>',
'help' : 'List of interface methods to ignore, \
separated by a comma. This is used for instance to not check methods defined \
in Zope\'s Interface base class.'}
),
('defining-attr-methods',
{'default' : ('__init__', '__new__', 'setUp'),
'type' : 'csv',
'metavar' : '<method names>',
'help' : 'List of method names used to declare (i.e. assign) \
instance attributes.'}
),
('valid-classmethod-first-arg',
{'default' : ('cls',),
'type' : 'csv',
'metavar' : '<argument names>',
'help' : 'List of valid names for the first argument in \
a class method.'}
),
)
def __init__(self, linter=None):
BaseChecker.__init__(self, linter)
self._accessed = []
self._first_attrs = []
self._meth_could_be_func = None
def visit_class(self, node):
"""init visit variable _accessed and check interfaces
"""
self._accessed.append({})
self._check_bases_classes(node)
self._check_interfaces(node)
# if not an interface, exception, metaclass
if node.type == 'class':
try:
node.local_attr('__init__')
except astng.NotFoundError:
self.add_message('W0232', args=node, node=node)
@check_messages('E0203', 'W0201')
def leave_class(self, cnode):
"""close a class node:
check that instance attributes are defined in __init__ and check
access to existent members
"""
# check access to existent members on non metaclass classes
accessed = self._accessed.pop()
if cnode.type != 'metaclass':
self._check_accessed_members(cnode, accessed)
# checks attributes are defined in an allowed method such as __init__
if 'W0201' not in self.active_msgs:
return
defining_methods = self.config.defining_attr_methods
for attr, nodes in cnode.instance_attrs.items():
nodes = [n for n in nodes if not
isinstance(n.statement(), (astng.Delete, astng.AugAssign))]
if not nodes:
continue # error detected by typechecking
attr_defined = False
            # check if any method the attr is defined in is a defining method
for node in nodes:
if node.frame().name in defining_methods:
attr_defined = True
if not attr_defined:
# check attribute is defined in a parent's __init__
for parent in cnode.instance_attr_ancestors(attr):
attr_defined = False
                    # check if any parent method the attr is defined in is a defining method
for node in parent.instance_attrs[attr]:
if node.frame().name in defining_methods:
attr_defined = True
if attr_defined:
# we're done :)
break
else:
# check attribute is defined as a class attribute
try:
cnode.local_attr(attr)
except astng.NotFoundError:
self.add_message('W0201', args=attr, node=node)
def visit_function(self, node):
"""check method arguments, overriding"""
# ignore actual functions
if not node.is_method():
return
klass = node.parent.frame()
self._meth_could_be_func = True
# check first argument is self if this is actually a method
self._check_first_arg_for_type(node, klass.type == 'metaclass')
if node.name == '__init__':
self._check_init(node)
return
# check signature if the method overloads inherited method
for overridden in klass.local_attr_ancestors(node.name):
# get astng for the searched method
try:
meth_node = overridden[node.name]
except KeyError:
# we have found the method but it's not in the local
# dictionary.
# This may happen with astng build from living objects
continue
if not isinstance(meth_node, astng.Function):
continue
self._check_signature(node, meth_node, 'overridden')
break
# check if the method overload an attribute
try:
overridden = klass.instance_attr(node.name)[0] # XXX
args = (overridden.root().name, overridden.fromlineno)
self.add_message('E0202', args=args, node=node)
except astng.NotFoundError:
pass
def leave_function(self, node):
"""on method node, check if this method couldn't be a function
ignore class, static and abstract methods, initializer,
methods overridden from a parent class and any
kind of method defined in an interface for this warning
"""
if node.is_method():
if node.args.args is not None:
self._first_attrs.pop()
if 'R0201' not in self.active_msgs:
return
class_node = node.parent.frame()
if (self._meth_could_be_func and node.type == 'method'
and not node.name in PYMETHODS
and not (node.is_abstract() or
overrides_a_method(class_node, node.name))
and class_node.type != 'interface'):
self.add_message('R0201', node=node)
def visit_getattr(self, node):
"""check if the getattr is an access to a class member
if so, register it. Also check for access to protected
class member from outside its class (but ignore __special__
methods)
"""
attrname = node.attrname
if self._first_attrs and isinstance(node.expr, astng.Name) and \
node.expr.name == self._first_attrs[-1]:
self._accessed[-1].setdefault(attrname, []).append(node)
return
if 'W0212' not in self.active_msgs:
return
if attrname[0] == '_' and not attrname == '_' and not (
attrname.startswith('__') and attrname.endswith('__')):
# XXX move this in a reusable function
klass = node.frame()
while klass is not None and not isinstance(klass, astng.Class):
if klass.parent is None:
klass = None
else:
klass = klass.parent.frame()
# XXX infer to be more safe and less dirty ??
# in classes, check we are not getting a parent method
# through the class object or through super
callee = node.expr.as_string()
if klass is None or not (callee == klass.name or
callee in klass.basenames
or (isinstance(node.expr, astng.CallFunc)
and isinstance(node.expr.func, astng.Name)
and node.expr.func.name == 'super')):
self.add_message('W0212', node=node, args=attrname)
def visit_name(self, node):
"""check if the name handle an access to a class member
if so, register it
"""
if self._first_attrs and (node.name == self._first_attrs[-1] or
not self._first_attrs[-1]):
self._meth_could_be_func = False
def _check_accessed_members(self, node, accessed):
"""check that accessed members are defined"""
# XXX refactor, probably much simpler now that E0201 is in type checker
for attr, nodes in accessed.items():
# deactivate "except doesn't do anything", that's expected
# pylint: disable=W0704
# is it a class attribute ?
try:
node.local_attr(attr)
# yes, stop here
continue
except astng.NotFoundError:
pass
# is it an instance attribute of a parent class ?
try:
node.instance_attr_ancestors(attr).next()
# yes, stop here
continue
except StopIteration:
pass
# is it an instance attribute ?
try:
defstmts = node.instance_attr(attr)
except astng.NotFoundError:
pass
else:
if len(defstmts) == 1:
defstmt = defstmts[0]
# check that if the node is accessed in the same method as
# it's defined, it's accessed after the initial assignment
frame = defstmt.frame()
lno = defstmt.fromlineno
for _node in nodes:
if _node.frame() is frame and _node.fromlineno < lno \
and not are_exclusive(_node.statement(), defstmt, ('AttributeError', 'Exception', 'BaseException')):
self.add_message('E0203', node=_node,
args=(attr, lno))
def _check_first_arg_for_type(self, node, metaclass=0):
"""check the name of first argument, expect:
* 'self' for a regular method
* 'cls' for a class method
* 'mcs' for a metaclass
* not one of the above for a static method
"""
# don't care about functions with unknown argument (builtins)
if node.args.args is None:
return
first_arg = node.args.args and node.argnames()[0]
self._first_attrs.append(first_arg)
first = self._first_attrs[-1]
# static method
if node.type == 'staticmethod':
if first_arg in ('self', 'cls', 'mcs'):
self.add_message('W0211', args=first, node=node)
self._first_attrs[-1] = None
# class / regular method with no args
elif not node.args.args:
self.add_message('E0211', node=node)
# metaclass method
elif metaclass:
if first != 'mcs':
self.add_message('C0203', node=node)
# class method
elif node.type == 'classmethod':
if first not in self.config.valid_classmethod_first_arg:
if len(self.config.valid_classmethod_first_arg) == 1:
valid = repr(self.config.valid_classmethod_first_arg[0])
else:
valid = ', '.join(
repr(v)
for v in self.config.valid_classmethod_first_arg[:-1])
valid = '%s or %r' % (
valid, self.config.valid_classmethod_first_arg[-1])
self.add_message('C0202', args=valid, node=node)
# regular method without self as argument
elif first != 'self':
self.add_message('E0213', node=node)
def _check_bases_classes(self, node):
"""check that the given class node implements abstract methods from
base classes
"""
# check if this class abstract
if class_is_abstract(node):
return
for method in node.methods():
owner = method.parent.frame()
if owner is node:
continue
# owner is not this class, it must be a parent class
# check that the ancestor's method is not abstract
if method.is_abstract(pass_is_abstract=False):
self.add_message('W0223', node=node,
args=(method.name, owner.name))
def _check_interfaces(self, node):
"""check that the given class node really implements declared
interfaces
"""
e0221_hack = [False]
def iface_handler(obj):
"""filter interface objects, it should be classes"""
if not isinstance(obj, astng.Class):
e0221_hack[0] = True
self.add_message('E0221', node=node,
args=(obj.as_string(),))
return False
return True
ignore_iface_methods = self.config.ignore_iface_methods
try:
for iface in node.interfaces(handler_func=iface_handler):
for imethod in iface.methods():
name = imethod.name
if name.startswith('_') or name in ignore_iface_methods:
# don't check method beginning with an underscore,
# usually belonging to the interface implementation
continue
# get class method astng
try:
method = node_method(node, name)
except astng.NotFoundError:
self.add_message('E0222', args=(name, iface.name),
node=node)
continue
# ignore inherited methods
if method.parent.frame() is not node:
continue
# check signature
self._check_signature(method, imethod,
'%s interface' % iface.name)
except astng.InferenceError:
if e0221_hack[0]:
return
implements = Instance(node).getattr('__implements__')[0]
assignment = implements.parent
assert isinstance(assignment, astng.Assign)
# assignment.expr can be a Name or a Tuple or whatever.
# Use as_string() for the message
# FIXME: in case of multiple interfaces, find which one could not
# be resolved
self.add_message('F0220', node=implements,
args=(node.name, assignment.value.as_string()))
def _check_init(self, node):
"""check that the __init__ method call super or ancestors'__init__
method
"""
if not set(('W0231', 'W0233')) & self.active_msgs:
return
klass_node = node.parent.frame()
to_call = _ancestors_to_call(klass_node)
not_called_yet = dict(to_call)
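        # ancestors whose __init__ is explicitly called are removed from
        # not_called_yet; anything left (except object) is reported as W0231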
for stmt in node.nodes_of_class(astng.CallFunc):
expr = stmt.func
if not isinstance(expr, astng.Getattr) \
or expr.attrname != '__init__':
continue
# skip the test if using super
if isinstance(expr.expr, astng.CallFunc) and \
isinstance(expr.expr.func, astng.Name) and \
expr.expr.func.name == 'super':
return
try:
klass = expr.expr.infer().next()
if klass is YES:
continue
try:
del not_called_yet[klass]
except KeyError:
if klass not in to_call:
self.add_message('W0233', node=expr, args=klass.name)
except astng.InferenceError:
continue
for klass in not_called_yet.keys():
if klass.name == 'object':
continue
self.add_message('W0231', args=klass.name, node=node)
def _check_signature(self, method1, refmethod, class_type):
"""check that the signature of the two given methods match
class_type is in 'class', 'interface'
"""
if not (isinstance(method1, astng.Function)
and isinstance(refmethod, astng.Function)):
self.add_message('F0202', args=(method1, refmethod), node=method1)
return
# don't care about functions with unknown argument (builtins)
if method1.args.args is None or refmethod.args.args is None:
return
# if we use *args, **kwargs, skip the below checks
if method1.args.vararg or method1.args.kwarg:
return
if len(method1.args.args) != len(refmethod.args.args):
self.add_message('W0221', args=class_type, node=method1)
elif len(method1.args.defaults) < len(refmethod.args.defaults):
self.add_message('W0222', args=class_type, node=method1)
def _ancestors_to_call(klass_node, method='__init__'):
"""return a dictionary where keys are the list of base classes providing
the queried method, and so that should/may be called from the method node
"""
to_call = {}
for base_node in klass_node.ancestors(recurs=False):
try:
base_node.local_attr(method)
to_call[base_node] = 1
except astng.NotFoundError:
continue
return to_call
def node_method(node, method_name):
"""get astng for <method_name> on the given class node, ensuring it
is a Function node
"""
for n in node.local_attr(method_name):
if isinstance(n, astng.Function):
return n
raise astng.NotFoundError(method_name)
def register(linter):
"""required method to auto register this checker """
linter.register_checker(ClassChecker(linter))
| bsd-3-clause | -1,408,638,957,608,805,400 | 42.289331 | 127 | 0.55867 | false |
Lukc/ospace-lukc | client-pygame/lib/osci/dialog/NewMessageDlg.py | 1 | 7294 | #
# Copyright 2001 - 2006 Ludek Smid [http://www.ospace.net/]
#
# This file is part of IGE - Outer Space.
#
# IGE - Outer Space is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# IGE - Outer Space is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with IGE - Outer Space; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
import pygameui as ui
from osci import gdata, res, client
from ige.ospace.Const import *
import ige, string
class NewMessageDlg:
def __init__(self, app):
self.app = app
self.createUI()
def display(self, caller, objID, objType, forum, msgID = None):
self.caller = caller
self.msgSpec = gdata.mailboxSpec[objType, gdata.mailboxStripLang(forum)]
messages = client.get(objID)._messages
if self.msgSpec[1] == None:
# cannot reply
return
elif self.msgSpec[1] == "forum":
self.recipientObjID = [objID]
self.recipientForum = forum
if msgID != None:
self.topic = messages[msgID]["topic"]
else:
self.topic = ""
elif self.msgSpec[1] == "sender" and msgID != None:
message = messages[msgID]
self.recipientObjID = [message["senderID"]]
self.recipientForum = "INBOX"
self.topic = message["topic"]
if self.topic[:4] != "Re: ":
self.topic = "Re: %s" % self.topic
elif self.msgSpec[1]:
self.recipientObjID = []
self.recipientForum = "INBOX"
self.topic = ""
self.show()
self.win.show()
# register for updates
if self not in gdata.updateDlgs:
gdata.updateDlgs.append(self)
def hide(self):
self.win.setStatus(_("Ready."))
self.win.hide()
# unregister updates
if self in gdata.updateDlgs:
gdata.updateDlgs.remove(self)
def update(self):
pass
# self.show()
def show(self):
if len(self.recipientObjID) > 0:
text = ""
for objId in self.recipientObjID:
recipient = client.get(objId)
text = u"%s, %s" % (text, _("%s / %s") % (recipient.name, _(self.msgSpec[0])))
self.win.vRecipient.text = text[2:]
self.win.vRecipient.action = None
else:
self.win.vRecipient.text = _("[Click to select]")
self.win.vRecipient.action = "onSelectContact"
if self.topic:
self.win.vTopic.text = self.topic
self.win.vTopic.enabled = 0
else:
self.win.vTopic.text = ""
self.win.vTopic.enabled = 1
self.win.vText.text = [""]
def onSelectContact(self, widget, action, data):
player = client.getPlayer()
items = []
for contactID in player.diplomacyRels:
contact = client.get(contactID)
item = ui.Item(contact.name, tRecipientID = contactID)
items.append(item)
self.cwin.vContacts.items = items
self.cwin.vContacts.itemsChanged()
self.cwin.show()
def onContactCancel(self, widget, action, data):
self.cwin.hide()
def onContactSelected(self, widget, action, data):
self.recipientObjID = []
text = ""
for item in self.cwin.vContacts.selection:
self.recipientObjID.append(item.tRecipientID)
recipient = client.get(item.tRecipientID)
text = u"%s, %s" % (text, _("%s / %s") % (recipient.name, _(self.msgSpec[0])))
self.win.vRecipient.text = text[2:]
self.cwin.hide()
def onCancel(self, widget, action, data):
self.hide()
def onSend(self, widget, action, data):
if not self.recipientObjID:
self.win.setStatus(_("Select a recipient, please."))
return
if not self.win.vTopic.text:
self.win.setStatus(_("Specify a topic, please."))
return
if self.win.vText.text == [""]:
self.win.setStatus(_("Type a message, please."))
return
try:
self.win.setStatus(_("Executing SEND MESSAGE command..."))
message = {
"forum": self.recipientForum,
"topic": self.win.vTopic.text,
"text": string.join(self.win.vText.text, "\n"),
}
# send message to all recipients
for objID in self.recipientObjID:
client.cmdProxy.sendMsg(objID, message)
# put message into outbox if forum is INBOX
if self.recipientForum == "INBOX":
recipients = ""
for objID in self.recipientObjID:
recipient = client.get(objID)
recipients = u"%s, %s" % (recipients, recipient.name)
message = {
"forum": "OUTBOX",
"topic": "To %s - %s" % (
recipients[2:],
self.win.vTopic.text,
),
"text": _("To %s / %s:\n\n%s") % (
recipients[2:],
_(self.msgSpec[0]),
"\n".join(self.win.vText.text),
)
}
client.cmdProxy.sendMsg(client.getPlayerID(), message)
self.win.setStatus(_("Command has been executed."))
except ige.GameException, e:
self.win.setStatus(e.args[0])
return
client.getMessages()
self.hide()
self.caller.update()
def createUI(self):
w, h = gdata.scrnSize
width = 764 # 38 * 20 + 4
height = 464 # 23 * 20 + 4
self.win = ui.Window(self.app,
modal = 1,
escKeyClose = 1,
movable = 0,
title = _("New message"),
rect = ui.Rect((w - width) / 2, (h - height) / 2, width, height),
layoutManager = ui.SimpleGridLM(),
tabChange = True
)
self.win.subscribeAction('*', self)
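        # layout tuples appear to be (column, row, width, height) in grid
        # cells (the window is sized for a 38 x 23 grid, see the math above)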
# headers
ui.Label(self.win, layout = (0, 0, 5, 1), text = _("Recipient"), align = ui.ALIGN_W)
ui.ActiveLabel(self.win, layout = (5, 0, 33, 1), id = "vRecipient", align = ui.ALIGN_W)
ui.Label(self.win, layout = (0, 1, 5, 1), text = _("Subject"), align = ui.ALIGN_W)
ui.Entry(self.win, layout = (5, 1, 33, 1), id = "vTopic", align = ui.ALIGN_W, orderNo = 1)
ui.Title(self.win, layout = (0, 2, 38, 1), text = _("Message"),
font = "normal-bold", align = ui.ALIGN_W)
s = ui.Scrollbar(self.win, layout = (37, 3, 1, 18))
t = ui.Text(self.win, layout = (0, 3, 37, 18), id = "vText", orderNo = 2)
t.attachVScrollbar(s)
# info
ui.Title(self.win, layout = (0, 21, 28, 1), id = 'vStatusBar', align = ui.ALIGN_W)
ui.TitleButton(self.win, layout = (28, 21, 5, 1), text = _("Cancel"), action = 'onCancel')
ui.TitleButton(self.win, layout = (33, 21, 5, 1), text = _("Send"), action = 'onSend')
# status bar
#self.win.statusBar = self.win.vStatusBar
#
# contact window
#
width = 304 # 15 * 20 + 4
height = 264 # 13 * 20 + 4
self.cwin = ui.Window(self.app,
modal = 1,
escKeyClose = 1,
titleOnly = 0,
movable = 0,
title = _("Select recipient"),
rect = ui.Rect((w - width) / 2, (h - height) / 2, width, height),
layoutManager = ui.SimpleGridLM(),
)
self.cwin.subscribeAction('*', self)
# rename
ui.Listbox(self.cwin, layout = (0, 0, 15, 11), id = 'vContacts', columnLabels = 0,
columns = ((None, 'text', 0, ui.ALIGN_W),), multiselection = 1, sortedBy=('text', 1))
# status bar + submit/cancel
ui.TitleButton(self.cwin, layout = (10, 11, 5, 1), text = _("Select"), action = 'onContactSelected')
ui.TitleButton(self.cwin, layout = (5, 11, 5, 1), text = _("Cancel"), action = 'onContactCancel')
ui.Title(self.cwin, id = 'vStatusBar', layout = (0, 11, 5, 1), align = ui.ALIGN_W)
#self.cwin.statusBar = self.cwin.vStatusBar | gpl-2.0 | 9,155,487,491,296,367,000 | 31.86036 | 102 | 0.643543 | false |
thinkopensolutions/server-tools | module_prototyper/tests/test_prototype_module_export.py | 2 | 3093 | # -*- coding: utf-8 -*-
# #############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2010 - 2014 Savoir-faire Linux
# (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from odoo.tests import common
import zipfile
import StringIO
class TestPrototypeModuleExport(common.TransactionCase):
def setUp(self):
super(TestPrototypeModuleExport, self).setUp()
self.main_model = self.env['module_prototyper.module.export']
self.prototype_model = self.env['module_prototyper']
self.module_category_model = self.env[
'ir.module.category'
]
self.prototype = self.prototype_model.create({
'name': 't_name',
'category_id': self.module_category_model.browse(1).id,
'human_name': 't_human_name',
'summary': 't_summary',
'description': 't_description',
'author': 't_author',
'maintainer': 't_maintainer',
'website': 't_website',
})
self.exporter = self.main_model.create({'name': 't_name'})
def test_action_export_assert_for_wrong_active_model(self):
"""Test if the assertion raises."""
exporter = self.main_model.with_context(
active_model='t_active_model'
).create({})
self.assertRaises(
AssertionError,
exporter.action_export,
[exporter.id],
)
def test_action_export_update_wizard(self):
"""Test if the wizard is updated during the process."""
exporter = self.main_model.with_context(
active_model=self.prototype_model._name,
active_id=self.prototype.id
).create({})
exporter.action_export(exporter.id)
self.assertEqual(exporter.state, 'get')
self.assertEqual(exporter.name, '%s.zip' % (self.prototype.name,))
def test_zip_files_returns_tuple(self):
"""Test the method return of the method that generate the zip file."""
ret = self.main_model.zip_files(self.exporter, [self.prototype])
self.assertIsInstance(ret, tuple)
self.assertIsInstance(
ret.zip_file, zipfile.ZipFile
)
self.assertIsInstance(
ret.stringIO, StringIO.StringIO
)
| agpl-3.0 | 8,163,227,980,017,495,000 | 37.6625 | 79 | 0.602651 | false |
datapythonista/pandas | pandas/tests/io/test_compression.py | 3 | 8199 | import io
import os
from pathlib import Path
import subprocess
import sys
import textwrap
import time
import pytest
import pandas as pd
import pandas._testing as tm
import pandas.io.common as icom
@pytest.mark.parametrize(
"obj",
[
pd.DataFrame(
100 * [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
columns=["X", "Y", "Z"],
),
pd.Series(100 * [0.123456, 0.234567, 0.567567], name="X"),
],
)
@pytest.mark.parametrize("method", ["to_pickle", "to_json", "to_csv"])
def test_compression_size(obj, method, compression_only):
with tm.ensure_clean() as path:
getattr(obj, method)(path, compression=compression_only)
compressed_size = os.path.getsize(path)
getattr(obj, method)(path, compression=None)
uncompressed_size = os.path.getsize(path)
assert uncompressed_size > compressed_size
@pytest.mark.parametrize(
"obj",
[
pd.DataFrame(
100 * [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
columns=["X", "Y", "Z"],
),
pd.Series(100 * [0.123456, 0.234567, 0.567567], name="X"),
],
)
@pytest.mark.parametrize("method", ["to_csv", "to_json"])
def test_compression_size_fh(obj, method, compression_only):
with tm.ensure_clean() as path:
with icom.get_handle(path, "w", compression=compression_only) as handles:
getattr(obj, method)(handles.handle)
assert not handles.handle.closed
compressed_size = os.path.getsize(path)
with tm.ensure_clean() as path:
with icom.get_handle(path, "w", compression=None) as handles:
getattr(obj, method)(handles.handle)
assert not handles.handle.closed
uncompressed_size = os.path.getsize(path)
assert uncompressed_size > compressed_size
@pytest.mark.parametrize(
"write_method, write_kwargs, read_method",
[
("to_csv", {"index": False}, pd.read_csv),
("to_json", {}, pd.read_json),
("to_pickle", {}, pd.read_pickle),
],
)
def test_dataframe_compression_defaults_to_infer(
write_method, write_kwargs, read_method, compression_only
):
# GH22004
input = pd.DataFrame([[1.0, 0, -4], [3.4, 5, 2]], columns=["X", "Y", "Z"])
extension = icom._compression_to_extension[compression_only]
with tm.ensure_clean("compressed" + extension) as path:
getattr(input, write_method)(path, **write_kwargs)
output = read_method(path, compression=compression_only)
tm.assert_frame_equal(output, input)
@pytest.mark.parametrize(
"write_method,write_kwargs,read_method,read_kwargs",
[
("to_csv", {"index": False, "header": True}, pd.read_csv, {"squeeze": True}),
("to_json", {}, pd.read_json, {"typ": "series"}),
("to_pickle", {}, pd.read_pickle, {}),
],
)
def test_series_compression_defaults_to_infer(
write_method, write_kwargs, read_method, read_kwargs, compression_only
):
# GH22004
input = pd.Series([0, 5, -2, 10], name="X")
extension = icom._compression_to_extension[compression_only]
with tm.ensure_clean("compressed" + extension) as path:
getattr(input, write_method)(path, **write_kwargs)
output = read_method(path, compression=compression_only, **read_kwargs)
tm.assert_series_equal(output, input, check_names=False)
def test_compression_warning(compression_only):
# Assert that passing a file object to to_csv while explicitly specifying a
# compression protocol triggers a RuntimeWarning, as per GH21227.
df = pd.DataFrame(
100 * [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
columns=["X", "Y", "Z"],
)
with tm.ensure_clean() as path:
with icom.get_handle(path, "w", compression=compression_only) as handles:
with tm.assert_produces_warning(RuntimeWarning):
df.to_csv(handles.handle, compression=compression_only)
def test_compression_binary(compression_only):
"""
Binary file handles support compression.
GH22555
"""
df = tm.makeDataFrame()
# with a file
with tm.ensure_clean() as path:
with open(path, mode="wb") as file:
df.to_csv(file, mode="wb", compression=compression_only)
file.seek(0) # file shouldn't be closed
tm.assert_frame_equal(
df, pd.read_csv(path, index_col=0, compression=compression_only)
)
# with BytesIO
file = io.BytesIO()
df.to_csv(file, mode="wb", compression=compression_only)
file.seek(0) # file shouldn't be closed
tm.assert_frame_equal(
df, pd.read_csv(file, index_col=0, compression=compression_only)
)
def test_gzip_reproducibility_file_name():
"""
Gzip should create reproducible archives with mtime.
Note: Archives created with different filenames will still be different!
GH 28103
"""
df = tm.makeDataFrame()
compression_options = {"method": "gzip", "mtime": 1}
# test for filename
with tm.ensure_clean() as path:
path = Path(path)
df.to_csv(path, compression=compression_options)
time.sleep(2)
output = path.read_bytes()
df.to_csv(path, compression=compression_options)
assert output == path.read_bytes()
def test_gzip_reproducibility_file_object():
"""
Gzip should create reproducible archives with mtime.
GH 28103
"""
df = tm.makeDataFrame()
compression_options = {"method": "gzip", "mtime": 1}
# test for file object
buffer = io.BytesIO()
df.to_csv(buffer, compression=compression_options, mode="wb")
output = buffer.getvalue()
time.sleep(2)
buffer = io.BytesIO()
df.to_csv(buffer, compression=compression_options, mode="wb")
assert output == buffer.getvalue()
def test_with_missing_lzma():
"""Tests if import pandas works when lzma is not present."""
# https://github.com/pandas-dev/pandas/issues/27575
code = textwrap.dedent(
"""\
import sys
sys.modules['lzma'] = None
import pandas
"""
)
subprocess.check_output([sys.executable, "-c", code], stderr=subprocess.PIPE)
def test_with_missing_lzma_runtime():
"""Tests if RuntimeError is hit when calling lzma without
having the module available.
"""
code = textwrap.dedent(
"""
import sys
import pytest
sys.modules['lzma'] = None
import pandas as pd
df = pd.DataFrame()
with pytest.raises(RuntimeError, match='lzma module'):
df.to_csv('foo.csv', compression='xz')
"""
)
subprocess.check_output([sys.executable, "-c", code], stderr=subprocess.PIPE)
@pytest.mark.parametrize(
"obj",
[
pd.DataFrame(
100 * [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
columns=["X", "Y", "Z"],
),
pd.Series(100 * [0.123456, 0.234567, 0.567567], name="X"),
],
)
@pytest.mark.parametrize("method", ["to_pickle", "to_json", "to_csv"])
def test_gzip_compression_level(obj, method):
# GH33196
with tm.ensure_clean() as path:
getattr(obj, method)(path, compression="gzip")
compressed_size_default = os.path.getsize(path)
getattr(obj, method)(path, compression={"method": "gzip", "compresslevel": 1})
compressed_size_fast = os.path.getsize(path)
assert compressed_size_default < compressed_size_fast
@pytest.mark.parametrize(
"obj",
[
pd.DataFrame(
100 * [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
columns=["X", "Y", "Z"],
),
pd.Series(100 * [0.123456, 0.234567, 0.567567], name="X"),
],
)
@pytest.mark.parametrize("method", ["to_pickle", "to_json", "to_csv"])
def test_bzip_compression_level(obj, method):
"""GH33196 bzip needs file size > 100k to show a size difference between
compression levels, so here we just check if the call works when
compression is passed as a dict.
"""
with tm.ensure_clean() as path:
getattr(obj, method)(path, compression={"method": "bz2", "compresslevel": 1})
| bsd-3-clause | 6,395,101,653,837,909,000 | 31.796 | 86 | 0.620807 | false |
veblush/PyPhabricatorDb | sample/create-task-summary.py | 1 | 1927 | import sys
import codecs
import datetime
from pyphabricatordb import *
from sqlalchemy.orm import sessionmaker
def create_session():
DBSession = sessionmaker()
return DBSession()
def get_user(session, phid):
return session.query(user.User).filter(user.User.phid == phid).first()
def create_task_range_summary(session, file, start_date, end_date):
tasks = []
for t in session.query(maniphest.Task).filter(maniphest.Task.dateModified >= start_date).all():
t.status_last_modified_date = t.dateCreated
t.last_commit_date = None
for trx in t.transactions:
if trx.transactionType == u"status":
t.status_last_modified_date = max(t.status_last_modified_date, trx.dateModified)
if trx.transactionType == u"core:edge":
t.last_commit_date = trx.dateModified if t.last_commit_date is None else max(t.last_commit_date, trx.dateModified)
if t.status_last_modified_date >= start_date and t.status_last_modified_date < end_date:
tasks.append(t)
user_tasks = {}
for t in tasks:
owner = get_user(session, t.ownerPHID)
owner_name = owner.realName if owner else ""
user_tasks.setdefault(owner_name, [])
user_tasks[owner_name].append(t)
    for user_name, tasks in user_tasks.iteritems():
        # write to the supplied output stream (stdout or the file given on the command line)
        file.write(u"[ %s ]\n" % user_name)
        for t in tasks:
            if t.last_commit_date and t.last_commit_date >= start_date and t.last_commit_date < end_date:
                file.write(u"  %s %s\n" % (t.id, t.title))
def main():
db_url = sys.argv[1] if len(sys.argv) >= 2 else 'mysql://localhost'
file = codecs.open(sys.argv[2], "wb", "utf-8") if len(sys.argv) >= 3 else sys.stdout
connector.connect_all(db_url)
session = create_session()
create_task_range_summary(session, file, datetime.datetime(2014, 12, 1), datetime.datetime(2014, 12, 8))
if __name__ == "__main__":
main()
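# Example invocation sketch (the connection URL and output path are
# illustrative assumptions, not values shipped with this sample):
#
#   python create-task-summary.py mysql://user:pass@host/phabricator summary.txt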
| mit | -207,738,207,165,663,500 | 38.326531 | 130 | 0.641412 | false |
wecatch/app-turbo | turbo/mongo_model.py | 1 | 10225 | # -*- coding:utf-8 -*-
from __future__ import (
absolute_import,
division,
print_function,
with_statement,
)
from collections import defaultdict
from datetime import datetime
import functools
import time
from bson.objectid import ObjectId
from turbo.log import model_log
from turbo.util import escape as _es, import_object
def _record(x):
return defaultdict(lambda: None, x)
def convert_to_record(func):
"""Wrap mongodb record to a dict record with default value None
"""
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
result = func(self, *args, **kwargs)
if result is not None:
if isinstance(result, dict):
return _record(result)
return (_record(i) for i in result)
return result
return wrapper
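# A minimal sketch of the decorator's effect; the method and query below are
# hypothetical and only illustrate the defaultdict wrapping:
#
#   @convert_to_record
#   def find_user(self, uid):
#       return self._collect.find_one({'uid': uid})
#
#   record = model.find_user(uid)
#   record['missing_field']   # -> None instead of raising KeyError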
class MixinModel(object):
@staticmethod
def utctimestamp(seconds=None):
if seconds:
return long(time.mktime(time.gmtime(seconds)))
else:
return long(time.mktime(time.gmtime()))
@staticmethod
def timestamp():
return long(time.time())
@staticmethod
def datetime(dt=None):
if dt:
return datetime.strptime(dt, '%Y-%m-%d %H:%M')
else:
return datetime.now()
@staticmethod
def utcdatetime(dt=None):
if dt:
return datetime.strptime(dt, '%Y-%m-%d %H:%M')
else:
return datetime.utcnow()
@classmethod
def to_one_str(cls, value, *args, **kwargs):
"""Convert single record's values to str
"""
if kwargs.get('wrapper'):
return cls._wrapper_to_one_str(value)
return _es.to_dict_str(value)
@classmethod
def to_str(cls, values, callback=None):
"""Convert many records's values to str
"""
if callback and callable(callback):
if isinstance(values, dict):
return callback(_es.to_str(values))
return [callback(_es.to_str(i)) for i in values]
return _es.to_str(values)
@staticmethod
@convert_to_record
def _wrapper_to_one_str(value):
return _es.to_dict_str(value)
@staticmethod
def default_encode(v):
return _es.default_encode(v)
@staticmethod
def json_encode(v):
return _es.json_encode(v)
@staticmethod
def json_decode(v):
return _es.json_decode(v)
@staticmethod
def to_objectid(objid):
return _es.to_objectid(objid)
@staticmethod
def create_objectid():
"""Create new objectid
"""
return ObjectId()
_instance = {}
@classmethod
def instance(cls, name):
"""Instantiate a model class according to import path
args:
name: class import path like `user.User`
return:
model instance
"""
if not cls._instance.get(name):
model_name = name.split('.')
ins_name = '.'.join(
['models', model_name[0], 'model', model_name[1]])
cls._instance[name] = cls.import_model(ins_name)()
return cls._instance[name]
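    # Usage sketch ('user.User' is a hypothetical import path, assuming a
    # models/user/model.py module that defines a User model):
    #
    #   user_model = SomeModel.instance('user.User')
    #   # resolves models.user.model.User once and returns the cached instance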
@classmethod
def import_model(cls, ins_name):
"""Import model class in models package
"""
try:
package_space = getattr(cls, 'package_space')
except AttributeError:
            raise ValueError('package_space does not exist')
else:
return import_object(ins_name, package_space)
@staticmethod
def default_record():
"""Generate one default record that return empty str when key not exist
"""
return defaultdict(lambda: '')
def collection_method_call(turbo_connect_ins, name):
def outwrapper(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if name in turbo_connect_ins._write_operators:
turbo_connect_ins._model_ins.write_action_call(
name, *args, **kwargs)
if name in turbo_connect_ins._read_operators:
turbo_connect_ins._model_ins.read_action_call(
name, *args, **kwargs)
return func(*args, **kwargs)
return wrapper
return outwrapper
class MongoTurboConnect(object):
_write_operators = frozenset([
'insert',
'save',
'update',
'find_and_modify',
'bulk_write',
'insert_one',
'insert_many',
'replace_one',
'update_one',
'update_many',
'delete_one',
'delete_many',
'find_one_and_delete',
'find_one_and_replace',
'find_one_and_update',
'create_index',
'drop_index',
'create_indexes',
'drop_indexes',
'drop',
'remove',
'ensure_index',
'rename',
])
_read_operators = frozenset([
'find',
'find_one',
'count',
'index_information',
])
def __init__(self, model_ins, db_collect=None):
self._model_ins = model_ins
self._collect = db_collect
def __getattr__(self, name):
collection_method = getattr(self._collect, name)
if callable(collection_method):
return collection_method_call(self, name)(collection_method)
return collection_method
def __getitem__(self, name):
"""Sub-collection
"""
return self._collect[name]
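# Interception sketch (the model instance and queries below are illustrative
# assumptions): every pymongo call routed through MongoTurboConnect also
# notifies the owning model.
#
#   wrapped = MongoTurboConnect(model_ins, raw_collection)
#   wrapped.find_one({'_id': oid})      # triggers model_ins.read_action_call('find_one', ...)
#   wrapped.update({'_id': oid}, doc)   # triggers model_ins.write_action_call('update', ...)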
class AbstractModel(MixinModel):
"""
    name = None                 mongodb collection name
    field = None                collection record map: {field_name: (type, default)}
    column = None               fields to select when querying
    index = [
        tuple([('uid', 1)])
    ]                           query indexes
"""
_operators = frozenset([
'$set',
'$unset',
'$rename',
'$currentDate',
'$inc',
'$max',
'$min',
'$mul',
'$setOnInsert',
'$addToSet',
'$pop',
'$pushAll',
'$push',
'$pull'])
PRIMARY_KEY_TYPE = ObjectId
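    # A hypothetical concrete subclass, to illustrate the attributes described
    # in the class docstring (collection and field names are assumptions):
    #
    #   class User(AbstractModel):
    #       name = 'user'
    #       field = {
    #           '_id': (ObjectId, None),
    #           'nickname': (str, ''),
    #           'create_time': (datetime, None),
    #       }
    #       index = [tuple([('nickname', 1)])]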
def _init(self, db_name, _mongo_db_mapping):
if _mongo_db_mapping is None:
raise Exception("db mapping is invalid")
# databases
db = _mongo_db_mapping['db']
# databases file
db_file = _mongo_db_mapping['db_file']
        # database name
        if db_name not in db or db.get(db_name, None) is None:
            raise Exception('%s is invalid database' % db_name)
# collection name
if not self.name:
raise Exception('%s is invalid collection name' % self.name)
# collection field
if not self.field or not isinstance(self.field, dict):
raise Exception('%s is invalid collection field' % self.field)
# collect as private variable
collect = getattr(db.get(db_name, object), self.name, None)
if collect is None:
raise Exception('%s is invalid collection' % self.name)
# replace pymongo collect with custome connect
_collect = MongoTurboConnect(self, collect)
# gridfs as private variable
_gridfs = db_file.get(db_name, None)
if _gridfs is None:
model_log.info('%s is invalid gridfs' % _gridfs)
return _collect, _gridfs
def _to_primary_key(self, _id):
if self.PRIMARY_KEY_TYPE is ObjectId:
return self.to_objectid(_id)
return _id
def __setitem__(self, k, v):
setattr(self, k, v)
def __getitem__(self, k):
return getattr(self, k)
def __str__(self):
if isinstance(self.field, dict):
return str(self.field)
return None
def sub_collection(self, name):
raise NotImplementedError()
def find_by_id(self, _id, column=None):
raise NotImplementedError()
def remove_by_id(self, _id):
raise NotImplementedError()
def find_new_one(self, *args, **kwargs):
"""return latest one record sort by _id
"""
raise NotImplementedError()
def get_as_dict(self, condition=None, column=None, skip=0, limit=0, sort=None):
raise NotImplementedError()
def _valide_update_document(self, document):
for opk in document.keys():
if not opk.startswith('$') or opk not in self._operators:
raise ValueError('invalid document update operator')
if not document:
raise ValueError('empty document update not allowed')
def _valid_record(self, record):
if not isinstance(record, dict):
raise Exception('%s record is not dict' % record)
rset = set(record.keys())
fset = set(self.field.keys())
rset.discard('_id')
fset.discard('_id')
if not (fset ^ rset) <= fset:
            raise Exception('record keys do not match field keys %s' % (
list((fset ^ rset) - fset)))
for k, v in self.field.items():
if k not in record:
if v[0] is datetime and not v[1]:
record[k] = self.datetime()
continue
if v[0] is time and not v[1]:
record[k] = self.timestamp()
continue
record[k] = v[1]
return record
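    # Default-filling sketch (the field spec is hypothetical): with
    # field = {'nickname': (str, ''), 'create_time': (datetime, None)},
    # _valid_record({'nickname': 'bob'}) returns the record extended with
    # create_time set to datetime.now() and any other missing keys set to
    # their declared defaults.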
def inc(self, spec_or_id, key, num=1):
raise NotImplementedError()
def put(self, value, **kwargs):
"""gridfs put method
"""
raise NotImplementedError()
def delete(self, _id):
"""gridfs delete method
"""
raise NotImplementedError()
def get(self, _id):
"""gridfs get method
"""
raise NotImplementedError()
def read(self, _id):
"""gridfs read method
"""
raise NotImplementedError()
def write_action_call(self, name, *args, **kwargs):
"""Execute when write action occurs, note: in this method write action must be called asynchronously
"""
pass
def read_action_call(self, name, *args, **kwargs):
"""Execute when read action occurs, note: in this method read action must be called asynchronously
"""
pass
| apache-2.0 | 3,731,095,449,400,572,000 | 25.697128 | 108 | 0.551589 | false |
jeremiahyan/odoo | odoo/addons/base/models/ir_qweb.py | 1 | 19636 | # -*- coding: utf-8 -*-
from __future__ import print_function
import ast
import copy
import logging
from collections import OrderedDict
from time import time
from lxml import html
from lxml import etree
from odoo import api, models, tools
from odoo.tools.safe_eval import assert_valid_codeobj, _BUILTINS, _SAFE_OPCODES
from odoo.tools.misc import get_lang
from odoo.http import request
from odoo.modules.module import get_resource_path
from odoo.addons.base.models.qweb import QWeb, Contextifier, MarkupSafeBytes
from odoo.addons.base.models.assetsbundle import AssetsBundle
from odoo.addons.base.models.ir_asset import can_aggregate, STYLE_EXTENSIONS, SCRIPT_EXTENSIONS
_logger = logging.getLogger(__name__)
class IrQWeb(models.AbstractModel, QWeb):
""" Base QWeb rendering engine
* to customize ``t-field`` rendering, subclass ``ir.qweb.field`` and
create new models called :samp:`ir.qweb.field.{widget}`
Beware that if you need extensions or alterations which could be
incompatible with other subsystems, you should create a local object
inheriting from ``ir.qweb`` and customize that.
"""
_name = 'ir.qweb'
_description = 'Qweb'
@api.model
def _render(self, id_or_xml_id, values=None, **options):
""" render(id_or_xml_id, values, **options)
Render the template specified by the given name.
:param id_or_xml_id: name or etree (see get_template)
:param dict values: template values to be used for rendering
:param options: used to compile the template (the dict available for the rendering is frozen)
* ``load`` (function) overrides the load method
* ``profile`` (float) profile the rendering (use astor lib) (filter
profile line with time ms >= profile)
"""
context = dict(self.env.context, dev_mode='qweb' in tools.config['dev_mode'])
context.update(options)
result = super(IrQWeb, self)._render(id_or_xml_id, values=values, **context)
if b'data-pagebreak=' not in result:
return result
fragments = html.fragments_fromstring(result.decode('utf-8'))
for fragment in fragments:
for row in fragment.iterfind('.//tr[@data-pagebreak]'):
table = next(row.iterancestors('table'))
newtable = html.Element('table', attrib=dict(table.attrib))
thead = table.find('thead')
if thead:
newtable.append(copy.deepcopy(thead))
# TODO: copy caption & tfoot as well?
# TODO: move rows in a tbody if row.getparent() is one?
pos = row.get('data-pagebreak')
assert pos in ('before', 'after')
for sibling in row.getparent().iterchildren('tr'):
if sibling is row:
if pos == 'after':
newtable.append(sibling)
break
newtable.append(sibling)
table.addprevious(newtable)
table.addprevious(html.Element('div', attrib={
'style': 'page-break-after: always'
}))
return MarkupSafeBytes(b''.join(html.tostring(f) for f in fragments))
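    # Typical call sketch, e.g. from a controller or report (the template name
    # and values are illustrative assumptions):
    #
    #   html = env['ir.qweb']._render('my_module.report_template',
    #                                 {'docs': records, 'company': company})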
def default_values(self):
""" attributes add to the values for each computed template
"""
default = super(IrQWeb, self).default_values()
default.update(request=request, cache_assets=round(time()/180), true=True, false=False) # true and false added for backward compatibility to remove after v10
return default
# assume cache will be invalidated by third party on write to ir.ui.view
def _get_template_cache_keys(self):
""" Return the list of context keys to use for caching ``_get_template``. """
return ['lang', 'inherit_branding', 'editable', 'translatable', 'edit_translations', 'website_id']
# apply ormcache_context decorator unless in dev mode...
@tools.conditional(
'xml' not in tools.config['dev_mode'],
tools.ormcache('id_or_xml_id', 'tuple(options.get(k) for k in self._get_template_cache_keys())'),
)
def compile(self, id_or_xml_id, options):
try:
id_or_xml_id = int(id_or_xml_id)
except:
pass
return super(IrQWeb, self).compile(id_or_xml_id, options=options)
def _load(self, name, options):
lang = options.get('lang', get_lang(self.env).code)
env = self.env
if lang != env.context.get('lang'):
env = env(context=dict(env.context, lang=lang))
view_id = self.env['ir.ui.view'].get_view_id(name)
template = env['ir.ui.view'].sudo()._read_template(view_id)
# QWeb's `_read_template` will check if one of the first children of
# what we send to it has a "t-name" attribute having `name` as value
# to consider it has found it. As it'll never be the case when working
# with view ids or children view or children primary views, force it here.
def is_child_view(view_name):
view_id = self.env['ir.ui.view'].get_view_id(view_name)
view = self.env['ir.ui.view'].sudo().browse(view_id)
return view.inherit_id is not None
if isinstance(name, int) or is_child_view(name):
view = etree.fromstring(template)
for node in view:
if node.get('t-name'):
node.set('t-name', str(name))
return view
else:
return template
# order
def _directives_eval_order(self):
directives = super(IrQWeb, self)._directives_eval_order()
directives.insert(directives.index('call'), 'lang')
directives.insert(directives.index('field'), 'call-assets')
return directives
# compile directives
def _compile_directive_lang(self, el, options):
lang = el.attrib.pop('t-lang', get_lang(self.env).code)
if el.get('t-call-options'):
el.set('t-call-options', el.get('t-call-options')[0:-1] + u', "lang": %s}' % lang)
else:
el.set('t-call-options', u'{"lang": %s}' % lang)
return self._compile_node(el, options)
def _compile_directive_call_assets(self, el, options):
""" This special 't-call' tag can be used in order to aggregate/minify javascript and css assets"""
if len(el):
raise SyntaxError("t-call-assets cannot contain children nodes")
# nodes = self._get_asset_nodes(bundle, options, css=css, js=js, debug=values.get('debug'), async=async, values=values)
#
# for index, (tagName, t_attrs, content) in enumerate(nodes):
# if index:
# append('\n ')
# append('<')
# append(tagName)
#
# self._post_processing_att(tagName, t_attrs, options)
# for name, value in t_attrs.items():
# if value or isinstance(value, string_types)):
# append(u' ')
# append(name)
# append(u'="')
# append(escape(pycompat.to_text((value)))
# append(u'"')
#
# if not content and tagName in self._void_elements:
# append('/>')
# else:
# append('>')
# if content:
# append(content)
# append('</')
# append(tagName)
# append('>')
#
space = el.getprevious() is not None and el.getprevious().tail or el.getparent().text
sep = u'\n' + space.rsplit('\n').pop()
return [
ast.Assign(
targets=[ast.Name(id='nodes', ctx=ast.Store())],
value=ast.Call(
func=ast.Attribute(
value=ast.Name(id='self', ctx=ast.Load()),
attr='_get_asset_nodes',
ctx=ast.Load()
),
args=[
ast.Str(el.get('t-call-assets')),
ast.Name(id='options', ctx=ast.Load()),
],
keywords=[
ast.keyword('css', self._get_attr_bool(el.get('t-css', True))),
ast.keyword('js', self._get_attr_bool(el.get('t-js', True))),
ast.keyword('debug', ast.Call(
func=ast.Attribute(
value=ast.Name(id='values', ctx=ast.Load()),
attr='get',
ctx=ast.Load()
),
args=[ast.Str('debug')],
keywords=[], starargs=None, kwargs=None
)),
ast.keyword('async_load', self._get_attr_bool(el.get('async_load', False))),
ast.keyword('defer_load', self._get_attr_bool(el.get('defer_load', False))),
ast.keyword('lazy_load', self._get_attr_bool(el.get('lazy_load', False))),
ast.keyword('media', ast.Constant(el.get('media'))),
],
starargs=None, kwargs=None
)
),
ast.For(
target=ast.Tuple(elts=[
ast.Name(id='index', ctx=ast.Store()),
ast.Tuple(elts=[
ast.Name(id='tagName', ctx=ast.Store()),
ast.Name(id='t_attrs', ctx=ast.Store()),
ast.Name(id='content', ctx=ast.Store())
], ctx=ast.Store())
], ctx=ast.Store()),
iter=ast.Call(
func=ast.Name(id='enumerate', ctx=ast.Load()),
args=[ast.Name(id='nodes', ctx=ast.Load())],
keywords=[],
starargs=None, kwargs=None
),
body=[
ast.If(
test=ast.Name(id='index', ctx=ast.Load()),
body=[self._append(ast.Str(sep))],
orelse=[]
),
self._append(ast.Str(u'<')),
self._append(ast.Name(id='tagName', ctx=ast.Load())),
] + self._append_attributes() + [
ast.If(
test=ast.BoolOp(
op=ast.And(),
values=[
ast.UnaryOp(ast.Not(), ast.Name(id='content', ctx=ast.Load()), lineno=0, col_offset=0),
ast.Compare(
left=ast.Name(id='tagName', ctx=ast.Load()),
ops=[ast.In()],
comparators=[ast.Attribute(
value=ast.Name(id='self', ctx=ast.Load()),
attr='_void_elements',
ctx=ast.Load()
)]
),
]
),
body=[self._append(ast.Str(u'/>'))],
orelse=[
self._append(ast.Str(u'>')),
ast.If(
test=ast.Name(id='content', ctx=ast.Load()),
body=[self._append(ast.Name(id='content', ctx=ast.Load()))],
orelse=[]
),
self._append(ast.Str(u'</')),
self._append(ast.Name(id='tagName', ctx=ast.Load())),
self._append(ast.Str(u'>')),
]
)
],
orelse=[]
)
]
# method called by computing code
def get_asset_bundle(self, bundle_name, files, env=None, css=True, js=True):
return AssetsBundle(bundle_name, files, env=env, css=css, js=js)
def _get_asset_nodes(self, bundle, options, css=True, js=True, debug=False, async_load=False, defer_load=False, lazy_load=False, media=None):
"""Generates asset nodes.
If debug=assets, the assets will be regenerated when a file which composes them has been modified.
Else, the assets will be generated only once and then stored in cache.
"""
if debug and 'assets' in debug:
return self._generate_asset_nodes(bundle, options, css, js, debug, async_load, defer_load, lazy_load, media)
else:
return self._generate_asset_nodes_cache(bundle, options, css, js, debug, async_load, defer_load, lazy_load, media)
@tools.conditional(
# in non-xml-debug mode we want assets to be cached forever, and the admin can force a cache clear
# by restarting the server after updating the source code (or using the "Clear server cache" in debug tools)
'xml' not in tools.config['dev_mode'],
tools.ormcache_context('bundle', 'options.get("lang", "en_US")', 'css', 'js', 'debug', 'async_load', 'defer_load', 'lazy_load', keys=("website_id",)),
)
def _generate_asset_nodes_cache(self, bundle, options, css=True, js=True, debug=False, async_load=False, defer_load=False, lazy_load=False, media=None):
return self._generate_asset_nodes(bundle, options, css, js, debug, async_load, defer_load, lazy_load, media)
def _generate_asset_nodes(self, bundle, options, css=True, js=True, debug=False, async_load=False, defer_load=False, lazy_load=False, media=None):
nodeAttrs = None
if css and media:
nodeAttrs = {
'media': media,
}
files, remains = self._get_asset_content(bundle, options, nodeAttrs)
asset = self.get_asset_bundle(bundle, files, env=self.env, css=css, js=js)
remains = [node for node in remains if (css and node[0] == 'link') or (js and node[0] == 'script')]
return remains + asset.to_node(css=css, js=js, debug=debug, async_load=async_load, defer_load=defer_load, lazy_load=lazy_load)
def _get_asset_link_urls(self, bundle, options):
asset_nodes = self._get_asset_nodes(bundle, options, js=False)
return [node[1]['href'] for node in asset_nodes if node[0] == 'link']
@tools.ormcache_context('bundle', 'options.get("lang", "en_US")', keys=("website_id",))
def _get_asset_content(self, bundle, options, nodeAttrs=None):
options = dict(options,
inherit_branding=False, inherit_branding_auto=False,
edit_translations=False, translatable=False,
rendering_bundle=True)
options['website_id'] = self.env.context.get('website_id')
asset_paths = self.env['ir.asset']._get_asset_paths(bundle=bundle, css=True, js=True)
files = []
remains = []
for path, *_ in asset_paths:
ext = path.split('.')[-1]
is_js = ext in SCRIPT_EXTENSIONS
is_css = ext in STYLE_EXTENSIONS
if not is_js and not is_css:
continue
mimetype = 'text/javascript' if is_js else 'text/%s' % ext
if can_aggregate(path):
segments = [segment for segment in path.split('/') if segment]
files.append({
'atype': mimetype,
'url': path,
'filename': get_resource_path(*segments) if segments else None,
'content': '',
'media': nodeAttrs and nodeAttrs.get('media'),
})
else:
if is_js:
tag = 'script'
attributes = {
"type": mimetype,
"src": path,
}
else:
tag = 'link'
attributes = {
"type": mimetype,
"rel": "stylesheet",
"href": path,
'media': nodeAttrs and nodeAttrs.get('media'),
}
remains.append((tag, attributes, ''))
return (files, remains)
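    # `files` collects aggregatable assets that are handed to AssetsBundle for
    # combination/minification; `remains` holds assets that cannot be
    # aggregated and are emitted as plain <link>/<script> nodes as-is.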
def _get_field(self, record, field_name, expression, tagName, field_options, options, values):
field = record._fields[field_name]
# adds template compile options for rendering fields
field_options['template_options'] = options
# adds generic field options
field_options['tagName'] = tagName
field_options['expression'] = expression
field_options['type'] = field_options.get('widget', field.type)
inherit_branding = options.get('inherit_branding', options.get('inherit_branding_auto') and record.check_access_rights('write', False))
field_options['inherit_branding'] = inherit_branding
translate = options.get('edit_translations') and options.get('translatable') and field.translate
field_options['translate'] = translate
# field converter
model = 'ir.qweb.field.' + field_options['type']
converter = self.env[model] if model in self.env else self.env['ir.qweb.field']
# get content
content = converter.record_to_html(record, field_name, field_options)
attributes = converter.attributes(record, field_name, field_options, values)
return (attributes, content, inherit_branding or translate)
def _get_widget(self, value, expression, tagName, field_options, options, values):
# adds template compile options for rendering fields
field_options['template_options'] = options
field_options['type'] = field_options['widget']
field_options['tagName'] = tagName
field_options['expression'] = expression
# field converter
model = 'ir.qweb.field.' + field_options['type']
converter = self.env[model] if model in self.env else self.env['ir.qweb.field']
# get content
content = converter.value_to_html(value, field_options)
attributes = OrderedDict()
attributes['data-oe-type'] = field_options['type']
attributes['data-oe-expression'] = field_options['expression']
return (attributes, content, None)
# compile expression add safe_eval
def _compile_expr(self, expr):
""" Compiles a purported Python expression to ast, verifies that it's safe
(according to safe_eval's semantics) and alter its variable references to
access values data instead
"""
# string must be stripped otherwise whitespace before the start for
# formatting purpose are going to break parse/compile
st = ast.parse(expr.strip(), mode='eval')
assert_valid_codeobj(
_SAFE_OPCODES,
compile(st, '<>', 'eval'), # could be expr, but eval *should* be fine
expr
)
# ast.Expression().body -> expr
return Contextifier(_BUILTINS).visit(st).body
def _get_attr_bool(self, attr, default=False):
if attr:
if attr is True:
return ast.Constant(True)
attr = attr.lower()
if attr in ('false', '0'):
return ast.Constant(False)
elif attr in ('true', '1'):
return ast.Constant(True)
return ast.Constant(attr if attr is False else bool(default))
| gpl-3.0 | 7,832,907,001,631,781,000 | 43.425339 | 165 | 0.532644 | false |
Thraxis/pymedusa | lib/sqlalchemy/orm/persistence.py | 34 | 51028 | # orm/persistence.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""private module containing functions used to emit INSERT, UPDATE
and DELETE statements on behalf of a :class:`.Mapper` and its descending
mappers.
The functions here are called only by the unit of work functions
in unitofwork.py.
"""
import operator
from itertools import groupby, chain
from .. import sql, util, exc as sa_exc
from . import attributes, sync, exc as orm_exc, evaluator
from .base import state_str, _attr_as_key, _entity_descriptor
from ..sql import expression
from ..sql.base import _from_objects
from . import loading
def _bulk_insert(
mapper, mappings, session_transaction, isstates, return_defaults):
base_mapper = mapper.base_mapper
cached_connections = _cached_connection_dict(base_mapper)
if session_transaction.session.connection_callable:
raise NotImplementedError(
"connection_callable / per-instance sharding "
"not supported in bulk_insert()")
if isstates:
if return_defaults:
states = [(state, state.dict) for state in mappings]
mappings = [dict_ for (state, dict_) in states]
else:
mappings = [state.dict for state in mappings]
else:
mappings = list(mappings)
connection = session_transaction.connection(base_mapper)
for table, super_mapper in base_mapper._sorted_tables.items():
if not mapper.isa(super_mapper):
continue
records = (
(None, state_dict, params, mapper,
connection, value_params, has_all_pks, has_all_defaults)
for
state, state_dict, params, mp,
conn, value_params, has_all_pks,
has_all_defaults in _collect_insert_commands(table, (
(None, mapping, mapper, connection)
for mapping in mappings),
bulk=True, return_defaults=return_defaults
)
)
_emit_insert_statements(base_mapper, None,
cached_connections,
super_mapper, table, records,
bookkeeping=return_defaults)
if return_defaults and isstates:
identity_cls = mapper._identity_class
identity_props = [p.key for p in mapper._identity_key_props]
for state, dict_ in states:
state.key = (
identity_cls,
tuple([dict_[key] for key in identity_props])
)
def _bulk_update(mapper, mappings, session_transaction,
isstates, update_changed_only):
base_mapper = mapper.base_mapper
cached_connections = _cached_connection_dict(base_mapper)
def _changed_dict(mapper, state):
return dict(
(k, v)
for k, v in state.dict.items() if k in state.committed_state or k
in mapper._primary_key_propkeys
)
if isstates:
if update_changed_only:
mappings = [_changed_dict(mapper, state) for state in mappings]
else:
mappings = [state.dict for state in mappings]
else:
mappings = list(mappings)
if session_transaction.session.connection_callable:
raise NotImplementedError(
"connection_callable / per-instance sharding "
"not supported in bulk_update()")
connection = session_transaction.connection(base_mapper)
for table, super_mapper in base_mapper._sorted_tables.items():
if not mapper.isa(super_mapper):
continue
records = _collect_update_commands(None, table, (
(None, mapping, mapper, connection,
(mapping[mapper._version_id_prop.key]
if mapper._version_id_prop else None))
for mapping in mappings
), bulk=True)
_emit_update_statements(base_mapper, None,
cached_connections,
super_mapper, table, records,
bookkeeping=False)
def save_obj(
base_mapper, states, uowtransaction, single=False):
"""Issue ``INSERT`` and/or ``UPDATE`` statements for a list
of objects.
This is called within the context of a UOWTransaction during a
flush operation, given a list of states to be flushed. The
base mapper in an inheritance hierarchy handles the inserts/
updates for all descendant mappers.
"""
# if batch=false, call _save_obj separately for each object
if not single and not base_mapper.batch:
for state in _sort_states(states):
save_obj(base_mapper, [state], uowtransaction, single=True)
return
states_to_update = []
states_to_insert = []
cached_connections = _cached_connection_dict(base_mapper)
for (state, dict_, mapper, connection,
has_identity,
row_switch, update_version_id) in _organize_states_for_save(
base_mapper, states, uowtransaction
):
if has_identity or row_switch:
states_to_update.append(
(state, dict_, mapper, connection, update_version_id)
)
else:
states_to_insert.append(
(state, dict_, mapper, connection)
)
for table, mapper in base_mapper._sorted_tables.items():
if table not in mapper._pks_by_table:
continue
insert = _collect_insert_commands(table, states_to_insert)
update = _collect_update_commands(
uowtransaction, table, states_to_update)
_emit_update_statements(base_mapper, uowtransaction,
cached_connections,
mapper, table, update)
_emit_insert_statements(base_mapper, uowtransaction,
cached_connections,
mapper, table, insert)
_finalize_insert_update_commands(
base_mapper, uowtransaction,
chain(
(
(state, state_dict, mapper, connection, False)
for state, state_dict, mapper, connection in states_to_insert
),
(
(state, state_dict, mapper, connection, True)
for state, state_dict, mapper, connection,
update_version_id in states_to_update
)
)
)
def post_update(base_mapper, states, uowtransaction, post_update_cols):
"""Issue UPDATE statements on behalf of a relationship() which
specifies post_update.
"""
cached_connections = _cached_connection_dict(base_mapper)
states_to_update = list(_organize_states_for_post_update(
base_mapper,
states, uowtransaction))
for table, mapper in base_mapper._sorted_tables.items():
if table not in mapper._pks_by_table:
continue
update = (
(state, state_dict, sub_mapper, connection)
for
state, state_dict, sub_mapper, connection in states_to_update
if table in sub_mapper._pks_by_table
)
update = _collect_post_update_commands(base_mapper, uowtransaction,
table, update,
post_update_cols)
_emit_post_update_statements(base_mapper, uowtransaction,
cached_connections,
mapper, table, update)
def delete_obj(base_mapper, states, uowtransaction):
"""Issue ``DELETE`` statements for a list of objects.
This is called within the context of a UOWTransaction during a
flush operation.
"""
cached_connections = _cached_connection_dict(base_mapper)
states_to_delete = list(_organize_states_for_delete(
base_mapper,
states,
uowtransaction))
table_to_mapper = base_mapper._sorted_tables
for table in reversed(list(table_to_mapper.keys())):
mapper = table_to_mapper[table]
if table not in mapper._pks_by_table:
continue
delete = _collect_delete_commands(base_mapper, uowtransaction,
table, states_to_delete)
_emit_delete_statements(base_mapper, uowtransaction,
cached_connections, mapper, table, delete)
for state, state_dict, mapper, connection, \
update_version_id in states_to_delete:
mapper.dispatch.after_delete(mapper, connection, state)
def _organize_states_for_save(base_mapper, states, uowtransaction):
"""Make an initial pass across a set of states for INSERT or
UPDATE.
This includes splitting out into distinct lists for
each, calling before_insert/before_update, obtaining
key information for each state including its dictionary,
mapper, the connection to use for the execution per state,
and the identity flag.
"""
for state, dict_, mapper, connection in _connections_for_states(
base_mapper, uowtransaction,
states):
has_identity = bool(state.key)
instance_key = state.key or mapper._identity_key_from_state(state)
row_switch = update_version_id = None
# call before_XXX extensions
if not has_identity:
mapper.dispatch.before_insert(mapper, connection, state)
else:
mapper.dispatch.before_update(mapper, connection, state)
if mapper._validate_polymorphic_identity:
mapper._validate_polymorphic_identity(mapper, state, dict_)
# detect if we have a "pending" instance (i.e. has
# no instance_key attached to it), and another instance
# with the same identity key already exists as persistent.
# convert to an UPDATE if so.
if not has_identity and \
instance_key in uowtransaction.session.identity_map:
instance = \
uowtransaction.session.identity_map[instance_key]
existing = attributes.instance_state(instance)
if not uowtransaction.is_deleted(existing):
raise orm_exc.FlushError(
"New instance %s with identity key %s conflicts "
"with persistent instance %s" %
(state_str(state), instance_key,
state_str(existing)))
base_mapper._log_debug(
"detected row switch for identity %s. "
"will update %s, remove %s from "
"transaction", instance_key,
state_str(state), state_str(existing))
# remove the "delete" flag from the existing element
uowtransaction.remove_state_actions(existing)
row_switch = existing
if (has_identity or row_switch) and mapper.version_id_col is not None:
update_version_id = mapper._get_committed_state_attr_by_column(
row_switch if row_switch else state,
row_switch.dict if row_switch else dict_,
mapper.version_id_col)
yield (state, dict_, mapper, connection,
has_identity, row_switch, update_version_id)
def _organize_states_for_post_update(base_mapper, states,
uowtransaction):
"""Make an initial pass across a set of states for UPDATE
corresponding to post_update.
This includes obtaining key information for each state
including its dictionary, mapper, the connection to use for
the execution per state.
"""
return _connections_for_states(base_mapper, uowtransaction, states)
def _organize_states_for_delete(base_mapper, states, uowtransaction):
"""Make an initial pass across a set of states for DELETE.
This includes calling out before_delete and obtaining
key information for each state including its dictionary,
mapper, the connection to use for the execution per state.
"""
for state, dict_, mapper, connection in _connections_for_states(
base_mapper, uowtransaction,
states):
mapper.dispatch.before_delete(mapper, connection, state)
if mapper.version_id_col is not None:
update_version_id = \
mapper._get_committed_state_attr_by_column(
state, dict_,
mapper.version_id_col)
else:
update_version_id = None
yield (
state, dict_, mapper, connection, update_version_id)
def _collect_insert_commands(
table, states_to_insert,
bulk=False, return_defaults=False):
"""Identify sets of values to use in INSERT statements for a
list of states.
"""
for state, state_dict, mapper, connection in states_to_insert:
if table not in mapper._pks_by_table:
continue
params = {}
value_params = {}
propkey_to_col = mapper._propkey_to_col[table]
for propkey in set(propkey_to_col).intersection(state_dict):
value = state_dict[propkey]
col = propkey_to_col[propkey]
if value is None:
continue
elif not bulk and isinstance(value, sql.ClauseElement):
value_params[col.key] = value
else:
params[col.key] = value
if not bulk:
for colkey in mapper._insert_cols_as_none[table].\
difference(params).difference(value_params):
params[colkey] = None
if not bulk or return_defaults:
has_all_pks = mapper._pk_keys_by_table[table].issubset(params)
if mapper.base_mapper.eager_defaults:
has_all_defaults = mapper._server_default_cols[table].\
issubset(params)
else:
has_all_defaults = True
else:
has_all_defaults = has_all_pks = True
if mapper.version_id_generator is not False \
and mapper.version_id_col is not None and \
mapper.version_id_col in mapper._cols_by_table[table]:
params[mapper.version_id_col.key] = \
mapper.version_id_generator(None)
yield (
state, state_dict, params, mapper,
connection, value_params, has_all_pks,
has_all_defaults)
def _collect_update_commands(
uowtransaction, table, states_to_update,
bulk=False):
"""Identify sets of values to use in UPDATE statements for a
list of states.
This function works intricately with the history system
to determine exactly what values should be updated
as well as how the row should be matched within an UPDATE
statement. Includes some tricky scenarios where the primary
key of an object might have been changed.
"""
for state, state_dict, mapper, connection, \
update_version_id in states_to_update:
if table not in mapper._pks_by_table:
continue
pks = mapper._pks_by_table[table]
value_params = {}
propkey_to_col = mapper._propkey_to_col[table]
if bulk:
params = dict(
(propkey_to_col[propkey].key, state_dict[propkey])
for propkey in
set(propkey_to_col).intersection(state_dict).difference(
mapper._pk_keys_by_table[table])
)
has_all_defaults = True
else:
params = {}
for propkey in set(propkey_to_col).intersection(
state.committed_state):
value = state_dict[propkey]
col = propkey_to_col[propkey]
if isinstance(value, sql.ClauseElement):
value_params[col] = value
# guard against values that generate non-__nonzero__
# objects for __eq__()
elif state.manager[propkey].impl.is_equal(
value, state.committed_state[propkey]) is not True:
params[col.key] = value
if mapper.base_mapper.eager_defaults:
has_all_defaults = mapper._server_onupdate_default_cols[table].\
issubset(params)
else:
has_all_defaults = True
if update_version_id is not None and \
mapper.version_id_col in mapper._cols_by_table[table]:
if not bulk and not (params or value_params):
# HACK: check for history in other tables, in case the
# history is only in a different table than the one
# where the version_id_col is. This logic was lost
# from 0.9 -> 1.0.0 and restored in 1.0.6.
for prop in mapper._columntoproperty.values():
history = (
state.manager[prop.key].impl.get_history(
state, state_dict,
attributes.PASSIVE_NO_INITIALIZE))
if history.added:
break
else:
# no net change, break
continue
col = mapper.version_id_col
params[col._label] = update_version_id
if (bulk or col.key not in params) and \
mapper.version_id_generator is not False:
val = mapper.version_id_generator(update_version_id)
params[col.key] = val
elif not (params or value_params):
continue
if bulk:
pk_params = dict(
(propkey_to_col[propkey]._label, state_dict.get(propkey))
for propkey in
set(propkey_to_col).
intersection(mapper._pk_keys_by_table[table])
)
else:
pk_params = {}
for col in pks:
propkey = mapper._columntoproperty[col].key
history = state.manager[propkey].impl.get_history(
state, state_dict, attributes.PASSIVE_OFF)
if history.added:
if not history.deleted or \
("pk_cascaded", state, col) in \
uowtransaction.attributes:
pk_params[col._label] = history.added[0]
params.pop(col.key, None)
else:
# else, use the old value to locate the row
pk_params[col._label] = history.deleted[0]
params[col.key] = history.added[0]
else:
pk_params[col._label] = history.unchanged[0]
if pk_params[col._label] is None:
raise orm_exc.FlushError(
"Can't update table %s using NULL for primary "
"key value on column %s" % (table, col))
if params or value_params:
params.update(pk_params)
yield (
state, state_dict, params, mapper,
connection, value_params, has_all_defaults)
def _collect_post_update_commands(base_mapper, uowtransaction, table,
states_to_update, post_update_cols):
"""Identify sets of values to use in UPDATE statements for a
list of states within a post_update operation.
"""
for state, state_dict, mapper, connection in states_to_update:
# assert table in mapper._pks_by_table
pks = mapper._pks_by_table[table]
params = {}
hasdata = False
for col in mapper._cols_by_table[table]:
if col in pks:
params[col._label] = \
mapper._get_state_attr_by_column(
state,
state_dict, col, passive=attributes.PASSIVE_OFF)
elif col in post_update_cols:
prop = mapper._columntoproperty[col]
history = state.manager[prop.key].impl.get_history(
state, state_dict,
attributes.PASSIVE_NO_INITIALIZE)
if history.added:
value = history.added[0]
params[col.key] = value
hasdata = True
if hasdata:
yield params, connection
def _collect_delete_commands(base_mapper, uowtransaction, table,
states_to_delete):
"""Identify values to use in DELETE statements for a list of
states to be deleted."""
for state, state_dict, mapper, connection, \
update_version_id in states_to_delete:
if table not in mapper._pks_by_table:
continue
params = {}
for col in mapper._pks_by_table[table]:
params[col.key] = \
value = \
mapper._get_committed_state_attr_by_column(
state, state_dict, col)
if value is None:
raise orm_exc.FlushError(
"Can't delete from table %s "
"using NULL for primary "
"key value on column %s" % (table, col))
if update_version_id is not None and \
mapper.version_id_col in mapper._cols_by_table[table]:
params[mapper.version_id_col.key] = update_version_id
yield params, connection
def _emit_update_statements(base_mapper, uowtransaction,
cached_connections, mapper, table, update,
bookkeeping=True):
"""Emit UPDATE statements corresponding to value lists collected
by _collect_update_commands()."""
needs_version_id = mapper.version_id_col is not None and \
mapper.version_id_col in mapper._cols_by_table[table]
def update_stmt():
clause = sql.and_()
for col in mapper._pks_by_table[table]:
clause.clauses.append(col == sql.bindparam(col._label,
type_=col.type))
if needs_version_id:
clause.clauses.append(
mapper.version_id_col == sql.bindparam(
mapper.version_id_col._label,
type_=mapper.version_id_col.type))
stmt = table.update(clause)
return stmt
cached_stmt = base_mapper._memo(('update', table), update_stmt)
for (connection, paramkeys, hasvalue, has_all_defaults), \
records in groupby(
update,
lambda rec: (
rec[4], # connection
set(rec[2]), # set of parameter keys
bool(rec[5]), # whether or not we have "value" parameters
rec[6] # has_all_defaults
)
):
rows = 0
records = list(records)
statement = cached_stmt
# TODO: would be super-nice to not have to determine this boolean
# inside the loop here, in the 99.9999% of the time there's only
# one connection in use
assert_singlerow = connection.dialect.supports_sane_rowcount
assert_multirow = assert_singlerow and \
connection.dialect.supports_sane_multi_rowcount
allow_multirow = has_all_defaults and not needs_version_id
if bookkeeping and not has_all_defaults and \
mapper.base_mapper.eager_defaults:
statement = statement.return_defaults()
elif mapper.version_id_col is not None:
statement = statement.return_defaults(mapper.version_id_col)
if hasvalue:
for state, state_dict, params, mapper, \
connection, value_params, has_all_defaults in records:
c = connection.execute(
statement.values(value_params),
params)
if bookkeeping:
_postfetch(
mapper,
uowtransaction,
table,
state,
state_dict,
c,
c.context.compiled_parameters[0],
value_params)
rows += c.rowcount
check_rowcount = True
else:
if not allow_multirow:
check_rowcount = assert_singlerow
for state, state_dict, params, mapper, \
connection, value_params, has_all_defaults in records:
c = cached_connections[connection].\
execute(statement, params)
# TODO: why with bookkeeping=False?
_postfetch(
mapper,
uowtransaction,
table,
state,
state_dict,
c,
c.context.compiled_parameters[0],
value_params)
rows += c.rowcount
else:
multiparams = [rec[2] for rec in records]
check_rowcount = assert_multirow or (
assert_singlerow and
len(multiparams) == 1
)
c = cached_connections[connection].\
execute(statement, multiparams)
rows += c.rowcount
# TODO: why with bookkeeping=False?
for state, state_dict, params, mapper, \
connection, value_params, has_all_defaults in records:
_postfetch(
mapper,
uowtransaction,
table,
state,
state_dict,
c,
c.context.compiled_parameters[0],
value_params)
if check_rowcount:
if rows != len(records):
raise orm_exc.StaleDataError(
"UPDATE statement on table '%s' expected to "
"update %d row(s); %d were matched." %
(table.description, len(records), rows))
elif needs_version_id:
util.warn("Dialect %s does not support updated rowcount "
"- versioning cannot be verified." %
c.dialect.dialect_description)
def _emit_insert_statements(base_mapper, uowtransaction,
cached_connections, mapper, table, insert,
bookkeeping=True):
"""Emit INSERT statements corresponding to value lists collected
by _collect_insert_commands()."""
cached_stmt = base_mapper._memo(('insert', table), table.insert)
for (connection, pkeys, hasvalue, has_all_pks, has_all_defaults), \
records in groupby(
insert,
lambda rec: (
rec[4], # connection
set(rec[2]), # parameter keys
bool(rec[5]), # whether we have "value" parameters
rec[6],
rec[7])):
statement = cached_stmt
if not bookkeeping or \
(
has_all_defaults
or not base_mapper.eager_defaults
or not connection.dialect.implicit_returning
) and has_all_pks and not hasvalue:
records = list(records)
multiparams = [rec[2] for rec in records]
c = cached_connections[connection].\
execute(statement, multiparams)
if bookkeeping:
for (state, state_dict, params, mapper_rec,
conn, value_params, has_all_pks, has_all_defaults), \
last_inserted_params in \
zip(records, c.context.compiled_parameters):
_postfetch(
mapper_rec,
uowtransaction,
table,
state,
state_dict,
c,
last_inserted_params,
value_params)
else:
if not has_all_defaults and base_mapper.eager_defaults:
statement = statement.return_defaults()
elif mapper.version_id_col is not None:
statement = statement.return_defaults(mapper.version_id_col)
for state, state_dict, params, mapper_rec, \
connection, value_params, \
has_all_pks, has_all_defaults in records:
if value_params:
result = connection.execute(
statement.values(value_params),
params)
else:
result = cached_connections[connection].\
execute(statement, params)
primary_key = result.context.inserted_primary_key
if primary_key is not None:
# set primary key attributes
for pk, col in zip(primary_key,
mapper._pks_by_table[table]):
prop = mapper_rec._columntoproperty[col]
if state_dict.get(prop.key) is None:
state_dict[prop.key] = pk
_postfetch(
mapper_rec,
uowtransaction,
table,
state,
state_dict,
result,
result.context.compiled_parameters[0],
value_params)
def _emit_post_update_statements(base_mapper, uowtransaction,
cached_connections, mapper, table, update):
"""Emit UPDATE statements corresponding to value lists collected
by _collect_post_update_commands()."""
def update_stmt():
clause = sql.and_()
for col in mapper._pks_by_table[table]:
clause.clauses.append(col == sql.bindparam(col._label,
type_=col.type))
return table.update(clause)
statement = base_mapper._memo(('post_update', table), update_stmt)
# execute each UPDATE in the order according to the original
# list of states to guarantee row access order, but
# also group them into common (connection, cols) sets
# to support executemany().
for key, grouper in groupby(
update, lambda rec: (
rec[1], # connection
set(rec[0]) # parameter keys
)
):
connection = key[0]
multiparams = [params for params, conn in grouper]
cached_connections[connection].\
execute(statement, multiparams)
def _emit_delete_statements(base_mapper, uowtransaction, cached_connections,
mapper, table, delete):
"""Emit DELETE statements corresponding to value lists collected
by _collect_delete_commands()."""
need_version_id = mapper.version_id_col is not None and \
mapper.version_id_col in mapper._cols_by_table[table]
def delete_stmt():
clause = sql.and_()
for col in mapper._pks_by_table[table]:
clause.clauses.append(
col == sql.bindparam(col.key, type_=col.type))
if need_version_id:
clause.clauses.append(
mapper.version_id_col ==
sql.bindparam(
mapper.version_id_col.key,
type_=mapper.version_id_col.type
)
)
return table.delete(clause)
statement = base_mapper._memo(('delete', table), delete_stmt)
for connection, recs in groupby(
delete,
lambda rec: rec[1] # connection
):
del_objects = [params for params, connection in recs]
connection = cached_connections[connection]
expected = len(del_objects)
rows_matched = -1
only_warn = False
if connection.dialect.supports_sane_multi_rowcount:
c = connection.execute(statement, del_objects)
if not need_version_id:
only_warn = True
rows_matched = c.rowcount
elif need_version_id:
if connection.dialect.supports_sane_rowcount:
rows_matched = 0
# execute deletes individually so that versioned
# rows can be verified
for params in del_objects:
c = connection.execute(statement, params)
rows_matched += c.rowcount
else:
util.warn(
"Dialect %s does not support deleted rowcount "
"- versioning cannot be verified." %
connection.dialect.dialect_description,
stacklevel=12)
connection.execute(statement, del_objects)
else:
connection.execute(statement, del_objects)
if base_mapper.confirm_deleted_rows and \
rows_matched > -1 and expected != rows_matched:
if only_warn:
util.warn(
"DELETE statement on table '%s' expected to "
"delete %d row(s); %d were matched. Please set "
"confirm_deleted_rows=False within the mapper "
"configuration to prevent this warning." %
(table.description, expected, rows_matched)
)
else:
raise orm_exc.StaleDataError(
"DELETE statement on table '%s' expected to "
"delete %d row(s); %d were matched. Please set "
"confirm_deleted_rows=False within the mapper "
"configuration to prevent this warning." %
(table.description, expected, rows_matched)
)
def _finalize_insert_update_commands(base_mapper, uowtransaction, states):
"""finalize state on states that have been inserted or updated,
including calling after_insert/after_update events.
"""
for state, state_dict, mapper, connection, has_identity in states:
if mapper._readonly_props:
readonly = state.unmodified_intersection(
[p.key for p in mapper._readonly_props
if p.expire_on_flush or p.key not in state.dict]
)
if readonly:
state._expire_attributes(state.dict, readonly)
# if eager_defaults option is enabled, load
# all expired cols. Else if we have a version_id_col, make sure
# it isn't expired.
toload_now = []
if base_mapper.eager_defaults:
toload_now.extend(state._unloaded_non_object)
elif mapper.version_id_col is not None and \
mapper.version_id_generator is False:
if mapper._version_id_prop.key in state.unloaded:
toload_now.extend([mapper._version_id_prop.key])
if toload_now:
state.key = base_mapper._identity_key_from_state(state)
loading.load_on_ident(
uowtransaction.session.query(base_mapper),
state.key, refresh_state=state,
only_load_props=toload_now)
# call after_XXX extensions
if not has_identity:
mapper.dispatch.after_insert(mapper, connection, state)
else:
mapper.dispatch.after_update(mapper, connection, state)
def _postfetch(mapper, uowtransaction, table,
state, dict_, result, params, value_params, bulk=False):
"""Expire attributes in need of newly persisted database state,
after an INSERT or UPDATE statement has proceeded for that
state."""
# TODO: bulk is never non-False, need to clean this up
prefetch_cols = result.context.compiled.prefetch
postfetch_cols = result.context.compiled.postfetch
returning_cols = result.context.compiled.returning
if mapper.version_id_col is not None and \
mapper.version_id_col in mapper._cols_by_table[table]:
prefetch_cols = list(prefetch_cols) + [mapper.version_id_col]
refresh_flush = bool(mapper.class_manager.dispatch.refresh_flush)
if refresh_flush:
load_evt_attrs = []
if returning_cols:
row = result.context.returned_defaults
if row is not None:
for col in returning_cols:
if col.primary_key:
continue
dict_[mapper._columntoproperty[col].key] = row[col]
if refresh_flush:
load_evt_attrs.append(mapper._columntoproperty[col].key)
for c in prefetch_cols:
if c.key in params and c in mapper._columntoproperty:
dict_[mapper._columntoproperty[c].key] = params[c.key]
if refresh_flush:
load_evt_attrs.append(mapper._columntoproperty[c].key)
if refresh_flush and load_evt_attrs:
mapper.class_manager.dispatch.refresh_flush(
state, uowtransaction, load_evt_attrs)
if postfetch_cols and state:
state._expire_attributes(state.dict,
[mapper._columntoproperty[c].key
for c in postfetch_cols if c in
mapper._columntoproperty]
)
# synchronize newly inserted ids from one table to the next
# TODO: this still goes a little too often. would be nice to
# have definitive list of "columns that changed" here
for m, equated_pairs in mapper._table_to_equated[table]:
if state is None:
sync.bulk_populate_inherit_keys(dict_, m, equated_pairs)
else:
sync.populate(state, m, state, m,
equated_pairs,
uowtransaction,
mapper.passive_updates)
def _connections_for_states(base_mapper, uowtransaction, states):
"""Return an iterator of (state, state.dict, mapper, connection).
The states are sorted according to _sort_states, then paired
with the connection they should be using for the given
unit of work transaction.
"""
# if session has a connection callable,
# organize individual states with the connection
# to use for update
if uowtransaction.session.connection_callable:
connection_callable = \
uowtransaction.session.connection_callable
else:
connection = uowtransaction.transaction.connection(base_mapper)
connection_callable = None
for state in _sort_states(states):
if connection_callable:
connection = connection_callable(base_mapper, state.obj())
mapper = state.manager.mapper
yield state, state.dict, mapper, connection
def _cached_connection_dict(base_mapper):
# dictionary of connection->connection_with_cache_options.
return util.PopulateDict(
lambda conn: conn.execution_options(
compiled_cache=base_mapper._compiled_cache
))
def _sort_states(states):
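    """Sort states deterministically for flush: pending states (no identity
    key yet) ordered by insert_order, followed by persistent states ordered
    by their primary key tuple."""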
pending = set(states)
persistent = set(s for s in pending if s.key is not None)
pending.difference_update(persistent)
return sorted(pending, key=operator.attrgetter("insert_order")) + \
sorted(persistent, key=lambda q: q.key[1])
class BulkUD(object):
"""Handle bulk update and deletes via a :class:`.Query`."""
def __init__(self, query):
self.query = query.enable_eagerloads(False)
self.mapper = self.query._bind_mapper()
self._validate_query_state()
def _validate_query_state(self):
for attr, methname, notset, op in (
('_limit', 'limit()', None, operator.is_),
('_offset', 'offset()', None, operator.is_),
('_order_by', 'order_by()', False, operator.is_),
('_group_by', 'group_by()', False, operator.is_),
('_distinct', 'distinct()', False, operator.is_),
(
'_from_obj',
'join(), outerjoin(), select_from(), or from_self()',
(), operator.eq)
):
if not op(getattr(self.query, attr), notset):
raise sa_exc.InvalidRequestError(
"Can't call Query.update() or Query.delete() "
"when %s has been called" %
(methname, )
)
@property
def session(self):
return self.query.session
@classmethod
def _factory(cls, lookup, synchronize_session, *arg):
try:
klass = lookup[synchronize_session]
except KeyError:
raise sa_exc.ArgumentError(
"Valid strategies for session synchronization "
"are %s" % (", ".join(sorted(repr(x)
for x in lookup))))
else:
return klass(*arg)
def exec_(self):
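        # Template method: _do_exec, _do_post and the pre/post synchronize
        # hooks are supplied by the Bulk* subclasses for each
        # synchronize_session strategy ('evaluate', 'fetch' or False).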
self._do_pre()
self._do_pre_synchronize()
self._do_exec()
self._do_post_synchronize()
self._do_post()
@util.dependencies("sqlalchemy.orm.query")
def _do_pre(self, querylib):
query = self.query
self.context = querylib.QueryContext(query)
if isinstance(query._entities[0], querylib._ColumnEntity):
# check for special case of query(table)
tables = set()
for ent in query._entities:
if not isinstance(ent, querylib._ColumnEntity):
tables.clear()
break
else:
tables.update(_from_objects(ent.column))
if len(tables) != 1:
raise sa_exc.InvalidRequestError(
"This operation requires only one Table or "
"entity be specified as the target."
)
else:
self.primary_table = tables.pop()
else:
self.primary_table = query._only_entity_zero(
"This operation requires only one Table or "
"entity be specified as the target."
).mapper.local_table
session = query.session
if query._autoflush:
session._autoflush()
def _do_pre_synchronize(self):
pass
def _do_post_synchronize(self):
pass
class BulkEvaluate(BulkUD):
"""BulkUD which does the 'evaluate' method of session state resolution."""
def _additional_evaluators(self, evaluator_compiler):
pass
def _do_pre_synchronize(self):
query = self.query
target_cls = query._mapper_zero().class_
try:
evaluator_compiler = evaluator.EvaluatorCompiler(target_cls)
if query.whereclause is not None:
eval_condition = evaluator_compiler.process(
query.whereclause)
else:
def eval_condition(obj):
return True
self._additional_evaluators(evaluator_compiler)
except evaluator.UnevaluatableError:
raise sa_exc.InvalidRequestError(
"Could not evaluate current criteria in Python. "
"Specify 'fetch' or False for the "
"synchronize_session parameter.")
# TODO: detect when the where clause is a trivial primary key match
self.matched_objects = [
obj for (cls, pk), obj in
query.session.identity_map.items()
if issubclass(cls, target_cls) and
eval_condition(obj)]
class BulkFetch(BulkUD):
"""BulkUD which does the 'fetch' method of session state resolution."""
def _do_pre_synchronize(self):
query = self.query
session = query.session
context = query._compile_context()
select_stmt = context.statement.with_only_columns(
self.primary_table.primary_key)
self.matched_rows = session.execute(
select_stmt,
mapper=self.mapper,
params=query._params).fetchall()
class BulkUpdate(BulkUD):
"""BulkUD which handles UPDATEs."""
def __init__(self, query, values, update_kwargs):
super(BulkUpdate, self).__init__(query)
self.values = values
self.update_kwargs = update_kwargs
@classmethod
def factory(cls, query, synchronize_session, values, update_kwargs):
return BulkUD._factory({
"evaluate": BulkUpdateEvaluate,
"fetch": BulkUpdateFetch,
False: BulkUpdate
}, synchronize_session, query, values, update_kwargs)
def _resolve_string_to_expr(self, key):
if self.mapper and isinstance(key, util.string_types):
attr = _entity_descriptor(self.mapper, key)
return attr.__clause_element__()
else:
return key
def _resolve_key_to_attrname(self, key):
if self.mapper and isinstance(key, util.string_types):
attr = _entity_descriptor(self.mapper, key)
return attr.property.key
elif isinstance(key, attributes.InstrumentedAttribute):
return key.key
elif hasattr(key, '__clause_element__'):
key = key.__clause_element__()
if self.mapper and isinstance(key, expression.ColumnElement):
try:
attr = self.mapper._columntoproperty[key]
except orm_exc.UnmappedColumnError:
return None
else:
return attr.key
else:
raise sa_exc.InvalidRequestError(
"Invalid expression type: %r" % key)
def _do_exec(self):
values = [
(self._resolve_string_to_expr(k), v)
for k, v in (
self.values.items() if hasattr(self.values, 'items')
else self.values)
]
if not self.update_kwargs.get('preserve_parameter_order', False):
values = dict(values)
update_stmt = sql.update(self.primary_table,
self.context.whereclause, values,
**self.update_kwargs)
self.result = self.query.session.execute(
update_stmt, params=self.query._params,
mapper=self.mapper)
self.rowcount = self.result.rowcount
def _do_post(self):
session = self.query.session
session.dispatch.after_bulk_update(self)
class BulkDelete(BulkUD):
"""BulkUD which handles DELETEs."""
def __init__(self, query):
super(BulkDelete, self).__init__(query)
@classmethod
def factory(cls, query, synchronize_session):
return BulkUD._factory({
"evaluate": BulkDeleteEvaluate,
"fetch": BulkDeleteFetch,
False: BulkDelete
}, synchronize_session, query)
def _do_exec(self):
delete_stmt = sql.delete(self.primary_table,
self.context.whereclause)
self.result = self.query.session.execute(
delete_stmt,
params=self.query._params,
mapper=self.mapper)
self.rowcount = self.result.rowcount
def _do_post(self):
session = self.query.session
session.dispatch.after_bulk_delete(self)
class BulkUpdateEvaluate(BulkEvaluate, BulkUpdate):
"""BulkUD which handles UPDATEs using the "evaluate"
method of session resolution."""
def _additional_evaluators(self, evaluator_compiler):
self.value_evaluators = {}
values = (self.values.items() if hasattr(self.values, 'items')
else self.values)
for key, value in values:
key = self._resolve_key_to_attrname(key)
if key is not None:
self.value_evaluators[key] = evaluator_compiler.process(
expression._literal_as_binds(value))
def _do_post_synchronize(self):
session = self.query.session
states = set()
evaluated_keys = list(self.value_evaluators.keys())
for obj in self.matched_objects:
state, dict_ = attributes.instance_state(obj),\
attributes.instance_dict(obj)
# only evaluate unmodified attributes
to_evaluate = state.unmodified.intersection(
evaluated_keys)
for key in to_evaluate:
dict_[key] = self.value_evaluators[key](obj)
state._commit(dict_, list(to_evaluate))
# expire attributes with pending changes
# (there was no autoflush, so they are overwritten)
state._expire_attributes(dict_,
set(evaluated_keys).
difference(to_evaluate))
states.add(state)
session._register_altered(states)
class BulkDeleteEvaluate(BulkEvaluate, BulkDelete):
"""BulkUD which handles DELETEs using the "evaluate"
method of session resolution."""
def _do_post_synchronize(self):
self.query.session._remove_newly_deleted(
[attributes.instance_state(obj)
for obj in self.matched_objects])
class BulkUpdateFetch(BulkFetch, BulkUpdate):
"""BulkUD which handles UPDATEs using the "fetch"
method of session resolution."""
def _do_post_synchronize(self):
session = self.query.session
target_mapper = self.query._mapper_zero()
states = set([
attributes.instance_state(session.identity_map[identity_key])
for identity_key in [
target_mapper.identity_key_from_primary_key(
list(primary_key))
for primary_key in self.matched_rows
]
if identity_key in session.identity_map
])
attrib = [_attr_as_key(k) for k in self.values]
for state in states:
session._expire_state(state, attrib)
session._register_altered(states)
class BulkDeleteFetch(BulkFetch, BulkDelete):
"""BulkUD which handles DELETEs using the "fetch"
method of session resolution."""
def _do_post_synchronize(self):
session = self.query.session
target_mapper = self.query._mapper_zero()
for primary_key in self.matched_rows:
# TODO: inline this and call remove_newly_deleted
# once
identity_key = target_mapper.identity_key_from_primary_key(
list(primary_key))
if identity_key in session.identity_map:
session._remove_newly_deleted(
[attributes.instance_state(
session.identity_map[identity_key]
)]
)
| gpl-3.0 | -7,167,322,708,506,918,000 | 35.241477 | 80 | 0.556302 | false |
ahmetcemturan/SFACT | fabmetheus_utilities/geometry/creation/linear_bearing_cage.py | 12 | 12862 | """
Linear bearing cage.
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.geometry.creation import extrude
from fabmetheus_utilities.geometry.creation import lineation
from fabmetheus_utilities.geometry.creation import peg
from fabmetheus_utilities.geometry.creation import solid
from fabmetheus_utilities.geometry.geometry_utilities import evaluate
from fabmetheus_utilities.geometry.geometry_utilities import matrix
from fabmetheus_utilities.geometry.manipulation_matrix import translate
from fabmetheus_utilities.geometry.solids import cylinder
from fabmetheus_utilities.geometry.solids import sphere
from fabmetheus_utilities.vector3 import Vector3
from fabmetheus_utilities import euclidean
import math
__author__ = 'Enrique Perez ([email protected])'
__credits__ = 'Art of Illusion <http://www.artofillusion.org/>'
__date__ = '$Date: 2008/02/05 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
def addAssemblyCage(derivation, negatives, positives):
'Add assembly linear bearing cage.'
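	# The male half gets solid pegs and the female half gets matching peg
	# holes; the two halves are then translated apart by halfSeparationWidth.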
addCageGroove(derivation, negatives, positives)
for pegCenterX in derivation.pegCenterXs:
addPositivePeg(derivation, positives, pegCenterX, -derivation.pegY)
addPositivePeg(derivation, positives, pegCenterX, derivation.pegY)
translate.translateNegativesPositives(negatives, positives, Vector3(0.0, -derivation.halfSeparationWidth))
femaleNegatives = []
femalePositives = []
addCageGroove(derivation, femaleNegatives, femalePositives)
for pegCenterX in derivation.pegCenterXs:
addNegativePeg(derivation, femaleNegatives, pegCenterX, -derivation.pegY)
addNegativePeg(derivation, femaleNegatives, pegCenterX, derivation.pegY)
translate.translateNegativesPositives(femaleNegatives, femalePositives, Vector3(0.0, derivation.halfSeparationWidth))
negatives += femaleNegatives
positives += femalePositives
def addCage(derivation, height, negatives, positives):
'Add linear bearing cage.'
copyShallow = derivation.elementNode.getCopyShallow()
copyShallow.attributes['path'] = [Vector3(), Vector3(0.0, 0.0, height)]
extrudeDerivation = extrude.ExtrudeDerivation(copyShallow)
roundedExtendedRectangle = getRoundedExtendedRectangle(derivation.demiwidth, derivation.rectangleCenterX, 14)
outsidePath = euclidean.getVector3Path(roundedExtendedRectangle)
extrude.addPositives(extrudeDerivation, [outsidePath], positives)
for bearingCenterX in derivation.bearingCenterXs:
addNegativeSphere(derivation, negatives, bearingCenterX)
def addCageGroove(derivation, negatives, positives):
'Add cage and groove.'
addCage(derivation, derivation.demiheight, negatives, positives)
addGroove(derivation, negatives)
def addGroove(derivation, negatives):
'Add groove on each side of cage.'
copyShallow = derivation.elementNode.getCopyShallow()
extrude.setElementNodeToEndStart(copyShallow, Vector3(-derivation.demilength), Vector3(derivation.demilength))
extrudeDerivation = extrude.ExtrudeDerivation(copyShallow)
bottom = derivation.demiheight - 0.5 * derivation.grooveWidth
outside = derivation.demiwidth
top = derivation.demiheight
leftGroove = [
complex(-outside, bottom),
complex(-derivation.innerDemiwidth, derivation.demiheight),
complex(-outside, top)]
rightGroove = [
complex(outside, top),
complex(derivation.innerDemiwidth, derivation.demiheight),
complex(outside, bottom)]
extrude.addNegatives(extrudeDerivation, negatives, euclidean.getVector3Paths([leftGroove, rightGroove]))
def addNegativePeg(derivation, negatives, x, y):
'Add negative cylinder at x and y.'
negativePegRadius = derivation.pegRadiusArealized + derivation.halfPegClearance
inradius = complex(negativePegRadius, negativePegRadius)
copyShallow = derivation.elementNode.getCopyShallow()
start = Vector3(x, y, derivation.height)
sides = evaluate.getSidesMinimumThreeBasedOnPrecision(copyShallow, negativePegRadius)
cylinder.addCylinderOutputByEndStart(0.0, inradius, negatives, sides, start, derivation.topOverBottom)
def addNegativeSphere(derivation, negatives, x):
'Add negative sphere at x.'
radius = Vector3(derivation.radiusPlusClearance, derivation.radiusPlusClearance, derivation.radiusPlusClearance)
sphereOutput = sphere.getGeometryOutput(derivation.elementNode.getCopyShallow(), radius)
euclidean.translateVector3Path(matrix.getVertexes(sphereOutput), Vector3(x, 0.0, derivation.demiheight))
negatives.append(sphereOutput)
def addPositivePeg(derivation, positives, x, y):
'Add positive cylinder at x and y.'
positivePegRadius = derivation.pegRadiusArealized - derivation.halfPegClearance
radiusArealized = complex(positivePegRadius, positivePegRadius)
copyShallow = derivation.elementNode.getCopyShallow()
start = Vector3(x, y, derivation.demiheight)
endZ = derivation.height
peg.addPegOutput(derivation.pegBevel, endZ, positives, radiusArealized, derivation.sides, start, derivation.topOverBottom)
def getBearingCenterXs(bearingCenterX, numberOfSteps, stepX):
'Get the bearing center x list.'
bearingCenterXs = []
for stepIndex in xrange(numberOfSteps + 1):
bearingCenterXs.append(bearingCenterX)
bearingCenterX += stepX
return bearingCenterXs
def getGeometryOutput(elementNode):
'Get vector3 vertexes from attribute dictionary.'
derivation = LinearBearingCageDerivation(elementNode)
negatives = []
positives = []
if derivation.typeStringFirstCharacter == 'a':
addAssemblyCage(derivation, negatives, positives)
else:
addCage(derivation, derivation.height, negatives, positives)
return extrude.getGeometryOutputByNegativesPositives(elementNode, negatives, positives)
def getGeometryOutputByArguments(arguments, elementNode):
'Get vector3 vertexes from attribute dictionary by arguments.'
evaluate.setAttributesByArguments(['length', 'radius'], arguments, elementNode)
return getGeometryOutput(elementNode)
def getNewDerivation(elementNode):
'Get new derivation.'
return LinearBearingCageDerivation(elementNode)
def getPegCenterXs(numberOfSteps, pegCenterX, stepX):
'Get the peg center x list.'
pegCenterXs = []
for stepIndex in xrange(numberOfSteps):
pegCenterXs.append(pegCenterX)
pegCenterX += stepX
return pegCenterXs
def getRoundedExtendedRectangle(radius, rectangleCenterX, sides):
'Get the rounded extended rectangle.'
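	# The outline is an obround: two polygonal half circles of the given radius
	# centered at +/-rectangleCenterX; the x component of each vertex is scaled
	# by 1/cos(sideAngle/2), the usual circumscribed-polygon correction.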
roundedExtendedRectangle = []
halfSides = int(sides / 2)
halfSidesPlusOne = abs(halfSides + 1)
sideAngle = math.pi / float(halfSides)
extensionMultiplier = 1.0 / math.cos(0.5 * sideAngle)
center = complex(rectangleCenterX, 0.0)
startAngle = 0.5 * math.pi
for halfSide in xrange(halfSidesPlusOne):
unitPolar = euclidean.getWiddershinsUnitPolar(startAngle)
unitPolarExtended = complex(unitPolar.real * extensionMultiplier, unitPolar.imag)
roundedExtendedRectangle.append(unitPolarExtended * radius + center)
startAngle += sideAngle
center = complex(-rectangleCenterX, 0.0)
startAngle = -0.5 * math.pi
for halfSide in xrange(halfSidesPlusOne):
unitPolar = euclidean.getWiddershinsUnitPolar(startAngle)
unitPolarExtended = complex(unitPolar.real * extensionMultiplier, unitPolar.imag)
roundedExtendedRectangle.append(unitPolarExtended * radius + center)
startAngle += sideAngle
return roundedExtendedRectangle
def processElementNode(elementNode):
'Process the xml element.'
solid.processElementNodeByGeometry(elementNode, getGeometryOutput(elementNode))
class LinearBearingCageDerivation:
'Class to hold linear bearing cage variables.'
def __init__(self, elementNode):
'Set defaults.'
self.length = evaluate.getEvaluatedFloat(50.0, elementNode, 'length')
self.demilength = 0.5 * self.length
self.elementNode = elementNode
self.radius = lineation.getFloatByPrefixBeginEnd(elementNode, 'radius', 'diameter', 5.0)
self.cageClearanceOverRadius = evaluate.getEvaluatedFloat(0.05, elementNode, 'cageClearanceOverRadius')
self.cageClearance = self.cageClearanceOverRadius * self.radius
self.cageClearance = evaluate.getEvaluatedFloat(self.cageClearance, elementNode, 'cageClearance')
self.racewayClearanceOverRadius = evaluate.getEvaluatedFloat(0.1, elementNode, 'racewayClearanceOverRadius')
self.racewayClearance = self.racewayClearanceOverRadius * self.radius
self.racewayClearance = evaluate.getEvaluatedFloat(self.racewayClearance, elementNode, 'racewayClearance')
self.typeMenuRadioStrings = 'assembly integral'.split()
self.typeString = evaluate.getEvaluatedString('assembly', elementNode, 'type')
self.typeStringFirstCharacter = self.typeString[: 1 ].lower()
self.wallThicknessOverRadius = evaluate.getEvaluatedFloat(0.5, elementNode, 'wallThicknessOverRadius')
self.wallThickness = self.wallThicknessOverRadius * self.radius
self.wallThickness = evaluate.getEvaluatedFloat(self.wallThickness, elementNode, 'wallThickness')
self.zenithAngle = evaluate.getEvaluatedFloat(45.0, elementNode, 'zenithAngle')
self.zenithRadian = math.radians(self.zenithAngle)
self.demiheight = self.radius * math.cos(self.zenithRadian) - self.racewayClearance
self.height = self.demiheight + self.demiheight
self.radiusPlusClearance = self.radius + self.cageClearance
self.cageRadius = self.radiusPlusClearance + self.wallThickness
self.demiwidth = self.cageRadius
self.bearingCenterX = self.cageRadius - self.demilength
separation = self.cageRadius + self.radiusPlusClearance
bearingLength = -self.bearingCenterX - self.bearingCenterX
self.numberOfSteps = int(math.floor(bearingLength / separation))
self.stepX = bearingLength / float(self.numberOfSteps)
self.bearingCenterXs = getBearingCenterXs(self.bearingCenterX, self.numberOfSteps, self.stepX)
if self.typeStringFirstCharacter == 'a':
self.setAssemblyCage()
self.rectangleCenterX = self.demiwidth - self.demilength
def setAssemblyCage(self):
'Set two piece assembly parameters.'
self.grooveDepthOverRadius = evaluate.getEvaluatedFloat(0.15, self.elementNode, 'grooveDepthOverRadius')
self.grooveDepth = self.grooveDepthOverRadius * self.radius
self.grooveDepth = evaluate.getEvaluatedFloat(self.grooveDepth, self.elementNode, 'grooveDepth')
self.grooveWidthOverRadius = evaluate.getEvaluatedFloat(0.6, self.elementNode, 'grooveWidthOverRadius')
self.grooveWidth = self.grooveWidthOverRadius * self.radius
self.grooveWidth = evaluate.getEvaluatedFloat(self.grooveWidth, self.elementNode, 'grooveWidth')
self.pegClearanceOverRadius = evaluate.getEvaluatedFloat(0.0, self.elementNode, 'pegClearanceOverRadius')
self.pegClearance = self.pegClearanceOverRadius * self.radius
self.pegClearance = evaluate.getEvaluatedFloat(self.pegClearance, self.elementNode, 'pegClearance')
self.halfPegClearance = 0.5 * self.pegClearance
self.pegRadiusOverRadius = evaluate.getEvaluatedFloat(0.5, self.elementNode, 'pegRadiusOverRadius')
self.pegRadius = self.pegRadiusOverRadius * self.radius
self.pegRadius = evaluate.getEvaluatedFloat(self.pegRadius, self.elementNode, 'pegRadius')
self.sides = evaluate.getSidesMinimumThreeBasedOnPrecision(self.elementNode, self.pegRadius)
self.pegRadiusArealized = evaluate.getRadiusArealizedBasedOnAreaRadius(self.elementNode, self.pegRadius, self.sides)
self.pegBevelOverPegRadius = evaluate.getEvaluatedFloat(0.25, self.elementNode, 'pegBevelOverPegRadius')
self.pegBevel = self.pegBevelOverPegRadius * self.pegRadiusArealized
self.pegBevel = evaluate.getEvaluatedFloat(self.pegBevel, self.elementNode, 'pegBevel')
self.pegMaximumRadius = self.pegRadiusArealized + abs(self.halfPegClearance)
self.separationOverRadius = evaluate.getEvaluatedFloat(0.5, self.elementNode, 'separationOverRadius')
self.separation = self.separationOverRadius * self.radius
self.separation = evaluate.getEvaluatedFloat(self.separation, self.elementNode, 'separation')
self.topOverBottom = evaluate.getEvaluatedFloat(0.8, self.elementNode, 'topOverBottom')
peg.setTopOverBottomByRadius(self, 0.0, self.pegRadiusArealized, self.height)
self.quarterHeight = 0.5 * self.demiheight
self.pegY = 0.5 * self.wallThickness + self.pegMaximumRadius
cagePegRadius = self.cageRadius + self.pegMaximumRadius
halfStepX = 0.5 * self.stepX
pegHypotenuse = math.sqrt(self.pegY * self.pegY + halfStepX * halfStepX)
if cagePegRadius > pegHypotenuse:
self.pegY = math.sqrt(cagePegRadius * cagePegRadius - halfStepX * halfStepX)
self.demiwidth = max(self.pegY + self.pegMaximumRadius + self.wallThickness, self.demiwidth)
self.innerDemiwidth = self.demiwidth
self.demiwidth += self.grooveDepth
self.halfSeparationWidth = self.demiwidth + 0.5 * self.separation
if self.pegRadiusArealized <= 0.0:
self.pegCenterXs = []
else:
self.pegCenterXs = getPegCenterXs(self.numberOfSteps, self.bearingCenterX + halfStepX, self.stepX)
| agpl-3.0 | 5,960,471,037,751,927,000 | 51.072874 | 157 | 0.806251 | false |
apache/airflow | tests/providers/amazon/aws/hooks/test_kinesis.py | 3 | 2515 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import uuid
from airflow.providers.amazon.aws.hooks.kinesis import AwsFirehoseHook
try:
from moto import mock_kinesis
except ImportError:
mock_kinesis = None
class TestAwsFirehoseHook(unittest.TestCase):
@unittest.skipIf(mock_kinesis is None, 'mock_kinesis package not present')
@mock_kinesis
def test_get_conn_returns_a_boto3_connection(self):
hook = AwsFirehoseHook(
aws_conn_id='aws_default', delivery_stream="test_airflow", region_name="us-east-1"
)
assert hook.get_conn() is not None
@unittest.skipIf(mock_kinesis is None, 'mock_kinesis package not present')
@mock_kinesis
def test_insert_batch_records_kinesis_firehose(self):
hook = AwsFirehoseHook(
aws_conn_id='aws_default', delivery_stream="test_airflow", region_name="us-east-1"
)
response = hook.get_conn().create_delivery_stream(
DeliveryStreamName="test_airflow",
S3DestinationConfiguration={
'RoleARN': 'arn:aws:iam::123456789012:role/firehose_delivery_role',
'BucketARN': 'arn:aws:s3:::kinesis-test',
'Prefix': 'airflow/',
'BufferingHints': {'SizeInMBs': 123, 'IntervalInSeconds': 124},
'CompressionFormat': 'UNCOMPRESSED',
},
)
stream_arn = response['DeliveryStreamARN']
assert stream_arn == "arn:aws:firehose:us-east-1:123456789012:deliverystream/test_airflow"
records = [{"Data": str(uuid.uuid4())} for _ in range(100)]
response = hook.put_records(records)
assert response['FailedPutCount'] == 0
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
| apache-2.0 | -6,371,513,636,730,374,000 | 37.692308 | 98 | 0.681511 | false |
sbalde/edx-platform | common/lib/xmodule/xmodule/tests/test_stringify.py | 187 | 1256 | """
Tests stringify functions used in xmodule html
"""
from nose.tools import assert_equals # pylint: disable=no-name-in-module
from lxml import etree
from xmodule.stringify import stringify_children
def test_stringify():
text = 'Hi <div x="foo">there <span>Bruce</span><b>!</b></div>'
html = '''<html a="b" foo="bar">{0}</html>'''.format(text)
xml = etree.fromstring(html)
out = stringify_children(xml)
assert_equals(out, text)
def test_stringify_again():
html = r"""<html name="Voltage Source Answer" >A voltage source is non-linear!
<div align="center">
<img src="/static/images/circuits/voltage-source.png"/>
\(V=V_C\)
</div>
But it is <a href="http://mathworld.wolfram.com/AffineFunction.html">affine</a>,
which means linear except for an offset.
</html>
"""
html = """<html>A voltage source is non-linear!
<div align="center">
</div>
But it is <a href="http://mathworld.wolfram.com/AffineFunction.html">affine</a>,
which means linear except for an offset.
</html>
"""
xml = etree.fromstring(html)
out = stringify_children(xml)
print "output:"
print out
# Tracking strange content repeating bug
# Should appear once
assert_equals(out.count("But it is "), 1)
| agpl-3.0 | -4,546,936,460,027,800,600 | 27.545455 | 82 | 0.663217 | false |
interedition/collatex | collatex-pythonport/docs/conf.py | 4 | 8473 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import collatex
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'CollateX-Python'
copyright = u'2014, Ronald Haentjens Dekker'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = collatex.__version__
# The full version, including alpha/beta/rc tags.
release = collatex.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'collatexdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'collatex.tex',
u'CollateX-Python Documentation',
u'Ronald Haentjens Dekker', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'collatex',
u'CollateX-Python Documentation',
[u'Ronald Haentjens Dekker'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'collatex',
u'CollateX-Python Documentation',
u'Ronald Haentjens Dekker',
'Collatex',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
 | gpl-3.0 | -4,680,962,521,294,584,000 | 29.814545 | 76 | 0.706243 | false |
Learning-from-our-past/Kaira | books/karelians/extraction/extractors/weddingextractor.py | 2 | 1845 | # -*- coding: utf-8 -*-
import re
from books.karelians.extraction.extractors.baseExtractor import BaseExtractor
from books.karelians.extraction.extractionExceptions import *
from books.karelians.extraction.extractors.dateExtractor import DateExtractor
from shared import textUtils
from books.karelians.extractionkeys import KEYS
from interface.valuewrapper import ValueWrapper
from shared import regexUtils
class WeddingExtractor(BaseExtractor):
def extract(self, text, entry):
super(WeddingExtractor, self).extract(text)
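        # The pattern matches the Finnish abbreviation "avioit." (married)
        # followed by a two-digit year, e.g. "avioit. -39"; _findDate later
        # prefixes the century ("19") to the captured year.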
self.PATTERN = r"(?:avioit)\.?\s?-(?P<year>\d{2,4})"
self.OPTIONS = (re.UNICODE | re.IGNORECASE)
self.REQUIRES_MATCH_POSITION = True
self.SUBSTRING_WIDTH = 100
self.weddingYear = ""
self.preparedText = ""
self.initVars(text)
self._findDate(self.preparedText)
return self._constructReturnDict()
def initVars(self,text):
self.preparedText = self._prepareTextForExtraction(text)
def _prepareTextForExtraction(self, text):
t = textUtils.takeSubStrBasedOnPos(text, self.matchStartPosition, self.SUBSTRING_WIDTH)
t = textUtils.removeSpacesFromText(t)
return t
def _findDate(self, text):
try:
wedding = regexUtils.safeSearch(self.PATTERN, text, self.OPTIONS)
self._setFinalMatchPosition(wedding.end())
self.weddingYear = "19" + wedding.group("year")
except regexUtils.RegexNoneMatchException as e:
self.weddingYear = ""
def _setFinalMatchPosition(self, end):
        # Dirty fix for position inaccuracy that would otherwise break the location extraction.
self.matchFinalPosition = end + self.matchStartPosition - 4
def _constructReturnDict(self):
return {KEYS["weddingYear"]: ValueWrapper(self.weddingYear)}
| gpl-2.0 | -8,257,079,874,267,493,000 | 35.176471 | 95 | 0.698645 | false |
psiorx/drake | drake/thirdParty/xacro.py | 19 | 18904 | #! /usr/bin/env python
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Author: Stuart Glaser
import os.path, sys, os, getopt
import subprocess
from xml.dom.minidom import parse, parseString
import xml.dom
import re
import string
class XacroException(Exception): pass
def isnumber(x):
return hasattr(x, '__int__')
#import roslib; roslib.load_manifest('xacro')
#import roslib.substitution_args
def eval_extension(str):
    return str  # roslib.substitution_args.resolve_args(str, resolve_anon=False)
# Better pretty printing of xml
# Taken from http://ronrothman.com/public/leftbraned/xml-dom-minidom-toprettyxml-and-silly-whitespace/
def fixed_writexml(self, writer, indent="", addindent="", newl=""):
# indent = current indentation
# addindent = indentation to add to higher levels
# newl = newline string
writer.write(indent+"<" + self.tagName)
attrs = self._get_attributes()
a_names = attrs.keys()
a_names.sort()
for a_name in a_names:
writer.write(" %s=\"" % a_name)
xml.dom.minidom._write_data(writer, attrs[a_name].value)
writer.write("\"")
if self.childNodes:
if len(self.childNodes) == 1 \
and self.childNodes[0].nodeType == xml.dom.minidom.Node.TEXT_NODE:
writer.write(">")
self.childNodes[0].writexml(writer, "", "", "")
writer.write("</%s>%s" % (self.tagName, newl))
return
writer.write(">%s"%(newl))
for node in self.childNodes:
            if node.nodeType is not xml.dom.minidom.Node.TEXT_NODE:  # TEXT_NODE == 3
node.writexml(writer,indent+addindent,addindent,newl)
#node.writexml(writer,indent+addindent,addindent,newl)
writer.write("%s</%s>%s" % (indent,self.tagName,newl))
else:
writer.write("/>%s"%(newl))
# replace minidom's function with ours
xml.dom.minidom.Element.writexml = fixed_writexml
class Table:
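    """A simple chained symbol table: lookups fall back to the parent scope
    when a key is not defined locally, while assignments always go to the
    local scope."""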
def __init__(self, parent = None):
self.parent = parent
self.table = {}
def __getitem__(self, key):
if key in self.table:
return self.table[key]
elif self.parent:
return self.parent[key]
else:
raise KeyError(key)
def __setitem__(self, key, value):
self.table[key] = value
def __contains__(self, key):
return \
key in self.table or \
(self.parent and key in self.parent)
class QuickLexer(object):
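    """Minimal regex-driven lexer. Each keyword argument maps a token name to
    a regex; the token ids become attributes on the instance (e.g. lex.NUMBER).
    lex() loads a string and primes the first token, peek() returns the current
    (token_id, text) pair without consuming it, and next() consumes it and
    advances to the following token."""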
def __init__(self, **res):
self.str = ""
self.top = None
self.res = []
for k,v in res.items():
self.__setattr__(k, len(self.res))
self.res.append(v)
def lex(self, str):
self.str = str
self.top = None
self.next()
def peek(self):
return self.top
def next(self):
result = self.top
self.top = None
for i in range(len(self.res)):
m = re.match(self.res[i], self.str)
if m:
self.top = (i, m.group(0))
self.str = self.str[m.end():]
break
return result
def first_child_element(elt):
c = elt.firstChild
while c:
if c.nodeType == xml.dom.Node.ELEMENT_NODE:
return c
c = c.nextSibling
return None
def next_sibling_element(elt):
c = elt.nextSibling
while c:
if c.nodeType == xml.dom.Node.ELEMENT_NODE:
return c
c = c.nextSibling
return None
# Pre-order traversal of the elements
def next_element(elt):
child = first_child_element(elt)
if child: return child
while elt and elt.nodeType == xml.dom.Node.ELEMENT_NODE:
next = next_sibling_element(elt)
if next:
return next
elt = elt.parentNode
return None
# Pre-order traversal of all the nodes
def next_node(node):
if node.firstChild:
return node.firstChild
while node:
if node.nextSibling:
return node.nextSibling
node = node.parentNode
return None
def child_elements(elt):
c = elt.firstChild
while c:
if c.nodeType == xml.dom.Node.ELEMENT_NODE:
yield c
c = c.nextSibling
all_includes = []
## @throws XacroException if a parsing error occurs with an included document
def process_includes(doc, base_dir):
namespaces = {}
previous = doc.documentElement
elt = next_element(previous)
while elt:
if elt.tagName == 'include' or elt.tagName == 'xacro:include':
filename = eval_text(elt.getAttribute('filename'), {})
if not os.path.isabs(filename):
filename = os.path.join(base_dir, filename)
f = None
try:
try:
f = open(filename)
except IOError, e:
print elt
raise XacroException("included file \"%s\" could not be opened: %s" % (filename, str(e)))
try:
global all_includes
all_includes.append(filename)
included = parse(f)
except Exception, e:
raise XacroException("included file [%s] generated an error during XML parsing: %s"%(filename, str(e)))
finally:
if f: f.close()
# Replaces the include tag with the elements of the included file
for c in child_elements(included.documentElement):
elt.parentNode.insertBefore(c.cloneNode(1), elt)
elt.parentNode.removeChild(elt)
elt = None
# Grabs all the declared namespaces of the included document
for name, value in included.documentElement.attributes.items():
if name.startswith('xmlns:'):
namespaces[name] = value
else:
previous = elt
elt = next_element(previous)
# Makes sure the final document declares all the namespaces of the included documents.
for k,v in namespaces.items():
doc.documentElement.setAttribute(k, v)
# Returns a dictionary: { macro_name => macro_xml_block }
def grab_macros(doc):
macros = {}
previous = doc.documentElement
elt = next_element(previous)
while elt:
if elt.tagName == 'macro' or elt.tagName == 'xacro:macro':
name = elt.getAttribute('name')
macros[name] = elt
macros['xacro:' + name] = elt
elt.parentNode.removeChild(elt)
elt = None
else:
previous = elt
elt = next_element(previous)
return macros
# Returns a Table of the properties
def grab_properties(doc):
table = Table()
previous = doc.documentElement
elt = next_element(previous)
while elt:
if elt.tagName == 'property' or elt.tagName == 'xacro:property':
name = elt.getAttribute('name')
value = None
if elt.hasAttribute('value'):
value = elt.getAttribute('value')
else:
name = '**' + name
value = elt #debug
bad = string.whitespace + "${}"
has_bad = False
for b in bad:
if b in name:
has_bad = True
break
if has_bad:
sys.stderr.write('Property names may not have whitespace, ' +
'"{", "}", or "$" : "' + name + '"')
else:
table[name] = value
elt.parentNode.removeChild(elt)
elt = None
else:
previous = elt
elt = next_element(previous)
return table
def eat_ignore(lex):
while lex.peek() and lex.peek()[0] == lex.IGNORE:
lex.next()
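# The ${} expression evaluator below is a small recursive-descent parser:
#   expr    := ['+'|'-'] term (('+'|'-') term)*
#   term    := factor (('*'|'/') factor)*
#   factor  := ['-'] (literal | '(' expr ')')
#   literal := number | property symbol looked up in the symbol table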
def eval_lit(lex, symbols):
eat_ignore(lex)
if lex.peek()[0] == lex.NUMBER:
return float(lex.next()[1])
if lex.peek()[0] == lex.SYMBOL:
try:
value = symbols[lex.next()[1]]
except KeyError, ex:
#sys.stderr.write("Could not find symbol: %s\n" % str(ex))
raise XacroException("Property wasn't defined: %s" % str(ex))
if not (isnumber(value) or isinstance(value,(str,unicode))):
print [value], isinstance(value, str), type(value)
            raise XacroException("Property value is not a number or string: %s" % str(type(value)))
try:
return int(value)
except:
try:
return float(value)
except:
return value
raise XacroException("Bad literal")
def eval_factor(lex, symbols):
eat_ignore(lex)
neg = 1;
if lex.peek()[1] == '-':
lex.next()
neg = -1
if lex.peek()[0] in [lex.NUMBER, lex.SYMBOL]:
return neg * eval_lit(lex, symbols)
if lex.peek()[0] == lex.LPAREN:
lex.next()
eat_ignore(lex)
result = eval_expr(lex, symbols)
eat_ignore(lex)
if lex.next()[0] != lex.RPAREN:
raise XacroException("Unmatched left paren")
eat_ignore(lex)
return neg * result
raise XacroException("Misplaced operator")
def eval_term(lex, symbols):
eat_ignore(lex)
result = 0
if lex.peek()[0] in [lex.NUMBER, lex.SYMBOL, lex.LPAREN] \
or lex.peek()[1] == '-':
result = eval_factor(lex, symbols)
eat_ignore(lex)
while lex.peek() and lex.peek()[1] in ['*', '/']:
op = lex.next()[1]
n = eval_factor(lex, symbols)
if op == '*':
result = float(result) * float(n)
elif op == '/':
result = float(result) / float(n)
else:
            raise XacroException("Invalid operator: %s" % op)
eat_ignore(lex)
return result
def eval_expr(lex, symbols):
eat_ignore(lex)
op = None
if lex.peek()[0] == lex.OP:
op = lex.next()[1]
if not op in ['+', '-']:
raise XacroException("Invalid operation. Must be '+' or '-'")
result = eval_term(lex, symbols)
if op == '-':
result = -float(result)
eat_ignore(lex)
while lex.peek() and lex.peek()[1] in ['+', '-']:
op = lex.next()[1]
n = eval_term(lex, symbols)
if op == '+':
result = float(result) + float(n)
if op == '-':
result = float(result) - float(n)
eat_ignore(lex)
return result
def eval_text(text, symbols):
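    """Evaluate a text string: ${...} spans are run through the expression
    evaluator against the symbol table, $(...) spans are handed to
    eval_extension, a run of '$$' before '{' is unescaped by dropping one '$'
    (so '$${' yields a literal '${'), and plain text is copied unchanged."""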
def handle_expr(s):
lex = QuickLexer(IGNORE = r"\s+",
NUMBER = r"(\d+(\.\d*)?|\.\d+)([eE][-+]?\d+)?",
SYMBOL = r"[a-zA-Z_]\w*",
OP = r"[\+\-\*/^]",
LPAREN = r"\(",
RPAREN = r"\)")
lex.lex(s)
return eval_expr(lex, symbols)
def handle_extension(s):
return eval_extension("$(%s)" % s)
results = []
lex = QuickLexer(DOLLAR_DOLLAR_BRACE = r"\$\$+\{",
EXPR = r"\$\{[^\}]*\}",
EXTENSION = r"\$\([^\)]*\)",
TEXT = r"([^\$]|\$[^{(]|\$$)+")
lex.lex(text)
while lex.peek():
if lex.peek()[0] == lex.EXPR:
results.append(handle_expr(lex.next()[1][2:-1]))
elif lex.peek()[0] == lex.EXTENSION:
results.append(handle_extension(lex.next()[1][2:-1]))
elif lex.peek()[0] == lex.TEXT:
results.append(lex.next()[1])
elif lex.peek()[0] == lex.DOLLAR_DOLLAR_BRACE:
results.append(lex.next()[1][1:])
return ''.join(map(str, results))
# Expands macros, replaces properties, and evaluates expressions
def eval_all(root, macros, symbols):
# Evaluates the attributes for the root node
for at in root.attributes.items():
result = eval_text(at[1], symbols)
root.setAttribute(at[0], result)
previous = root
node = next_node(previous)
while node:
if node.nodeType == xml.dom.Node.ELEMENT_NODE:
if node.tagName in macros:
body = macros[node.tagName].cloneNode(deep = True)
params = body.getAttribute('params').split()
# Expands the macro
scoped = Table(symbols)
for name,value in node.attributes.items():
if not name in params:
raise XacroException("Invalid parameter \"%s\" while expanding macro \"%s\"" % \
(str(name), str(node.tagName)))
params.remove(name)
scoped[name] = eval_text(value, symbols)
# Pulls out the block arguments, in order
cloned = node.cloneNode(deep = True)
eval_all(cloned, macros, symbols)
block = cloned.firstChild
for param in params[:]:
if param[0] == '*':
while block and block.nodeType != xml.dom.Node.ELEMENT_NODE:
block = block.nextSibling
if not block:
raise XacroException("Not enough blocks while evaluating macro %s" % str(node.tagName))
params.remove(param)
scoped[param] = block
block = block.nextSibling
if params:
raise XacroException("Some parameters were not set for macro %s" % \
str(node.tagName))
eval_all(body, macros, scoped)
# Replaces the macro node with the expansion
for e in list(child_elements(body)): # Ew
node.parentNode.insertBefore(e, node)
node.parentNode.removeChild(node)
node = None
elif node.tagName == 'insert_block' or node.tagName == 'xacro:insert_block':
name = node.getAttribute('name')
if ("**" + name) in symbols:
# Multi-block
block = symbols['**' + name]
for e in list(child_elements(block)):
node.parentNode.insertBefore(e.cloneNode(deep=True), node)
node.parentNode.removeChild(node)
elif ("*" + name) in symbols:
# Single block
block = symbols['*' + name]
node.parentNode.insertBefore(block.cloneNode(deep=True), node)
node.parentNode.removeChild(node)
else:
raise XacroException("Block \"%s\" was never declared" % name)
node = None
else:
# Evals the attributes
for at in node.attributes.items():
result = eval_text(at[1], symbols)
node.setAttribute(at[0], result)
previous = node
elif node.nodeType == xml.dom.Node.TEXT_NODE:
node.data = eval_text(node.data, symbols)
previous = node
else:
previous = node
node = next_node(previous)
return macros
# Expands everything except includes
def eval_self_contained(doc):
macros = grab_macros(doc)
symbols = grab_properties(doc)
eval_all(doc.documentElement, macros, symbols)
def print_usage(exit_code = 0):
print "Usage: %s [-o <output>] <input>" % 'xacro.py'
print " %s --deps Prints dependencies" % 'xacro.py'
print " %s --includes Only evalutes includes" % 'xacro.py'
sys.exit(exit_code)
def main():
try:
opts, args = getopt.gnu_getopt(sys.argv[1:], "ho:", ['deps', 'includes'])
except getopt.GetoptError, err:
print str(err)
print_usage(2)
just_deps = False
just_includes = False
output = sys.stdout
for o, a in opts:
if o == '-h':
print_usage(0)
elif o == '-o':
output = open(a, 'w')
elif o == '--deps':
just_deps = True
elif o == '--includes':
just_includes = True
if len(args) < 1:
print "No input given"
print_usage(2)
f = open(args[0])
doc = None
try:
doc = parse(f)
except xml.parsers.expat.ExpatError:
sys.stderr.write("Expat parsing error. Check that:\n")
sys.stderr.write(" - Your XML is correctly formed\n")
sys.stderr.write(" - You have the xacro xmlns declaration: " +
"xmlns:xacro=\"http://www.ros.org/wiki/xacro\"\n")
sys.stderr.write("\n")
raise
finally:
f.close()
    process_includes(doc, os.path.dirname(args[0]))
if just_deps:
for inc in all_includes:
sys.stdout.write(inc + " ")
sys.stdout.write("\n")
elif just_includes:
doc.writexml(output)
print
else:
eval_self_contained(doc)
banner = [xml.dom.minidom.Comment(c) for c in
[" %s " % ('='*83),
" | This document was autogenerated by xacro from %-30s | " % args[0],
" | EDITING THIS FILE BY HAND IS NOT RECOMMENDED %-30s | " % "",
" %s " % ('='*83)]]
first = doc.firstChild
for comment in banner:
doc.insertBefore(comment, first)
output.write(doc.toprettyxml(indent = ' '))
#doc.writexml(output, newl = "\n")
print
main()
| bsd-3-clause | 1,156,979,970,147,458,300 | 31.991274 | 123 | 0.545863 | false |
toshywoshy/ansible | lib/ansible/modules/network/fortios/fortios_firewall_ldb_monitor.py | 7 | 12613 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_ldb_monitor
short_description: Configure server load balancing health monitors in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify firewall feature and ldb_monitor category.
      Examples include all parameters, and values need to be adjusted to data sources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
state:
description:
- Indicates whether to create or remove the object.
              This attribute was already present in previous versions at a deeper level.
It has been moved out to this outer level.
type: str
required: false
choices:
- present
- absent
version_added: 2.9
firewall_ldb_monitor:
description:
- Configure server load balancing health monitors.
default: null
type: dict
suboptions:
state:
description:
- B(Deprecated)
- Starting with Ansible 2.9 we recommend using the top-level 'state' parameter.
- HORIZONTALLINE
- Indicates whether to create or remove the object.
type: str
required: false
choices:
- present
- absent
http_get:
description:
- URL used to send a GET request to check the health of an HTTP server.
type: str
http_match:
description:
- String to match the value expected in response to an HTTP-GET request.
type: str
http_max_redirects:
description:
- The maximum number of HTTP redirects to be allowed (0 - 5).
type: int
interval:
description:
- Time between health checks (5 - 65635 sec).
type: int
name:
description:
- Monitor name.
required: true
type: str
port:
description:
- Service port used to perform the health check. If 0, health check monitor inherits port configured for the server (0 - 65635).
type: int
retry:
description:
- Number health check attempts before the server is considered down (1 - 255).
type: int
timeout:
description:
- Time to wait to receive response to a health check from a server. Reaching the timeout means the health check failed (1 - 255 sec).
type: int
type:
description:
- Select the Monitor type used by the health check monitor to check the health of the server (PING | TCP | HTTP).
type: str
choices:
- ping
- tcp
- http
- passive-sip
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure server load balancing health monitors.
fortios_firewall_ldb_monitor:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
firewall_ldb_monitor:
http_get: "<your_own_value>"
http_match: "<your_own_value>"
http_max_redirects: "5"
interval: "6"
name: "default_name_7"
port: "8"
retry: "9"
timeout: "10"
type: "ping"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_firewall_ldb_monitor_data(json):
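    """Return only the ldb-monitor options that were actually supplied (not None)."""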
option_list = ['http_get', 'http_match', 'http_max_redirects',
'interval', 'name', 'port',
'retry', 'timeout', 'type']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
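    """Recursively convert underscores in dictionary keys to hyphens, as expected by the FortiOS API."""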
if isinstance(data, list):
for i, elem in enumerate(data):
data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
def firewall_ldb_monitor(data, fos):
vdom = data['vdom']
if 'state' in data and data['state']:
state = data['state']
elif 'state' in data['firewall_ldb_monitor'] and data['firewall_ldb_monitor']:
state = data['firewall_ldb_monitor']['state']
else:
state = True
firewall_ldb_monitor_data = data['firewall_ldb_monitor']
filtered_data = underscore_to_hyphen(filter_firewall_ldb_monitor_data(firewall_ldb_monitor_data))
if state == "present":
return fos.set('firewall',
'ldb-monitor',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('firewall',
'ldb-monitor',
mkey=filtered_data['name'],
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_firewall(data, fos):
if data['firewall_ldb_monitor']:
resp = firewall_ldb_monitor(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"firewall_ldb_monitor": {
"required": False, "type": "dict", "default": None,
"options": {
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"http_get": {"required": False, "type": "str"},
"http_match": {"required": False, "type": "str"},
"http_max_redirects": {"required": False, "type": "int"},
"interval": {"required": False, "type": "int"},
"name": {"required": True, "type": "str"},
"port": {"required": False, "type": "int"},
"retry": {"required": False, "type": "int"},
"timeout": {"required": False, "type": "int"},
"type": {"required": False, "type": "str",
"choices": ["ping", "tcp", "http",
"passive-sip"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_firewall(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_firewall(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 | 7,255,643,223,289,773,000 | 31.507732 | 153 | 0.572901 | false |
wfxiang08/django190 | django/contrib/gis/gdal/__init__.py | 130 | 2610 | """
This module houses ctypes interfaces for GDAL objects. The following GDAL
objects are supported:
CoordTransform: Used for coordinate transformations from one spatial
reference system to another.
Driver: Wraps an OGR data source driver.
DataSource: Wrapper for the OGR data source object, supports
OGR-supported data sources.
Envelope: A ctypes structure for bounding boxes (GDAL library
not required).
OGRGeometry: Object for accessing OGR Geometry functionality.
OGRGeomType: A class for representing the different OGR Geometry
types (GDAL library not required).
SpatialReference: Represents OSR Spatial Reference objects.
The GDAL library will be imported from the system path using the default
library name for the current OS. The default library path may be overridden
by setting `GDAL_LIBRARY_PATH` in your settings with the path to the GDAL C
library on your system.
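For example, in a Django settings module (the path below is purely illustrative):
``GDAL_LIBRARY_PATH = '/usr/local/lib/libgdal.so'``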
GDAL links to a large number of external libraries that consume RAM when
loaded. Thus, it may be desirable to disable GDAL on systems with limited
RAM resources -- this may be accomplished by setting `GDAL_LIBRARY_PATH`
to a non-existent file location (e.g., `GDAL_LIBRARY_PATH='/null/path'`;
setting to None/False/'' will not work as a string must be given).
"""
from django.contrib.gis.gdal.error import (check_err, GDALException,
OGRException, OGRIndexError, SRSException) # NOQA
from django.contrib.gis.gdal.geomtype import OGRGeomType # NOQA
__all__ = [
'check_err', 'GDALException', 'OGRException', 'OGRIndexError',
'SRSException', 'OGRGeomType', 'HAS_GDAL',
]
# Attempting to import objects that depend on the GDAL library. The
# HAS_GDAL flag will be set to True if the library is present on
# the system.
try:
from django.contrib.gis.gdal.driver import Driver # NOQA
from django.contrib.gis.gdal.datasource import DataSource # NOQA
from django.contrib.gis.gdal.libgdal import gdal_version, gdal_full_version, GDAL_VERSION # NOQA
from django.contrib.gis.gdal.raster.source import GDALRaster # NOQA
from django.contrib.gis.gdal.srs import SpatialReference, CoordTransform # NOQA
from django.contrib.gis.gdal.geometries import OGRGeometry # NOQA
HAS_GDAL = True
__all__ += [
'Driver', 'DataSource', 'gdal_version', 'gdal_full_version',
'GDAL_VERSION', 'SpatialReference', 'CoordTransform', 'OGRGeometry',
]
except GDALException:
HAS_GDAL = False
try:
from django.contrib.gis.gdal.envelope import Envelope
__all__ += ['Envelope']
except ImportError:
# No ctypes, but don't raise an exception.
pass
| bsd-3-clause | -4,136,187,003,226,447,000 | 38.545455 | 101 | 0.744828 | false |
double-y/django | django/conf/locale/mk/formats.py | 504 | 1742 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'd F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j.m.Y'
SHORT_DATETIME_FORMAT = 'j.m.Y H:i'
FIRST_DAY_OF_WEEK = 1
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
'%d. %m. %Y', '%d. %m. %y', # '25. 10. 2006', '25. 10. 06'
]
DATETIME_INPUT_FORMATS = [
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
'%d.%m.%y %H:%M:%S.%f', # '25.10.06 14:30:59.000200'
'%d.%m.%y %H:%M', # '25.10.06 14:30'
'%d.%m.%y', # '25.10.06'
'%d. %m. %Y %H:%M:%S', # '25. 10. 2006 14:30:59'
'%d. %m. %Y %H:%M:%S.%f', # '25. 10. 2006 14:30:59.000200'
'%d. %m. %Y %H:%M', # '25. 10. 2006 14:30'
'%d. %m. %Y', # '25. 10. 2006'
'%d. %m. %y %H:%M:%S', # '25. 10. 06 14:30:59'
'%d. %m. %y %H:%M:%S.%f', # '25. 10. 06 14:30:59.000200'
'%d. %m. %y %H:%M', # '25. 10. 06 14:30'
'%d. %m. %y', # '25. 10. 06'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| bsd-3-clause | 6,987,838,347,134,050,000 | 37.711111 | 77 | 0.493685 | false |
pombredanne/python-ptrace | examples/itrace.py | 1 | 1770 | #!/usr/bin/env python
"""
Here is a tool which I have been using to debug libc startup code where I
didn't find gdb very helpful. It single steps the process and prints each
instruction pointer address. To go faster, it allows a number of syscalls to
run before starting single-stepping.
It's possible to pipe the addresses through addr2line to get a very
simple tracing debugger. :-)
I couldn't see a way to catch syscalls and single step at the same
time. As a consequence the tool can't handle multiple threads.
Mark
"""
import signal
from ptrace.debugger import ProcessExit, ProcessSignal
import strace
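# The printed instruction-pointer addresses can be fed to addr2line, e.g.
# (illustrative only; the exact command-line options come from strace.SyscallTracer
# and the program path is hypothetical):
#   python itrace.py -n 1000 ./program
#   addr2line -e ./program 0x08048400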
class Tracer(strace.SyscallTracer):
def createCommonOptions(self, parser):
parser.add_option(
"-n", dest="syscall_limit", type="int", default=None,
help="Number of syscalls before switching to single step")
super(Tracer, self).createCommonOptions(parser)
def syscallTrace(self, process):
syscall_limit = self.options.syscall_limit
i = 0
while i < syscall_limit or syscall_limit is None:
print i
i += 1
process.syscall()
self.debugger.waitSyscall()
i = 0
while self.debugger:
eip = process.getInstrPointer()
print i, process.pid, "[%08x]" % eip
i += 1
process.singleStep()
event = self.debugger.waitProcessEvent()
if isinstance(event, ProcessExit):
print "process exit"
return
if (isinstance(event, ProcessSignal) and
event.signum & ~128 != signal.SIGTRAP):
print "died with signal %i" % event.signum
return
if __name__ == "__main__":
Tracer().main()
| gpl-2.0 | 4,968,696,768,076,982,000 | 32.396226 | 76 | 0.626554 | false |
maaaks/andreas | andreas/commands/dbcommands.py | 1 | 1291 | from typing import List, Type
from andreas.db.database import db
from andreas.db.model import Model
from andreas.models.event import Event
from andreas.models.keypair import KeyPair
from andreas.models.post import Post
from andreas.models.relations import PostPostRelation, UserPostRelation
from andreas.models.server import Server
from andreas.models.signature import Signature, UnverifiedSignature
from andreas.models.user import User
models: List[Type[Model]] = [
Event,
KeyPair,
Post,
PostPostRelation,
Server,
Signature,
UnverifiedSignature,
User,
UserPostRelation,
]
@db.atomic()
def updatedb():
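    """Create database tables for all registered models."""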
db.create_tables(models)
@db.atomic()
def populatedb():
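    """Create the tables and seed the default local server and root user."""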
updatedb()
# Local server
if not Server.select().where(Server.is_local).count():
server = Server()
server.is_local = True
server.name = 'localhost'
server.engine_name = 'Andreas'
server.engine_version = '0.0.1'
server.save()
# Local user
if not User.select().count():
user = User()
user.server = Server.get(Server.is_local)
user.name = 'root'
user.save()
@db.atomic()
def dropdb():
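    """Drop all model tables if they exist."""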
db.drop_tables(models, safe=True)
@db.atomic()
def resetdb():
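    """Drop every table and recreate it with the default data."""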
dropdb()
populatedb() | mit | 4,788,224,980,873,828,000 | 22.071429 | 71 | 0.670798 | false |
aliyun/oss-ftp | python27/win32/Lib/distutils/tests/test_register.py | 39 | 8839 | # -*- encoding: utf8 -*-
"""Tests for distutils.command.register."""
import os
import unittest
import getpass
import urllib2
import warnings
from test.test_support import check_warnings, run_unittest
from distutils.command import register as register_module
from distutils.command.register import register
from distutils.errors import DistutilsSetupError
from distutils.tests.test_config import PyPIRCCommandTestCase
try:
import docutils
except ImportError:
docutils = None
PYPIRC_NOPASSWORD = """\
[distutils]
index-servers =
server1
[server1]
username:me
"""
WANTED_PYPIRC = """\
[distutils]
index-servers =
pypi
[pypi]
username:tarek
password:password
"""
class RawInputs(object):
"""Fakes user inputs."""
def __init__(self, *answers):
self.answers = answers
self.index = 0
def __call__(self, prompt=''):
try:
return self.answers[self.index]
finally:
self.index += 1
class FakeOpener(object):
"""Fakes a PyPI server"""
def __init__(self):
self.reqs = []
def __call__(self, *args):
return self
def open(self, req):
self.reqs.append(req)
return self
def read(self):
return 'xxx'
class RegisterTestCase(PyPIRCCommandTestCase):
def setUp(self):
super(RegisterTestCase, self).setUp()
# patching the password prompt
self._old_getpass = getpass.getpass
def _getpass(prompt):
return 'password'
getpass.getpass = _getpass
self.old_opener = urllib2.build_opener
self.conn = urllib2.build_opener = FakeOpener()
def tearDown(self):
getpass.getpass = self._old_getpass
urllib2.build_opener = self.old_opener
super(RegisterTestCase, self).tearDown()
def _get_cmd(self, metadata=None):
if metadata is None:
metadata = {'url': 'xxx', 'author': 'xxx',
'author_email': 'xxx',
'name': 'xxx', 'version': 'xxx'}
pkg_info, dist = self.create_dist(**metadata)
return register(dist)
def test_create_pypirc(self):
# this test makes sure a .pypirc file
# is created when requested.
# let's create a register instance
cmd = self._get_cmd()
# we shouldn't have a .pypirc file yet
self.assertFalse(os.path.exists(self.rc))
# patching raw_input and getpass.getpass
# so register gets happy
#
# Here's what we are faking :
# use your existing login (choice 1.)
# Username : 'tarek'
# Password : 'password'
# Save your login (y/N)? : 'y'
inputs = RawInputs('1', 'tarek', 'y')
register_module.raw_input = inputs.__call__
# let's run the command
try:
cmd.run()
finally:
del register_module.raw_input
# we should have a brand new .pypirc file
self.assertTrue(os.path.exists(self.rc))
# with the content similar to WANTED_PYPIRC
f = open(self.rc)
try:
content = f.read()
self.assertEqual(content, WANTED_PYPIRC)
finally:
f.close()
# now let's make sure the .pypirc file generated
# really works : we shouldn't be asked anything
# if we run the command again
def _no_way(prompt=''):
raise AssertionError(prompt)
register_module.raw_input = _no_way
cmd.show_response = 1
cmd.run()
# let's see what the server received : we should
# have 2 similar requests
self.assertEqual(len(self.conn.reqs), 2)
req1 = dict(self.conn.reqs[0].headers)
req2 = dict(self.conn.reqs[1].headers)
self.assertEqual(req2['Content-length'], req1['Content-length'])
self.assertIn('xxx', self.conn.reqs[1].data)
def test_password_not_in_file(self):
self.write_file(self.rc, PYPIRC_NOPASSWORD)
cmd = self._get_cmd()
cmd._set_config()
cmd.finalize_options()
cmd.send_metadata()
# dist.password should be set
# therefore used afterwards by other commands
self.assertEqual(cmd.distribution.password, 'password')
def test_registering(self):
# this test runs choice 2
cmd = self._get_cmd()
inputs = RawInputs('2', 'tarek', '[email protected]')
register_module.raw_input = inputs.__call__
try:
# let's run the command
cmd.run()
finally:
del register_module.raw_input
# we should have send a request
self.assertEqual(len(self.conn.reqs), 1)
req = self.conn.reqs[0]
headers = dict(req.headers)
self.assertEqual(headers['Content-length'], '608')
self.assertIn('tarek', req.data)
def test_password_reset(self):
# this test runs choice 3
cmd = self._get_cmd()
inputs = RawInputs('3', '[email protected]')
register_module.raw_input = inputs.__call__
try:
# let's run the command
cmd.run()
finally:
del register_module.raw_input
# we should have send a request
self.assertEqual(len(self.conn.reqs), 1)
req = self.conn.reqs[0]
headers = dict(req.headers)
self.assertEqual(headers['Content-length'], '290')
self.assertIn('tarek', req.data)
@unittest.skipUnless(docutils is not None, 'needs docutils')
def test_strict(self):
        # testing the strict option
# when on, the register command stops if
# the metadata is incomplete or if
# long_description is not reSt compliant
# empty metadata
cmd = self._get_cmd({})
cmd.ensure_finalized()
cmd.strict = 1
self.assertRaises(DistutilsSetupError, cmd.run)
# metadata are OK but long_description is broken
metadata = {'url': 'xxx', 'author': 'xxx',
'author_email': u'éxéxé',
'name': 'xxx', 'version': 'xxx',
'long_description': 'title\n==\n\ntext'}
cmd = self._get_cmd(metadata)
cmd.ensure_finalized()
cmd.strict = 1
self.assertRaises(DistutilsSetupError, cmd.run)
# now something that works
metadata['long_description'] = 'title\n=====\n\ntext'
cmd = self._get_cmd(metadata)
cmd.ensure_finalized()
cmd.strict = 1
inputs = RawInputs('1', 'tarek', 'y')
register_module.raw_input = inputs.__call__
# let's run the command
try:
cmd.run()
finally:
del register_module.raw_input
# strict is not by default
cmd = self._get_cmd()
cmd.ensure_finalized()
inputs = RawInputs('1', 'tarek', 'y')
register_module.raw_input = inputs.__call__
# let's run the command
try:
cmd.run()
finally:
del register_module.raw_input
# and finally a Unicode test (bug #12114)
metadata = {'url': u'xxx', 'author': u'\u00c9ric',
'author_email': u'xxx', u'name': 'xxx',
'version': u'xxx',
'description': u'Something about esszet \u00df',
'long_description': u'More things about esszet \u00df'}
cmd = self._get_cmd(metadata)
cmd.ensure_finalized()
cmd.strict = 1
inputs = RawInputs('1', 'tarek', 'y')
register_module.raw_input = inputs.__call__
# let's run the command
try:
cmd.run()
finally:
del register_module.raw_input
@unittest.skipUnless(docutils is not None, 'needs docutils')
def test_register_invalid_long_description(self):
description = ':funkie:`str`' # mimic Sphinx-specific markup
metadata = {'url': 'xxx', 'author': 'xxx',
'author_email': 'xxx',
'name': 'xxx', 'version': 'xxx',
'long_description': description}
cmd = self._get_cmd(metadata)
cmd.ensure_finalized()
cmd.strict = True
inputs = RawInputs('2', 'tarek', '[email protected]')
register_module.raw_input = inputs
self.addCleanup(delattr, register_module, 'raw_input')
self.assertRaises(DistutilsSetupError, cmd.run)
def test_check_metadata_deprecated(self):
# makes sure make_metadata is deprecated
cmd = self._get_cmd()
with check_warnings() as w:
warnings.simplefilter("always")
cmd.check_metadata()
self.assertEqual(len(w.warnings), 1)
def test_suite():
return unittest.makeSuite(RegisterTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
| mit | 2,733,337,625,858,231,300 | 29.468966 | 75 | 0.574581 | false |
Adriwr/Clinica | vendor/doctrine/orm/docs/en/_exts/configurationblock.py | 2577 | 3506 | #Copyright (c) 2010 Fabien Potencier
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
from docutils.parsers.rst import Directive, directives
from docutils import nodes
class configurationblock(nodes.General, nodes.Element):
pass
class ConfigurationBlock(Directive):
has_content = True
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = True
option_spec = {}
formats = {
'html': 'HTML',
'xml': 'XML',
'php': 'PHP',
'yaml': 'YAML',
'jinja': 'Twig',
'html+jinja': 'Twig',
'jinja+html': 'Twig',
'php+html': 'PHP',
'html+php': 'PHP',
'ini': 'INI',
'php-annotations': 'Annotations',
}
def run(self):
env = self.state.document.settings.env
node = nodes.Element()
node.document = self.state.document
self.state.nested_parse(self.content, self.content_offset, node)
entries = []
for i, child in enumerate(node):
if isinstance(child, nodes.literal_block):
# add a title (the language name) before each block
#targetid = "configuration-block-%d" % env.new_serialno('configuration-block')
#targetnode = nodes.target('', '', ids=[targetid])
#targetnode.append(child)
innernode = nodes.emphasis(self.formats[child['language']], self.formats[child['language']])
para = nodes.paragraph()
para += [innernode, child]
entry = nodes.list_item('')
entry.append(para)
entries.append(entry)
resultnode = configurationblock()
resultnode.append(nodes.bullet_list('', *entries))
return [resultnode]
def visit_configurationblock_html(self, node):
self.body.append(self.starttag(node, 'div', CLASS='configuration-block'))
def depart_configurationblock_html(self, node):
self.body.append('</div>\n')
def visit_configurationblock_latex(self, node):
pass
def depart_configurationblock_latex(self, node):
pass
def setup(app):
app.add_node(configurationblock,
html=(visit_configurationblock_html, depart_configurationblock_html),
latex=(visit_configurationblock_latex, depart_configurationblock_latex))
app.add_directive('configuration-block', ConfigurationBlock)
| mit | 8,855,156,550,038,624,000 | 36.698925 | 108 | 0.650029 | false |
laborautonomo/opps | opps/views/generic/json_views.py | 1 | 2941 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from django.http import HttpResponse
from django.views.generic import TemplateView
from django.views.generic.detail import BaseDetailView
from django.views.generic.detail import SingleObjectTemplateResponseMixin
def response_mimetype(request):
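    """Return "application/json" when the client accepts it, otherwise "text/plain"."""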
if "application/json" in request.META['HTTP_ACCEPT']:
return "application/json"
return "text/plain"
class JSONResponse(HttpResponse):
"""JSON response class."""
def __init__(self, obj='', json_opts={}, mimetype="application/json",
*args, **kwargs):
content = json.dumps(obj, **json_opts)
super(JSONResponse, self).__init__(content, mimetype, *args, **kwargs)
class JSONPResponse(HttpResponse):
"""JSONP response class."""
def __init__(self, obj='', json_opts={}, mimetype="application/jsonp",
jsonp_callback='jsonpCallback', *args, **kwargs):
_json_content = json.dumps(obj, **json_opts)
content = "{}({})".format(jsonp_callback, _json_content)
super(JSONPResponse, self).__init__(content, mimetype, *args,
**kwargs)
class JSONResponseMixin(object):
"""
A mixin that can be used to render a JSON response.
"""
HEADERS = {}
def render_to_json_response(self, context, **response_kwargs):
"""
Returns a JSON response, transforming 'context' to make the payload.
"""
response = HttpResponse(
self.convert_context_to_json(context),
content_type='application/json',
**response_kwargs
)
for key, value in self.HEADERS.items():
response[key] = value
return response
def convert_context_to_json(self, context):
"Convert the context dictionary into a JSON object"
# Note: This is *EXTREMELY* naive; in reality, you'll need
# to do much more complex handling to ensure that arbitrary
# objects -- such as Django model instances or querysets
# -- can be serialized as JSON.
return json.dumps(context)
class JSONView(JSONResponseMixin, TemplateView):
def render_to_response(self, context, **response_kwargs):
return self.render_to_json_response(context, **response_kwargs)
class JSONDetailView(JSONResponseMixin, BaseDetailView):
def render_to_response(self, context, **response_kwargs):
return self.render_to_json_response(context, **response_kwargs)
class HybridDetailView(JSONResponseMixin,
SingleObjectTemplateResponseMixin,
BaseDetailView):
def render_to_response(self, context):
# Look for a 'format=json' GET argument
if self.request.GET.get('format') == 'json':
return self.render_to_json_response(context)
else:
return super(HybridDetailView, self).render_to_response(context)
| mit | 7,314,731,393,627,772,000 | 34.865854 | 78 | 0.639918 | false |
AntonioJBT/CGATPipeline_core | CGATPipelines/Pipeline/Utils.py | 2 | 2223 | """Utils.py - Utilities for ruffus pipelines
============================================
Reference
---------
"""
import inspect
import sys
def isTest():
"""return True if the pipeline is run in a "testing" mode.
    This method checks if ``--is-test`` has been given as a
command line option.
"""
return "--is-test" in sys.argv
def getCallerLocals(decorators=0):
'''returns the locals of the calling function.
from http://pylab.blogspot.com/2009/02/python-accessing-caller-locals-from.html
Arguments
---------
decorators : int
Number of contexts to go up to reach calling function
of interest.
Returns
-------
locals : dict
Dictionary of variable defined in the context of the
calling function.
'''
f = sys._getframe(2 + decorators)
args = inspect.getargvalues(f)
return args[3]
def getCaller(decorators=0):
"""return the name of the calling module.
Arguments
---------
decorators : int
Number of contexts to go up to reach calling function
of interest.
Returns
-------
mod : object
The calling module
"""
frm = inspect.stack()[2 + decorators]
mod = inspect.getmodule(frm[0])
return mod
def add_doc(value, replace=False):
"""add doc string of value to function that is decorated.
The original doc-string is added as the first paragraph(s)
inside the new doc-string.
Parameter
---------
replace : bool
If True, replace documentation rather than appending
"""
def _doc(func):
if func.__doc__:
lines = value.__doc__.split("\n")
for x, line in enumerate(lines):
if line.strip() == "":
break
            # insert appropriate indentation
# currently hard-coded, can be derived
# from doc string?
if not replace:
lines.insert(x+1, " " * 4 +
func.__doc__)
func.__doc__ = "\n".join(lines)
else:
func.__doc__ = value.__doc__
else:
func.__doc__ = value.__doc__
return func
return _doc
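# Illustrative use of add_doc (all names below are hypothetical):
#
#   def canonical():
#       """Shared documentation for a family of pipeline tasks."""
#
#   @add_doc(canonical)
#   def my_task():
#       """Task-specific details appended to the shared text."""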
| mit | 911,938,815,966,374,100 | 22.903226 | 83 | 0.545659 | false |
joshmoore/zeroc-ice | java/test/Ice/operations/run.py | 1 | 1403 | #!/usr/bin/env python
# **********************************************************************
#
# Copyright (c) 2003-2011 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
import os, sys
path = [ ".", "..", "../..", "../../..", "../../../.." ]
head = os.path.dirname(sys.argv[0])
if len(head) > 0:
path = [os.path.join(head, p) for p in path]
path = [os.path.abspath(p) for p in path if os.path.exists(os.path.join(p, "scripts", "TestUtil.py")) ]
if len(path) == 0:
    raise RuntimeError("can't find toplevel directory!")
sys.path.append(os.path.join(path[0]))
from scripts import *
print "tests with regular server."
TestUtil.clientServerTest(additionalClientOptions = "--Ice.Warn.AMICallback=0")
print "tests with AMD server."
TestUtil.clientServerTest(additionalClientOptions = "--Ice.Warn.AMICallback=0", server="test.Ice.operations.AMDServer")
print "tests with TIE server."
TestUtil.clientServerTest(additionalClientOptions = "--Ice.Warn.AMICallback=0", server="test.Ice.operations.TieServer")
print "tests with AMD TIE server."
TestUtil.clientServerTest(additionalClientOptions = "--Ice.Warn.AMICallback=0", server="test.Ice.operations.AMDTieServer")
print "tests with collocated server."
TestUtil.collocatedTest()
| gpl-2.0 | 7,911,121,298,220,474,000 | 36.918919 | 122 | 0.642195 | false |
EliteTK/qutebrowser | qutebrowser/browser/webelem.py | 2 | 12858 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2016 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Generic web element related code.
Module attributes:
Group: Enum for different kinds of groups.
SELECTORS: CSS selectors for different groups of elements.
FILTERS: A dictionary of filter functions for the modes.
The filter for "links" filters javascript:-links and a-tags
without "href".
"""
import collections.abc
from PyQt5.QtCore import QUrl, Qt, QEvent, QTimer
from PyQt5.QtGui import QMouseEvent
from qutebrowser.config import config
from qutebrowser.utils import log, usertypes, utils, qtutils
Group = usertypes.enum('Group', ['all', 'links', 'images', 'url', 'prevnext',
'inputs'])
SELECTORS = {
Group.all: ('a, area, textarea, select, input:not([type=hidden]), button, '
'frame, iframe, link, [onclick], [onmousedown], [role=link], '
'[role=option], [role=button], img'),
Group.links: 'a, area, link, [role=link]',
Group.images: 'img',
Group.url: '[src], [href]',
Group.prevnext: 'a, area, button, link, [role=button]',
Group.inputs: ('input[type=text], input[type=email], input[type=url], '
'input[type=tel], input[type=number], '
'input[type=password], input[type=search], '
'input:not([type]), textarea'),
}
def filter_links(elem):
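    """Keep only elements that have a href whose scheme is not javascript."""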
return 'href' in elem and QUrl(elem['href']).scheme() != 'javascript'
FILTERS = {
Group.links: filter_links,
Group.prevnext: filter_links,
}
class Error(Exception):
"""Base class for WebElement errors."""
pass
class AbstractWebElement(collections.abc.MutableMapping):
"""A wrapper around QtWebKit/QtWebEngine web element.
Attributes:
tab: The tab associated with this element.
"""
def __init__(self, tab):
self._tab = tab
def __eq__(self, other):
raise NotImplementedError
def __str__(self):
return self.text()
def __getitem__(self, key):
raise NotImplementedError
def __setitem__(self, key, val):
raise NotImplementedError
def __delitem__(self, key):
raise NotImplementedError
def __iter__(self):
raise NotImplementedError
def __len__(self):
raise NotImplementedError
def __repr__(self):
try:
html = utils.compact_text(self.outer_xml(), 500)
except Error:
html = None
return utils.get_repr(self, html=html)
def has_frame(self):
"""Check if this element has a valid frame attached."""
raise NotImplementedError
def geometry(self):
"""Get the geometry for this element."""
raise NotImplementedError
def style_property(self, name, *, strategy):
"""Get the element style resolved with the given strategy."""
raise NotImplementedError
def classes(self):
"""Get a list of classes assigned to this element."""
raise NotImplementedError
def tag_name(self):
"""Get the tag name of this element.
The returned name will always be lower-case.
"""
raise NotImplementedError
def outer_xml(self):
"""Get the full HTML representation of this element."""
raise NotImplementedError
def text(self, *, use_js=False):
"""Get the plain text content for this element.
Args:
use_js: Whether to use javascript if the element isn't
content-editable.
"""
# FIXME:qtwebengine what to do about use_js with WebEngine?
raise NotImplementedError
def set_text(self, text, *, use_js=False):
"""Set the given plain text.
Args:
use_js: Whether to use javascript if the element isn't
content-editable.
"""
# FIXME:qtwebengine what to do about use_js with WebEngine?
raise NotImplementedError
def insert_text(self, text):
"""Insert the given text into the element."""
raise NotImplementedError
def rect_on_view(self, *, elem_geometry=None, no_js=False):
"""Get the geometry of the element relative to the webview.
Uses the getClientRects() JavaScript method to obtain the collection of
rectangles containing the element and returns the first rectangle which
is large enough (larger than 1px times 1px). If all rectangles returned
by getClientRects() are too small, falls back to elem.rect_on_view().
Skipping of small rectangles is due to <a> elements containing other
elements with "display:block" style, see
https://github.com/The-Compiler/qutebrowser/issues/1298
Args:
elem_geometry: The geometry of the element, or None.
Calling QWebElement::geometry is rather expensive so
we want to avoid doing it twice.
no_js: Fall back to the Python implementation
"""
raise NotImplementedError
def is_writable(self):
"""Check whether an element is writable."""
return not ('disabled' in self or 'readonly' in self)
def is_content_editable(self):
"""Check if an element has a contenteditable attribute.
Return:
True if the element has a contenteditable attribute,
False otherwise.
"""
try:
return self['contenteditable'].lower() not in ['false', 'inherit']
except KeyError:
return False
def _is_editable_object(self):
"""Check if an object-element is editable."""
if 'type' not in self:
log.webelem.debug("<object> without type clicked...")
return False
objtype = self['type'].lower()
if objtype.startswith('application/') or 'classid' in self:
# Let's hope flash/java stuff has an application/* mimetype OR
# at least a classid attribute. Oh, and let's hope images/...
# DON'T have a classid attribute. HTML sucks.
log.webelem.debug("<object type='{}'> clicked.".format(objtype))
return config.get('input', 'insert-mode-on-plugins')
else:
# Image/Audio/...
return False
def _is_editable_input(self):
"""Check if an input-element is editable.
Return:
True if the element is editable, False otherwise.
"""
try:
objtype = self['type'].lower()
except KeyError:
return self.is_writable()
else:
if objtype in ['text', 'email', 'url', 'tel', 'number', 'password',
'search']:
return self.is_writable()
else:
return False
def _is_editable_div(self):
"""Check if a div-element is editable.
Return:
True if the element is editable, False otherwise.
"""
# Beginnings of div-classes which are actually some kind of editor.
div_classes = ('CodeMirror', # Javascript editor over a textarea
'kix-', # Google Docs editor
'ace_') # http://ace.c9.io/
for klass in self.classes():
if any([klass.startswith(e) for e in div_classes]):
return True
return False
def is_editable(self, strict=False):
"""Check whether we should switch to insert mode for this element.
Args:
strict: Whether to do stricter checking so only fields where we can
get the value match, for use with the :editor command.
Return:
True if we should switch to insert mode, False otherwise.
"""
roles = ('combobox', 'textbox')
log.webelem.debug("Checking if element is editable: {}".format(
repr(self)))
tag = self.tag_name()
if self.is_content_editable() and self.is_writable():
return True
elif self.get('role', None) in roles and self.is_writable():
return True
elif tag == 'input':
return self._is_editable_input()
elif tag == 'textarea':
return self.is_writable()
elif tag in ['embed', 'applet']:
# Flash/Java/...
return config.get('input', 'insert-mode-on-plugins') and not strict
elif tag == 'object':
return self._is_editable_object() and not strict
elif tag == 'div':
return self._is_editable_div() and not strict
else:
return False
def is_text_input(self):
"""Check if this element is some kind of text box."""
roles = ('combobox', 'textbox')
tag = self.tag_name()
return self.get('role', None) in roles or tag in ['input', 'textarea']
def remove_blank_target(self):
"""Remove target from link."""
raise NotImplementedError
def resolve_url(self, baseurl):
"""Resolve the URL in the element's src/href attribute.
Args:
baseurl: The URL to base relative URLs on as QUrl.
Return:
A QUrl with the absolute URL, or None.
"""
if baseurl.isRelative():
raise ValueError("Need an absolute base URL!")
for attr in ['href', 'src']:
if attr in self:
text = self[attr].strip()
break
else:
return None
url = QUrl(text)
if not url.isValid():
return None
if url.isRelative():
url = baseurl.resolved(url)
qtutils.ensure_valid(url)
return url
def _mouse_pos(self):
"""Get the position to click/hover."""
# Click the center of the largest square fitting into the top/left
# corner of the rectangle, this will help if part of the <a> element
# is hidden behind other elements
# https://github.com/The-Compiler/qutebrowser/issues/1005
rect = self.rect_on_view()
if rect.width() > rect.height():
rect.setWidth(rect.height())
else:
rect.setHeight(rect.width())
pos = rect.center()
if pos.x() < 0 or pos.y() < 0:
raise Error("Element position is out of view!")
return pos
def click(self, click_target):
"""Simulate a click on the element."""
# FIXME:qtwebengine do we need this?
# self._widget.setFocus()
self._tab.data.override_target = click_target
pos = self._mouse_pos()
log.webelem.debug("Sending fake click to {!r} at position {} with "
"target {}".format(self, pos, click_target))
if click_target in [usertypes.ClickTarget.tab,
usertypes.ClickTarget.tab_bg,
usertypes.ClickTarget.window]:
modifiers = Qt.ControlModifier
else:
modifiers = Qt.NoModifier
events = [
QMouseEvent(QEvent.MouseMove, pos, Qt.NoButton, Qt.NoButton,
Qt.NoModifier),
QMouseEvent(QEvent.MouseButtonPress, pos, Qt.LeftButton,
Qt.LeftButton, modifiers),
QMouseEvent(QEvent.MouseButtonRelease, pos, Qt.LeftButton,
Qt.NoButton, modifiers),
]
for evt in events:
self._tab.send_event(evt)
def after_click():
"""Move cursor to end and reset override_target after clicking."""
if self.is_text_input() and self.is_editable():
self._tab.caret.move_to_end_of_document()
self._tab.data.override_target = None
QTimer.singleShot(0, after_click)
def hover(self):
"""Simulate a mouse hover over the element."""
pos = self._mouse_pos()
event = QMouseEvent(QEvent.MouseMove, pos, Qt.NoButton, Qt.NoButton,
Qt.NoModifier)
self._tab.send_event(event)
| gpl-3.0 | -7,513,154,092,897,436,000 | 32.926121 | 79 | 0.586405 | false |
mdavid/horizon | openstack_dashboard/dashboards/project/stacks/api.py | 95 | 2863 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from openstack_dashboard.api import heat
from openstack_dashboard.dashboards.project.stacks import mappings
from openstack_dashboard.dashboards.project.stacks import sro
class Stack(object):
pass
def d3_data(request, stack_id=''):
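    """Build the JSON payload (stack node plus resource nodes) used by the d3 topology view."""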
try:
stack = heat.stack_get(request, stack_id)
except Exception:
stack = Stack()
stack.id = stack_id
stack.stack_name = request.session.get('stack_name', '')
stack.stack_status = 'DELETE_COMPLETE'
stack.stack_status_reason = 'DELETE_COMPLETE'
try:
resources = heat.resources_list(request, stack.stack_name)
except Exception:
resources = []
d3_data = {"nodes": [], "stack": {}}
if stack:
stack_image = mappings.get_resource_image(stack.stack_status, 'stack')
stack_node = {
'stack_id': stack.id,
'name': stack.stack_name,
'status': stack.stack_status,
'image': stack_image,
'image_size': 60,
'image_x': -30,
'image_y': -30,
'text_x': 40,
'text_y': ".35em",
'in_progress': (stack.status == 'IN_PROGRESS'),
'info_box': sro.stack_info(stack, stack_image)
}
d3_data['stack'] = stack_node
if resources:
for resource in resources:
resource_image = mappings.get_resource_image(
resource.resource_status,
resource.resource_type)
resource_status = mappings.get_resource_status(
resource.resource_status)
if resource_status in ('IN_PROGRESS', 'INIT'):
in_progress = True
else:
in_progress = False
resource_node = {
'name': resource.resource_name,
'status': resource.resource_status,
'image': resource_image,
'required_by': resource.required_by,
'image_size': 50,
'image_x': -25,
'image_y': -25,
'text_x': 35,
'text_y': ".35em",
'in_progress': in_progress,
'info_box': sro.resource_info(resource)
}
d3_data['nodes'].append(resource_node)
return json.dumps(d3_data)
| apache-2.0 | -2,374,737,551,438,899,700 | 33.493976 | 78 | 0.574572 | false |
huziyizero/godot | platform/osx/detect.py | 12 | 3271 |
import os
import sys
def is_active():
return True
def get_name():
return "OSX"
def can_build():
if (sys.platform == "darwin" or os.environ.has_key("OSXCROSS_ROOT")):
return True
return False
def get_opts():
return [
('force_64_bits','Force 64 bits binary','no'),
('osxcross_sdk','OSXCross SDK version','darwin14'),
]
def get_flags():
return [
('legacygl', 'yes'),
('builtin_zlib', 'no'),
('glew', 'yes'),
]
def configure(env):
env.Append(CPPPATH=['#platform/osx'])
if (env["bits"]=="default"):
env["bits"]="32"
if (env["target"]=="release"):
env.Append(CCFLAGS=['-O2','-ffast-math','-fomit-frame-pointer','-ftree-vectorize','-msse2'])
elif (env["target"]=="release_debug"):
env.Append(CCFLAGS=['-O2','-DDEBUG_ENABLED'])
elif (env["target"]=="debug"):
env.Append(CCFLAGS=['-g3', '-Wall','-DDEBUG_ENABLED','-DDEBUG_MEMORY_ENABLED'])
if (not os.environ.has_key("OSXCROSS_ROOT")):
#regular native build
if (env["bits"]=="64"):
env.Append(CCFLAGS=['-arch', 'x86_64'])
env.Append(LINKFLAGS=['-arch', 'x86_64'])
elif (env["bits"]=="32"):
env.Append(CCFLAGS=['-arch', 'i386'])
env.Append(LINKFLAGS=['-arch', 'i386'])
else:
env.Append(CCFLAGS=['-arch', 'i386', '-arch', 'x86_64'])
env.Append(LINKFLAGS=['-arch', 'i386', '-arch', 'x86_64'])
else:
#osxcross build
root=os.environ.get("OSXCROSS_ROOT",0)
if env["bits"]=="64":
basecmd=root+"/target/bin/x86_64-apple-"+env["osxcross_sdk"]+"-"
else:
basecmd=root+"/target/bin/i386-apple-"+env["osxcross_sdk"]+"-"
env['CC'] = basecmd+"cc"
env['CXX'] = basecmd+"c++"
env['AR'] = basecmd+"ar"
env['RANLIB'] = basecmd+"ranlib"
env['AS'] = basecmd+"as"
# env.Append(CPPPATH=['#platform/osx/include/freetype2', '#platform/osx/include'])
# env.Append(LIBPATH=['#platform/osx/lib'])
env.Append(CPPFLAGS=["-DAPPLE_STYLE_KEYS"])
env.Append(CPPFLAGS=['-DUNIX_ENABLED','-DGLES2_ENABLED','-DOSX_ENABLED'])
env.Append(LIBS=['pthread'])
#env.Append(CPPFLAGS=['-F/Developer/SDKs/MacOSX10.4u.sdk/System/Library/Frameworks', '-isysroot', '/Developer/SDKs/MacOSX10.4u.sdk', '-mmacosx-version-min=10.4'])
#env.Append(LINKFLAGS=['-mmacosx-version-min=10.4', '-isysroot', '/Developer/SDKs/MacOSX10.4u.sdk', '-Wl,-syslibroot,/Developer/SDKs/MacOSX10.4u.sdk'])
env.Append(LINKFLAGS=['-framework', 'Cocoa', '-framework', 'Carbon', '-framework', 'OpenGL', '-framework', 'AGL', '-framework', 'AudioUnit','-lz'])
if (env["CXX"]=="clang++"):
env.Append(CPPFLAGS=['-DTYPED_METHOD_BIND'])
env["CC"]="clang"
env["LD"]="clang++"
if (env["colored"]=="yes"):
if sys.stdout.isatty():
env.Append(CPPFLAGS=["-fcolor-diagnostics"])
import methods
env.Append( BUILDERS = { 'GLSL120' : env.Builder(action = methods.build_legacygl_headers, suffix = 'glsl.h',src_suffix = '.glsl') } )
env.Append( BUILDERS = { 'GLSL' : env.Builder(action = methods.build_glsl_headers, suffix = 'glsl.h',src_suffix = '.glsl') } )
env.Append( BUILDERS = { 'GLSL120GLES' : env.Builder(action = methods.build_gles2_headers, suffix = 'glsl.h',src_suffix = '.glsl') } )
#env.Append( BUILDERS = { 'HLSL9' : env.Builder(action = methods.build_hlsl_dx9_headers, suffix = 'hlsl.h',src_suffix = '.hlsl') } )
env["x86_opt_gcc"]=True
| mit | -8,455,481,845,154,642,000 | 28.205357 | 163 | 0.628248 | false |
dmoliveira/networkx | networkx/algorithms/tree/tests/test_recognition.py | 54 | 4061 |
from nose.tools import *
import networkx as nx
class TestTreeRecognition(object):
graph = nx.Graph
multigraph = nx.MultiGraph
def setUp(self):
self.T1 = self.graph()
self.T2 = self.graph()
self.T2.add_node(1)
self.T3 = self.graph()
self.T3.add_nodes_from(range(5))
edges = [(i,i+1) for i in range(4)]
self.T3.add_edges_from(edges)
self.T5 = self.multigraph()
self.T5.add_nodes_from(range(5))
edges = [(i,i+1) for i in range(4)]
self.T5.add_edges_from(edges)
self.T6 = self.graph()
self.T6.add_nodes_from([6,7])
self.T6.add_edge(6,7)
self.F1 = nx.compose(self.T6, self.T3)
self.N4 = self.graph()
self.N4.add_node(1)
self.N4.add_edge(1,1)
self.N5 = self.graph()
self.N5.add_nodes_from(range(5))
self.N6 = self.graph()
self.N6.add_nodes_from(range(3))
self.N6.add_edges_from([(0,1),(1,2),(2,0)])
self.NF1 = nx.compose(self.T6,self.N6)
@raises(nx.NetworkXPointlessConcept)
def test_null_tree(self):
nx.is_tree(self.graph())
nx.is_tree(self.multigraph())
@raises(nx.NetworkXPointlessConcept)
def test_null_forest(self):
nx.is_forest(self.graph())
nx.is_forest(self.multigraph())
def test_is_tree(self):
assert_true(nx.is_tree(self.T2))
assert_true(nx.is_tree(self.T3))
assert_true(nx.is_tree(self.T5))
def test_is_not_tree(self):
assert_false(nx.is_tree(self.N4))
assert_false(nx.is_tree(self.N5))
assert_false(nx.is_tree(self.N6))
def test_is_forest(self):
assert_true(nx.is_forest(self.T2))
assert_true(nx.is_forest(self.T3))
assert_true(nx.is_forest(self.T5))
assert_true(nx.is_forest(self.F1))
assert_true(nx.is_forest(self.N5))
def test_is_not_forest(self):
assert_false(nx.is_forest(self.N4))
assert_false(nx.is_forest(self.N6))
assert_false(nx.is_forest(self.NF1))
class TestDirectedTreeRecognition(TestTreeRecognition):
graph = nx.DiGraph
multigraph = nx.MultiDiGraph
def test_disconnected_graph():
# https://github.com/networkx/networkx/issues/1144
G = nx.Graph()
G.add_edges_from([(0, 1), (1, 2), (2, 0), (3, 4)])
assert_false(nx.is_tree(G))
G = nx.DiGraph()
G.add_edges_from([(0, 1), (1, 2), (2, 0), (3, 4)])
assert_false(nx.is_tree(G))
def test_dag_nontree():
G = nx.DiGraph()
G.add_edges_from([(0,1), (0,2), (1,2)])
assert_false(nx.is_tree(G))
assert_true(nx.is_directed_acyclic_graph(G))
def test_multicycle():
G = nx.MultiDiGraph()
G.add_edges_from([(0,1), (0,1)])
assert_false(nx.is_tree(G))
assert_true(nx.is_directed_acyclic_graph(G))
def test_emptybranch():
G = nx.DiGraph()
G.add_nodes_from(range(10))
assert_true(nx.is_branching(G))
assert_false(nx.is_arborescence(G))
def test_path():
G = nx.DiGraph()
G.add_path(range(5))
assert_true(nx.is_branching(G))
assert_true(nx.is_arborescence(G))
def test_notbranching1():
# Acyclic violation.
G = nx.MultiDiGraph()
G.add_nodes_from(range(10))
G.add_edges_from([(0,1),(1,0)])
assert_false(nx.is_branching(G))
assert_false(nx.is_arborescence(G))
def test_notbranching2():
# In-degree violation.
G = nx.MultiDiGraph()
G.add_nodes_from(range(10))
G.add_edges_from([(0,1),(0,2),(3,2)])
assert_false(nx.is_branching(G))
assert_false(nx.is_arborescence(G))
def test_notarborescence1():
# Not an arborescence due to not spanning.
G = nx.MultiDiGraph()
G.add_nodes_from(range(10))
G.add_edges_from([(0,1),(0,2),(1,3),(5,6)])
assert_true(nx.is_branching(G))
assert_false(nx.is_arborescence(G))
def test_notarborescence2():
# Not an arborescence due to in-degree violation.
G = nx.MultiDiGraph()
G.add_path(range(5))
G.add_edge(6, 4)
assert_false(nx.is_branching(G))
assert_false(nx.is_arborescence(G))
| bsd-3-clause | -7,915,539,460,904,078,000 | 26.62585 | 55 | 0.602561 | false |
vivekanand1101/neutron | neutron/agent/rpc.py | 22 | 8851 | # Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from datetime import datetime
import itertools
from oslo_log import log as logging
import oslo_messaging
from oslo_utils import uuidutils
from neutron.common import constants
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.i18n import _LW
LOG = logging.getLogger(__name__)
def create_consumers(endpoints, prefix, topic_details, start_listening=True):
"""Create agent RPC consumers.
:param endpoints: The list of endpoints to process the incoming messages.
:param prefix: Common prefix for the plugin/agent message queues.
:param topic_details: A list of topics. Each topic has a name, an
operation, and an optional host param keying the
subscription to topic.host for plugin calls.
:param start_listening: if True, it starts the processing loop
:returns: A common Connection.
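    Example (illustrative endpoint and topic names)::
        connection = create_consumers(
            [AgentEndpoint()], topics.AGENT,
            [[topics.PORT, topics.UPDATE], [topics.NETWORK, topics.DELETE]])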
"""
connection = n_rpc.create_connection(new=True)
for details in topic_details:
topic, operation, node_name = itertools.islice(
itertools.chain(details, [None]), 3)
topic_name = topics.get_topic_name(prefix, topic, operation)
connection.create_consumer(topic_name, endpoints, fanout=True)
if node_name:
node_topic_name = '%s.%s' % (topic_name, node_name)
connection.create_consumer(node_topic_name,
endpoints,
fanout=False)
if start_listening:
connection.consume_in_threads()
return connection
class PluginReportStateAPI(object):
"""RPC client used to report state back to plugin.
This class implements the client side of an rpc interface. The server side
can be found in neutron.db.agents_db.AgentExtRpcCallback. For more
information on changing rpc interfaces, see doc/source/devref/rpc_api.rst.
"""
def __init__(self, topic):
target = oslo_messaging.Target(topic=topic, version='1.0',
namespace=constants.RPC_NAMESPACE_STATE)
self.client = n_rpc.get_client(target)
def report_state(self, context, agent_state, use_call=False):
cctxt = self.client.prepare()
# add unique identifier to a report
# that can be logged on server side.
# This create visible correspondence between events on
# the agent and on the server
agent_state['uuid'] = uuidutils.generate_uuid()
kwargs = {
'agent_state': {'agent_state': agent_state},
'time': datetime.utcnow().strftime(constants.ISO8601_TIME_FORMAT),
}
method = cctxt.call if use_call else cctxt.cast
return method(context, 'report_state', **kwargs)
class PluginApi(object):
'''Agent side of the rpc API.
API version history:
1.0 - Initial version.
1.3 - get_device_details rpc signature upgrade to obtain 'host' and
return value to include fixed_ips and device_owner for
the device port
1.4 - tunnel_sync rpc signature upgrade to obtain 'host'
1.5 - Support update_device_list and
get_devices_details_list_and_failed_devices
'''
def __init__(self, topic):
target = oslo_messaging.Target(topic=topic, version='1.0')
self.client = n_rpc.get_client(target)
def get_device_details(self, context, device, agent_id, host=None):
cctxt = self.client.prepare()
return cctxt.call(context, 'get_device_details', device=device,
agent_id=agent_id, host=host)
def get_devices_details_list(self, context, devices, agent_id, host=None):
try:
cctxt = self.client.prepare(version='1.3')
res = cctxt.call(context, 'get_devices_details_list',
devices=devices, agent_id=agent_id, host=host)
except oslo_messaging.UnsupportedVersion:
# If the server has not been upgraded yet, a DVR-enabled agent
# may not work correctly, however it can function in 'degraded'
# mode, in that DVR routers may not be in the system yet, and
# it might be not necessary to retrieve info about the host.
LOG.warn(_LW('DVR functionality requires a server upgrade.'))
res = [
self.get_device_details(context, device, agent_id, host)
for device in devices
]
return res
def get_devices_details_list_and_failed_devices(self, context, devices,
agent_id, host=None):
"""Get devices details and the list of devices that failed.
This method returns the devices details. If an error is thrown when
retrieving the devices details, the device is put in a list of
failed devices.
"""
try:
cctxt = self.client.prepare(version='1.5')
res = cctxt.call(
context,
'get_devices_details_list_and_failed_devices',
devices=devices, agent_id=agent_id, host=host)
except oslo_messaging.UnsupportedVersion:
            #TODO(rossella_s): Remove this fallback logic in M
res = self._device_list_rpc_call_with_failed_dev(
self.get_device_details, context, agent_id, host, devices)
return res
def update_device_down(self, context, device, agent_id, host=None):
cctxt = self.client.prepare()
return cctxt.call(context, 'update_device_down', device=device,
agent_id=agent_id, host=host)
def update_device_up(self, context, device, agent_id, host=None):
cctxt = self.client.prepare()
return cctxt.call(context, 'update_device_up', device=device,
agent_id=agent_id, host=host)
def _device_list_rpc_call_with_failed_dev(self, rpc_call, context,
agent_id, host, devices):
succeeded_devices = []
failed_devices = []
for device in devices:
try:
rpc_device = rpc_call(context, device, agent_id, host)
except Exception:
failed_devices.append(device)
else:
# update_device_up doesn't return the device
succeeded_dev = rpc_device or device
succeeded_devices.append(succeeded_dev)
return {'devices': succeeded_devices, 'failed_devices': failed_devices}
def update_device_list(self, context, devices_up, devices_down,
agent_id, host):
try:
cctxt = self.client.prepare(version='1.5')
res = cctxt.call(context, 'update_device_list',
devices_up=devices_up, devices_down=devices_down,
agent_id=agent_id, host=host)
except oslo_messaging.UnsupportedVersion:
            #TODO(rossella_s): Remove this fallback logic in M
dev_up = self._device_list_rpc_call_with_failed_dev(
self.update_device_up, context, agent_id, host, devices_up)
dev_down = self._device_list_rpc_call_with_failed_dev(
self.update_device_down, context, agent_id, host, devices_down)
res = {'devices_up': dev_up.get('devices'),
'failed_devices_up': dev_up.get('failed_devices'),
'devices_down': dev_down.get('devices'),
'failed_devices_down': dev_down.get('failed_devices')}
return res
def tunnel_sync(self, context, tunnel_ip, tunnel_type=None, host=None):
try:
cctxt = self.client.prepare(version='1.4')
res = cctxt.call(context, 'tunnel_sync', tunnel_ip=tunnel_ip,
tunnel_type=tunnel_type, host=host)
except oslo_messaging.UnsupportedVersion:
LOG.warn(_LW('Tunnel synchronization requires a server upgrade.'))
cctxt = self.client.prepare()
res = cctxt.call(context, 'tunnel_sync', tunnel_ip=tunnel_ip,
tunnel_type=tunnel_type)
return res
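# Illustrative sketch (not part of the original module): consuming the result
# of PluginApi.get_devices_details_list_and_failed_devices(). The device ids
# and helper name are assumptions for demonstration only.
def _example_fetch_device_details(plugin_rpc, context, agent_id, host):
    res = plugin_rpc.get_devices_details_list_and_failed_devices(
        context, ['port-id-1', 'port-id-2'], agent_id, host)
    # Both keys are present whether the new (1.5) RPC or the per-device
    # fallback path was used.
    return res['devices'], res['failed_devices']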
| apache-2.0 | -8,646,328,556,412,745,000 | 42.600985 | 79 | 0.609875 | false |
gbosh/mezzanine | mezzanine/urls.py | 1 | 3462 | """
This is the main ``urlconf`` for Mezzanine - it sets up patterns for
all the various Mezzanine apps, third-party apps like Grappelli and
filebrowser.
"""
from django.conf.urls.defaults import patterns, include
from django.contrib import admin
from django.contrib.admin.sites import NotRegistered
from django.http import HttpResponse
from mezzanine.conf import settings
from mezzanine.core.sitemaps import DisplayableSitemap
# Remove unwanted models from the admin that are installed by default with
# third-party apps.
for model in settings.ADMIN_REMOVAL:
try:
model = tuple(model.rsplit(".", 1))
exec "from %s import %s" % model
except ImportError:
pass
else:
try:
admin.site.unregister(eval(model[1]))
except NotRegistered:
pass
urlpatterns = []
# Django's sitemap app.
if "django.contrib.sitemaps" in settings.INSTALLED_APPS:
sitemaps = {"sitemaps": {"all": DisplayableSitemap}}
urlpatterns += patterns("django.contrib.sitemaps.views",
("^sitemap\.xml$", "sitemap", sitemaps)
)
# Return a robots.txt that disallows all spiders when DEBUG is True.
if getattr(settings, "DEBUG", False):
urlpatterns += patterns("",
("^robots.txt$", lambda r: HttpResponse("User-agent: *\nDisallow: /",
mimetype="text/plain")),
)
# Filebrowser admin media library.
if getattr(settings, "PACKAGE_NAME_FILEBROWSER") in settings.INSTALLED_APPS:
urlpatterns += patterns("",
("^admin/media-library/", include("%s.urls" %
settings.PACKAGE_NAME_FILEBROWSER)),
)
# Miscellanous Mezzanine patterns.
urlpatterns += patterns("",
("^", include("mezzanine.core.urls")),
("^", include("mezzanine.generic.urls")),
)
# Mezzanine's Blog app.
blog_installed = "mezzanine.blog" in settings.INSTALLED_APPS
if blog_installed:
BLOG_SLUG = settings.BLOG_SLUG.rstrip("/")
blog_patterns = patterns("",
("^%s" % BLOG_SLUG, include("mezzanine.blog.urls")),
)
urlpatterns += blog_patterns
# Mezzanine's Accounts app
_old_accounts_enabled = getattr(settings, "ACCOUNTS_ENABLED", False)
if _old_accounts_enabled:
import warnings
warnings.warn("The setting ACCOUNTS_ENABLED is deprecated. Please "
"add mezzanine.accounts to INSTALLED_APPS.")
if _old_accounts_enabled or "mezzanine.accounts" in settings.INSTALLED_APPS:
# We don't define a URL prefix here such as /account/ since we want
# to honour the LOGIN_* settings, which Django has prefixed with
# /account/ by default. So those settings are used in accounts.urls
urlpatterns += patterns("",
("^", include("mezzanine.accounts.urls")),
)
# Mezzanine's Pages app.
PAGES_SLUG = ""
if "mezzanine.pages" in settings.INSTALLED_APPS:
# No BLOG_SLUG means catch-all patterns belong to the blog,
# so give pages their own prefix and inject them before the
# blog urlpatterns.
if blog_installed and not BLOG_SLUG:
PAGES_SLUG = getattr(settings, "PAGES_SLUG", "pages").strip("/") + "/"
blog_patterns_start = urlpatterns.index(blog_patterns[0])
urlpatterns[blog_patterns_start:len(blog_patterns)] = patterns("",
("^%s" % unicode(PAGES_SLUG), include("mezzanine.pages.urls")),
)
else:
urlpatterns += patterns("",
("^", include("mezzanine.pages.urls")),
)
| bsd-2-clause | -1,704,780,481,293,052,700 | 34.326531 | 78 | 0.656268 | false |
bgxavier/nova | nova/virt/ironic/ironic_states.py | 36 | 4259 | # Copyright (c) 2012 NTT DOCOMO, INC.
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Mapping of bare metal node states.
Setting the node `power_state` is handled by the conductor's power
synchronization thread. Based on the power state retrieved from the driver
for the node, the state is set to POWER_ON or POWER_OFF, accordingly.
Should this fail, the `power_state` value is left unchanged, and the node
is placed into maintenance mode.
The `power_state` can also be set manually via the API. A failure to change
the state leaves the current state unchanged. The node is NOT placed into
maintenance mode in this case.
"""
#####################
# Provisioning states
#####################
NOSTATE = None
""" No state information.
This state is used with power_state to represent a lack of knowledge of
power state, and in target_*_state fields when there is no target.
Prior to the Kilo release, Ironic set node.provision_state to NOSTATE
when the node was available for provisioning. During the Kilo cycle, this was
changed to the AVAILABLE state.
"""
MANAGEABLE = 'manageable'
""" Node is in a manageable state.
This state indicates that Ironic has verified, at least once, that it had
sufficient information to manage the hardware. While in this state, the node
is not available for provisioning (it must be in the AVAILABLE state for that).
"""
AVAILABLE = 'available'
""" Node is available for use and scheduling.
This state is replacing the NOSTATE state used prior to Kilo.
"""
ACTIVE = 'active'
""" Node is successfully deployed and associated with an instance. """
DEPLOYWAIT = 'wait call-back'
""" Node is waiting to be deployed.
This will be the node `provision_state` while the node is waiting for
the driver to finish deployment.
"""
DEPLOYING = 'deploying'
""" Node is ready to receive a deploy request, or is currently being deployed.
A node will have its `provision_state` set to DEPLOYING briefly before it
receives its initial deploy request. It will also move to this state from
DEPLOYWAIT after the callback is triggered and deployment is continued
(disk partitioning and image copying).
"""
DEPLOYFAIL = 'deploy failed'
""" Node deployment failed. """
DEPLOYDONE = 'deploy complete'
""" Node was successfully deployed.
This is mainly a target provision state used during deployment. A successfully
deployed node should go to ACTIVE status.
"""
DELETING = 'deleting'
""" Node is actively being torn down. """
DELETED = 'deleted'
""" Node tear down was successful.
In Juno, target_provision_state was set to this value during node tear down.
In Kilo, this will be a transitory value of provision_state, and never
represented in target_provision_state.
"""
CLEANING = 'cleaning'
""" Node is being automatically cleaned to prepare it for provisioning. """
CLEANFAIL = 'clean failed'
""" Node failed cleaning. This requires operator intervention to resolve. """
ERROR = 'error'
""" An error occurred during node processing.
The `last_error` attribute of the node details should contain an error message.
"""
REBUILD = 'rebuild'
""" Node is to be rebuilt.
This is not used as a state, but rather as a "verb" when changing the node's
provision_state via the REST API.
"""
INSPECTING = 'inspecting'
""" Node is under inspection.
This is the provision state used when inspection is started. A successfully
inspected node shall transition to MANAGEABLE status.
"""
INSPECTFAIL = 'inspect failed'
""" Node inspection failed. """
##############
# Power states
##############
POWER_ON = 'power on'
""" Node is powered on. """
POWER_OFF = 'power off'
""" Node is powered off. """
REBOOT = 'rebooting'
""" Node is rebooting. """
| apache-2.0 | -5,459,125,732,780,691,000 | 29.421429 | 79 | 0.73374 | false |
great-expectations/great_expectations | tests/test_ge_utils.py | 1 | 11911 | import copy
import os
import pytest
import great_expectations as ge
from great_expectations.core.util import nested_update
from great_expectations.dataset.util import check_sql_engine_dialect
from great_expectations.util import (
filter_properties_dict,
get_currently_executing_function_call_arguments,
lint_code,
)
def test_validate_non_dataset(file_data_asset, empty_expectation_suite):
with pytest.raises(
ValueError, match=r"The validate util method only supports dataset validations"
):
with pytest.warns(
Warning,
match="No great_expectations version found in configuration object.",
):
ge.validate(
file_data_asset,
empty_expectation_suite,
data_asset_class=ge.data_asset.FileDataAsset,
)
def test_validate_dataset(dataset, basic_expectation_suite):
res = ge.validate(dataset, basic_expectation_suite)
# assert res.success is True # will not be true for mysql, where "infinities" column is missing
assert res["statistics"]["evaluated_expectations"] == 4
if isinstance(dataset, ge.dataset.PandasDataset):
res = ge.validate(
dataset,
expectation_suite=basic_expectation_suite,
data_asset_class=ge.dataset.PandasDataset,
)
assert res.success is True
assert res["statistics"]["evaluated_expectations"] == 4
with pytest.raises(
ValueError,
match=r"The validate util method only supports validation for subtypes of the provided data_asset_type",
):
ge.validate(
dataset,
basic_expectation_suite,
data_asset_class=ge.dataset.SqlAlchemyDataset,
)
elif (
isinstance(dataset, ge.dataset.SqlAlchemyDataset)
and dataset.sql_engine_dialect.name.lower() != "mysql"
):
res = ge.validate(
dataset,
expectation_suite=basic_expectation_suite,
data_asset_class=ge.dataset.SqlAlchemyDataset,
)
assert res.success is True
assert res["statistics"]["evaluated_expectations"] == 4
with pytest.raises(
ValueError,
match=r"The validate util method only supports validation for subtypes of the provided data_asset_type",
):
ge.validate(
dataset,
expectation_suite=basic_expectation_suite,
data_asset_class=ge.dataset.PandasDataset,
)
elif (
isinstance(dataset, ge.dataset.SqlAlchemyDataset)
and dataset.sql_engine_dialect.name.lower() == "mysql"
):
# mysql cannot use the infinities column
res = ge.validate(
dataset,
expectation_suite=basic_expectation_suite,
data_asset_class=ge.dataset.SqlAlchemyDataset,
)
assert res.success is False
assert res["statistics"]["evaluated_expectations"] == 4
with pytest.raises(
ValueError,
match=r"The validate util method only supports validation for subtypes of the provided data_asset_type",
):
ge.validate(
dataset,
expectation_suite=basic_expectation_suite,
data_asset_class=ge.dataset.PandasDataset,
)
elif isinstance(dataset, ge.dataset.SparkDFDataset):
res = ge.validate(
dataset, basic_expectation_suite, data_asset_class=ge.dataset.SparkDFDataset
)
assert res.success is True
assert res["statistics"]["evaluated_expectations"] == 4
with pytest.raises(
ValueError,
match=r"The validate util method only supports validation for subtypes of the provided data_asset_type",
):
ge.validate(
dataset,
expectation_suite=basic_expectation_suite,
data_asset_class=ge.dataset.PandasDataset,
)
def test_validate_using_data_context(
dataset, data_context_parameterized_expectation_suite
):
# Before running, the data context should not have compiled parameters
assert (
data_context_parameterized_expectation_suite._evaluation_parameter_dependencies_compiled
is False
)
res = ge.validate(
dataset,
expectation_suite_name="my_dag_node.default",
data_context=data_context_parameterized_expectation_suite,
)
# Since the handling of evaluation parameters is no longer happening without an action,
    # the context should still not be compiled after validation.
assert (
data_context_parameterized_expectation_suite._evaluation_parameter_dependencies_compiled
is False
)
# And, we should have validated the right number of expectations from the context-provided config
assert res.success is False
assert res.statistics["evaluated_expectations"] == 2
def test_validate_using_data_context_path(
dataset, data_context_parameterized_expectation_suite
):
data_context_path = data_context_parameterized_expectation_suite.root_directory
res = ge.validate(
dataset,
expectation_suite_name="my_dag_node.default",
data_context=data_context_path,
)
# We should have now found the right suite with expectations to evaluate
assert res.success is False
assert res["statistics"]["evaluated_expectations"] == 2
def test_validate_invalid_parameters(
dataset, basic_expectation_suite, data_context_parameterized_expectation_suite
):
with pytest.raises(
ValueError,
match="Either an expectation suite or a DataContext is required for validation.",
):
ge.validate(dataset)
def test_gen_directory_tree_str(tmp_path_factory):
project_dir = str(tmp_path_factory.mktemp("project_dir"))
os.mkdir(os.path.join(project_dir, "BBB"))
with open(os.path.join(project_dir, "BBB", "bbb.txt"), "w") as f:
f.write("hello")
with open(os.path.join(project_dir, "BBB", "aaa.txt"), "w") as f:
f.write("hello")
os.mkdir(os.path.join(project_dir, "AAA"))
print(ge.util.gen_directory_tree_str(project_dir))
    # Note: files and directories are sorted alphabetically, so that this method can be used for testing.
assert (
ge.util.gen_directory_tree_str(project_dir)
== """\
project_dir0/
AAA/
BBB/
aaa.txt
bbb.txt
"""
)
def test_nested_update():
    # nested_update is useful for updating nested dictionaries (such as batch_kwargs with reader_options as a dictionary)
batch_kwargs = {
"path": "/a/path",
"reader_method": "read_csv",
"reader_options": {"header": 0},
}
nested_update(batch_kwargs, {"reader_options": {"nrows": 1}})
assert batch_kwargs == {
"path": "/a/path",
"reader_method": "read_csv",
"reader_options": {"header": 0, "nrows": 1},
}
def test_nested_update_lists():
    # nested_update is useful for updating nested dictionaries (such as batch_kwargs with reader_options as a dictionary)
dependencies = {
"suite.warning": {"metric.name": ["column=foo"]},
"suite.failure": {"metric.blarg": [""]},
}
new_dependencies = {
"suite.warning": {
"metric.other_name": ["column=foo"],
"metric.name": ["column=bar"],
}
}
nested_update(dependencies, new_dependencies)
assert dependencies == {
"suite.warning": {
"metric.name": ["column=foo", "column=bar"],
"metric.other_name": ["column=foo"],
},
"suite.failure": {"metric.blarg": [""]},
}
def test_linter_raises_error_on_non_string_input():
with pytest.raises(TypeError):
lint_code(99)
def test_linter_changes_dirty_code():
code = "foo = [1,2,3]"
assert lint_code(code) == "foo = [1, 2, 3]\n"
def test_linter_leaves_clean_code():
code = "foo = [1, 2, 3]\n"
assert lint_code(code) == "foo = [1, 2, 3]\n"
def test_get_currently_executing_function_call_arguments(a=None, *args, **kwargs):
if a is None:
test_get_currently_executing_function_call_arguments(0, 1, 2, 3, b=5)
else:
assert a == 0
assert args == (1, 2, 3)
assert kwargs == {"b": 5}
params = get_currently_executing_function_call_arguments(
**{
"additional_param_0": "xyz_0",
"additional_param_1": "xyz_1",
"additional_param_2": "xyz_2",
}
)
assert params["a"] == 0
assert params["args"] == (1, 2, 3)
assert params["b"] == 5
assert params["additional_param_0"] == "xyz_0"
assert params["additional_param_1"] == "xyz_1"
assert params["additional_param_2"] == "xyz_2"
def test_filter_properties_dict():
source_dict: dict = {
"integer_zero": 0,
"null": None,
"string": "xyz_0",
"integer_one": 1,
"scientific_notation_floating_point_number": 9.8e1,
}
d0_begin: dict = copy.deepcopy(source_dict)
with pytest.raises(ValueError):
# noinspection PyUnusedLocal
d0_end: dict = filter_properties_dict(
properties=d0_begin,
keep_fields=["string"],
delete_fields=["integer_zero", "scientific_notation_floating_point_number"],
clean_falsy=True,
)
d0_end: dict = filter_properties_dict(properties=d0_begin, clean_falsy=True)
d0_end_expected = copy.deepcopy(d0_begin)
d0_end_expected.pop("null")
assert d0_end == d0_end_expected
d1_begin: dict = copy.deepcopy(source_dict)
d1_end: dict = filter_properties_dict(
properties=d1_begin,
clean_nulls=False,
)
d1_end_expected = copy.deepcopy(d1_begin)
assert d1_end == d1_end_expected
d2_begin: dict = copy.deepcopy(source_dict)
d2_end: dict = filter_properties_dict(
properties=d2_begin,
clean_nulls=True,
clean_falsy=False,
)
d2_end_expected = copy.deepcopy(d2_begin)
d2_end_expected.pop("null")
assert d2_end == d2_end_expected
d3_begin: dict = copy.deepcopy(source_dict)
d3_end: dict = filter_properties_dict(
properties=d3_begin,
keep_fields=["null"],
clean_falsy=True,
)
d3_end_expected = {"null": None}
assert d3_end == d3_end_expected
d4_begin: dict = copy.deepcopy(source_dict)
d4_end: dict = filter_properties_dict(
properties=d4_begin,
clean_falsy=True,
keep_falsy_numerics=False,
)
d4_end_expected = copy.deepcopy(d4_begin)
d4_end_expected.pop("integer_zero")
d4_end_expected.pop("null")
assert d4_end == d4_end_expected
d5_begin: dict = copy.deepcopy(source_dict)
d5_end: dict = filter_properties_dict(
properties=d5_begin,
keep_fields=["integer_zero", "scientific_notation_floating_point_number"],
clean_falsy=True,
)
d5_end_expected = {
"integer_zero": 0,
"scientific_notation_floating_point_number": 9.8e1,
}
assert d5_end == d5_end_expected
d6_begin: dict = copy.deepcopy(source_dict)
d6_end: dict = filter_properties_dict(
properties=d6_begin,
delete_fields=["integer_zero", "scientific_notation_floating_point_number"],
clean_falsy=True,
)
d6_end_expected = {"string": "xyz_0", "integer_one": 1}
assert d6_end == d6_end_expected
d7_begin: dict = copy.deepcopy(source_dict)
filter_properties_dict(
properties=d7_begin,
delete_fields=["integer_zero", "scientific_notation_floating_point_number"],
clean_falsy=True,
inplace=True,
)
d7_end = copy.deepcopy(d7_begin)
d7_end_expected = {"string": "xyz_0", "integer_one": 1}
assert d7_end == d7_end_expected
| apache-2.0 | -8,535,863,983,150,321,000 | 32.086111 | 119 | 0.616741 | false |
lgeiger/ide-python | dist/debugger/VendorLib/vs-py-debugger/pythonFiles/experimental/ptvsd/ptvsd/_vendored/pydevd/pydev_ipython/inputhookqt5.py | 7 | 7286 | # -*- coding: utf-8 -*-
"""
Qt5's inputhook support function
Author: Christian Boos
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import signal
import threading
from pydev_ipython.qt_for_kernel import QtCore, QtGui
from pydev_ipython.inputhook import allow_CTRL_C, ignore_CTRL_C, stdin_ready
# To minimise future merging complexity, rather than edit the entire code base
# below, we fake InteractiveShell here.
class InteractiveShell:
_instance = None
@classmethod
def instance(cls):
if cls._instance is None:
cls._instance = cls()
return cls._instance
def set_hook(self, *args, **kwargs):
# We don't consider the pre_prompt_hook because we don't have
# KeyboardInterrupts to consider since we are running under PyDev
pass
#-----------------------------------------------------------------------------
# Module Globals
#-----------------------------------------------------------------------------
got_kbdint = False
sigint_timer = None
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
def create_inputhook_qt5(mgr, app=None):
"""Create an input hook for running the Qt5 application event loop.
Parameters
----------
mgr : an InputHookManager
app : Qt Application, optional.
Running application to use. If not given, we probe Qt for an
existing application object, and create a new one if none is found.
Returns
-------
A pair consisting of a Qt Application (either the one given or the
one found or created) and a inputhook.
Notes
-----
We use a custom input hook instead of PyQt5's default one, as it
interacts better with the readline packages (issue #481).
The inputhook function works in tandem with a 'pre_prompt_hook'
which automatically restores the hook as an inputhook in case the
latter has been temporarily disabled after having intercepted a
KeyboardInterrupt.
"""
if app is None:
app = QtCore.QCoreApplication.instance()
if app is None:
from PyQt5 import QtWidgets
app = QtWidgets.QApplication([" "])
# Re-use previously created inputhook if any
ip = InteractiveShell.instance()
if hasattr(ip, '_inputhook_qt5'):
return app, ip._inputhook_qt5
# Otherwise create the inputhook_qt5/preprompthook_qt5 pair of
# hooks (they both share the got_kbdint flag)
def inputhook_qt5():
"""PyOS_InputHook python hook for Qt5.
Process pending Qt events and if there's no pending keyboard
input, spend a short slice of time (50ms) running the Qt event
loop.
As a Python ctypes callback can't raise an exception, we catch
the KeyboardInterrupt and temporarily deactivate the hook,
which will let a *second* CTRL+C be processed normally and go
back to a clean prompt line.
"""
try:
allow_CTRL_C()
app = QtCore.QCoreApplication.instance()
if not app: # shouldn't happen, but safer if it happens anyway...
return 0
app.processEvents(QtCore.QEventLoop.AllEvents, 300)
if not stdin_ready():
# Generally a program would run QCoreApplication::exec()
# from main() to enter and process the Qt event loop until
# quit() or exit() is called and the program terminates.
#
# For our input hook integration, we need to repeatedly
# enter and process the Qt event loop for only a short
# amount of time (say 50ms) to ensure that Python stays
# responsive to other user inputs.
#
# A naive approach would be to repeatedly call
# QCoreApplication::exec(), using a timer to quit after a
# short amount of time. Unfortunately, QCoreApplication
# emits an aboutToQuit signal before stopping, which has
# the undesirable effect of closing all modal windows.
#
# To work around this problem, we instead create a
# QEventLoop and call QEventLoop::exec(). Other than
# setting some state variables which do not seem to be
# used anywhere, the only thing QCoreApplication adds is
# the aboutToQuit signal which is precisely what we are
# trying to avoid.
timer = QtCore.QTimer()
event_loop = QtCore.QEventLoop()
timer.timeout.connect(event_loop.quit)
while not stdin_ready():
timer.start(50)
event_loop.exec_()
timer.stop()
except KeyboardInterrupt:
global got_kbdint, sigint_timer
ignore_CTRL_C()
got_kbdint = True
mgr.clear_inputhook()
# This generates a second SIGINT so the user doesn't have to
# press CTRL+C twice to get a clean prompt.
#
# Since we can't catch the resulting KeyboardInterrupt here
# (because this is a ctypes callback), we use a timer to
# generate the SIGINT after we leave this callback.
#
# Unfortunately this doesn't work on Windows (SIGINT kills
# Python and CTRL_C_EVENT doesn't work).
if(os.name == 'posix'):
pid = os.getpid()
if(not sigint_timer):
sigint_timer = threading.Timer(.01, os.kill,
args=[pid, signal.SIGINT] )
sigint_timer.start()
else:
print("\nKeyboardInterrupt - Ctrl-C again for new prompt")
except: # NO exceptions are allowed to escape from a ctypes callback
ignore_CTRL_C()
from traceback import print_exc
print_exc()
print("Got exception from inputhook_qt5, unregistering.")
mgr.clear_inputhook()
finally:
allow_CTRL_C()
return 0
def preprompthook_qt5(ishell):
"""'pre_prompt_hook' used to restore the Qt5 input hook
(in case the latter was temporarily deactivated after a
CTRL+C)
"""
global got_kbdint, sigint_timer
if(sigint_timer):
sigint_timer.cancel()
sigint_timer = None
if got_kbdint:
mgr.set_inputhook(inputhook_qt5)
got_kbdint = False
ip._inputhook_qt5 = inputhook_qt5
ip.set_hook('pre_prompt_hook', preprompthook_qt5)
return app, inputhook_qt5
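# Illustrative sketch (not part of the original module): wiring the hook into
# an InputHookManager. "mgr" is assumed to be the manager instance from
# pydev_ipython.inputhook.
def _example_enable_qt5_inputhook(mgr):
    # Create (or reuse) the Qt application and register the hook so the Qt
    # event loop keeps running while Python waits for console input.
    app, hook = create_inputhook_qt5(mgr)
    mgr.set_inputhook(hook)
    return app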
| mit | -2,663,700,316,365,340,700 | 35.984772 | 84 | 0.549822 | false |
kalefranz/auxlib | tests/test_path.py | 1 | 2148 | # # -*- coding: utf-8 -*-
# import logging
# from unittest import TestCase
#
# from auxlib import logz
# from auxlib.path import PackageFile, find_file_in_site_packages, open_package_file
#
# log = logging.getLogger(__name__)
#
#
# class PackageFileTests(TestCase):
#
# @classmethod
# def setUpClass(cls):
# logz.set_root_level(logging.INFO)
# logz.attach_stderr(logging.DEBUG)
# assert not logz.attach_stderr()
#
# @classmethod
# def tearDownClass(self):
# logz.detach_stderr()
# assert not logz.detach_stderr()
#
# def test_find_python_file_in_package(self):
# with PackageFile('path.py', 'auxlib') as fh:
# lines = fh.readlines()
# assert any(line.startswith(b'class PackageFile(object):') for line in lines)
#
# def test_find_python_file_in_package_subdirectory(self):
# with PackageFile('_vendor/five.py', 'auxlib') as fh:
# lines = fh.readlines()
# assert any(line.startswith(b'PY3 = sys.version_info[0] == 3') for line in lines)
#
# def test_package_resources_paths(self):
# with PackageFile('AES.py', 'Crypto.Cipher') as fh:
# lines = fh.readlines()
# assert any(line.startswith(b'class AESCipher') for line in lines)
#
# def test_package_resources_paths_subdirectory(self):
# with PackageFile('Cipher/AES.py', 'Crypto') as fh:
# lines = fh.readlines()
# assert any(line.startswith(b'class AESCipher') for line in lines)
#
# def test_site_packages_paths(self):
# with open(find_file_in_site_packages('AES.py', 'Crypto.Cipher')) as fh:
# lines = fh.readlines()
# assert any(line.startswith('class AESCipher') for line in lines)
#
# def test_site_packages_paths_subdirectory(self):
# with open(find_file_in_site_packages('Cipher/AES.py', 'Crypto')) as fh:
# lines = fh.readlines()
# assert any(line.startswith('class AESCipher') for line in lines)
#
# def test_no_file_found(self):
# self.assertRaises(IOError, open_package_file, 'not-a-file.txt', 'auxlib')
| isc | -4,274,642,817,118,182,000 | 38.054545 | 94 | 0.619181 | false |
cwelton/incubator-hawq | tools/bin/lib/gpgetconfig.py | 12 | 3572 | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
'''
gpgetconfig -- obtain gp_configuration
Usage: gpgetconfig [-f] [-u user] -d master_data_directory
-f : if necessary, force start up and shutdown of DB to obtain configuration
Exit: 0 - no error
1 - misc error
2 - unable to connect to database
'''
import os, sys
os.putenv('PGHOST', '')
os.putenv("PGOPTIONS", '-c gp_session_role=utility')
os.putenv('PGDATABASE', 'template1')
class __globals__:
opt = {}
for o in 'ud': opt['-'+o] = ''
for o in 'f': opt['-'+o] = False
GV = __globals__()
############
def usage(exitarg):
print __doc__
sys.exit(exitarg)
############
def parseCommandLine():
import getopt
try:
(options, args) = getopt.getopt(sys.argv[1:], '?fu:d:')
except Exception, e:
sys.stderr.write('Error: %s\n' % str(e))
usage(1)
for (switch, val) in options:
if switch == '-?': usage(0)
elif switch[1] in 'ud': GV.opt[switch] = val
elif switch[1] in 'f': GV.opt[switch] = True
if not GV.opt['-d']:
usage('Error: missing -d param')
############
def setPort():
port = 0
f = None
try:
f = open(os.path.join(GV.opt['-d'], 'postgresql.conf'))
lines = f.readlines()
lines = map(lambda x: x.strip().split('='), lines)
lines = filter(lambda x: len(x) and x[0] == 'port', lines)
port = int( (lines[0][1].split()) [0])
except Exception, e:
pass
finally:
if f: f.close()
if port == 0:
sys.stderr.write('Error: unable to read port number from %s/postgresql.conf' %
GV.opt['-d'])
sys.exit(1)
os.putenv('PGPORT', str(port))
############
def getConfiguration():
CMD = """psql -At -q -c "select content, preferred_role='p' as definedprimary, dbid, role = 'p' as isprimary, 't' as valid, hostname, port, fse\
location as datadir from gp_segment_configuration join pg_filespace_entry on (dbid = fsedbid) where fsefsoid = 3052" 2> /dev/null"""
p = os.popen(CMD)
out = p.readlines()
rc = p.close()
return (rc, out)
############
def main():
parseCommandLine()
if GV.opt['-u']:
os.putenv('PGUSER', GV.opt['-u'])
os.putenv('MASTER_DATA_DIRECTORY', GV.opt['-d'])
setPort()
(rc, out) = getConfiguration()
if rc:
if not GV.opt['-f']:
sys.stderr.write('Error: psql unable to connect\n')
sys.exit(2)
os.putenv('GPSTART_INTERNAL_MASTER_ONLY', '1')
p = os.popen("gpstart -m")
p.readlines()
p.close()
(rc, out) = getConfiguration()
p = os.popen("gpstop -m")
p.readlines()
p.close()
if rc:
sys.stderr.write('Error: psql still unable to connect after bouncing\n')
sys.exit(1)
out = filter(lambda x: x, map(lambda x: x.strip(), out))
for line in out:
print '[gpgetconfig]',line
if __name__ == '__main__':
main()
| apache-2.0 | 4,923,039,520,281,521,000 | 25.459259 | 148 | 0.62318 | false |
marco-lancini/Showcase | django/db/models/loading.py | 308 | 8745 | "Utilities for loading models and the modules that contain them."
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.datastructures import SortedDict
from django.utils.importlib import import_module
from django.utils.module_loading import module_has_submodule
import imp
import sys
import os
import threading
__all__ = ('get_apps', 'get_app', 'get_models', 'get_model', 'register_models',
'load_app', 'app_cache_ready')
class AppCache(object):
"""
A cache that stores installed applications and their models. Used to
provide reverse-relations and for app introspection (e.g. admin).
"""
# Use the Borg pattern to share state between all instances. Details at
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66531.
__shared_state = dict(
# Keys of app_store are the model modules for each application.
app_store = SortedDict(),
# Mapping of app_labels to a dictionary of model names to model code.
app_models = SortedDict(),
# Mapping of app_labels to errors raised when trying to import the app.
app_errors = {},
# -- Everything below here is only used when populating the cache --
loaded = False,
handled = {},
postponed = [],
nesting_level = 0,
write_lock = threading.RLock(),
_get_models_cache = {},
)
def __init__(self):
self.__dict__ = self.__shared_state
def _populate(self):
"""
Fill in all the cache information. This method is threadsafe, in the
sense that every caller will see the same state upon return, and if the
cache is already initialised, it does no work.
"""
if self.loaded:
return
self.write_lock.acquire()
try:
if self.loaded:
return
for app_name in settings.INSTALLED_APPS:
if app_name in self.handled:
continue
self.load_app(app_name, True)
if not self.nesting_level:
for app_name in self.postponed:
self.load_app(app_name)
self.loaded = True
finally:
self.write_lock.release()
def load_app(self, app_name, can_postpone=False):
"""
Loads the app with the provided fully qualified name, and returns the
model module.
"""
self.handled[app_name] = None
self.nesting_level += 1
app_module = import_module(app_name)
try:
models = import_module('.models', app_name)
except ImportError:
self.nesting_level -= 1
# If the app doesn't have a models module, we can just ignore the
# ImportError and return no models for it.
if not module_has_submodule(app_module, 'models'):
return None
# But if the app does have a models module, we need to figure out
# whether to suppress or propagate the error. If can_postpone is
# True then it may be that the package is still being imported by
# Python and the models module isn't available yet. So we add the
# app to the postponed list and we'll try it again after all the
# recursion has finished (in populate). If can_postpone is False
# then it's time to raise the ImportError.
else:
if can_postpone:
self.postponed.append(app_name)
return None
else:
raise
self.nesting_level -= 1
if models not in self.app_store:
self.app_store[models] = len(self.app_store)
return models
def app_cache_ready(self):
"""
Returns true if the model cache is fully populated.
Useful for code that wants to cache the results of get_models() for
themselves once it is safe to do so.
"""
return self.loaded
def get_apps(self):
"Returns a list of all installed modules that contain models."
self._populate()
# Ensure the returned list is always in the same order (with new apps
# added at the end). This avoids unstable ordering on the admin app
# list page, for example.
apps = [(v, k) for k, v in self.app_store.items()]
apps.sort()
return [elt[1] for elt in apps]
def get_app(self, app_label, emptyOK=False):
"""
Returns the module containing the models for the given app_label. If
the app has no models in it and 'emptyOK' is True, returns None.
"""
self._populate()
self.write_lock.acquire()
try:
for app_name in settings.INSTALLED_APPS:
if app_label == app_name.split('.')[-1]:
mod = self.load_app(app_name, False)
if mod is None:
if emptyOK:
return None
else:
return mod
raise ImproperlyConfigured("App with label %s could not be found" % app_label)
finally:
self.write_lock.release()
def get_app_errors(self):
"Returns the map of known problems with the INSTALLED_APPS."
self._populate()
return self.app_errors
def get_models(self, app_mod=None, include_auto_created=False, include_deferred=False):
"""
Given a module containing models, returns a list of the models.
Otherwise returns a list of all installed models.
By default, auto-created models (i.e., m2m models without an
explicit intermediate table) are not included. However, if you
specify include_auto_created=True, they will be.
By default, models created to satisfy deferred attribute
queries are *not* included in the list of models. However, if
you specify include_deferred, they will be.
"""
cache_key = (app_mod, include_auto_created, include_deferred)
try:
return self._get_models_cache[cache_key]
except KeyError:
pass
self._populate()
if app_mod:
app_list = [self.app_models.get(app_mod.__name__.split('.')[-2], SortedDict())]
else:
app_list = self.app_models.itervalues()
model_list = []
for app in app_list:
model_list.extend(
model for model in app.values()
if ((not model._deferred or include_deferred)
and (not model._meta.auto_created or include_auto_created))
)
self._get_models_cache[cache_key] = model_list
return model_list
def get_model(self, app_label, model_name, seed_cache=True):
"""
Returns the model matching the given app_label and case-insensitive
model_name.
Returns None if no model is found.
"""
if seed_cache:
self._populate()
return self.app_models.get(app_label, SortedDict()).get(model_name.lower())
def register_models(self, app_label, *models):
"""
Register a set of models as belonging to an app.
"""
for model in models:
# Store as 'name: model' pair in a dictionary
# in the app_models dictionary
model_name = model._meta.object_name.lower()
model_dict = self.app_models.setdefault(app_label, SortedDict())
if model_name in model_dict:
# The same model may be imported via different paths (e.g.
# appname.models and project.appname.models). We use the source
# filename as a means to detect identity.
fname1 = os.path.abspath(sys.modules[model.__module__].__file__)
fname2 = os.path.abspath(sys.modules[model_dict[model_name].__module__].__file__)
# Since the filename extension could be .py the first time and
# .pyc or .pyo the second time, ignore the extension when
# comparing.
if os.path.splitext(fname1)[0] == os.path.splitext(fname2)[0]:
continue
model_dict[model_name] = model
self._get_models_cache.clear()
cache = AppCache()
# These methods were always module level, so are kept that way for backwards
# compatibility.
get_apps = cache.get_apps
get_app = cache.get_app
get_app_errors = cache.get_app_errors
get_models = cache.get_models
get_model = cache.get_model
register_models = cache.register_models
load_app = cache.load_app
app_cache_ready = cache.app_cache_ready
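# Illustrative sketch (not part of the original module): typical use of the
# module-level helpers. The app label and model name are assumptions for
# demonstration only.
def _example_model_lookup():
    # Case-insensitive lookup; returns None if no matching model is installed.
    entry_model = get_model('zinnia', 'entry')
    # All models from every installed app (auto-created m2m models excluded
    # by default).
    return entry_model, get_models()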
| mit | -4,780,139,768,806,437,000 | 37.524229 | 97 | 0.590166 | false |
AMOSus/amos-ss16-proj6 | DataProcessing/test_dataProcessing.py | 1 | 3169 | #!/usr/bin/env python
# This file is part of Rogue Vision.
#
# Copyright (C) 2016 Daniel Reischl, Rene Rathmann, Peter Tan,
# Tobias Dorsch, Shefali Shukla, Vignesh Govindarajulu,
# Aleksander Penew, Abhinav Puri
#
# Rogue Vision is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Rogue Vision is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Rogue Vision. If not, see <http://www.gnu.org/licenses/>.
# This file includes all tests for dataProcessing
import unittest
import os
import dataProcessingFunctions
# This class tests if all necessary folders exist
class testFolderExistence (unittest.TestCase):
# Folder InitialData
def test_FolderInitialData(self):
res = True
self.assertEqual(res, os.path.isdir("/srv/DataProcessing/InitialData"))
# Folder InitialDataArchive
def test_FolderInitialDataArchive(self):
res = True
self.assertEqual(res, os.path.isdir("/srv/DataProcessing/InitialDataArchive"))
# Folder CarrierData
def test_FolderCarrierData(self):
res = True
self.assertEqual(res, os.path.isdir("/srv/DataProcessing/CarrierData"))
# Folder CarrierDataArchive
def test_FolderCarrierDataArchive(self):
res = True
self.assertEqual(res, os.path.isdir('/srv/DataProcessing/CarrierDataArchive'))
# Checks that all necessary files exist
class testFileExistence (unittest.TestCase):
# compressInitialData.py
def test_CompressIntitialData (self):
res = True
self.assertEqual (res, os.path.exists('/srv/DataProcessing/compressInitialData.py'))
# writeCarrierDataToDataBase
def test_WriteDataToDatabase(self):
res = True
self.assertEqual(res, os.path.exists('/srv/DataProcessing/writeCarrierDataToDataBase.py'))
# setConstants.py
def test_configFile(self):
res = True
self.assertEqual(res, os.path.exists('/srv/DataProcessing/settings.cfg'))
# dataProcessingFunctions.py
def test_dataProcessingFunctions(self):
res = True
self.assertEqual(res, os.path.exists('/srv/DataProcessing/dataProcessingFunctions.py'))
class testRunningFile (unittest.TestCase):
# Tests if Running.txt is created
def test_CreationOfRunningFile(self):
res = True
dataProcessingFunctions.createRunningFile()
self.assertEqual(res, os.path.exists('/srv/DataProcessing/Running.txt'))
# Tests if Running.txt is deleted
def test_DeleteOfRunningFile(self):
res = False
dataProcessingFunctions.deleteRunningFile()
self.assertEqual(res, os.path.exists('/srv/DataProcessing/Running.txt'))
if __name__ == '__main__':
unittest.main() | agpl-3.0 | 2,673,997,930,981,394,400 | 35.022727 | 98 | 0.714421 | false |
1844144/django-blog-zinnia | zinnia/migrations/0009_change_mptt_field.py | 4 | 8003 | from south.db import db
from south.v2 import SchemaMigration
from zinnia.migrations import user_name
from zinnia.migrations import user_table
from zinnia.migrations import user_orm_label
from zinnia.migrations import user_model_label
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Category.parent'
db.alter_column('zinnia_category', 'parent_id', self.gf('mptt.fields.TreeForeignKey')(null=True, to=orm['zinnia.Category']))
def backwards(self, orm):
# Changing field 'Category.parent'
db.alter_column('zinnia_category', 'parent_id', self.gf('django.db.models.fields.related.ForeignKey')(null=True, to=orm['zinnia.Category']))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
user_model_label: {
'Meta': {'object_name': user_name, 'db_table': "'%s'" % user_table},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 5, 11, 10, 16, 27, 936575)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 5, 11, 10, 16, 27, 936424)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'zinnia.category': {
'Meta': {'ordering': "['title']", 'object_name': 'Category'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['zinnia.Category']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'zinnia.entry': {
'Meta': {'ordering': "['-creation_date']", 'object_name': 'Entry'},
'authors': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'entries'", 'blank': 'True', 'to': "orm['%s']" % user_orm_label}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'entries'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['zinnia.Category']"}),
'comment_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'content': ('django.db.models.fields.TextField', [], {}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'end_publication': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2042, 3, 15, 0, 0)'}),
'excerpt': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'pingback_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'related': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'related_rel_+'", 'null': 'True', 'to': "orm['zinnia.Entry']"}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'entries'", 'symmetrical': 'False', 'to': "orm['sites.Site']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
'start_publication': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tags': ('tagging.fields.TagField', [], {}),
'template': ('django.db.models.fields.CharField', [], {'default': "'entry_detail.html'", 'max_length': '250'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['zinnia']
| bsd-3-clause | -5,560,119,401,173,851,000 | 77.460784 | 202 | 0.565538 | false |
Lezval/horizon | django-openstack/django_openstack/tests/view_tests/dash/security_groups_tests.py | 5 | 13975 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import http
from django.contrib import messages
from django.core.urlresolvers import reverse
from django_openstack import api
from django_openstack.tests.view_tests import base
from glance.common import exception as glance_exception
from openstackx.api import exceptions as api_exceptions
from novaclient import exceptions as novaclient_exceptions
from mox import IgnoreArg, IsA
class SecurityGroupsViewTests(base.BaseViewTests):
def setUp(self):
super(SecurityGroupsViewTests, self).setUp()
security_group = self.mox.CreateMock(api.SecurityGroup)
security_group.name = 'default'
self.security_groups = (security_group,)
def test_index(self):
self.mox.StubOutWithMock(api, 'security_group_list')
api.security_group_list(IsA(http.HttpRequest)).\
AndReturn(self.security_groups)
self.mox.ReplayAll()
res = self.client.get(reverse('dash_security_groups',
args=[self.TEST_TENANT]))
self.assertTemplateUsed(res,
'django_openstack/dash/security_groups/index.html')
self.assertItemsEqual(res.context['security_groups'],
self.security_groups)
self.mox.VerifyAll()
def test_index_exception(self):
exception = novaclient_exceptions.ClientException('ClientException',
message='ClientException')
self.mox.StubOutWithMock(api, 'security_group_list')
api.security_group_list(IsA(http.HttpRequest)).AndRaise(exception)
self.mox.StubOutWithMock(messages, 'error')
messages.error(IsA(http.HttpRequest), IsA(basestring))
self.mox.ReplayAll()
res = self.client.get(reverse('dash_security_groups',
args=[self.TEST_TENANT]))
self.assertTemplateUsed(res,
'django_openstack/dash/security_groups/index.html')
self.assertEqual(len(res.context['security_groups']), 0)
self.mox.VerifyAll()
def test_create_security_groups_get(self):
res = self.client.get(reverse('dash_security_groups_create',
args=[self.TEST_TENANT]))
self.assertTemplateUsed(res,
'django_openstack/dash/security_groups/create.html')
def test_create_security_groups_post(self):
SECGROUP_NAME = 'fakegroup'
SECGROUP_DESC = 'fakegroup_desc'
new_group = self.mox.CreateMock(api.SecurityGroup)
new_group.name = SECGROUP_NAME
formData = {'method': 'CreateGroup',
'tenant_id': self.TEST_TENANT,
'name': SECGROUP_NAME,
'description': SECGROUP_DESC,
}
self.mox.StubOutWithMock(api, 'security_group_create')
api.security_group_create(IsA(http.HttpRequest),
SECGROUP_NAME, SECGROUP_DESC).AndReturn(new_group)
self.mox.ReplayAll()
res = self.client.post(reverse('dash_security_groups_create',
args=[self.TEST_TENANT]),
formData)
self.assertRedirectsNoFollow(res, reverse('dash_security_groups',
args=[self.TEST_TENANT]))
self.mox.VerifyAll()
def test_create_security_groups_post_exception(self):
SECGROUP_NAME = 'fakegroup'
SECGROUP_DESC = 'fakegroup_desc'
exception = novaclient_exceptions.ClientException('ClientException',
message='ClientException')
formData = {'method': 'CreateGroup',
'tenant_id': self.TEST_TENANT,
'name': SECGROUP_NAME,
'description': SECGROUP_DESC,
}
self.mox.StubOutWithMock(api, 'security_group_create')
api.security_group_create(IsA(http.HttpRequest),
SECGROUP_NAME, SECGROUP_DESC).AndRaise(exception)
self.mox.ReplayAll()
res = self.client.post(reverse('dash_security_groups_create',
args=[self.TEST_TENANT]),
formData)
self.assertTemplateUsed(res,
'django_openstack/dash/security_groups/create.html')
self.mox.VerifyAll()
def test_edit_rules_get(self):
SECGROUP_ID = '1'
self.mox.StubOutWithMock(api, 'security_group_get')
api.security_group_get(IsA(http.HttpRequest), SECGROUP_ID).AndReturn(
self.security_groups[0])
self.mox.ReplayAll()
res = self.client.get(reverse('dash_security_groups_edit_rules',
args=[self.TEST_TENANT, SECGROUP_ID]))
self.assertTemplateUsed(res,
'django_openstack/dash/security_groups/edit_rules.html')
self.assertItemsEqual(res.context['security_group'].name,
self.security_groups[0].name)
self.mox.VerifyAll()
def test_edit_rules_get_exception(self):
SECGROUP_ID = '1'
exception = novaclient_exceptions.ClientException('ClientException',
message='ClientException')
self.mox.StubOutWithMock(api, 'security_group_get')
api.security_group_get(IsA(http.HttpRequest), SECGROUP_ID).AndRaise(
exception)
self.mox.ReplayAll()
res = self.client.get(reverse('dash_security_groups_edit_rules',
args=[self.TEST_TENANT, SECGROUP_ID]))
self.assertRedirectsNoFollow(res, reverse('dash_security_groups',
args=[self.TEST_TENANT]))
self.mox.VerifyAll()
def test_edit_rules_add_rule(self):
SECGROUP_ID = '1'
RULE_ID = '1'
FROM_PORT = '-1'
TO_PORT = '-1'
IP_PROTOCOL = 'icmp'
CIDR = '0.0.0.0/0'
new_rule = self.mox.CreateMock(api.SecurityGroup)
new_rule.from_port = FROM_PORT
new_rule.to_port = TO_PORT
new_rule.ip_protocol = IP_PROTOCOL
new_rule.cidr = CIDR
new_rule.security_group_id = SECGROUP_ID
new_rule.id = RULE_ID
formData = {'method': 'AddRule',
'tenant_id': self.TEST_TENANT,
'security_group_id': SECGROUP_ID,
'from_port': FROM_PORT,
'to_port': TO_PORT,
'ip_protocol': IP_PROTOCOL,
'cidr': CIDR}
self.mox.StubOutWithMock(api, 'security_group_rule_create')
api.security_group_rule_create(IsA(http.HttpRequest),
SECGROUP_ID, IP_PROTOCOL, FROM_PORT, TO_PORT, CIDR)\
.AndReturn(new_rule)
self.mox.StubOutWithMock(messages, 'info')
messages.info(IsA(http.HttpRequest), IsA(basestring))
self.mox.ReplayAll()
res = self.client.post(reverse('dash_security_groups_edit_rules',
args=[self.TEST_TENANT, SECGROUP_ID]),
formData)
self.assertRedirectsNoFollow(res,
reverse('dash_security_groups_edit_rules',
args=[self.TEST_TENANT, SECGROUP_ID]))
self.mox.VerifyAll()
def test_edit_rules_add_rule_exception(self):
exception = novaclient_exceptions.ClientException('ClientException',
message='ClientException')
SECGROUP_ID = '1'
RULE_ID = '1'
FROM_PORT = '-1'
TO_PORT = '-1'
IP_PROTOCOL = 'icmp'
CIDR = '0.0.0.0/0'
formData = {'method': 'AddRule',
'tenant_id': self.TEST_TENANT,
'security_group_id': SECGROUP_ID,
'from_port': FROM_PORT,
'to_port': TO_PORT,
'ip_protocol': IP_PROTOCOL,
'cidr': CIDR}
self.mox.StubOutWithMock(api, 'security_group_rule_create')
api.security_group_rule_create(IsA(http.HttpRequest),
SECGROUP_ID, IP_PROTOCOL, FROM_PORT,
TO_PORT, CIDR).AndRaise(exception)
self.mox.StubOutWithMock(messages, 'error')
messages.error(IsA(http.HttpRequest), IsA(basestring))
self.mox.ReplayAll()
res = self.client.post(reverse('dash_security_groups_edit_rules',
args=[self.TEST_TENANT, SECGROUP_ID]),
formData)
self.assertRedirectsNoFollow(res,
reverse('dash_security_groups_edit_rules',
args=[self.TEST_TENANT, SECGROUP_ID]))
self.mox.VerifyAll()
def test_edit_rules_delete_rule(self):
SECGROUP_ID = '1'
RULE_ID = '1'
formData = {'method': 'DeleteRule',
'tenant_id': self.TEST_TENANT,
'security_group_rule_id': RULE_ID,
}
self.mox.StubOutWithMock(api, 'security_group_rule_delete')
api.security_group_rule_delete(IsA(http.HttpRequest), RULE_ID)
self.mox.StubOutWithMock(messages, 'info')
messages.info(IsA(http.HttpRequest), IsA(unicode))
self.mox.ReplayAll()
res = self.client.post(reverse('dash_security_groups_edit_rules',
args=[self.TEST_TENANT, SECGROUP_ID]),
formData)
self.assertRedirectsNoFollow(res,
reverse('dash_security_groups_edit_rules',
args=[self.TEST_TENANT, SECGROUP_ID]))
self.mox.VerifyAll()
def test_edit_rules_delete_rule_exception(self):
exception = novaclient_exceptions.ClientException('ClientException',
message='ClientException')
SECGROUP_ID = '1'
RULE_ID = '1'
formData = {'method': 'DeleteRule',
'tenant_id': self.TEST_TENANT,
'security_group_rule_id': RULE_ID,
}
self.mox.StubOutWithMock(api, 'security_group_rule_delete')
api.security_group_rule_delete(IsA(http.HttpRequest), RULE_ID).\
AndRaise(exception)
self.mox.StubOutWithMock(messages, 'error')
messages.error(IsA(http.HttpRequest), IsA(basestring))
self.mox.ReplayAll()
res = self.client.post(reverse('dash_security_groups_edit_rules',
args=[self.TEST_TENANT, SECGROUP_ID]),
formData)
self.assertRedirectsNoFollow(res,
reverse('dash_security_groups_edit_rules',
args=[self.TEST_TENANT, SECGROUP_ID]))
self.mox.VerifyAll()
def test_delete_group(self):
SECGROUP_ID = '1'
formData = {'method': 'DeleteGroup',
'tenant_id': self.TEST_TENANT,
'security_group_id': SECGROUP_ID,
}
self.mox.StubOutWithMock(api, 'security_group_delete')
api.security_group_delete(IsA(http.HttpRequest), SECGROUP_ID)
self.mox.StubOutWithMock(messages, 'info')
messages.info(IsA(http.HttpRequest), IsA(unicode))
self.mox.ReplayAll()
res = self.client.post(reverse('dash_security_groups',
args=[self.TEST_TENANT]),
formData)
self.assertRedirectsNoFollow(res, reverse('dash_security_groups',
args=[self.TEST_TENANT]))
self.mox.VerifyAll()
def test_delete_group_exception(self):
exception = novaclient_exceptions.ClientException('ClientException',
message='ClientException')
SECGROUP_ID = '1'
formData = {'method': 'DeleteGroup',
'tenant_id': self.TEST_TENANT,
'security_group_id': SECGROUP_ID,
}
self.mox.StubOutWithMock(api, 'security_group_delete')
api.security_group_delete(IsA(http.HttpRequest), SECGROUP_ID).\
AndRaise(exception)
self.mox.StubOutWithMock(messages, 'error')
messages.error(IsA(http.HttpRequest), IsA(basestring))
self.mox.ReplayAll()
res = self.client.post(reverse('dash_security_groups',
args=[self.TEST_TENANT]),
formData)
self.assertRedirectsNoFollow(res, reverse('dash_security_groups',
args=[self.TEST_TENANT]))
self.mox.VerifyAll()
| apache-2.0 | -6,839,621,232,211,050,000 | 36.567204 | 79 | 0.550698 | false |
listamilton/supermilton.repository | plugin.video.traquinas/resources/lib/libraries/f4mproxy/utils/openssl_tripledes.py | 202 | 1788 | # Author: Trevor Perrin
# See the LICENSE file for legal information regarding use of this file.
"""OpenSSL/M2Crypto 3DES implementation."""
from .cryptomath import *
from .tripledes import *
if m2cryptoLoaded:
def new(key, mode, IV):
return OpenSSL_TripleDES(key, mode, IV)
class OpenSSL_TripleDES(TripleDES):
def __init__(self, key, mode, IV):
TripleDES.__init__(self, key, mode, IV, "openssl")
self.key = key
self.IV = IV
def _createContext(self, encrypt):
context = m2.cipher_ctx_new()
cipherType = m2.des_ede3_cbc()
m2.cipher_init(context, cipherType, self.key, self.IV, encrypt)
return context
def encrypt(self, plaintext):
TripleDES.encrypt(self, plaintext)
context = self._createContext(1)
ciphertext = m2.cipher_update(context, plaintext)
m2.cipher_ctx_free(context)
self.IV = ciphertext[-self.block_size:]
return bytearray(ciphertext)
def decrypt(self, ciphertext):
TripleDES.decrypt(self, ciphertext)
context = self._createContext(0)
#I think M2Crypto has a bug - it fails to decrypt and return the last block passed in.
#To work around this, we append sixteen zeros to the string, below:
plaintext = m2.cipher_update(context, ciphertext+('\0'*16))
#If this bug is ever fixed, then plaintext will end up having a garbage
#plaintext block on the end. That's okay - the below code will ignore it.
plaintext = plaintext[:len(ciphertext)]
m2.cipher_ctx_free(context)
self.IV = ciphertext[-self.block_size:]
return bytearray(plaintext) | gpl-2.0 | -4,782,939,536,038,871,000 | 37.06383 | 98 | 0.611298 | false |
zooniverse/aggregation | experimental/penguins/newCluster.py | 2 | 11987 | #!/usr/bin/env python
__author__ = 'greg'
from sklearn.cluster import DBSCAN
from sklearn.cluster import AffinityPropagation
import numpy as np
import matplotlib.pyplot as plt
import csv
import sys
import os
import pymongo
import matplotlib.cbook as cbook
import cPickle as pickle
import shutil
import urllib
import math
def dist(c1,c2):
return math.sqrt((c1[0]-c2[0])**2 + (c1[1]-c2[1])**2)
def adaptiveDBSCAN(XYpts,user_ids):
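    """Cluster the markings for one subject with DBSCAN, adapting epsilon.
    The epsilon radius is increased while the closest clusters still have
    completely disjoint user sets (markings from different users should be
    merged into one cluster), and any resulting cluster that contains the
    same user more than once is re-clustered with progressively smaller
    epsilon values (split step). Returns a list of (x, y) cluster centres.
    """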
if XYpts == []:
return []
pts_in_each_cluster = []
users_in_each_cluster = []
cluster_centers = []
#increase the epsilon until we don't have any nearby clusters corresponding to non-overlapping
#sets of users
X = np.array(XYpts)
for epsilon in [5,10,15,20,25,30]:
db = DBSCAN(eps=epsilon, min_samples=2).fit(X)
labels = db.labels_
pts_in_each_cluster = []
users_in_each_cluster = []
cluster_centers = []
for k in sorted(set(labels)):
if k == -1:
continue
class_member_mask = (labels == k)
pts_in_cluster = list(X[class_member_mask])
xSet,ySet = zip(*pts_in_cluster)
cluster_centers.append((np.mean(xSet),np.mean(ySet)))
pts_in_each_cluster.append(pts_in_cluster[:])
users_in_each_cluster.append([u for u,l in zip(user_ids,labels) if l == k])
#do we have any adjacent clusters with non-overlapping sets of users
#if so, we should merge them by increasing the epsilon value
cluster_compare = []
for cluster_index, (c1,users) in enumerate(zip(cluster_centers,users_in_each_cluster)):
            for c2, users2 in zip(cluster_centers[cluster_index+1:], users_in_each_cluster[cluster_index+1:]):
overlappingUsers = [u for u in users if u in users2]
cluster_compare.append((dist(c1,c2),overlappingUsers))
cluster_compare.sort(key = lambda x:x[0])
needToMerge = [] in [c[1] for c in cluster_compare[:10]]
if not(needToMerge):
break
print epsilon
print [c[1] for c in cluster_compare[:10]]
centers_to_return = []
#do we need to split any clusters?
for cluster_index in range(len(cluster_centers)):
print "splitting"
needToSplit = (sorted(users_in_each_cluster[cluster_index]) != sorted(list(set(users_in_each_cluster[cluster_index]))))
if needToSplit:
subcluster_centers = []
X = np.array(pts_in_each_cluster[cluster_index])
for epsilon in [30,25,20,15,10,5,1,0.1,0.01]:
db = DBSCAN(eps=epsilon, min_samples=2).fit(X)
labels = db.labels_
subcluster_centers = []
needToSplit = False
for k in sorted(set(labels)):
if k == -1:
continue
class_member_mask = (labels == k)
users_in_subcluster = [u for u,l in zip(users_in_each_cluster[cluster_index],labels) if l == k]
needToSplit = (sorted(users_in_subcluster) != sorted(list(set(users_in_subcluster))))
if needToSplit:
break
pts_in_cluster = list(X[class_member_mask])
xSet,ySet = zip(*pts_in_cluster)
subcluster_centers.append((np.mean(xSet),np.mean(ySet)))
if not(needToSplit):
break
assert not(needToSplit)
centers_to_return.extend(subcluster_centers)
#if needToSplit:
# print pts_in_each_cluster[cluster_index]
# print users_in_each_cluster[cluster_index]
#else:
else:
centers_to_return.append(cluster_centers[cluster_index])
return centers_to_return
# def cluster(XYpts,user_ids):
# if XYpts == []:
# return []
#
# #find out which points are noise - don't care about the actual clusters
# needToSplit = False
# X = np.array(XYpts)
#
#
# #X = np.array([XYpts[i] for i in signal_pts])
# #user_ids = [user_ids[i] for i in signal_pts]
# oldCenters = None
#
# needToMerge = False
# needToSplit = False
#
# cluster_list = []
# usersInCluster = []
# centers = []
#
# for pref in [0,-100,-200,-400,-800,-1200,-2000,-2200,-2400,-2700,-3000,-3500,-4000,-5000,-6000,-10000]:
# #now run affinity propagation to find the actual clusters
# af = AffinityPropagation(preference=pref).fit(X)
# #cluster_centers_indices = af.cluster_centers_indices_
# labels = af.labels_
#
#
#
# unique_labels = set(labels)
#
# usersInCluster = []
# centers = []
# cluster_list = []
# for k in sorted(unique_labels):
# assert(k != -1)
# #print k
# usersInCluster.append([u for u,l in zip(user_ids,labels) if l == k])
# #print XYpts
# #print user_ids
#
# class_member_mask = (labels == k)
# pts_in_cluster = list(X[class_member_mask])
# xSet,ySet = zip(*pts_in_cluster)
# centers.append((np.mean(xSet),np.mean(ySet)))
# cluster_list.append(pts_in_cluster[:])
#
# compare = []
# for cluster_index, (c1,users) in enumerate(zip(centers,usersInCluster)):
# for cluster_index, (c2,users2) in enumerate(zip(centers[cluster_index+1:],usersInCluster[cluster_index+1:])):
# overlappingUsers = [u for u in users if u in users2]
# compare.append((dist(c1,c2),overlappingUsers))
#
# #needToSplit = False
# #for users in usersInCluster:
# # needToSplit = (sorted(users) != sorted(list(set(users))))
# # if needToSplit:
# # break
#
# compare.sort(key = lambda x:x[0])
#
# needToMerge = ([] in [c[1] for c in compare[:3]]) and (compare[-1][0] <= 200)
#
# #if needToSplit:
# # assert(oldCenters != None)
# # return oldCenters
# if not(needToMerge):
# break
#
# oldCenters = centers[:]
#
# if needToMerge:
# print compare[0:3]
# assert not(needToMerge)
#
# centers_to_return = []
# for cluster_index in range(len(cluster_list)):
# if len(list(set(usersInCluster[cluster_index]))) == 1:
# continue
# #split any individual cluster
# needToSplit = (sorted(usersInCluster[cluster_index]) != sorted(list(set(usersInCluster[cluster_index]))))
# if needToSplit:
# #print cluster_list[cluster_index]
# X = np.array(cluster_list[cluster_index])
# sub_center_list = []
# for pref in [-2400,-2200,-2000,-1200,-800,-400,-200,-100,-75,-50,-30,0]:
# af = AffinityPropagation(preference=pref).fit(X)
# #cluster_centers_indices = af.cluster_centers_indices_
# labels = af.labels_
# try:
# unique_labels = set(labels)
# except TypeError:
# print pref
# print X
# print usersInCluster[cluster_index]
# print labels
# raise
# #get the new "sub"clusters and check to see if we need to split even more
# for k in sorted(unique_labels):
# users = [u for u,l in zip(usersInCluster[cluster_index],labels) if l == k]
# needToSplit = (sorted(users) != sorted(list(set(users))))
#
# if needToSplit:
# break
#
# #add this new sub-cluster onto the list
# class_member_mask = (labels == k)
# pts_in_cluster = list(X[class_member_mask])
# xSet,ySet = zip(*pts_in_cluster)
# sub_center_list.append((np.mean(xSet),np.mean(ySet)))
#
# if not(needToSplit):
# break
#
# #if pref == 0:
# # print sub_center_list
# assert not(needToSplit)
# #print pref
# centers_to_return.extend([c for c in sub_center_list if len(c) > 1])
#
#
#
# else:
# centers_to_return.append(centers[cluster_index])
#
# assert not(needToSplit)
# return centers
client = pymongo.MongoClient()
db = client['penguin_2014-09-19']
collection = db["penguin_classifications"]
collection2 = db["penguin_subjects"]
images = {}
pts = {}
ids = {}
userCount = {}
errorCount = 0
total = 0
at_5 = {}
at_10 = {}
center_5 = {}
center_10 = {}
step_1 = 5
step_2 = 8
toSkip = ["APZ0002uw3","APZ0001v9f","APZ00010ww","APZ0000p99","APZ0002jc3","APZ00014t4","APZ0000v0n","APZ0000ifx","APZ0002pch","APZ0003kls","APZ0001iv3","APZ0003auc","APZ0002ezn"]
mainSubject = "APZ0003fgt" #APZ0001jre
toPlot = None
numClassifications = []
for r in collection.find():
subject_id = r["subjects"][0]["zooniverse_id"]
total += 1
if subject_id != "APZ0003kls":# in toSkip:
continue
if not(subject_id in pts):
pts[subject_id] = []
userCount[subject_id] = 0
ids[subject_id] = []
userCount[subject_id] += 1
animalsPresent = r["annotations"][0]["value"] == "yes"
#print animalsPresent
if animalsPresent:
c = 0
for marking_index in r["annotations"][1]["value"]:
try:
marking = r["annotations"][1]["value"][marking_index]
if True: # marking["value"] == "adult":
x = float(marking["x"])
y = float(marking["y"])
ip = r["user_ip"]
alreadyInList = False
try:
index = pts[subject_id].index((x,y))
if ids[subject_id][index] == ip:
alreadyInList = True
except ValueError:
pass
if not(alreadyInList):
pts[subject_id].append((x,y))
ids[subject_id].append(ip)
c += 1
except TypeError:
errorCount += 1
userCount[subject_id] += -1
break
except ValueError:
errorCount += 1
continue
numClassifications.append(c)
if userCount[subject_id] in [step_2]:
cluster_center = adaptiveDBSCAN(pts[subject_id],ids[subject_id])
mainSubject = subject_id
if cluster_center != []:
break
if userCount[subject_id] == step_1:
pass
#at_5[subject_id] = len(cluster_center)
else:
at_10[subject_id] = len(cluster_center)
# inBoth = [subject_id for subject_id in at_10 if (subject_id in at_5)]
# # print len(inBoth)
# x = [at_5[subject_id] for subject_id in inBoth]
# y = [at_10[subject_id] for subject_id in inBoth]
# print zip(inBoth,zip(x,y))
# plt.plot((0,100),(0,100),'--')
# # #print x
# # #print y
# plt.plot(x,y,'.')
# plt.show()
# print userCount
# print numClassifications
#
#
print mainSubject
r2 = collection2.find_one({"zooniverse_id":mainSubject})
url = r2["location"]["standard"]
if not(os.path.isfile("/home/greg/Databases/penguins/images/"+mainSubject+".JPG")):
urllib.urlretrieve (url, "/home/greg/Databases/penguins/images/"+mainSubject+".JPG")
image_file = cbook.get_sample_data("/home/greg/Databases/penguins/images/"+mainSubject+".JPG")
image = plt.imread(image_file)
fig, ax = plt.subplots()
im = ax.imshow(image)
#plt.show()
#
if cluster_center != []:
x,y = zip(*cluster_center)
plt.plot(x,y,'.',color='blue')
#
# x,y = zip(*center_5[mainSubject])
# plt.plot(x,y,'.',color='red')
# x,y = zip(*center_10[mainSubject])
# plt.plot(x,y,'.',color='green')
plt.show() | apache-2.0 | 7,761,811,715,038,455,000 | 31.053476 | 179 | 0.546342 | false |
stadtgestalten/stadtgestalten | docs/deployment/settings.py | 1 | 2052 | # grouprise settings file
# see https://docs.djangoproject.com/en/2.1/ref/settings/
import os
import subprocess
from stadt.settings.default import *
from grouprise.core.assets import add_javascript_reference, add_javascript_inline, add_csp_directive, add_meta
# see https://www.miniwebtool.com/django-secret-key-generator/
SECRET_KEY = 'CHANGE THIS!'
ALLOWED_HOSTS = ['yourhostname.org', 'localhost']
# this is set by nginx
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# SECURE_HSTS_SECONDS = 31536000
# SECURE_HSTS_INCLUDE_SUBDOMAINS = True
# SECURE_CONTENT_TYPE_NOSNIFF = True
# SECURE_BROWSER_XSS_FILTER = True
# SECURE_SSL_REDIRECT = False
# SESSION_COOKIE_SECURE = True
# CSRF_COOKIE_SECURE = True
# CSRF_COOKIE_HTTPONLY = False
# X_FRAME_OPTIONS = 'DENY'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': 'localhost',
'NAME': 'grouprise',
'USER': 'grouprise',
'PASSWORD': 'xxxxx',
}
}
ADMINS = [
('Admins', '[email protected]'),
]
DEFAULT_FROM_EMAIL = 'noreply@localhost'
FROM_EMAIL_WITH_SLUG = 'noreply+{slug}@localhost'
ANSWERABLE_FROM_EMAIL = 'noreply@localhost'
DEFAULT_REPLY_TO_EMAIL = 'reply+{reply_key}@localhost'
STADTGESTALTEN_BOT_EMAIL = 'grouprise-bot@localhost'
SERVER_EMAIL = 'grouprise <noreply@localhost>'
GROUPRISE_POSTMASTER_EMAIL = 'postmaster@localhost'
OPERATOR_GROUP_ID = 1
STADTGESTALTEN_FEEDS_IMPORTER_USER_ID = 1
GROUPRISE_FEEDS_IMPORTER_GESTALT_ID = 1
GROUPRISE_UNKNOWN_GESTALT_ID = 1
ACCOUNT_DEFAULT_HTTP_PROTOCOL = 'https'
# ENTITY_SLUG_BLACKLIST = [ 'all', 'alle', 'antwort', 'crew', 'facebook', 'gbr', 'info', 'kontakt', 'mail', 'noreply', 'postmaster', 'presse', 'reply', 'stadt', 'unknown', 'webmaster', 'www']
# set debug mode to false
DEBUG = False
# increase session cookie time to 1 year
SESSION_COOKIE_AGE = 60 * 60 * 24 * 365
STADTGESTALTEN_CLAIMS = [
'your claim 1',
'your claim 2',
# ...
]
# HAYSTACK_CONNECTIONS['default']['PATH'] = os.path.join(DATA_DIR, 'xapian_index')
| agpl-3.0 | 515,369,064,642,529,200 | 27.5 | 191 | 0.695419 | false |
shl198/Pipeline | Modules/PacBioEDA/PacBio_Productivity.py | 3 | 2900 | #!/usr/bin/env python
# Copyright (C) 2011 Genome Research Limited -- See full notice at end
# of module.
# Create a plot of ZMW productivity by x/y position on the
# SMRTcell. First parameter is input .bas.h5 file. Output png file is
# optional command line parameter, defaulting to productivity.png.
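#
# Example invocation (the .bas.h5 file name below is just a placeholder):
#
#     PacBio_Productivity.py --output productivity.png  mymovie.bas.h5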
import sys
import optparse
import numpy as np
import h5py
from tt_log import logger
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
DEF_OUTPUT = 'productivity.png'
def main ():
logger.debug("%s starting" % sys.argv[0])
opt, args = getParms()
infile_name = args[0]
infile = h5py.File (infile_name, 'r')
colours = ('grey', 'red', 'green')
legends = ('non-seq', 'prod-0', 'prod-1')
top = h5py.Group (infile, '/')
ZMW = top["PulseData/BaseCalls/ZMW"]
ZMWMetrics = top["PulseData/BaseCalls/ZMWMetrics"]
holeStatus = ZMW["HoleStatus"]
holeXY = ZMW["HoleXY"]
holeProd = ZMWMetrics["Productivity"]
nonseqHoles = holeStatus[:]!=0 # ZMWs other than sequencing
prod0Holes = np.logical_and(holeProd[:]==0, np.logical_not(nonseqHoles))
prod1Holes = np.logical_and(holeProd[:]==1, np.logical_not(nonseqHoles))
holesByType = (nonseqHoles, prod0Holes, prod1Holes)
for which in xrange(len(holesByType)):
whichHoles = holesByType[which]
howMany = sum(whichHoles)
logger.debug("%5d %s" % (howMany, legends[which]));
if howMany > 0:
plt.scatter (holeXY[whichHoles,0], holeXY[whichHoles,1], \
s=1, c=colours[which], edgecolor='face', \
label="%5d %s" % (howMany, legends[which]))
plt.axis ('equal')
plt.legend (scatterpoints=3, prop={'size':8})
plt.savefig (opt.output)
infile.close()
logger.debug("complete")
def getParms (): # use default input sys.argv[1:]
parser = optparse.OptionParser(usage='%prog [options] <bas_file>')
parser.add_option ('--output', help='Output file name (def: %default)')
parser.set_defaults (output=DEF_OUTPUT)
opt, args = parser.parse_args()
return opt, args
if __name__ == "__main__":
main()
# Copyright (C) 2011 Genome Research Limited
#
# This library is free software. You can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
| mit | 7,556,700,988,126,529,000 | 29.526316 | 77 | 0.657586 | false |
chbrown/pi | pi/commands/publish.py | 1 | 1402 | import os
from subprocess import check_call
from pi.dist import read_script
def publish(execute=True, verbose=False, script_name='setup.py'):
dist = read_script(script_name)
name = dist.get_name()
version = dist.get_version()
if os.path.exists('README.md'):
print 'Converting README.md to reStructuredText, because PyPI requires reStructuredText'
if execute:
check_call(['pandoc', 'README.md', '-o', 'README.rst'])
# print 'Tagging current version in git'
## e.g., git tag -a v1.2.3 -m 1.2.3
# subprocessor('git', 'tag', '-a', 'v' + pi.__version__, '-m', pi.__version__)
# subprocessor('git', 'push')
print 'Installing locally in develop mode (version=%s)' % version
if execute:
dist.run_command('develop')
# python setup.py --help register
print 'Registering on PyPI: https://pypi.python.org/pypi/%s' % name
if execute:
dist.run_command('register')
# python setup.py --help sdist upload
print 'Uploading source distribution: https://pypi.python.org/simple/%s' % name
if execute:
dist.run_command('sdist')
dist.run_command('upload')
def cli(parser):
parser.add_argument('-n', '--dry-run', action='store_true', help='Print publish sequence without running')
opts = parser.parse_args()
publish(execute=not opts.dry_run, verbose=opts.verbose or opts.dry_run)
| mit | 34,353,667,107,204,910 | 33.195122 | 110 | 0.64622 | false |
zfrenchee/pandas | doc/sphinxext/ipython_sphinxext/ipython_directive.py | 1 | 37812 | # -*- coding: utf-8 -*-
"""
Sphinx directive to support embedded IPython code.
This directive allows pasting of entire interactive IPython sessions, prompts
and all, and their code will actually get re-executed at doc build time, with
all prompts renumbered sequentially. It also allows you to input code as a pure
python input by giving the argument python to the directive. The output looks
like an interactive ipython section.
To enable this directive, simply list it in your Sphinx ``conf.py`` file
(making sure the directory where you placed it is visible to sphinx, as is
needed for all Sphinx directives). For example, to enable syntax highlighting
and the IPython directive::
extensions = ['IPython.sphinxext.ipython_console_highlighting',
'IPython.sphinxext.ipython_directive']
The IPython directive outputs code-blocks with the language 'ipython'. So
if you do not have the syntax highlighting extension enabled as well, then
all rendered code-blocks will be uncolored. By default this directive assumes
that your prompts are unchanged IPython ones, but this can be customized.
The configurable options that can be placed in conf.py are:
ipython_savefig_dir:
The directory in which to save the figures. This is relative to the
Sphinx source directory. The default is `html_static_path`.
ipython_rgxin:
The compiled regular expression to denote the start of IPython input
lines. The default is re.compile('In \[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_rgxout:
The compiled regular expression to denote the start of IPython output
lines. The default is re.compile('Out\[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_promptin:
The string to represent the IPython input prompt in the generated ReST.
The default is 'In [%d]:'. This expects that the line numbers are used
in the prompt.
ipython_promptout:
    The string to represent the IPython output prompt in the generated ReST.
    The default is 'Out[%d]:'. This expects that the line numbers are used
in the prompt.
ipython_mplbackend:
The string which specifies if the embedded Sphinx shell should import
Matplotlib and set the backend. The value specifies a backend that is
passed to `matplotlib.use()` before any lines in `ipython_execlines` are
executed. If not specified in conf.py, then the default value of 'agg' is
used. To use the IPython directive without matplotlib as a dependency, set
the value to `None`. It may end up that matplotlib is still imported
if the user specifies so in `ipython_execlines` or makes use of the
@savefig pseudo decorator.
ipython_execlines:
A list of strings to be exec'd in the embedded Sphinx shell. Typical
usage is to make certain packages always available. Set this to an empty
list if you wish to have no imports always available. If specified in
conf.py as `None`, then it has the effect of making no imports available.
If omitted from conf.py altogether, then the default value of
['import numpy as np', 'import matplotlib.pyplot as plt'] is used.
ipython_holdcount:
When the @suppress pseudo-decorator is used, the execution count can be
incremented or not. The default behavior is to hold the execution count,
corresponding to a value of `True`. Set this to `False` to increment
the execution count after each suppressed command.
As an example, to use the IPython directive when `matplotlib` is not available,
one sets the backend to `None`::
ipython_mplbackend = None
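For reference, a conf.py that sets several of these options at once might look
like the following sketch (the particular values are only illustrative)::
    ipython_mplbackend = 'agg'
    ipython_execlines = ['import numpy as np', 'import pandas as pd']
    ipython_holdcount = True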
An example usage of the directive is:
.. code-block:: rst
.. ipython::
In [1]: x = 1
In [2]: y = x**2
In [3]: print(y)
See http://matplotlib.org/sampledoc/ipython_directive.html for additional
documentation.
ToDo
----
- Turn the ad-hoc test() function into a real test suite.
- Break up ipython-specific functionality from matplotlib stuff into better
separated code.
Authors
-------
- John D Hunter: original author.
- Fernando Perez: refactoring, documentation, cleanups, port to 0.11.
- Václav Šmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations.
- Skipper Seabold, refactoring, cleanups, pure python addition
"""
from __future__ import print_function
from __future__ import unicode_literals
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib
import os
import re
import sys
import tempfile
import ast
from pandas.compat import zip, range, map, lmap, u, text_type, cStringIO as StringIO
import warnings
# To keep compatibility with various python versions
try:
from hashlib import md5
except ImportError:
from md5 import md5
# Third-party
import sphinx
from docutils.parsers.rst import directives
from docutils import nodes
from sphinx.util.compat import Directive
# Our own
try:
from traitlets.config import Config
except ImportError:
from IPython import Config
from IPython import InteractiveShell
from IPython.core.profiledir import ProfileDir
from IPython.utils import io
from IPython.utils.py3compat import PY3
if PY3:
from io import StringIO
else:
from StringIO import StringIO
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# for tokenizing blocks
COMMENT, INPUT, OUTPUT = range(3)
#-----------------------------------------------------------------------------
# Functions and class declarations
#-----------------------------------------------------------------------------
def block_parser(part, rgxin, rgxout, fmtin, fmtout):
"""
part is a string of ipython text, comprised of at most one
input, one output, comments, and blank lines. The block parser
parses the text into a list of::
blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]
where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and
data is, depending on the type of token::
COMMENT : the comment string
INPUT: the (DECORATOR, INPUT_LINE, REST) where
DECORATOR: the input decorator (or None)
INPUT_LINE: the input as string (possibly multi-line)
REST : any stdout generated by the input line (not OUTPUT)
OUTPUT: the output string, possibly multi-line
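    For illustration only (a rough sketch, not an exhaustive specification),
    a part such as::
        In [1]: x = 1
        Out[1]: 1
    is parsed into something like::
        [(INPUT, (None, 'x = 1', '')), (OUTPUT, '1')]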
"""
block = []
lines = part.split('\n')
N = len(lines)
i = 0
decorator = None
while 1:
if i==N:
# nothing left to parse -- the last line
break
line = lines[i]
i += 1
line_stripped = line.strip()
if line_stripped.startswith('#'):
block.append((COMMENT, line))
continue
if line_stripped.startswith('@'):
# we're assuming at most one decorator -- may need to
# rethink
decorator = line_stripped
continue
# does this look like an input line?
matchin = rgxin.match(line)
if matchin:
lineno, inputline = int(matchin.group(1)), matchin.group(2)
# the ....: continuation string
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
Nc = len(continuation)
# input lines can continue on for more than one line, if
# we have a '\' line continuation char or a function call
# echo line 'print'. The input line can only be
# terminated by the end of the block or an output line, so
# we parse out the rest of the input line if it is
# multiline as well as any echo text
rest = []
while i<N:
# look ahead; if the next line is blank, or a comment, or
# an output line, we're done
nextline = lines[i]
matchout = rgxout.match(nextline)
#print "nextline=%s, continuation=%s, starts=%s"%(nextline, continuation, nextline.startswith(continuation))
if matchout or nextline.startswith('#'):
break
elif nextline.startswith(continuation):
nextline = nextline[Nc:]
if nextline and nextline[0] == ' ':
nextline = nextline[1:]
inputline += '\n' + nextline
else:
rest.append(nextline)
i+= 1
block.append((INPUT, (decorator, inputline, '\n'.join(rest))))
continue
# if it looks like an output line grab all the text to the end
# of the block
matchout = rgxout.match(line)
if matchout:
lineno, output = int(matchout.group(1)), matchout.group(2)
if i<N-1:
output = '\n'.join([output] + lines[i:])
block.append((OUTPUT, output))
break
return block
class DecodingStringIO(StringIO, object):
def __init__(self,buf='',encodings=('utf8',), *args, **kwds):
super(DecodingStringIO, self).__init__(buf, *args, **kwds)
self.set_encodings(encodings)
def set_encodings(self, encodings):
self.encodings = encodings
def write(self,data):
if isinstance(data, text_type):
return super(DecodingStringIO, self).write(data)
else:
for enc in self.encodings:
try:
data = data.decode(enc)
return super(DecodingStringIO, self).write(data)
except :
pass
# default to brute utf8 if no encoding succeeded
return super(DecodingStringIO, self).write(data.decode('utf8', 'replace'))
class EmbeddedSphinxShell(object):
"""An embedded IPython instance to run inside Sphinx"""
def __init__(self, exec_lines=None,state=None):
self.cout = DecodingStringIO(u'')
if exec_lines is None:
exec_lines = []
self.state = state
# Create config object for IPython
config = Config()
config.InteractiveShell.autocall = False
config.InteractiveShell.autoindent = False
config.InteractiveShell.colors = 'NoColor'
# create a profile so instance history isn't saved
tmp_profile_dir = tempfile.mkdtemp(prefix='profile_')
profname = 'auto_profile_sphinx_build'
pdir = os.path.join(tmp_profile_dir,profname)
profile = ProfileDir.create_profile_dir(pdir)
# Create and initialize global ipython, but don't start its mainloop.
# This will persist across different EmbededSphinxShell instances.
IP = InteractiveShell.instance(config=config, profile_dir=profile)
# io.stdout redirect must be done after instantiating InteractiveShell
io.stdout = self.cout
io.stderr = self.cout
# For debugging, so we can see normal output, use this:
#from IPython.utils.io import Tee
#io.stdout = Tee(self.cout, channel='stdout') # dbg
#io.stderr = Tee(self.cout, channel='stderr') # dbg
# Store a few parts of IPython we'll need.
self.IP = IP
self.user_ns = self.IP.user_ns
self.user_global_ns = self.IP.user_global_ns
self.input = ''
self.output = ''
self.is_verbatim = False
self.is_doctest = False
self.is_suppress = False
# Optionally, provide more detailed information to shell.
self.directive = None
# on the first call to the savefig decorator, we'll import
# pyplot as plt so we can make a call to the plt.gcf().savefig
self._pyplot_imported = False
# Prepopulate the namespace.
for line in exec_lines:
self.process_input_line(line, store_history=False)
def clear_cout(self):
self.cout.seek(0)
self.cout.truncate(0)
def process_input_line(self, line, store_history=True):
"""process the input, capturing stdout"""
stdout = sys.stdout
splitter = self.IP.input_splitter
try:
sys.stdout = self.cout
splitter.push(line)
more = splitter.push_accepts_more()
if not more:
try:
source_raw = splitter.source_raw_reset()[1]
except:
# recent ipython #4504
source_raw = splitter.raw_reset()
self.IP.run_cell(source_raw, store_history=store_history)
finally:
sys.stdout = stdout
def process_image(self, decorator):
"""
# build out an image directive like
# .. image:: somefile.png
# :width 4in
#
# from an input like
# savefig somefile.png width=4in
"""
savefig_dir = self.savefig_dir
source_dir = self.source_dir
saveargs = decorator.split(' ')
filename = saveargs[1]
# insert relative path to image file in source
outfile = os.path.relpath(os.path.join(savefig_dir,filename),
source_dir)
imagerows = ['.. image:: %s'%outfile]
for kwarg in saveargs[2:]:
arg, val = kwarg.split('=')
arg = arg.strip()
val = val.strip()
imagerows.append(' :%s: %s'%(arg, val))
image_file = os.path.basename(outfile) # only return file name
image_directive = '\n'.join(imagerows)
return image_file, image_directive
# Callbacks for each type of token
def process_input(self, data, input_prompt, lineno):
"""
Process data block for INPUT token.
"""
decorator, input, rest = data
image_file = None
image_directive = None
is_verbatim = decorator=='@verbatim' or self.is_verbatim
is_doctest = (decorator is not None and \
decorator.startswith('@doctest')) or self.is_doctest
is_suppress = decorator=='@suppress' or self.is_suppress
is_okexcept = decorator=='@okexcept' or self.is_okexcept
is_okwarning = decorator=='@okwarning' or self.is_okwarning
is_savefig = decorator is not None and \
decorator.startswith('@savefig')
# set the encodings to be used by DecodingStringIO
# to convert the execution output into unicode if
# needed. this attrib is set by IpythonDirective.run()
        # based on the specified block options, defaulting to ['utf8']
self.cout.set_encodings(self.output_encoding)
input_lines = input.split('\n')
if len(input_lines) > 1:
if input_lines[-1] != "":
input_lines.append('') # make sure there's a blank line
# so splitter buffer gets reset
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
if is_savefig:
image_file, image_directive = self.process_image(decorator)
ret = []
is_semicolon = False
# Hold the execution count, if requested to do so.
if is_suppress and self.hold_count:
store_history = False
else:
store_history = True
# Note: catch_warnings is not thread safe
with warnings.catch_warnings(record=True) as ws:
for i, line in enumerate(input_lines):
if line.endswith(';'):
is_semicolon = True
if i == 0:
# process the first input line
if is_verbatim:
self.process_input_line('')
self.IP.execution_count += 1 # increment it anyway
else:
# only submit the line in non-verbatim mode
self.process_input_line(line, store_history=store_history)
formatted_line = '%s %s'%(input_prompt, line)
else:
# process a continuation line
if not is_verbatim:
self.process_input_line(line, store_history=store_history)
formatted_line = '%s %s'%(continuation, line)
if not is_suppress:
ret.append(formatted_line)
if not is_suppress and len(rest.strip()) and is_verbatim:
# the "rest" is the standard output of the
# input, which needs to be added in
# verbatim mode
ret.append(rest)
self.cout.seek(0)
output = self.cout.read()
if not is_suppress and not is_semicolon:
ret.append(output)
elif is_semicolon: # get spacing right
ret.append('')
# context information
filename = self.state.document.current_source
lineno = self.state.document.current_line
# output any exceptions raised during execution to stdout
# unless :okexcept: has been specified.
if not is_okexcept and "Traceback" in output:
s = "\nException in %s at block ending on line %s\n" % (filename, lineno)
s += "Specify :okexcept: as an option in the ipython:: block to suppress this message\n"
sys.stdout.write('\n\n>>>' + ('-' * 73))
sys.stdout.write(s)
sys.stdout.write(output)
sys.stdout.write('<<<' + ('-' * 73) + '\n\n')
# output any warning raised during execution to stdout
# unless :okwarning: has been specified.
if not is_okwarning:
for w in ws:
s = "\nWarning in %s at block ending on line %s\n" % (filename, lineno)
s += "Specify :okwarning: as an option in the ipython:: block to suppress this message\n"
sys.stdout.write('\n\n>>>' + ('-' * 73))
sys.stdout.write(s)
sys.stdout.write('-' * 76 + '\n')
s=warnings.formatwarning(w.message, w.category,
w.filename, w.lineno, w.line)
sys.stdout.write(s)
sys.stdout.write('<<<' + ('-' * 73) + '\n')
self.cout.truncate(0)
return (ret, input_lines, output, is_doctest, decorator, image_file,
image_directive)
def process_output(self, data, output_prompt,
input_lines, output, is_doctest, decorator, image_file):
"""
Process data block for OUTPUT token.
"""
TAB = ' ' * 4
if is_doctest and output is not None:
found = output
found = found.strip()
submitted = data.strip()
if self.directive is None:
source = 'Unavailable'
content = 'Unavailable'
else:
source = self.directive.state.document.current_source
content = self.directive.content
# Add tabs and join into a single string.
content = '\n'.join(TAB + line for line in content)
# Make sure the output contains the output prompt.
ind = found.find(output_prompt)
if ind < 0:
e = ('output does not contain output prompt\n\n'
'Document source: {0}\n\n'
'Raw content: \n{1}\n\n'
'Input line(s):\n{TAB}{2}\n\n'
'Output line(s):\n{TAB}{3}\n\n')
e = e.format(source, content, '\n'.join(input_lines),
repr(found), TAB=TAB)
raise RuntimeError(e)
found = found[len(output_prompt):].strip()
# Handle the actual doctest comparison.
if decorator.strip() == '@doctest':
# Standard doctest
if found != submitted:
e = ('doctest failure\n\n'
'Document source: {0}\n\n'
'Raw content: \n{1}\n\n'
'On input line(s):\n{TAB}{2}\n\n'
'we found output:\n{TAB}{3}\n\n'
'instead of the expected:\n{TAB}{4}\n\n')
e = e.format(source, content, '\n'.join(input_lines),
repr(found), repr(submitted), TAB=TAB)
raise RuntimeError(e)
else:
self.custom_doctest(decorator, input_lines, found, submitted)
def process_comment(self, data):
"""Process data fPblock for COMMENT token."""
if not self.is_suppress:
return [data]
def save_image(self, image_file):
"""
Saves the image file to disk.
"""
self.ensure_pyplot()
command = ('plt.gcf().savefig("%s", bbox_inches="tight", '
'dpi=100)' % image_file)
#print 'SAVEFIG', command # dbg
self.process_input_line('bookmark ipy_thisdir', store_history=False)
self.process_input_line('cd -b ipy_savedir', store_history=False)
self.process_input_line(command, store_history=False)
self.process_input_line('cd -b ipy_thisdir', store_history=False)
self.process_input_line('bookmark -d ipy_thisdir', store_history=False)
self.clear_cout()
def process_block(self, block):
"""
process block from the block_parser and return a list of processed lines
"""
ret = []
output = None
input_lines = None
lineno = self.IP.execution_count
input_prompt = self.promptin % lineno
output_prompt = self.promptout % lineno
image_file = None
image_directive = None
for token, data in block:
if token == COMMENT:
out_data = self.process_comment(data)
elif token == INPUT:
(out_data, input_lines, output, is_doctest, decorator,
image_file, image_directive) = \
self.process_input(data, input_prompt, lineno)
elif token == OUTPUT:
out_data = \
self.process_output(data, output_prompt,
input_lines, output, is_doctest,
decorator, image_file)
if out_data:
ret.extend(out_data)
# save the image files
if image_file is not None:
self.save_image(image_file)
return ret, image_directive
def ensure_pyplot(self):
"""
Ensures that pyplot has been imported into the embedded IPython shell.
Also, makes sure to set the backend appropriately if not set already.
"""
# We are here if the @figure pseudo decorator was used. Thus, it's
# possible that we could be here even if python_mplbackend were set to
# `None`. That's also strange and perhaps worthy of raising an
# exception, but for now, we just set the backend to 'agg'.
if not self._pyplot_imported:
if 'matplotlib.backends' not in sys.modules:
# Then ipython_matplotlib was set to None but there was a
# call to the @figure decorator (and ipython_execlines did
# not set a backend).
#raise Exception("No backend was set, but @figure was used!")
import matplotlib
matplotlib.use('agg')
# Always import pyplot into embedded shell.
self.process_input_line('import matplotlib.pyplot as plt',
store_history=False)
self._pyplot_imported = True
def process_pure_python(self, content):
"""
        content is a list of strings. It is unedited directive content.
This runs it line by line in the InteractiveShell, prepends
prompts as needed capturing stderr and stdout, then returns
the content as a list as if it were ipython code
"""
output = []
savefig = False # keep up with this to clear figure
multiline = False # to handle line continuation
multiline_start = None
fmtin = self.promptin
ct = 0
for lineno, line in enumerate(content):
line_stripped = line.strip()
if not len(line):
output.append(line)
continue
# handle decorators
if line_stripped.startswith('@'):
output.extend([line])
if 'savefig' in line:
savefig = True # and need to clear figure
continue
# handle comments
if line_stripped.startswith('#'):
output.extend([line])
continue
# deal with lines checking for multiline
continuation = u' %s:'% ''.join(['.']*(len(str(ct))+2))
if not multiline:
modified = u"%s %s" % (fmtin % ct, line_stripped)
output.append(modified)
ct += 1
try:
ast.parse(line_stripped)
output.append(u'')
except Exception: # on a multiline
multiline = True
multiline_start = lineno
else: # still on a multiline
modified = u'%s %s' % (continuation, line)
output.append(modified)
# if the next line is indented, it should be part of multiline
if len(content) > lineno + 1:
nextline = content[lineno + 1]
if len(nextline) - len(nextline.lstrip()) > 3:
continue
try:
mod = ast.parse(
'\n'.join(content[multiline_start:lineno+1]))
if isinstance(mod.body[0], ast.FunctionDef):
# check to see if we have the whole function
for element in mod.body[0].body:
if isinstance(element, ast.Return):
multiline = False
else:
output.append(u'')
multiline = False
except Exception:
pass
if savefig: # clear figure if plotted
self.ensure_pyplot()
self.process_input_line('plt.clf()', store_history=False)
self.clear_cout()
savefig = False
return output
def custom_doctest(self, decorator, input_lines, found, submitted):
"""
Perform a specialized doctest.
"""
from .custom_doctests import doctests
args = decorator.split()
doctest_type = args[1]
if doctest_type in doctests:
doctests[doctest_type](self, args, input_lines, found, submitted)
else:
e = "Invalid option to @doctest: {0}".format(doctest_type)
raise Exception(e)
class IPythonDirective(Directive):
has_content = True
required_arguments = 0
optional_arguments = 4 # python, suppress, verbatim, doctest
    final_argument_whitespace = True
option_spec = { 'python': directives.unchanged,
'suppress' : directives.flag,
'verbatim' : directives.flag,
'doctest' : directives.flag,
'okexcept': directives.flag,
'okwarning': directives.flag,
'output_encoding': directives.unchanged_required
}
shell = None
seen_docs = set()
def get_config_options(self):
# contains sphinx configuration variables
config = self.state.document.settings.env.config
# get config variables to set figure output directory
confdir = self.state.document.settings.env.app.confdir
savefig_dir = config.ipython_savefig_dir
source_dir = os.path.dirname(self.state.document.current_source)
if savefig_dir is None:
savefig_dir = config.html_static_path
if isinstance(savefig_dir, list):
savefig_dir = savefig_dir[0] # safe to assume only one path?
savefig_dir = os.path.join(confdir, savefig_dir)
# get regex and prompt stuff
rgxin = config.ipython_rgxin
rgxout = config.ipython_rgxout
promptin = config.ipython_promptin
promptout = config.ipython_promptout
mplbackend = config.ipython_mplbackend
exec_lines = config.ipython_execlines
hold_count = config.ipython_holdcount
return (savefig_dir, source_dir, rgxin, rgxout,
promptin, promptout, mplbackend, exec_lines, hold_count)
def setup(self):
# Get configuration values.
(savefig_dir, source_dir, rgxin, rgxout, promptin, promptout,
mplbackend, exec_lines, hold_count) = self.get_config_options()
if self.shell is None:
# We will be here many times. However, when the
# EmbeddedSphinxShell is created, its interactive shell member
# is the same for each instance.
if mplbackend and 'matplotlib.backends' not in sys.modules:
import matplotlib
# Repeated calls to use() will not hurt us since `mplbackend`
# is the same each time.
matplotlib.use(mplbackend)
# Must be called after (potentially) importing matplotlib and
# setting its backend since exec_lines might import pylab.
self.shell = EmbeddedSphinxShell(exec_lines, self.state)
# Store IPython directive to enable better error messages
self.shell.directive = self
# reset the execution count if we haven't processed this doc
#NOTE: this may be borked if there are multiple seen_doc tmp files
#check time stamp?
if self.state.document.current_source not in self.seen_docs:
self.shell.IP.history_manager.reset()
self.shell.IP.execution_count = 1
try:
self.shell.IP.prompt_manager.width = 0
except AttributeError:
# GH14003: class promptManager has removed after IPython 5.x
pass
self.seen_docs.add(self.state.document.current_source)
# and attach to shell so we don't have to pass them around
self.shell.rgxin = rgxin
self.shell.rgxout = rgxout
self.shell.promptin = promptin
self.shell.promptout = promptout
self.shell.savefig_dir = savefig_dir
self.shell.source_dir = source_dir
self.shell.hold_count = hold_count
# setup bookmark for saving figures directory
self.shell.process_input_line('bookmark ipy_savedir %s'%savefig_dir,
store_history=False)
self.shell.clear_cout()
return rgxin, rgxout, promptin, promptout
def teardown(self):
# delete last bookmark
self.shell.process_input_line('bookmark -d ipy_savedir',
store_history=False)
self.shell.clear_cout()
def run(self):
debug = False
#TODO, any reason block_parser can't be a method of embeddable shell
# then we wouldn't have to carry these around
rgxin, rgxout, promptin, promptout = self.setup()
options = self.options
self.shell.is_suppress = 'suppress' in options
self.shell.is_doctest = 'doctest' in options
self.shell.is_verbatim = 'verbatim' in options
self.shell.is_okexcept = 'okexcept' in options
self.shell.is_okwarning = 'okwarning' in options
self.shell.output_encoding = [options.get('output_encoding', 'utf8')]
# handle pure python code
if 'python' in self.arguments:
content = self.content
self.content = self.shell.process_pure_python(content)
parts = '\n'.join(self.content).split('\n\n')
lines = ['.. code-block:: ipython', '']
figures = []
for part in parts:
block = block_parser(part, rgxin, rgxout, promptin, promptout)
if len(block):
rows, figure = self.shell.process_block(block)
for row in rows:
lines.extend([' %s'%line for line in row.split('\n')])
if figure is not None:
figures.append(figure)
for figure in figures:
lines.append('')
lines.extend(figure.split('\n'))
lines.append('')
if len(lines)>2:
if debug:
print('\n'.join(lines))
else:
# This has to do with input, not output. But if we comment
# these lines out, then no IPython code will appear in the
# final output.
self.state_machine.insert_input(
lines, self.state_machine.input_lines.source(0))
# cleanup
self.teardown()
return []
# Enable as a proper Sphinx directive
def setup(app):
setup.app = app
app.add_directive('ipython', IPythonDirective)
app.add_config_value('ipython_savefig_dir', None, 'env')
app.add_config_value('ipython_rgxin',
re.compile('In \[(\d+)\]:\s?(.*)\s*'), 'env')
app.add_config_value('ipython_rgxout',
re.compile('Out\[(\d+)\]:\s?(.*)\s*'), 'env')
app.add_config_value('ipython_promptin', 'In [%d]:', 'env')
app.add_config_value('ipython_promptout', 'Out[%d]:', 'env')
# We could just let matplotlib pick whatever is specified as the default
# backend in the matplotlibrc file, but this would cause issues if the
# backend didn't work in headless environments. For this reason, 'agg'
# is a good default backend choice.
app.add_config_value('ipython_mplbackend', 'agg', 'env')
# If the user sets this config value to `None`, then EmbeddedSphinxShell's
# __init__ method will treat it as [].
execlines = ['import numpy as np', 'import matplotlib.pyplot as plt']
app.add_config_value('ipython_execlines', execlines, 'env')
app.add_config_value('ipython_holdcount', True, 'env')
# Simple smoke test, needs to be converted to a proper automatic test.
def test():
examples = [
r"""
In [9]: pwd
Out[9]: '/home/jdhunter/py4science/book'
In [10]: cd bookdata/
/home/jdhunter/py4science/book/bookdata
In [2]: from pylab import *
In [2]: ion()
In [3]: im = imread('stinkbug.png')
@savefig mystinkbug.png width=4in
In [4]: imshow(im)
Out[4]: <matplotlib.image.AxesImage object at 0x39ea850>
""",
r"""
In [1]: x = 'hello world'
# string methods can be
# used to alter the string
@doctest
In [2]: x.upper()
Out[2]: 'HELLO WORLD'
@verbatim
In [3]: x.st<TAB>
x.startswith x.strip
""",
r"""
In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\
.....: &d=9&e=22&f=2009&g=d&a=1&br=8&c=2006&ignore=.csv'
In [131]: print url.split('&')
['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv']
In [60]: import urllib
""",
r"""\
In [133]: import numpy.random
@suppress
In [134]: numpy.random.seed(2358)
@doctest
In [135]: numpy.random.rand(10,2)
Out[135]:
array([[ 0.64524308, 0.59943846],
[ 0.47102322, 0.8715456 ],
[ 0.29370834, 0.74776844],
[ 0.99539577, 0.1313423 ],
[ 0.16250302, 0.21103583],
[ 0.81626524, 0.1312433 ],
[ 0.67338089, 0.72302393],
[ 0.7566368 , 0.07033696],
[ 0.22591016, 0.77731835],
[ 0.0072729 , 0.34273127]])
""",
r"""
In [106]: print x
jdh
In [109]: for i in range(10):
.....: print i
.....:
.....:
0
1
2
3
4
5
6
7
8
9
""",
r"""
In [144]: from pylab import *
In [145]: ion()
# use a semicolon to suppress the output
@savefig test_hist.png width=4in
In [151]: hist(np.random.randn(10000), 100);
@savefig test_plot.png width=4in
In [151]: plot(np.random.randn(10000), 'o');
""",
r"""
# use a semicolon to suppress the output
In [151]: plt.clf()
@savefig plot_simple.png width=4in
In [151]: plot([1,2,3])
@savefig hist_simple.png width=4in
In [151]: hist(np.random.randn(10000), 100);
""",
r"""
# update the current fig
In [151]: ylabel('number')
In [152]: title('normal distribution')
@savefig hist_with_text.png
In [153]: grid(True)
@doctest float
In [154]: 0.1 + 0.2
Out[154]: 0.3
@doctest float
In [155]: np.arange(16).reshape(4,4)
Out[155]:
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
In [1]: x = np.arange(16, dtype=float).reshape(4,4)
In [2]: x[0,0] = np.inf
In [3]: x[0,1] = np.nan
@doctest float
In [4]: x
Out[4]:
array([[ inf, nan, 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]])
""",
]
# skip local-file depending first example:
examples = examples[1:]
#ipython_directive.DEBUG = True # dbg
#options = dict(suppress=True) # dbg
options = dict()
for example in examples:
content = example.split('\n')
IPythonDirective('debug', arguments=None, options=options,
content=content, lineno=0,
content_offset=None, block_text=None,
state=None, state_machine=None,
)
# Run test suite as a script
if __name__=='__main__':
if not os.path.isdir('_static'):
os.mkdir('_static')
test()
print('All OK? Check figures in _static/')
| bsd-3-clause | 2,839,080,831,233,573,400 | 33.656279 | 124 | 0.570246 | false |
Alwnikrotikz/jythonconsole | tip.py | 9 | 1701 | from java.awt import Color, Dimension
from javax.swing import JWindow, JTextArea, JScrollPane
__author__ = "Don Coleman <[email protected]>"
__cvsid__ = "$Id: tip.py,v 1.3 2003/05/01 03:43:53 dcoleman Exp $"
class Tip(JWindow):
"""
Window which provides the user with information about the method.
    For Python, this shows arguments and the documentation.
For Java, this shows the signature(s) and return type
"""
MAX_HEIGHT = 300
MAX_WIDTH = 400
def __init__(self, frame):
JWindow.__init__(self, frame)
self.textarea = JTextArea()
# TODO put this color with all the other colors
self.textarea.setBackground(Color(225,255,255))
self.textarea.setEditable(0)
self.jscrollpane = JScrollPane(self.textarea)
self.getContentPane().add(self.jscrollpane)
def setText(self, tip):
self.textarea.setText(tip)
self.textarea.setCaretPosition(0)
#print >> sys.stderr, self.textarea.getPreferredScrollableViewportSize()
self.setSize(self.getPreferredSize())
def getPreferredSize(self):
# need to add a magic amount to the size to avoid scrollbars
# I'm sure there's a better way to do this
MAGIC = 20
size = self.textarea.getPreferredScrollableViewportSize()
height = size.height + MAGIC
width = size.width + MAGIC
if height > Tip.MAX_HEIGHT:
height = Tip.MAX_HEIGHT
if width > Tip.MAX_WIDTH:
width = Tip.MAX_WIDTH
return Dimension(width, height)
def showTip(self, tip, displayPoint):
self.setLocation(displayPoint)
self.setText(tip)
self.show()
| lgpl-2.1 | 3,005,137,792,569,444,000 | 35.191489 | 80 | 0.648442 | false |
sdague/home-assistant | homeassistant/components/scsgate/light.py | 12 | 3131 | """Support for SCSGate lights."""
import logging
from scsgate.tasks import ToggleStatusTask
import voluptuous as vol
from homeassistant.components.light import PLATFORM_SCHEMA, LightEntity
from homeassistant.const import ATTR_ENTITY_ID, ATTR_STATE, CONF_DEVICES, CONF_NAME
import homeassistant.helpers.config_validation as cv
from . import CONF_SCS_ID, DOMAIN, SCSGATE_SCHEMA
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_DEVICES): cv.schema_with_slug_keys(SCSGATE_SCHEMA)}
)
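# A configuration.yaml entry for this platform might look like the following
# sketch (the device slug, name and scs_id values are placeholders):
#
#   light:
#     - platform: scsgate
#       devices:
#         living_room:
#           name: Living room
#           scs_id: "31"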
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the SCSGate switches."""
devices = config.get(CONF_DEVICES)
lights = []
logger = logging.getLogger(__name__)
scsgate = hass.data[DOMAIN]
if devices:
for entity_info in devices.values():
if entity_info[CONF_SCS_ID] in scsgate.devices:
continue
name = entity_info[CONF_NAME]
scs_id = entity_info[CONF_SCS_ID]
logger.info("Adding %s scsgate.light", name)
light = SCSGateLight(
name=name, scs_id=scs_id, logger=logger, scsgate=scsgate
)
lights.append(light)
add_entities(lights)
scsgate.add_devices_to_register(lights)
class SCSGateLight(LightEntity):
"""Representation of a SCSGate light."""
def __init__(self, scs_id, name, logger, scsgate):
"""Initialize the light."""
self._name = name
self._scs_id = scs_id
self._toggled = False
self._logger = logger
self._scsgate = scsgate
@property
def scs_id(self):
"""Return the SCS ID."""
return self._scs_id
@property
def should_poll(self):
"""No polling needed for a SCSGate light."""
return False
@property
def name(self):
"""Return the name of the device if any."""
return self._name
@property
def is_on(self):
"""Return true if light is on."""
return self._toggled
def turn_on(self, **kwargs):
"""Turn the device on."""
self._scsgate.append_task(ToggleStatusTask(target=self._scs_id, toggled=True))
self._toggled = True
self.schedule_update_ha_state()
def turn_off(self, **kwargs):
"""Turn the device off."""
self._scsgate.append_task(ToggleStatusTask(target=self._scs_id, toggled=False))
self._toggled = False
self.schedule_update_ha_state()
def process_event(self, message):
"""Handle a SCSGate message related with this light."""
if self._toggled == message.toggled:
self._logger.info(
"Light %s, ignoring message %s because state already active",
self._scs_id,
message,
)
# Nothing changed, ignoring
return
self._toggled = message.toggled
self.schedule_update_ha_state()
command = "off"
if self._toggled:
command = "on"
self.hass.bus.fire(
"button_pressed", {ATTR_ENTITY_ID: self._scs_id, ATTR_STATE: command}
)
| apache-2.0 | -1,859,846,476,552,103,000 | 27.207207 | 87 | 0.603002 | false |
amanikamail/flexx | docs/scripts/genexamples.py | 19 | 3162 | """ Generate docs for examples.
"""
import os
from types import ModuleType
from flexx import ui, app
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
DOC_DIR = os.path.abspath(os.path.join(THIS_DIR, '..'))
EXAMPLES_DIR = os.path.abspath(os.path.join(DOC_DIR, '..', 'examples'))
OUTPUT_DIR = os.path.join(DOC_DIR, 'examples')
created_files = []
def main():
# Collect examples
examples = {}
for sub in os.listdir(EXAMPLES_DIR):
dirname = os.path.join(EXAMPLES_DIR, sub)
if os.path.isdir(dirname):
examples[sub] = {}
for fname in os.listdir(dirname):
filename = os.path.join(dirname, fname)
if os.path.isfile(filename) and fname.endswith('.py'):
# Create example content
code = open(filename, 'rt').read()
text = ':orphan:\n\n' # avoid toctree warning
text += '.. _%s:\n\n' % fname
text += '%s\n%s\n\n' % (fname, '=' * len(fname))
text += '.. code-block:: py\n :linenos:\n\n'
text += '\n ' + code.replace('\n', '\n ').rstrip() + '\n'
examples[sub][fname] = text
if not examples[sub]:
del examples[sub]
# Write all examples
created_files.append(OUTPUT_DIR)
if not os.path.isdir(OUTPUT_DIR):
os.mkdir(OUTPUT_DIR)
for sub in list(examples.keys()):
dirname = os.path.join(OUTPUT_DIR, sub)
created_files.append(dirname)
if not os.path.isdir(dirname):
os.mkdir(dirname)
for name in examples[sub]:
filename = os.path.join(dirname, name + '.rst')
created_files.append(filename)
open(filename, 'wt').write(examples[sub][name])
# # Create example index page
# docs = 'Examples'
# docs += '\n' + '=' * len(docs) + '\n\n'
# for sub in sorted(examples):
# docs += '\n' + sub + ':\n\n'
# for name in sorted(examples[sub]):
# docs += '* :ref:`%s`\n' % name
# # Write
# filename = os.path.join(DOC_DIR, 'examples.rst')
# created_files.append(filename)
# open(filename, 'wt').write(docs)
better_names = {'pyscript': 'PyScript'}
# Create example pages per submodule
for sub in examples:
dirname = os.path.join(DOC_DIR, sub)
if os.path.isdir(dirname):
docs = better_names.get(sub, sub.capitalize()) + ' examples'
docs += '\n%s\n\n' % (len(docs) * '=')
for name in sorted(examples[sub]):
docs += '* :ref:`%s`\n' % name
# Write
filename = os.path.join(DOC_DIR, sub, 'examples.rst')
created_files.append(filename)
open(filename, 'wt').write(docs)
print(' generated %i examples' % sum([len(x) for x in examples.values()]))
def clean():
while created_files:
filename = created_files.pop()
if os.path.isfile(filename):
os.remove(filename)
elif os.path.isdir(filename) and not os.listdir(filename):
os.rmdir(filename)
| bsd-2-clause | -125,045,003,337,830,060 | 34.931818 | 83 | 0.533839 | false |
tengqm/senlin-container | senlin/tests/unit/api/common/test_util.py | 1 | 3858 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from webob import exc
from senlin.api.common import util
from senlin.api.common import wsgi
from senlin.common import context
from senlin.common import policy
from senlin.tests.unit.common import base
class TestGetAllowedParams(base.SenlinTestCase):
def setUp(self):
super(TestGetAllowedParams, self).setUp()
req = wsgi.Request({})
self.params = req.params.copy()
self.params.add('foo', 'foo value')
self.whitelist = {'foo': 'single'}
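        # Whitelist values ('single', 'multi', 'mixed') control how repeated query params are returned.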
def test_returns_empty_dict(self):
self.whitelist = {}
result = util.get_allowed_params(self.params, self.whitelist)
self.assertEqual({}, result)
def test_only_adds_whitelisted_params_if_param_exists(self):
self.whitelist = {'foo': 'single'}
self.params.clear()
result = util.get_allowed_params(self.params, self.whitelist)
self.assertNotIn('foo', result)
def test_returns_only_whitelisted_params(self):
self.params.add('bar', 'bar value')
result = util.get_allowed_params(self.params, self.whitelist)
self.assertIn('foo', result)
self.assertNotIn('bar', result)
def test_handles_single_value_params(self):
result = util.get_allowed_params(self.params, self.whitelist)
self.assertEqual('foo value', result['foo'])
def test_handles_multiple_value_params(self):
self.whitelist = {'foo': 'multi'}
self.params.add('foo', 'foo value 2')
result = util.get_allowed_params(self.params, self.whitelist)
self.assertEqual(2, len(result['foo']))
self.assertIn('foo value', result['foo'])
self.assertIn('foo value 2', result['foo'])
def test_handles_mixed_value_param_with_multiple_entries(self):
self.whitelist = {'foo': 'mixed'}
self.params.add('foo', 'foo value 2')
result = util.get_allowed_params(self.params, self.whitelist)
self.assertEqual(2, len(result['foo']))
self.assertIn('foo value', result['foo'])
self.assertIn('foo value 2', result['foo'])
def test_handles_mixed_value_param_with_single_entry(self):
self.whitelist = {'foo': 'mixed'}
result = util.get_allowed_params(self.params, self.whitelist)
self.assertEqual('foo value', result['foo'])
def test_ignores_bogus_whitelist_items(self):
self.whitelist = {'foo': 'blah'}
result = util.get_allowed_params(self.params, self.whitelist)
self.assertNotIn('foo', result)
class TestPolicyEnforce(base.SenlinTestCase):
def setUp(self):
super(TestPolicyEnforce, self).setUp()
self.req = wsgi.Request({})
self.req.context = context.RequestContext(project='foo',
is_admin=False)
class DummyController(object):
REQUEST_SCOPE = 'test'
@util.policy_enforce
def an_action(self, req):
return 'woot'
self.controller = DummyController()
@mock.patch.object(policy, 'enforce')
def test_policy_enforce_policy_deny(self, mock_enforce):
mock_enforce.return_value = False
self.assertRaises(exc.HTTPForbidden,
self.controller.an_action,
self.req, tenant_id='foo')
| apache-2.0 | -8,149,454,512,689,554,000 | 35.056075 | 75 | 0.644635 | false |
TeslaProject/external_chromium_org | tools/telemetry/telemetry/core/backends/chrome/tab_list_backend.py | 46 | 2790 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import urllib2
from telemetry.core import tab
from telemetry.core import util
from telemetry.core.backends.chrome import inspector_backend_list
class TabListBackend(inspector_backend_list.InspectorBackendList):
"""A dynamic sequence of tab.Tabs in UI order."""
def __init__(self, browser_backend):
super(TabListBackend, self).__init__(browser_backend,
backend_wrapper=tab.Tab)
def New(self, timeout):
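    # Ask the browser to open a tab via the DevTools 'new' endpoint; the new tab appears last in UI order.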
assert self._browser_backend.supports_tab_control
self._browser_backend.Request('new', timeout=timeout)
return self[-1]
def CloseTab(self, debugger_url, timeout=None):
assert self._browser_backend.supports_tab_control
tab_id = inspector_backend_list.DebuggerUrlToId(debugger_url)
# TODO(dtu): crbug.com/160946, allow closing the last tab on some platforms.
# For now, just create a new tab before closing the last tab.
if len(self) <= 1:
self.New(timeout)
try:
response = self._browser_backend.Request('close/%s' % tab_id,
timeout=timeout,
throw_network_exception=True)
except urllib2.HTTPError:
raise Exception('Unable to close tab, tab id not found: %s' % tab_id)
assert response == 'Target is closing'
util.WaitFor(lambda: tab_id not in self, timeout=5)
def ActivateTab(self, debugger_url, timeout=None):
assert self._browser_backend.supports_tab_control
tab_id = inspector_backend_list.DebuggerUrlToId(debugger_url)
assert tab_id in self
try:
response = self._browser_backend.Request('activate/%s' % tab_id,
timeout=timeout,
throw_network_exception=True)
except urllib2.HTTPError:
raise Exception('Unable to activate tab, tab id not found: %s' % tab_id)
assert response == 'Target activated'
def GetTabUrl(self, debugger_url):
tab_id = inspector_backend_list.DebuggerUrlToId(debugger_url)
tab_info = self.GetContextInfo(tab_id)
assert tab_info is not None
return tab_info['url']
def Get(self, index, ret):
"""Returns self[index] if it exists, or ret if index is out of bounds."""
if len(self) <= index:
return ret
return self[index]
def ShouldIncludeContext(self, context):
if 'type' in context:
return context['type'] == 'page'
# TODO: For compatibility with Chrome before r177683.
# This check is not completely correct, see crbug.com/190592.
return not context['url'].startswith('chrome-extension://')
| bsd-3-clause | 652,955,964,871,653,800 | 39.434783 | 80 | 0.653047 | false |
jgsogo/neutron | webapp/synthetic/forms.py | 1 | 1384 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django import forms
from neutron.models import Word
from .models import AlternateData
class AlternateDataForm(forms.ModelForm):
word = forms.CharField()
class Meta:
model = AlternateData
fields = '__all__'
def __init__(self, *args, **kwargs):
instance = kwargs.get('instance', None)
initial = kwargs.pop('initial', {})
if instance:
initial.update({'word': instance.word.word })
super(AlternateDataForm, self).__init__(initial=initial, *args, **kwargs)
def clean(self):
cleaned_data = self.cleaned_data
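        # Swap the submitted text for a Word instance, creating the row on first use.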
cleaned_data['word'], _ = Word.objects.get_or_create(word=cleaned_data['word'])
return cleaned_data
class WordCoarseDataForm(forms.ModelForm):
word = forms.CharField()
class Meta:
model = AlternateData
fields = '__all__'
def __init__(self, *args, **kwargs):
instance = kwargs.get('instance', None)
initial = kwargs.pop('initial', {})
if instance:
initial.update({'word': instance.word.word })
super(WordCoarseDataForm, self).__init__(initial=initial, *args, **kwargs)
def clean(self):
cleaned_data = self.cleaned_data
cleaned_data['word'], _ = Word.objects.get_or_create(word=cleaned_data['word'])
return cleaned_data
| gpl-2.0 | 5,299,506,953,935,165,000 | 27.833333 | 87 | 0.611272 | false |
shurihell/testasia | lms/djangoapps/certificates/tests/factories.py | 16 | 3695 | # Factories are self documenting
# pylint: disable=missing-docstring
import factory
from uuid import uuid4
from django.core.files.base import ContentFile
from factory.django import DjangoModelFactory, ImageField
from student.models import LinkedInAddToProfileConfiguration
from certificates.models import (
GeneratedCertificate, CertificateStatuses, CertificateHtmlViewConfiguration, CertificateWhitelist, BadgeAssertion,
BadgeImageConfiguration,
)
class GeneratedCertificateFactory(DjangoModelFactory):
class Meta(object):
model = GeneratedCertificate
course_id = None
status = CertificateStatuses.unavailable
mode = GeneratedCertificate.MODES.honor
name = ''
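    # Note: evaluated once at class-definition time, so every certificate built by
    # this factory shares the same verify_uuid unless it is overridden.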
verify_uuid = uuid4().hex
class CertificateWhitelistFactory(DjangoModelFactory):
class Meta(object):
model = CertificateWhitelist
course_id = None
whitelist = True
notes = 'Test Notes'
class BadgeAssertionFactory(DjangoModelFactory):
class Meta(object):
model = BadgeAssertion
mode = 'honor'
data = {
'image': 'http://www.example.com/image.png',
'json': {'id': 'http://www.example.com/assertion.json'},
'issuer': 'http://www.example.com/issuer.json',
}
class BadgeImageConfigurationFactory(DjangoModelFactory):
class Meta(object):
model = BadgeImageConfiguration
mode = 'honor'
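    # Build a small solid-colour PNG in memory so tests never need a real image file on disk.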
icon = factory.LazyAttribute(
lambda _: ContentFile(
ImageField()._make_data( # pylint: disable=protected-access
{'color': 'blue', 'width': 50, 'height': 50, 'format': 'PNG'}
), 'test.png'
)
)
class CertificateHtmlViewConfigurationFactory(DjangoModelFactory):
class Meta(object):
model = CertificateHtmlViewConfiguration
enabled = True
configuration = """{
"default": {
"accomplishment_class_append": "accomplishment-certificate",
"platform_name": "edX",
"company_about_url": "http://www.edx.org/about-us",
"company_privacy_url": "http://www.edx.org/edx-privacy-policy",
"company_tos_url": "http://www.edx.org/edx-terms-service",
"company_verified_certificate_url": "http://www.edx.org/verified-certificate",
"document_stylesheet_url_application": "/static/certificates/sass/main-ltr.css",
"logo_src": "/static/certificates/images/logo-edx.png",
"logo_url": "http://www.edx.org"
},
"honor": {
"certificate_type": "Honor Code",
"certificate_title": "Certificate of Achievement",
"logo_url": "http://www.edx.org/honor_logo.png"
},
"verified": {
"certificate_type": "Verified",
"certificate_title": "Verified Certificate of Achievement"
},
"xseries": {
"certificate_title": "XSeries Certificate of Achievement",
"certificate_type": "XSeries"
},
"microsites": {
"testmicrosite": {
"company_about_url": "http://www.testmicrosite.org/about-us",
"company_privacy_url": "http://www.testmicrosite.org/edx-privacy-policy",
"company_tos_url": "http://www.testmicrosite.org/edx-terms-service"
}
}
}"""
class LinkedInAddToProfileConfigurationFactory(DjangoModelFactory):
class Meta(object):
model = LinkedInAddToProfileConfiguration
enabled = True
company_identifier = "0_0dPSPyS070e0HsE9HNz_13_d11_"
trk_partner_name = 'unittest'
| agpl-3.0 | 7,506,985,517,506,218,000 | 31.699115 | 118 | 0.615968 | false |
viz-dev/viz | qa/rpc-tests/importprunedfunds.py | 55 | 5061 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class ImportPrunedFundsTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 2
def setup_network(self, split=False):
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
self.is_network_split=False
self.sync_all()
def run_test(self):
print("Mining blocks...")
self.nodes[0].generate(101)
self.sync_all()
# address
address1 = self.nodes[0].getnewaddress()
# pubkey
address2 = self.nodes[0].getnewaddress()
address2_pubkey = self.nodes[0].validateaddress(address2)['pubkey'] # Using pubkey
# privkey
address3 = self.nodes[0].getnewaddress()
address3_privkey = self.nodes[0].dumpprivkey(address3) # Using privkey
#Check only one address
address_info = self.nodes[0].validateaddress(address1)
assert_equal(address_info['ismine'], True)
self.sync_all()
#Node 1 sync test
assert_equal(self.nodes[1].getblockcount(),101)
#Address Test - before import
address_info = self.nodes[1].validateaddress(address1)
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], False)
address_info = self.nodes[1].validateaddress(address2)
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], False)
address_info = self.nodes[1].validateaddress(address3)
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], False)
#Send funds to self
txnid1 = self.nodes[0].sendtoaddress(address1, 0.1)
self.nodes[0].generate(1)
rawtxn1 = self.nodes[0].gettransaction(txnid1)['hex']
proof1 = self.nodes[0].gettxoutproof([txnid1])
txnid2 = self.nodes[0].sendtoaddress(address2, 0.05)
self.nodes[0].generate(1)
rawtxn2 = self.nodes[0].gettransaction(txnid2)['hex']
proof2 = self.nodes[0].gettxoutproof([txnid2])
txnid3 = self.nodes[0].sendtoaddress(address3, 0.025)
self.nodes[0].generate(1)
rawtxn3 = self.nodes[0].gettransaction(txnid3)['hex']
proof3 = self.nodes[0].gettxoutproof([txnid3])
self.sync_all()
#Import with no affiliated address
try:
self.nodes[1].importprunedfunds(rawtxn1, proof1)
except JSONRPCException as e:
assert('No addresses' in e.error['message'])
else:
assert(False)
balance1 = self.nodes[1].getbalance("", 0, True)
assert_equal(balance1, Decimal(0))
#Import with affiliated address with no rescan
self.nodes[1].importaddress(address2, "add2", False)
result2 = self.nodes[1].importprunedfunds(rawtxn2, proof2)
balance2 = self.nodes[1].getbalance("add2", 0, True)
assert_equal(balance2, Decimal('0.05'))
#Import with private key with no rescan
self.nodes[1].importprivkey(address3_privkey, "add3", False)
result3 = self.nodes[1].importprunedfunds(rawtxn3, proof3)
balance3 = self.nodes[1].getbalance("add3", 0, False)
assert_equal(balance3, Decimal('0.025'))
balance3 = self.nodes[1].getbalance("*", 0, True)
assert_equal(balance3, Decimal('0.075'))
#Addresses Test - after import
address_info = self.nodes[1].validateaddress(address1)
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], False)
address_info = self.nodes[1].validateaddress(address2)
assert_equal(address_info['iswatchonly'], True)
assert_equal(address_info['ismine'], False)
address_info = self.nodes[1].validateaddress(address3)
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], True)
#Remove transactions
try:
self.nodes[1].removeprunedfunds(txnid1)
except JSONRPCException as e:
assert('does not exist' in e.error['message'])
else:
assert(False)
balance1 = self.nodes[1].getbalance("*", 0, True)
assert_equal(balance1, Decimal('0.075'))
self.nodes[1].removeprunedfunds(txnid2)
balance2 = self.nodes[1].getbalance("*", 0, True)
assert_equal(balance2, Decimal('0.025'))
self.nodes[1].removeprunedfunds(txnid3)
balance3 = self.nodes[1].getbalance("*", 0, True)
assert_equal(balance3, Decimal('0.0'))
if __name__ == '__main__':
ImportPrunedFundsTest().main()
| mit | 6,640,988,969,223,507,000 | 36.768657 | 107 | 0.626556 | false |
nadley/Sick-Beard | sickbeard/search_queue.py | 29 | 9175 | # Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import datetime
import time
import sickbeard
from sickbeard import db, logger, common, exceptions, helpers
from sickbeard import generic_queue
from sickbeard import search
from sickbeard import ui
BACKLOG_SEARCH = 10
RSS_SEARCH = 20
MANUAL_SEARCH = 30
class SearchQueue(generic_queue.GenericQueue):
def __init__(self):
generic_queue.GenericQueue.__init__(self)
self.queue_name = "SEARCHQUEUE"
def is_in_queue(self, show, segment):
for cur_item in self.queue:
if isinstance(cur_item, BacklogQueueItem) and cur_item.show == show and cur_item.segment == segment:
return True
return False
def is_ep_in_queue(self, ep_obj):
for cur_item in self.queue:
if isinstance(cur_item, ManualSearchQueueItem) and cur_item.ep_obj == ep_obj:
return True
return False
def pause_backlog(self):
self.min_priority = generic_queue.QueuePriorities.HIGH
def unpause_backlog(self):
self.min_priority = 0
def is_backlog_paused(self):
# backlog priorities are NORMAL, this should be done properly somewhere
return self.min_priority >= generic_queue.QueuePriorities.NORMAL
def is_backlog_in_progress(self):
for cur_item in self.queue + [self.currentItem]:
if isinstance(cur_item, BacklogQueueItem):
return True
return False
def add_item(self, item):
if isinstance(item, RSSSearchQueueItem):
generic_queue.GenericQueue.add_item(self, item)
# don't do duplicates
elif isinstance(item, BacklogQueueItem) and not self.is_in_queue(item.show, item.segment):
generic_queue.GenericQueue.add_item(self, item)
elif isinstance(item, ManualSearchQueueItem) and not self.is_ep_in_queue(item.ep_obj):
generic_queue.GenericQueue.add_item(self, item)
else:
logger.log(u"Not adding item, it's already in the queue", logger.DEBUG)
class ManualSearchQueueItem(generic_queue.QueueItem):
def __init__(self, ep_obj):
generic_queue.QueueItem.__init__(self, 'Manual Search', MANUAL_SEARCH)
self.priority = generic_queue.QueuePriorities.HIGH
self.ep_obj = ep_obj
self.success = None
def execute(self):
generic_queue.QueueItem.execute(self)
logger.log("Searching for download for " + self.ep_obj.prettyName())
foundEpisode = search.findEpisode(self.ep_obj, manualSearch=True)
result = False
if not foundEpisode:
ui.notifications.message('No downloads were found', "Couldn't find a download for <i>%s</i>" % self.ep_obj.prettyName())
logger.log(u"Unable to find a download for "+self.ep_obj.prettyName())
else:
# just use the first result for now
logger.log(u"Downloading episode from " + foundEpisode.url)
result = search.snatchEpisode(foundEpisode)
providerModule = foundEpisode.provider
if not result:
ui.notifications.error('Error while attempting to snatch '+foundEpisode.name+', check your logs')
            elif providerModule is None:
ui.notifications.error('Provider is configured incorrectly, unable to download')
self.success = result
def finish(self):
# don't let this linger if something goes wrong
        if self.success is None:
self.success = False
generic_queue.QueueItem.finish(self)
class RSSSearchQueueItem(generic_queue.QueueItem):
def __init__(self):
generic_queue.QueueItem.__init__(self, 'RSS Search', RSS_SEARCH)
def execute(self):
generic_queue.QueueItem.execute(self)
self._changeMissingEpisodes()
logger.log(u"Beginning search for new episodes on RSS")
foundResults = search.searchForNeededEpisodes()
if not len(foundResults):
logger.log(u"No needed episodes found on the RSS feeds")
else:
for curResult in foundResults:
search.snatchEpisode(curResult)
time.sleep(2)
generic_queue.QueueItem.finish(self)
def _changeMissingEpisodes(self):
logger.log(u"Changing all old missing episodes to status WANTED")
curDate = datetime.date.today().toordinal()
myDB = db.DBConnection()
sqlResults = myDB.select("SELECT * FROM tv_episodes WHERE status = ? AND airdate < ?", [common.UNAIRED, curDate])
for sqlEp in sqlResults:
try:
show = helpers.findCertainShow(sickbeard.showList, int(sqlEp["showid"]))
except exceptions.MultipleShowObjectsException:
logger.log(u"ERROR: expected to find a single show matching " + sqlEp["showid"])
return None
            if show is None:
logger.log(u"Unable to find the show with ID "+str(sqlEp["showid"])+" in your show list! DB value was "+str(sqlEp), logger.ERROR)
return None
ep = show.getEpisode(sqlEp["season"], sqlEp["episode"])
with ep.lock:
if ep.show.paused:
ep.status = common.SKIPPED
else:
ep.status = common.WANTED
ep.saveToDB()
class BacklogQueueItem(generic_queue.QueueItem):
def __init__(self, show, segment):
generic_queue.QueueItem.__init__(self, 'Backlog', BACKLOG_SEARCH)
self.priority = generic_queue.QueuePriorities.LOW
self.thread_name = 'BACKLOG-'+str(show.tvdbid)
self.show = show
self.segment = segment
logger.log(u"Seeing if we need any episodes from "+self.show.name+" season "+str(self.segment))
myDB = db.DBConnection()
# see if there is anything in this season worth searching for
if not self.show.air_by_date:
statusResults = myDB.select("SELECT status FROM tv_episodes WHERE showid = ? AND season = ?", [self.show.tvdbid, self.segment])
else:
segment_year, segment_month = map(int, self.segment.split('-'))
min_date = datetime.date(segment_year, segment_month, 1)
# it's easier to just hard code this than to worry about rolling the year over or making a month length map
if segment_month == 12:
max_date = datetime.date(segment_year, 12, 31)
else:
max_date = datetime.date(segment_year, segment_month+1, 1) - datetime.timedelta(days=1)
statusResults = myDB.select("SELECT status FROM tv_episodes WHERE showid = ? AND airdate >= ? AND airdate <= ?",
[self.show.tvdbid, min_date.toordinal(), max_date.toordinal()])
anyQualities, bestQualities = common.Quality.splitQuality(self.show.quality) #@UnusedVariable
self.wantSeason = self._need_any_episodes(statusResults, bestQualities)
def execute(self):
generic_queue.QueueItem.execute(self)
results = search.findSeason(self.show, self.segment)
# download whatever we find
if results:
for curResult in results:
search.snatchEpisode(curResult)
time.sleep(5)
self.finish()
def _need_any_episodes(self, statusResults, bestQualities):
wantSeason = False
# check through the list of statuses to see if we want any
for curStatusResult in statusResults:
curCompositeStatus = int(curStatusResult["status"])
curStatus, curQuality = common.Quality.splitCompositeStatus(curCompositeStatus)
if bestQualities:
highestBestQuality = max(bestQualities)
else:
highestBestQuality = 0
# if we need a better one then say yes
if (curStatus in (common.DOWNLOADED, common.SNATCHED, common.SNATCHED_PROPER, common.SNATCHED_FRENCH) and curQuality < highestBestQuality) or curStatus == common.WANTED:
wantSeason = True
break
return wantSeason
| gpl-3.0 | -682,730,083,608,517,800 | 37.042553 | 181 | 0.619183 | false |
bihealth/vcfpy | tests/test_header.py | 1 | 13943 | # -*- coding: utf-8 -*-
"""Tests for vcfpy.header
"""
import sys
import vcfpy
from vcfpy import header
import pytest
def test_header_field_info():
"""Test the builtin functions of the FieldInfo class"""
info1 = header.FieldInfo("Integer", 1, "Some description")
info2 = header.FieldInfo("Integer", 1, "Some description")
info3 = header.FieldInfo("Integer", ".", "Some description")
assert info1 == info2
assert info1 != info3
assert hash(info1) == hash(info2)
assert str(info1) == "FieldInfo('Integer', 1, 'Some description', None)"
assert repr(info1) == "FieldInfo('Integer', 1, 'Some description', None)"
def test_sample_infos():
info1 = header.SamplesInfos(["one", "two", "three"])
info2 = header.SamplesInfos(["one", "two", "three"])
info3 = header.SamplesInfos(["one", "two", "four"])
assert info1 == info2
assert info1 != info3
with pytest.raises(TypeError):
assert hash(info1)
assert (
str(info1)
== "SamplesInfos(names=['one', 'two', 'three'], name_to_idx={'one': 0, 'three': 2, 'two': 1})"
)
assert (
repr(info1)
== "SamplesInfos(names=['one', 'two', 'three'], name_to_idx={'one': 0, 'three': 2, 'two': 1})"
)
def test_header_header():
lines1 = [header.HeaderLine("foo", "bar"), header.HeaderLine("foo2", "bar2")]
samples1 = header.SamplesInfos(["one", "two", "three"])
hdr1 = header.Header(lines1, samples1)
lines2 = [header.HeaderLine("foo", "bar"), header.HeaderLine("foo2", "bar2")]
samples2 = header.SamplesInfos(["one", "two", "three"])
hdr2 = header.Header(lines2, samples2)
lines3 = [header.HeaderLine("foo3", "bar"), header.HeaderLine("foo2", "bar2")]
samples3 = header.SamplesInfos(["one", "two", "three"])
hdr3 = header.Header(lines3, samples3)
assert hdr1 == hdr2
assert hdr1 != hdr3
EXPECTED = (
"Header(lines=[HeaderLine('foo', 'bar'), HeaderLine('foo2', 'bar2')], "
"samples=SamplesInfos(names=['one', 'two', 'three'], "
"name_to_idx={'one': 0, 'three': 2, 'two': 1}))"
)
assert str(hdr1) == EXPECTED
with pytest.raises(TypeError):
hash(hdr1)
def test_header_without_lines():
lines = [header.HeaderLine("foo", "bar"), header.HeaderLine("foo2", "bar2")]
samples = header.SamplesInfos(["one", "two", "three"])
hdr = header.Header(lines, samples)
hdr.add_filter_line(vcfpy.OrderedDict([("ID", "PASS")]))
hdr.add_filter_line(vcfpy.OrderedDict([("ID", "q30")]))
assert len(hdr.lines) == 4
hdr2 = header.header_without_lines(hdr, [("foo", "bar"), ("FILTER", "q30")])
assert len(hdr2.lines) == 2
assert hdr2.samples == hdr.samples
def test_header_header_line():
line1 = header.HeaderLine("key", "value")
line2 = header.HeaderLine("key", "value")
line3 = header.HeaderLine("key2", "value")
assert line1 == line2
assert line1 != line3
assert str(line1) == "HeaderLine('key', 'value')"
assert repr(line1) == "HeaderLine('key', 'value')"
assert line1.value == "value"
assert line1.serialize() == "##key=value"
with pytest.raises(TypeError):
hash(line1)
def test_header_alt_allele_header_line():
line1 = header.AltAlleleHeaderLine.from_mapping(
vcfpy.OrderedDict([("ID", "DEL"), ("Description", "deletion")])
)
line2 = header.AltAlleleHeaderLine.from_mapping(
vcfpy.OrderedDict([("ID", "DEL"), ("Description", "deletion")])
)
line3 = header.AltAlleleHeaderLine.from_mapping(
vcfpy.OrderedDict([("ID", "DUP"), ("Description", "duplication")])
)
assert line1 == line2
assert line1 != line3
if sys.version_info < (3, 6):
assert str(line1) == (
"""AltAlleleHeaderLine('ALT', '<ID=DEL,Description="deletion">', """
"""OrderedDict([('ID', 'DEL'), ('Description', 'deletion')]))"""
)
assert repr(line1) == (
"""AltAlleleHeaderLine('ALT', '<ID=DEL,Description="deletion">', """
"""OrderedDict([('ID', 'DEL'), ('Description', 'deletion')]))"""
)
else:
assert str(line1) == (
"AltAlleleHeaderLine('ALT', '<ID=DEL,Description=\"deletion\">', "
"{'ID': 'DEL', 'Description': 'deletion'})"
)
assert repr(line1) == (
"AltAlleleHeaderLine('ALT', '<ID=DEL,Description=\"deletion\">', "
"{'ID': 'DEL', 'Description': 'deletion'})"
)
assert line1.value == '<ID=DEL,Description="deletion">'
assert line1.serialize() == '##ALT=<ID=DEL,Description="deletion">'
with pytest.raises(TypeError):
hash(line1)
def test_header_contig_header_line():
line1 = header.ContigHeaderLine.from_mapping(vcfpy.OrderedDict([("ID", "1"), ("length", 234)]))
line2 = header.ContigHeaderLine.from_mapping(vcfpy.OrderedDict([("ID", "1"), ("length", 234)]))
line3 = header.ContigHeaderLine.from_mapping(vcfpy.OrderedDict([("ID", "2"), ("length", 123)]))
assert line1 == line2
assert line1 != line3
if sys.version_info < (3, 6):
assert str(line1) == (
"ContigHeaderLine('contig', '<ID=1,length=234>', OrderedDict([('ID', '1'), ('length', 234)]))"
)
assert repr(line1) == (
"ContigHeaderLine('contig', '<ID=1,length=234>', OrderedDict([('ID', '1'), ('length', 234)]))"
)
else:
assert str(line1) == (
"ContigHeaderLine('contig', '<ID=1,length=234>', {'ID': '1', 'length': 234})"
)
assert repr(line1) == (
"ContigHeaderLine('contig', '<ID=1,length=234>', {'ID': '1', 'length': 234})"
)
assert line1.value == "<ID=1,length=234>"
assert line1.serialize() == "##contig=<ID=1,length=234>"
with pytest.raises(TypeError):
hash(line1)
def test_header_filter_header_line():
line1 = header.FilterHeaderLine.from_mapping(
vcfpy.OrderedDict([("ID", "PASS"), ("Description", "All filters passed")])
)
line2 = header.FilterHeaderLine.from_mapping(
vcfpy.OrderedDict([("ID", "PASS"), ("Description", "All filters passed")])
)
line3 = header.FilterHeaderLine.from_mapping(
vcfpy.OrderedDict([("ID", "q30"), ("Description", "Phred score <30")])
)
assert line1 == line2
assert line1 != line3
if sys.version_info < (3, 6):
assert str(line1) == (
"FilterHeaderLine('FILTER', '<ID=PASS,Description=\"All filters passed\">', "
"OrderedDict([('ID', 'PASS'), ('Description', 'All filters passed')]))"
)
assert repr(line1) == (
"FilterHeaderLine('FILTER', '<ID=PASS,Description=\"All filters passed\">', "
"OrderedDict([('ID', 'PASS'), ('Description', 'All filters passed')]))"
)
else:
assert str(line1) == (
"FilterHeaderLine('FILTER', '<ID=PASS,Description=\"All filters passed\">', "
"{'ID': 'PASS', 'Description': 'All filters passed'})"
)
assert repr(line1) == (
"FilterHeaderLine('FILTER', '<ID=PASS,Description=\"All filters passed\">', "
"{'ID': 'PASS', 'Description': 'All filters passed'})"
)
assert line1.value == '<ID=PASS,Description="All filters passed">'
assert line1.serialize() == '##FILTER=<ID=PASS,Description="All filters passed">'
with pytest.raises(TypeError):
hash(line1)
def test_header_pedigree_header_line():
line1 = header.PedigreeHeaderLine.from_mapping(
vcfpy.OrderedDict([("ID", "child"), ("Father", "father")])
)
line2 = header.PedigreeHeaderLine.from_mapping(
vcfpy.OrderedDict([("ID", "child"), ("Father", "father")])
)
line3 = header.PedigreeHeaderLine.from_mapping(vcfpy.OrderedDict([("ID", "father")]))
assert line1 == line2
assert line1 != line3
if sys.version_info < (3, 6):
assert str(line1) == (
"PedigreeHeaderLine('PEDIGREE', '<ID=child,Father=father>', "
"OrderedDict([('ID', 'child'), ('Father', 'father')]))"
)
assert repr(line1) == (
"PedigreeHeaderLine('PEDIGREE', '<ID=child,Father=father>', "
"OrderedDict([('ID', 'child'), ('Father', 'father')]))"
)
else:
assert str(line1) == (
"PedigreeHeaderLine('PEDIGREE', '<ID=child,Father=father>', {'ID': 'child', 'Father': 'father'})"
)
assert repr(line1) == (
"PedigreeHeaderLine('PEDIGREE', '<ID=child,Father=father>', {'ID': 'child', 'Father': 'father'})"
)
assert line1.value == "<ID=child,Father=father>"
assert line1.serialize() == "##PEDIGREE=<ID=child,Father=father>"
with pytest.raises(TypeError):
hash(line1)
def test_header_sample_header_line():
line1 = header.SampleHeaderLine.from_mapping(vcfpy.OrderedDict([("ID", "sample1")]))
line2 = header.SampleHeaderLine.from_mapping(vcfpy.OrderedDict([("ID", "sample1")]))
line3 = header.SampleHeaderLine.from_mapping(vcfpy.OrderedDict([("ID", "sample2")]))
assert line1 == line2
assert line1 != line3
if sys.version_info < (3, 6):
assert str(line1) == (
"SampleHeaderLine('SAMPLE', '<ID=sample1>', OrderedDict([('ID', 'sample1')]))"
)
assert repr(line1) == (
"SampleHeaderLine('SAMPLE', '<ID=sample1>', OrderedDict([('ID', 'sample1')]))"
)
else:
assert str(line1) == ("SampleHeaderLine('SAMPLE', '<ID=sample1>', {'ID': 'sample1'})")
assert repr(line1) == ("SampleHeaderLine('SAMPLE', '<ID=sample1>', {'ID': 'sample1'})")
assert line1.value == "<ID=sample1>"
assert line1.serialize() == "##SAMPLE=<ID=sample1>"
with pytest.raises(TypeError):
hash(line1)
def test_header_info_header_line():
line1 = header.InfoHeaderLine.from_mapping(
vcfpy.OrderedDict([("ID", "SVTYPE"), ("Number", 1), ("Type", "String")])
)
line2 = header.InfoHeaderLine.from_mapping(
vcfpy.OrderedDict([("ID", "SVTYPE"), ("Number", 1), ("Type", "String")])
)
line3 = header.InfoHeaderLine.from_mapping(
vcfpy.OrderedDict([("ID", "END"), ("Number", 1), ("Type", "Integer")])
)
assert line1 == line2
assert line1 != line3
if sys.version_info < (3, 6):
assert str(line1) == (
"InfoHeaderLine('INFO', '<ID=SVTYPE,Number=1,Type=String>', "
"OrderedDict([('ID', 'SVTYPE'), ('Number', 1), ('Type', 'String')]))"
)
assert repr(line1) == (
"InfoHeaderLine('INFO', '<ID=SVTYPE,Number=1,Type=String>', "
"OrderedDict([('ID', 'SVTYPE'), ('Number', 1), ('Type', 'String')]))"
)
else:
assert str(line1) == (
"InfoHeaderLine('INFO', '<ID=SVTYPE,Number=1,Type=String>', "
"{'ID': 'SVTYPE', 'Number': 1, 'Type': 'String'})"
)
assert repr(line1) == (
"InfoHeaderLine('INFO', '<ID=SVTYPE,Number=1,Type=String>', "
"{'ID': 'SVTYPE', 'Number': 1, 'Type': 'String'})"
)
assert line1.value == "<ID=SVTYPE,Number=1,Type=String>"
assert line1.serialize() == "##INFO=<ID=SVTYPE,Number=1,Type=String>"
with pytest.raises(TypeError):
hash(line1)
def test_header_format_header_line():
line1 = header.FormatHeaderLine.from_mapping(
vcfpy.OrderedDict([("ID", "AD"), ("Number", "R"), ("Type", "Integer")])
)
line2 = header.FormatHeaderLine.from_mapping(
vcfpy.OrderedDict([("ID", "AD"), ("Number", "R"), ("Type", "Integer")])
)
line3 = header.FormatHeaderLine.from_mapping(
vcfpy.OrderedDict([("ID", "DP"), ("Number", 1), ("Type", "Integer")])
)
assert line1 == line2
assert line1 != line3
if sys.version_info < (3, 6):
assert str(line1) == (
"FormatHeaderLine('FORMAT', '<ID=AD,Number=R,Type=Integer>', "
"OrderedDict([('ID', 'AD'), ('Number', 'R'), ('Type', 'Integer')]))"
)
assert repr(line1) == (
"FormatHeaderLine('FORMAT', '<ID=AD,Number=R,Type=Integer>', "
"OrderedDict([('ID', 'AD'), ('Number', 'R'), ('Type', 'Integer')]))"
)
else:
assert str(line1) == (
"FormatHeaderLine('FORMAT', '<ID=AD,Number=R,Type=Integer>', "
"{'ID': 'AD', 'Number': 'R', 'Type': 'Integer'})"
)
assert repr(line1) == (
"FormatHeaderLine('FORMAT', '<ID=AD,Number=R,Type=Integer>', "
"{'ID': 'AD', 'Number': 'R', 'Type': 'Integer'})"
)
assert line1.value == "<ID=AD,Number=R,Type=Integer>"
assert line1.serialize() == "##FORMAT=<ID=AD,Number=R,Type=Integer>"
with pytest.raises(TypeError):
hash(line1)
def test_header_has_header_line_positive():
lines = [
header.FormatHeaderLine.from_mapping(
vcfpy.OrderedDict([("ID", "DP"), ("Number", "R"), ("Type", "Integer")])
),
header.InfoHeaderLine.from_mapping(
vcfpy.OrderedDict([("ID", "AD"), ("Number", "R"), ("Type", "Integer")])
),
header.FilterHeaderLine.from_mapping(
vcfpy.OrderedDict([("ID", "PASS"), ("Description", "All filters passed")])
),
header.ContigHeaderLine.from_mapping(vcfpy.OrderedDict([("ID", "1"), ("length", 234)])),
]
samples = header.SamplesInfos(["one", "two", "three"])
hdr = header.Header(lines, samples)
assert hdr.has_header_line("FORMAT", "DP")
assert hdr.has_header_line("INFO", "AD")
assert hdr.has_header_line("FILTER", "PASS")
assert hdr.has_header_line("contig", "1")
def test_header_has_header_line_positive_no_samples():
lines = []
samples = header.SamplesInfos(["one", "two", "three"])
hdr = header.Header(lines, samples)
assert not hdr.has_header_line("FORMAT", "DP")
assert not hdr.has_header_line("INFO", "AD")
assert not hdr.has_header_line("FILTER", "PASS")
assert not hdr.has_header_line("contig", "1")
| mit | -5,204,236,028,401,062,000 | 38.837143 | 109 | 0.571828 | false |
Tehsmash/ironic | ironic/drivers/modules/snmp.py | 2 | 24149 | # Copyright 2013,2014 Cray Inc
#
# Authors: David Hewson <[email protected]>
# Stig Telfer <[email protected]>
# Mark Goddard <[email protected]>
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Ironic SNMP power manager.
Provides basic power control using an SNMP-enabled smart power controller.
Uses a pluggable driver model to support devices with different SNMP object
models.
"""
import abc
from oslo.config import cfg
from oslo.utils import importutils
import six
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common.i18n import _LW
from ironic.common import states
from ironic.conductor import task_manager
from ironic.drivers import base
from ironic.openstack.common import log as logging
from ironic.openstack.common import loopingcall
pysnmp = importutils.try_import('pysnmp')
if pysnmp:
from pysnmp.entity.rfc3413.oneliner import cmdgen
from pysnmp import error as snmp_error
from pysnmp.proto import rfc1902
else:
cmdgen = None
snmp_error = None
rfc1902 = None
opts = [
cfg.IntOpt('power_timeout',
default=10,
help='Seconds to wait for power action to be completed')
]
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.register_opts(opts, group='snmp')
SNMP_V1 = '1'
SNMP_V2C = '2c'
SNMP_V3 = '3'
SNMP_PORT = 161
REQUIRED_PROPERTIES = {
'snmp_driver': _("PDU manufacturer driver. Required."),
'snmp_address': _("PDU IPv4 address or hostname. Required."),
'snmp_outlet': _("PDU power outlet index (1-based). Required."),
}
OPTIONAL_PROPERTIES = {
'snmp_version':
_("SNMP protocol version: %(v1)s, %(v2c)s, %(v3)s "
"(optional, default %(v1)s)")
% {"v1": SNMP_V1, "v2c": SNMP_V2C, "v3": SNMP_V3},
'snmp_port':
_("SNMP port, default %(port)d") % {"port": SNMP_PORT},
'snmp_community':
_("SNMP community. Required for versions %(v1)s, %(v2c)s")
% {"v1": SNMP_V1, "v2c": SNMP_V2C},
'snmp_security':
_("SNMP security name. Required for version %(v3)s")
% {"v3": SNMP_V3},
}
COMMON_PROPERTIES = REQUIRED_PROPERTIES.copy()
COMMON_PROPERTIES.update(OPTIONAL_PROPERTIES)
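# Illustrative driver_info for a node using this interface (example values only,
# not defaults):
#     {'snmp_driver': 'apc', 'snmp_address': '192.0.2.10', 'snmp_outlet': '3',
#      'snmp_version': '2c', 'snmp_port': 161, 'snmp_community': 'private'}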
class SNMPClient(object):
"""SNMP client object.
Performs low level SNMP get and set operations. Encapsulates all
interaction with PySNMP to simplify dynamic importing and unit testing.
"""
def __init__(self, address, port, version, community=None, security=None):
self.address = address
self.port = port
self.version = version
if self.version == SNMP_V3:
self.security = security
else:
self.community = community
self.cmd_gen = cmdgen.CommandGenerator()
def _get_auth(self):
"""Return the authorization data for an SNMP request.
:returns: A
:class:`pysnmp.entity.rfc3413.oneliner.cmdgen.CommunityData`
object.
"""
if self.version == SNMP_V3:
# Handling auth/encryption credentials is not (yet) supported.
            # This version supports a security name analogous to community.
return cmdgen.UsmUserData(self.security)
else:
mp_model = 1 if self.version == SNMP_V2C else 0
return cmdgen.CommunityData(self.community, mpModel=mp_model)
def _get_transport(self):
"""Return the transport target for an SNMP request.
:returns: A :class:
`pysnmp.entity.rfc3413.oneliner.cmdgen.UdpTransportTarget` object.
:raises: snmp_error.PySnmpError if the transport address is bad.
"""
# The transport target accepts timeout and retries parameters, which
# default to 1 (second) and 5 respectively. These are deemed sensible
# enough to allow for an unreliable network or slow device.
return cmdgen.UdpTransportTarget((self.address, self.port))
def get(self, oid):
"""Use PySNMP to perform an SNMP GET operation on a single object.
:param oid: The OID of the object to get.
:raises: SNMPFailure if an SNMP request fails.
:returns: The value of the requested object.
"""
try:
results = self.cmd_gen.getCmd(self._get_auth(),
self._get_transport(),
oid)
except snmp_error.PySnmpError as e:
raise exception.SNMPFailure(operation="GET", error=e)
error_indication, error_status, error_index, var_binds = results
if error_indication:
# SNMP engine-level error.
raise exception.SNMPFailure(operation="GET",
error=error_indication)
if error_status:
# SNMP PDU error.
raise exception.SNMPFailure(operation="GET",
error=error_status.prettyPrint())
# We only expect a single value back
name, val = var_binds[0]
return val
def set(self, oid, value):
"""Use PySNMP to perform an SNMP SET operation on a single object.
:param oid: The OID of the object to set.
:param value: The value of the object to set.
:raises: SNMPFailure if an SNMP request fails.
"""
try:
results = self.cmd_gen.setCmd(self._get_auth(),
self._get_transport(),
(oid, value))
except snmp_error.PySnmpError as e:
raise exception.SNMPFailure(operation="SET", error=e)
error_indication, error_status, error_index, var_binds = results
if error_indication:
# SNMP engine-level error.
raise exception.SNMPFailure(operation="SET",
error=error_indication)
if error_status:
# SNMP PDU error.
raise exception.SNMPFailure(operation="SET",
error=error_status.prettyPrint())
def _get_client(snmp_info):
"""Create and return an SNMP client object.
:param snmp_info: SNMP driver info.
:returns: A :class:`SNMPClient` object.
"""
return SNMPClient(snmp_info["address"],
snmp_info["port"],
snmp_info["version"],
snmp_info.get("community"),
snmp_info.get("security"))
@six.add_metaclass(abc.ABCMeta)
class SNMPDriverBase(object):
"""SNMP power driver base class.
The SNMPDriver class hierarchy implements manufacturer-specific MIB actions
over SNMP to interface with different smart power controller products.
"""
oid_enterprise = (1, 3, 6, 1, 4, 1)
retry_interval = 1
def __init__(self, snmp_info):
self.snmp_info = snmp_info
self.client = _get_client(snmp_info)
@abc.abstractmethod
def _snmp_power_state(self):
"""Perform the SNMP request required to get the current power state.
:raises: SNMPFailure if an SNMP request fails.
:returns: power state. One of :class:`ironic.common.states`.
"""
@abc.abstractmethod
def _snmp_power_on(self):
"""Perform the SNMP request required to set the power on.
:raises: SNMPFailure if an SNMP request fails.
"""
@abc.abstractmethod
def _snmp_power_off(self):
"""Perform the SNMP request required to set the power off.
:raises: SNMPFailure if an SNMP request fails.
"""
def _snmp_wait_for_state(self, goal_state):
"""Wait for the power state of the PDU outlet to change.
:param goal_state: The power state to wait for, one of
:class:`ironic.common.states`.
:raises: SNMPFailure if an SNMP request fails.
:returns: power state. One of :class:`ironic.common.states`.
"""
def _poll_for_state(mutable):
"""Called at an interval until the node's power is consistent.
:param mutable: dict object containing "state" and "next_time"
:raises: SNMPFailure if an SNMP request fails.
"""
mutable["state"] = self._snmp_power_state()
if mutable["state"] == goal_state:
raise loopingcall.LoopingCallDone()
mutable["next_time"] += self.retry_interval
if mutable["next_time"] >= CONF.snmp.power_timeout:
mutable["state"] = states.ERROR
raise loopingcall.LoopingCallDone()
# Pass state to the looped function call in a mutable form.
state = {"state": None, "next_time": 0}
timer = loopingcall.FixedIntervalLoopingCall(_poll_for_state,
state)
timer.start(interval=self.retry_interval).wait()
LOG.debug("power state '%s'", state["state"])
return state["state"]
def power_state(self):
"""Returns a node's current power state.
:raises: SNMPFailure if an SNMP request fails.
:returns: power state. One of :class:`ironic.common.states`.
"""
return self._snmp_power_state()
def power_on(self):
"""Set the power state to this node to ON.
:raises: SNMPFailure if an SNMP request fails.
:returns: power state. One of :class:`ironic.common.states`.
"""
self._snmp_power_on()
return self._snmp_wait_for_state(states.POWER_ON)
def power_off(self):
"""Set the power state to this node to OFF.
:raises: SNMPFailure if an SNMP request fails.
:returns: power state. One of :class:`ironic.common.states`.
"""
self._snmp_power_off()
return self._snmp_wait_for_state(states.POWER_OFF)
def power_reset(self):
"""Reset the power to this node.
:raises: SNMPFailure if an SNMP request fails.
:returns: power state. One of :class:`ironic.common.states`.
"""
power_result = self.power_off()
if power_result != states.POWER_OFF:
return states.ERROR
power_result = self.power_on()
if power_result != states.POWER_ON:
return states.ERROR
return power_result
class SNMPDriverSimple(SNMPDriverBase):
"""SNMP driver base class for simple PDU devices.
Here, simple refers to devices which provide a single SNMP object for
controlling the power state of an outlet.
The default OID of the power state object is of the form
<enterprise OID>.<device OID>.<outlet ID>. A different OID may be specified
by overriding the _snmp_oid method in a subclass.
"""
def __init__(self, *args, **kwargs):
super(SNMPDriverSimple, self).__init__(*args, **kwargs)
self.oid = self._snmp_oid()
@abc.abstractproperty
def oid_device(self):
"""Device dependent portion of the power state object OID."""
@abc.abstractproperty
def value_power_on(self):
"""Value representing power on state."""
@abc.abstractproperty
def value_power_off(self):
"""Value representing power off state."""
def _snmp_oid(self):
"""Return the OID of the power state object.
:returns: Power state object OID as a tuple of integers.
"""
outlet = int(self.snmp_info['outlet'])
return self.oid_enterprise + self.oid_device + (outlet,)
def _snmp_power_state(self):
state = self.client.get(self.oid)
# Translate the state to an Ironic power state.
if state == self.value_power_on:
power_state = states.POWER_ON
elif state == self.value_power_off:
power_state = states.POWER_OFF
else:
LOG.warning(_LW("SNMP PDU %(addr)s outlet %(outlet)s: "
"unrecognised power state %(state)s."),
{'addr': self.snmp_info['address'],
'outlet': self.snmp_info['outlet'],
'state': state})
power_state = states.ERROR
return power_state
def _snmp_power_on(self):
value = rfc1902.Integer(self.value_power_on)
self.client.set(self.oid, value)
def _snmp_power_off(self):
value = rfc1902.Integer(self.value_power_off)
self.client.set(self.oid, value)
class SNMPDriverAten(SNMPDriverSimple):
"""SNMP driver class for Aten PDU devices.
SNMP objects for Aten PDU:
1.3.6.1.4.1.21317.1.3.2.2.2.2 Outlet Power
Values: 1=Off, 2=On, 3=Pending, 4=Reset
"""
oid_device = (21317, 1, 3, 2, 2, 2, 2)
value_power_on = 2
value_power_off = 1
def _snmp_oid(self):
"""Return the OID of the power state object.
:returns: Power state object OID as a tuple of integers.
"""
outlet = int(self.snmp_info['outlet'])
return self.oid_enterprise + self.oid_device + (outlet, 0,)
class SNMPDriverAPC(SNMPDriverSimple):
"""SNMP driver class for APC PDU devices.
SNMP objects for APC PDU:
1.3.6.1.4.1.318.1.1.4.4.2.1.3 sPDUOutletCtl
Values: 1=On, 2=Off, 3=PowerCycle, [...more options follow]
"""
oid_device = (318, 1, 1, 4, 4, 2, 1, 3)
value_power_on = 1
value_power_off = 2
class SNMPDriverCyberPower(SNMPDriverSimple):
"""SNMP driver class for CyberPower PDU devices.
SNMP objects for CyberPower PDU:
1.3.6.1.4.1.3808.1.1.3.3.3.1.1.4 ePDUOutletControlOutletCommand
Values: 1=On, 2=Off, 3=PowerCycle, [...more options follow]
"""
    # NOTE(mgoddard): This device driver is currently untested; it has been
    #                 implemented based upon its published MIB documentation.
oid_device = (3808, 1, 1, 3, 3, 3, 1, 1, 4)
value_power_on = 1
value_power_off = 2
class SNMPDriverTeltronix(SNMPDriverSimple):
"""SNMP driver class for Teltronix PDU devices.
SNMP objects for Teltronix PDU:
1.3.6.1.4.1.23620.1.2.2.1.4 Outlet Power
Values: 1=Off, 2=On
"""
oid_device = (23620, 1, 2, 2, 1, 4)
value_power_on = 2
value_power_off = 1
class SNMPDriverEatonPower(SNMPDriverBase):
"""SNMP driver class for Eaton Power PDU.
The Eaton power PDU does not follow the model of SNMPDriverSimple as it
uses multiple SNMP objects.
SNMP objects for Eaton Power PDU
1.3.6.1.4.1.534.6.6.7.6.6.1.2.<outlet ID> outletControlStatus
Read 0=off, 1=on, 2=pending off, 3=pending on
1.3.6.1.4.1.534.6.6.7.6.6.1.3.<outlet ID> outletControlOffCmd
Write 0 for immediate power off
1.3.6.1.4.1.534.6.6.7.6.6.1.4.<outlet ID> outletControlOnCmd
Write 0 for immediate power on
"""
    # NOTE(mgoddard): This device driver is currently untested; it has been
    #                 implemented based upon its published MIB documentation.
oid_device = (534, 6, 6, 7, 6, 6, 1)
oid_status = (2,)
oid_poweron = (3,)
oid_poweroff = (4,)
status_off = 0
status_on = 1
status_pending_off = 2
status_pending_on = 3
value_power_on = 0
value_power_off = 0
def __init__(self, *args, **kwargs):
super(SNMPDriverEatonPower, self).__init__(*args, **kwargs)
# Due to its use of different OIDs for different actions, we only form
# an OID that holds the common substring of the OIDs for power
# operations.
self.oid_base = self.oid_enterprise + self.oid_device
def _snmp_oid(self, oid):
"""Return the OID for one of the outlet control objects.
:param oid: The action-dependent portion of the OID, as a tuple of
integers.
:returns: The full OID as a tuple of integers.
"""
outlet = int(self.snmp_info['outlet'])
return self.oid_base + oid + (outlet,)
def _snmp_power_state(self):
oid = self._snmp_oid(self.oid_status)
state = self.client.get(oid)
# Translate the state to an Ironic power state.
if state in (self.status_on, self.status_pending_off):
power_state = states.POWER_ON
elif state in (self.status_off, self.status_pending_on):
power_state = states.POWER_OFF
else:
LOG.warning(_LW("Eaton Power SNMP PDU %(addr)s outlet %(outlet)s: "
"unrecognised power state %(state)s."),
{'addr': self.snmp_info['address'],
'outlet': self.snmp_info['outlet'],
'state': state})
power_state = states.ERROR
return power_state
def _snmp_power_on(self):
oid = self._snmp_oid(self.oid_poweron)
value = rfc1902.Integer(self.value_power_on)
self.client.set(oid, value)
def _snmp_power_off(self):
oid = self._snmp_oid(self.oid_poweroff)
value = rfc1902.Integer(self.value_power_off)
self.client.set(oid, value)
# A dictionary of supported drivers keyed by snmp_driver attribute
DRIVER_CLASSES = {
'apc': SNMPDriverAPC,
'aten': SNMPDriverAten,
'cyberpower': SNMPDriverCyberPower,
'eatonpower': SNMPDriverEatonPower,
'teltronix': SNMPDriverTeltronix
}
def _parse_driver_info(node):
"""Parse a node's driver_info values.
Return a dictionary of validated driver information, usable for
SNMPDriver object creation.
:param node: An Ironic node object.
:returns: SNMP driver info.
:raises: MissingParameterValue if any required parameters are missing.
:raises: InvalidParameterValue if any parameters are invalid.
"""
info = node.driver_info or {}
missing_info = [key for key in REQUIRED_PROPERTIES if not info.get(key)]
if missing_info:
raise exception.MissingParameterValue(_(
"SNMP driver requires the following parameters to be set in "
"node's driver_info: %s.") % missing_info)
snmp_info = {}
# Validate PDU driver type
snmp_info['driver'] = info.get('snmp_driver')
if snmp_info['driver'] not in DRIVER_CLASSES:
raise exception.InvalidParameterValue(_(
"SNMPPowerDriver: unknown driver: '%s'") % snmp_info['driver'])
# In absence of a version, default to SNMPv1
snmp_info['version'] = info.get('snmp_version', SNMP_V1)
if snmp_info['version'] not in (SNMP_V1, SNMP_V2C, SNMP_V3):
raise exception.InvalidParameterValue(_(
"SNMPPowerDriver: unknown SNMP version: '%s'") %
snmp_info['version'])
# In absence of a configured UDP port, default to the standard port
port_str = info.get('snmp_port', SNMP_PORT)
try:
snmp_info['port'] = int(port_str)
except ValueError:
raise exception.InvalidParameterValue(_(
"SNMPPowerDriver: SNMP UDP port must be numeric: %s") % port_str)
if snmp_info['port'] < 1 or snmp_info['port'] > 65535:
raise exception.InvalidParameterValue(_(
"SNMPPowerDriver: SNMP UDP port out of range: %d")
% snmp_info['port'])
# Extract version-dependent required parameters
if snmp_info['version'] in (SNMP_V1, SNMP_V2C):
if 'snmp_community' not in info:
raise exception.MissingParameterValue(_(
"SNMP driver requires snmp_community to be set for version "
"%s.") % snmp_info['version'])
snmp_info['community'] = info.get('snmp_community')
elif snmp_info['version'] == SNMP_V3:
if 'snmp_security' not in info:
raise exception.MissingParameterValue(_(
"SNMP driver requires snmp_security to be set for version %s.")
% (SNMP_V3))
snmp_info['security'] = info.get('snmp_security')
# Target PDU IP address and power outlet identification
snmp_info['address'] = info.get('snmp_address')
snmp_info['outlet'] = info.get('snmp_outlet')
return snmp_info
def _get_driver(node):
"""Return a new SNMP driver object of the correct type for `node`.
:param node: Single node object.
:raises: InvalidParameterValue if node power config is incomplete or
invalid.
:returns: SNMP driver object.
"""
snmp_info = _parse_driver_info(node)
cls = DRIVER_CLASSES[snmp_info['driver']]
return cls(snmp_info)
class SNMPPower(base.PowerInterface):
"""SNMP Power Interface.
This PowerInterface class provides a mechanism for controlling the power
state of a physical device using an SNMP-enabled smart power controller.
"""
def get_properties(self):
"""Return the properties of the interface.
:returns: dictionary of <property name>:<property description> entries.
"""
return COMMON_PROPERTIES
def validate(self, task):
"""Check that node.driver_info contains the requisite fields.
:raises: MissingParameterValue if required SNMP parameters are missing.
:raises: InvalidParameterValue if SNMP parameters are invalid.
"""
_parse_driver_info(task.node)
def get_power_state(self, task):
"""Get the current power state.
Poll the SNMP device for the current power state of the node.
        :param task: An instance of `ironic.manager.task_manager.TaskManager`.
:raises: MissingParameterValue if required SNMP parameters are missing.
:raises: InvalidParameterValue if SNMP parameters are invalid.
:raises: SNMPFailure if an SNMP request fails.
:returns: power state. One of :class:`ironic.common.states`.
"""
driver = _get_driver(task.node)
power_state = driver.power_state()
return power_state
@task_manager.require_exclusive_lock
def set_power_state(self, task, pstate):
"""Turn the power on or off.
Set the power state of a node.
        :param task: An instance of `ironic.manager.task_manager.TaskManager`.
:param pstate: Either POWER_ON or POWER_OFF from :class:
`ironic.common.states`.
:raises: MissingParameterValue if required SNMP parameters are missing.
:raises: InvalidParameterValue if SNMP parameters are invalid or
`pstate` is invalid.
:raises: PowerStateFailure if the final power state of the node is not
as requested after the timeout.
:raises: SNMPFailure if an SNMP request fails.
"""
driver = _get_driver(task.node)
if pstate == states.POWER_ON:
state = driver.power_on()
elif pstate == states.POWER_OFF:
state = driver.power_off()
else:
raise exception.InvalidParameterValue(_("set_power_state called "
"with invalid power "
"state %s.") % str(pstate))
if state != pstate:
raise exception.PowerStateFailure(pstate=pstate)
@task_manager.require_exclusive_lock
def reboot(self, task):
"""Cycles the power to a node.
        :param task: An instance of `ironic.manager.task_manager.TaskManager`.
:raises: MissingParameterValue if required SNMP parameters are missing.
:raises: InvalidParameterValue if SNMP parameters are invalid.
:raises: PowerStateFailure if the final power state of the node is not
POWER_ON after the timeout.
:raises: SNMPFailure if an SNMP request fails.
"""
driver = _get_driver(task.node)
state = driver.power_reset()
if state != states.POWER_ON:
raise exception.PowerStateFailure(pstate=states.POWER_ON)
| apache-2.0 | -8,906,259,176,969,156,000 | 33.847042 | 79 | 0.616216 | false |
lamby/pkg-rst2pdf | rst2pdf/tests/input/sphinx-issue172/conf.py | 9 | 7199 | # -*- coding: utf-8 -*-
#
# Sphinx markup documentation build configuration file, created by
# sphinx-quickstart on Tue Aug 18 22:54:33 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['rst2pdf.pdfbuilder']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'sphinx'
copyright = u'2009, RA'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'test'
# The full version, including alpha/beta/rc tags.
release = 'test'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'sphinx'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'sphinx.tex', u'sphinx Documentation',
u'RA', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# -- Options for PDF output --------------------------------------------------
# Grouping the document tree into PDF files. List of tuples
# (source start file, target name, title, author).
pdf_documents = [
('index', u'MyProject', u'My Project', u'Author Name'),
]
# A comma-separated list of custom stylesheets. Example:
pdf_stylesheets = ['sphinx']
# Create a compressed PDF
# Use True/False or 1/0
# Example: compressed=True
#pdf_compressed=False
# A colon-separated list of folders to search for fonts. Example:
# pdf_font_path=['/usr/share/fonts', '/usr/share/texmf-dist/fonts/']
# Language to be used for hyphenation support
pdf_language="en_US"
# If false, no index is generated.
pdf_use_index = True
# If false, no modindex is generated.
pdf_use_modindex = True
# If false, no coverpage is generated.
pdf_use_coverpage = False
pdf_break_level = 1
pdf_verbosity=0
pdf_invariant = True
| mit | 7,448,587,530,387,755,000 | 30.3 | 80 | 0.708154 | false |
brainelectronics/towerdefense | tests/text/EMPTY.py | 28 | 1136 | #!/usr/bin/env python
'''Test that an empty document doesn't break.
'''
__docformat__ = 'restructuredtext'
__noninteractive = True
import unittest
from pyglet import gl
from pyglet import graphics
from pyglet.text import document
from pyglet.text import layout
from pyglet import window
class TestWindow(window.Window):
def __init__(self, doctype, *args, **kwargs):
super(TestWindow, self).__init__(*args, **kwargs)
self.batch = graphics.Batch()
self.document = doctype()
self.layout = layout.IncrementalTextLayout(self.document,
self.width, self.height, batch=self.batch)
def on_draw(self):
gl.glClearColor(1, 1, 1, 1)
self.clear()
self.batch.draw()
class TestCase(unittest.TestCase):
def testUnformatted(self):
self.window = TestWindow(document.UnformattedDocument)
self.window.dispatch_events()
self.window.close()
def testFormatted(self):
self.window = TestWindow(document.FormattedDocument)
self.window.dispatch_events()
self.window.close()
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -3,185,301,283,785,553,000 | 24.818182 | 65 | 0.661092 | false |
benesch/adspygoogle.dfp | adspygoogle/common/Client.py | 3 | 10125 | #!/usr/bin/python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interface for accessing all other services."""
__author__ = '[email protected] (Stan Grinberg)'
import datetime
import os
import pickle
import warnings
from adspygoogle.common import PYXML
from adspygoogle.common import SanityCheck
from adspygoogle.common import Utils
from adspygoogle.common.Errors import ValidationError
# The values in _DEFAULT_CONFIG will be used to populate a user's configuration
# if any of these keys was not provided.
_DEFAULT_CONFIG = {
'proxy': None,
'xml_parser': PYXML,
'debug': 'n',
'raw_debug': 'n',
'xml_log': 'y',
'request_log': 'y',
'raw_response': 'n',
'strict': 'y',
'auth_token_epoch': 0,
'auth_type': '',
'pretty_xml': 'y',
'compress': 'y',
'access': '',
'wrap_in_tuple': 'y'
}
# The _OAUTH_2_AUTH_KEYS are the keys in the authentication dictionary that are
# used to construct an OAuth 2.0 credential.
_OAUTH_2_AUTH_KEYS = set(['clientId', 'clientSecret', 'refreshToken'])
# The web address for generating OAuth 2.0 credentials at Google.
_GOOGLE_OAUTH2_ENDPOINT = 'https://accounts.google.com/o/oauth2/token'
class Client(object):
"""Provides entry point to all web services.
Allows instantiation of all web services.
"""
home = os.getcwd()
auth_pkl = ''
config_pkl = ''
def __init__(self, headers=None, config=None, path=None):
"""Inits Client.
Args:
[optional]
headers: dict Object with populated authentication credentials.
config: dict Object with client configuration values.
path: str Relative or absolute path to home directory (i.e. location of
pickles and logs/).
"""
self._headers = headers or {}
self._config = config or self._SetMissingDefaultConfigValues()
def _LoadAuthCredentials(self):
"""Load existing authentication credentials from auth.pkl.
Returns:
dict Dictionary object with populated authentication credentials.
Raises:
ValidationError: if authentication data is missing.
"""
auth = {}
if os.path.exists(self.__class__.auth_pkl):
fh = open(self.__class__.auth_pkl, 'r')
try:
auth = pickle.load(fh)
finally:
fh.close()
if not auth:
msg = 'Authentication data is missing.'
raise ValidationError(msg)
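    # If OAuth 2.0 keys are present, fold them into a single OAuth2Credentials
    # object and drop the raw clientId/clientSecret/refreshToken entries.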
if _OAUTH_2_AUTH_KEYS.issubset(set(auth.keys())):
from oauth2client.client import OAuth2Credentials
auth['oauth2credentials'] = OAuth2Credentials(
None, auth['clientId'], auth['clientSecret'], auth['refreshToken'],
datetime.datetime(1980, 1, 1, 12), _GOOGLE_OAUTH2_ENDPOINT,
'Google Ads* Python Client Library')
for auth_key in _OAUTH_2_AUTH_KEYS:
del auth[auth_key]
return auth
def _WriteUpdatedAuthValue(self, key, new_value):
"""Write updated authentication value for a key in auth.pkl.
Args:
key: str Key to update.
new_value: str New value to update the key with.
"""
auth = self._LoadAuthCredentials()
auth[key] = new_value
# Only write to an existing pickle.
if os.path.exists(self.__class__.auth_pkl):
fh = open(self.__class__.auth_pkl, 'w')
try:
pickle.dump(auth, fh)
finally:
fh.close()
def _LoadConfigValues(self):
"""Load existing configuration values from config.pkl.
Returns:
dict Dictionary object with populated configuration values.
"""
config = {}
if os.path.exists(self.__class__.config_pkl):
fh = open(self.__class__.config_pkl, 'r')
try:
config = pickle.load(fh)
finally:
fh.close()
if not config:
# Proceed to set default config values.
pass
return config
def _SetMissingDefaultConfigValues(self, config=None):
"""Set default configuration values for missing elements in the config dict.
Args:
config: dict Object with client configuration values.
Returns:
dict Given config dictionary with default values added in.
"""
if config is None: config = {}
for key in _DEFAULT_CONFIG:
if key not in config:
config[key] = _DEFAULT_CONFIG[key]
return config
def GetAuthCredentials(self):
"""Return authentication credentials.
Returns:
      dict Authentication credentials.
"""
return self._headers
def GetConfigValues(self):
"""Return configuration values.
Returns:
dict Configuration values.
"""
return self._config
def SetDebug(self, new_state):
"""Temporarily change debug mode for a given Client instance.
Args:
new_state: bool New state of the debug mode.
"""
self._config['debug'] = Utils.BoolTypeConvert(new_state, str)
def __GetDebug(self):
"""Return current state of the debug mode.
Returns:
bool State of the debug mode.
"""
return self._config['debug']
def __SetDebug(self, new_state):
"""Temporarily change debug mode for a given Client instance.
Args:
new_state: bool New state of the debug mode.
"""
self._config['debug'] = Utils.BoolTypeConvert(new_state, str)
debug = property(__GetDebug, __SetDebug)
def __GetRawDebug(self):
"""Return current state of the raw debug mode.
Returns:
      bool State of the raw debug mode.
"""
return self._config['raw_debug']
def __SetRawDebug(self, new_state):
"""Temporarily change raw debug mode for a given Client instance.
Args:
new_state: bool New state of the raw debug mode.
"""
self._config['raw_debug'] = Utils.BoolTypeConvert(new_state, str)
raw_debug = property(__GetRawDebug, __SetRawDebug)
def __GetUseStrict(self):
"""Return current state of the strictness mode.
Returns:
str State of the strictness mode.
"""
return self._config['strict']
def __SetUseStrict(self, new_state):
"""Temporarily change strictness mode for a given Client instance.
Args:
new_state: bool New state of the strictness mode.
"""
self._config['strict'] = Utils.BoolTypeConvert(new_state, str)
strict = property(__GetUseStrict, __SetUseStrict)
def __GetXmlParser(self):
"""Return current state of the xml parser in use.
Returns:
bool State of the xml parser in use.
"""
return self._config['xml_parser']
def __SetXmlParser(self, new_state):
"""Temporarily change xml parser in use for a given Client instance.
Args:
new_state: bool New state of the xml parser to use.
"""
SanityCheck.ValidateConfigXmlParser(new_state)
self._config['xml_parser'] = new_state
xml_parser = property(__GetXmlParser, __SetXmlParser)
def CallRawMethod(self, soap_message, url, http_proxy):
"""Call API method directly, using raw SOAP message.
For API calls performed with this method, outgoing data is not run through
library's validation logic.
Args:
soap_message: str SOAP XML message.
url: str URL of the API service for the method to call.
http_proxy: str HTTP proxy to use for this API call.
Returns:
tuple Response from the API method (SOAP XML response message).
"""
pass
def __SetOAuth2Credentials(self, credentials):
"""Sets the OAuth2 credentials into the config.
Args:
credentials: object OAuth2 credentials.
"""
self._headers['oauth2credentials'] = credentials
def __GetOAuth2Credentials(self):
"""Retrieves the OAuth2 credentials from the config.
Returns:
object The OAuth2 credentials.
"""
return self._headers['oauth2credentials']
oauth2credentials = property(__GetOAuth2Credentials, __SetOAuth2Credentials)
def __SetCaCertsFile(self, ca_certs_file):
"""Sets the certificates file to use for validating SSL certificates.
WARNING: Using this feature will monkey-patch a new HTTPS class into
httplib. Be aware that any other part of your application that uses httplib,
directly or indirectly, will be affected by its use.
Args:
ca_certs_file: string Path to a file storing trusted certificates. If this
                     variable is cleared (i.e. set to None or something that
evaluates to False), the original httplib.HTTPS class will
be put back in place and certificate validation will cease.
"""
try:
from https import Https
if not ca_certs_file: ca_certs_file = None
Https.MonkeyPatchHttplib(ca_certs_file)
except ImportError:
warnings.warn('Your Python installation does not support SSL certificate'
' validation!')
def __GetCaCertsFile(self):
"""Retrieves the current trusted certificates source file path."""
try:
from https import Https
return Https.GetCurrentCertsFile()
except ImportError:
warnings.warn('Your Python installation does not support SSL certificate'
' validation!')
ca_certs = property(__GetCaCertsFile, __SetCaCertsFile)
def __SetUsingCompression(self, is_using):
"""Sets the config to use HTTP message compression.
Args:
is_using: boolean Whether the client is using HTTP compression or not.
"""
self._config['compress'] = is_using
def __GetUsingCompression(self):
"""Returns if the client is currently set to use HTTP compression.
Returns:
      boolean Whether this client is using HTTP compression or not.
"""
return self._config['compress']
compress = property(__GetUsingCompression, __SetUsingCompression)
| apache-2.0 | -7,514,757,267,147,323,000 | 28.347826 | 80 | 0.666568 | false |
mszewczy/odoo | addons/portal/tests/test_portal.py | 198 | 14169 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.mail.tests.common import TestMail
from openerp.exceptions import AccessError
from openerp.osv.orm import except_orm
from openerp.tools.misc import mute_logger
class test_portal(TestMail):
@classmethod
def setUpClass(cls):
super(test_portal, cls).setUpClass()
cr, uid = cls.cr, cls.uid
# Find Portal group
cls.group_portal_id = cls.env.ref('base.group_portal').id
# Create Chell (portal user)
cls.user_chell_id = cls.res_users.create(cr, uid, {
'name': 'Chell Gladys',
'login': 'chell',
'email': '[email protected]',
'groups_id': [(6, 0, [cls.group_portal_id])]
}, {'no_reset_password': True})
cls.user_chell = cls.res_users.browse(cr, uid, cls.user_chell_id)
cls.partner_chell_id = cls.user_chell.partner_id.id
# Create a PigsPortal group
cls.group_port_id = cls.mail_group.create(cr, uid,
{'name': 'PigsPortal', 'public': 'groups', 'group_public_id': cls.group_portal_id},
{'mail_create_nolog': True})
# Set an email address for the user running the tests, used as Sender for outgoing mails
cls.res_users.write(cr, uid, uid, {'email': 'test@localhost'})
@mute_logger('openerp.addons.base.ir.ir_model', 'openerp.models')
def test_00_mail_access_rights(self):
""" Test basic mail_message and mail_group access rights for portal users. """
cr, uid = self.cr, self.uid
mail_compose = self.registry('mail.compose.message')
# Prepare group: Pigs and PigsPortal
pigs_msg_id = self.mail_group.message_post(cr, uid, self.group_pigs_id, body='Message')
port_msg_id = self.mail_group.message_post(cr, uid, self.group_port_id, body='Message')
# Do: Chell browses Pigs -> ko, employee group
chell_pigs = self.mail_group.browse(cr, self.user_chell_id, self.group_pigs_id)
with self.assertRaises(except_orm):
trigger_read = chell_pigs.name
        # Do: Chell posts a message on Pigs; it crashes because she can neither write on the group nor is she a follower
with self.assertRaises(AccessError):
self.mail_group.message_post(cr, self.user_chell_id, self.group_pigs_id, body='Message')
        # Do: Chell is added to Pigs followers and browses it -> ok for messages, ko for partners (no read permission)
self.mail_group.message_subscribe_users(cr, uid, [self.group_pigs_id], [self.user_chell_id])
chell_pigs = self.mail_group.browse(cr, self.user_chell_id, self.group_pigs_id)
trigger_read = chell_pigs.name
for message in chell_pigs.message_ids:
trigger_read = message.subject
for partner in chell_pigs.message_follower_ids:
if partner.id == self.partner_chell_id:
# Chell can read her own partner record
continue
with self.assertRaises(except_orm):
trigger_read = partner.name
        # Do: Chell comments on Pigs, ok because she is now in the followers
self.mail_group.message_post(cr, self.user_chell_id, self.group_pigs_id, body='I love Pigs')
        # Do: Chell creates a mail.compose.message record on Pigs, because she uses the wizard
compose_id = mail_compose.create(cr, self.user_chell_id,
{'subject': 'Subject', 'body': 'Body text', 'partner_ids': []},
{'default_composition_mode': 'comment', 'default_model': 'mail.group', 'default_res_id': self.group_pigs_id})
mail_compose.send_mail(cr, self.user_chell_id, [compose_id])
# Do: Chell replies to a Pigs message using the composer
compose_id = mail_compose.create(cr, self.user_chell_id,
{'subject': 'Subject', 'body': 'Body text'},
{'default_composition_mode': 'comment', 'default_parent_id': pigs_msg_id})
mail_compose.send_mail(cr, self.user_chell_id, [compose_id])
# Do: Chell browses PigsPortal -> ok because groups security, ko for partners (no read permission)
chell_port = self.mail_group.browse(cr, self.user_chell_id, self.group_port_id)
trigger_read = chell_port.name
for message in chell_port.message_ids:
trigger_read = message.subject
for partner in chell_port.message_follower_ids:
with self.assertRaises(except_orm):
trigger_read = partner.name
def test_10_mail_invite(self):
cr, uid = self.cr, self.uid
mail_invite = self.registry('mail.wizard.invite')
base_url = self.registry('ir.config_parameter').get_param(cr, uid, 'web.base.url', default='')
        # Carine Poilvache, who has an email address, should receive emails for comments and incoming emails
partner_carine_id = self.res_partner.create(cr, uid, {'name': 'Carine Poilvache', 'email': 'c@c'})
# Do: create a mail_wizard_invite, validate it
self._init_mock_build_email()
context = {'default_res_model': 'mail.group', 'default_res_id': self.group_pigs_id}
mail_invite_id = mail_invite.create(cr, uid, {'partner_ids': [(4, partner_carine_id)], 'send_mail': True}, context)
mail_invite.add_followers(cr, uid, [mail_invite_id])
        # Test: Pigs followers should contain Admin and Carine
group_pigs = self.mail_group.browse(cr, uid, self.group_pigs_id)
follower_ids = [follower.id for follower in group_pigs.message_follower_ids]
self.assertEqual(set(follower_ids), set([self.partner_admin_id, partner_carine_id]), 'Pigs followers after invite is incorrect')
# Test: partner must have been prepared for signup
partner_carine = self.res_partner.browse(cr, uid, partner_carine_id)
self.assertTrue(partner_carine.signup_valid, 'partner has not been prepared for signup')
self.assertTrue(base_url in partner_carine.signup_url, 'signup url is incorrect')
self.assertTrue(cr.dbname in partner_carine.signup_url, 'signup url is incorrect')
self.assertTrue(partner_carine.signup_token in partner_carine.signup_url, 'signup url is incorrect')
# Test: (pretend to) send email and check subject, body
        self.assertEqual(len(self._build_email_kwargs_list), 1, 'sent email number incorrect, should be only for Carine')
for sent_email in self._build_email_kwargs_list:
self.assertEqual(sent_email.get('subject'), 'Invitation to follow Discussion group: Pigs',
'invite: subject of invitation email is incorrect')
self.assertIn('Administrator invited you to follow Discussion group document: Pigs', sent_email.get('body'),
'invite: body of invitation email is incorrect')
self.assertIn(partner_carine.signup_token, sent_email.get('body'),
'invite: body of invitation email does not contain signup token')
def test_20_notification_url(self):
""" Tests designed to test the URL added in notification emails. """
cr, uid, group_pigs = self.cr, self.uid, self.group_pigs
# Partner data
partner_raoul = self.res_partner.browse(cr, uid, self.partner_raoul_id)
partner_bert_id = self.res_partner.create(cr, uid, {'name': 'bert'})
partner_bert = self.res_partner.browse(cr, uid, partner_bert_id)
# Mail data
mail_mail_id = self.mail_mail.create(cr, uid, {'state': 'exception'})
mail = self.mail_mail.browse(cr, uid, mail_mail_id)
# Test: link for nobody -> None
url = self.mail_mail._get_partner_access_link(cr, uid, mail)
self.assertEqual(url, None,
'notification email: mails not send to a specific partner should not have any URL')
# Test: link for partner -> signup URL
url = self.mail_mail._get_partner_access_link(cr, uid, mail, partner=partner_bert)
self.assertIn(partner_bert.signup_token, url,
'notification email: mails send to a not-user partner should contain the signup token')
# Test: link for user -> signin
url = self.mail_mail._get_partner_access_link(cr, uid, mail, partner=partner_raoul)
self.assertIn('action=mail.action_mail_redirect', url,
'notification email: link should contain the redirect action')
self.assertIn('login=%s' % partner_raoul.user_ids[0].login, url,
'notification email: link should contain the user login')
@mute_logger('openerp.addons.mail.mail_thread', 'openerp.models')
def test_21_inbox_redirection(self):
""" Tests designed to test the inbox redirection of emails notification URLs. """
cr, uid, user_admin, group_pigs = self.cr, self.uid, self.user_admin, self.group_pigs
model, act_id = self.ir_model_data.get_object_reference(cr, uid, 'mail', 'action_mail_inbox_feeds')
model, port_act_id = self.ir_model_data.get_object_reference(cr, uid, 'portal', 'action_mail_inbox_feeds_portal')
# Data: post a message on pigs
msg_id = self.group_pigs.message_post(body='My body', partner_ids=[self.partner_bert_id, self.partner_chell_id], type='comment', subtype='mail.mt_comment')
# No specific parameters -> should redirect to Inbox
action = self.mail_thread.message_redirect_action(cr, self.user_raoul_id, {'params': {}})
self.assertEqual(action.get('type'), 'ir.actions.client',
'URL redirection: action without parameters should redirect to client action Inbox')
self.assertEqual(action.get('id'), act_id,
'URL redirection: action without parameters should redirect to client action Inbox')
        # Raoul has read access to Pigs -> should redirect to form view of Pigs
action = self.mail_thread.message_redirect_action(cr, self.user_raoul_id, {'params': {'message_id': msg_id}})
self.assertEqual(action.get('type'), 'ir.actions.act_window',
'URL redirection: action with message_id for read-accredited user should redirect to Pigs')
self.assertEqual(action.get('res_id'), group_pigs.id,
'URL redirection: action with message_id for read-accredited user should redirect to Pigs')
# Bert has no read access to Pigs -> should redirect to Inbox
action = self.mail_thread.message_redirect_action(cr, self.user_bert_id, {'params': {'message_id': msg_id}})
self.assertEqual(action.get('type'), 'ir.actions.client',
'URL redirection: action without parameters should redirect to client action Inbox')
self.assertEqual(action.get('id'), act_id,
'URL redirection: action without parameters should redirect to client action Inbox')
# Chell has no read access to pigs -> should redirect to Portal Inbox
action = self.mail_thread.message_redirect_action(cr, self.user_chell_id, {'params': {'message_id': msg_id}})
self.assertEqual(action.get('type'), 'ir.actions.client',
'URL redirection: action without parameters should redirect to client action Inbox')
self.assertEqual(action.get('id'), port_act_id,
'URL redirection: action without parameters should redirect to client action Inbox')
def test_30_message_read(self):
cr, uid, group_port_id = self.cr, self.uid, self.group_port_id
# Data: custom subtypes
mt_group_public_id = self.mail_message_subtype.create(cr, uid, {'name': 'group_public', 'description': 'Group changed'})
self.ir_model_data.create(cr, uid, {'name': 'mt_group_public', 'model': 'mail.message.subtype', 'module': 'mail', 'res_id': mt_group_public_id})
# Data: post messages with various subtypes
msg1_id = self.mail_group.message_post(cr, uid, group_port_id, body='Body1', type='comment', subtype='mail.mt_comment')
msg2_id = self.mail_group.message_post(cr, uid, group_port_id, body='Body2', type='comment', subtype='mail.mt_group_public')
msg3_id = self.mail_group.message_post(cr, uid, group_port_id, body='Body3', type='comment', subtype='mail.mt_comment')
msg4_id = self.mail_group.message_post(cr, uid, group_port_id, body='Body4', type='comment')
# msg5_id = self.mail_group.message_post(cr, uid, group_port_id, body='Body5', type='notification')
# Do: Chell search messages: should not see internal notes (comment without subtype)
msg_ids = self.mail_message.search(cr, self.user_chell_id, [('model', '=', 'mail.group'), ('res_id', '=', group_port_id)])
self.assertEqual(set(msg_ids), set([msg1_id, msg2_id, msg3_id]),
'mail_message: portal user has access to messages he should not read')
# Do: Chell read messages she can read
self.mail_message.read(cr, self.user_chell_id, msg_ids, ['body', 'type', 'subtype_id'])
# Do: Chell read a message she should not be able to read
with self.assertRaises(except_orm):
self.mail_message.read(cr, self.user_chell_id, [msg4_id], ['body', 'type', 'subtype_id'])
| agpl-3.0 | -6,518,190,989,426,405,000 | 59.551282 | 163 | 0.639636 | false |
hackerkid/zulip | zerver/management/commands/merge_streams.py | 3 | 3556 | from argparse import ArgumentParser
from typing import Any, List
from zerver.lib.actions import (
bulk_add_subscriptions,
bulk_remove_subscriptions,
do_deactivate_stream,
)
from zerver.lib.cache import cache_delete_many, to_dict_cache_key_id
from zerver.lib.management import ZulipBaseCommand
from zerver.models import Message, Subscription, get_stream
def bulk_delete_cache_keys(message_ids_to_clear: List[int]) -> None:
while len(message_ids_to_clear) > 0:
batch = message_ids_to_clear[0:5000]
keys_to_delete = [to_dict_cache_key_id(message_id) for message_id in batch]
cache_delete_many(keys_to_delete)
message_ids_to_clear = message_ids_to_clear[5000:]
class Command(ZulipBaseCommand):
help = """Merge two streams."""
def add_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument("stream_to_keep", help="name of stream to keep")
parser.add_argument(
"stream_to_destroy", help="name of stream to merge into the stream being kept"
)
self.add_realm_args(parser, True)
def handle(self, *args: Any, **options: str) -> None:
realm = self.get_realm(options)
assert realm is not None # Should be ensured by parser
stream_to_keep = get_stream(options["stream_to_keep"], realm)
stream_to_destroy = get_stream(options["stream_to_destroy"], realm)
recipient_to_destroy = stream_to_destroy.recipient
recipient_to_keep = stream_to_keep.recipient
# The high-level approach here is to move all the messages to
# the surviving stream, deactivate all the subscriptions on
# the stream to be removed and deactivate the stream, and add
# new subscriptions to the stream to keep for any users who
# were only on the now-deactivated stream.
# Move the messages, and delete the old copies from caches.
message_ids_to_clear = list(
Message.objects.filter(recipient=recipient_to_destroy).values_list("id", flat=True)
)
count = Message.objects.filter(recipient=recipient_to_destroy).update(
recipient=recipient_to_keep
)
print(f"Moved {count} messages")
bulk_delete_cache_keys(message_ids_to_clear)
# Move the Subscription objects. This algorithm doesn't
# preserve any stream settings/colors/etc. from the stream
# being destroyed, but it's convenient.
existing_subs = Subscription.objects.filter(recipient=recipient_to_keep)
users_already_subscribed = {sub.user_profile_id: sub.active for sub in existing_subs}
subs_to_deactivate = Subscription.objects.filter(
recipient=recipient_to_destroy, active=True
)
users_to_activate = [
sub.user_profile
for sub in subs_to_deactivate
if not users_already_subscribed.get(sub.user_profile_id, False)
]
if len(subs_to_deactivate) > 0:
print(f"Deactivating {len(subs_to_deactivate)} subscriptions")
bulk_remove_subscriptions(
[sub.user_profile for sub in subs_to_deactivate],
[stream_to_destroy],
self.get_client(),
acting_user=None,
)
do_deactivate_stream(stream_to_destroy, acting_user=None)
if len(users_to_activate) > 0:
print(f"Adding {len(users_to_activate)} subscriptions")
bulk_add_subscriptions(realm, [stream_to_keep], users_to_activate, acting_user=None)
| apache-2.0 | 5,758,618,571,192,968,000 | 40.835294 | 96 | 0.652418 | false |
QianBIG/odoo | openerp/addons/base/tests/test_ir_values.py | 462 | 6705 | import unittest2
import openerp.tests.common as common
class test_ir_values(common.TransactionCase):
def test_00(self):
# Create some default value for some (non-existing) model, for all users.
ir_values = self.registry('ir.values')
# use the old API
ir_values.set(self.cr, self.uid, 'default', False, 'my_test_field',
['unexisting_model'], 'global value')
# use the new API
ir_values.set_default(self.cr, self.uid, 'other_unexisting_model',
'my_other_test_field', 'conditional value', condition='foo=bar')
# Retrieve them.
ir_values = self.registry('ir.values')
# d is a list of triplets (id, name, value)
# Old API
d = ir_values.get(self.cr, self.uid, 'default', False, ['unexisting_model'])
assert len(d) == 1, "Only one single value should be retrieved for this model"
assert d[0][1] == 'my_test_field', "Can't retrieve the created default value. (1)"
assert d[0][2] == 'global value', "Can't retrieve the created default value. (2)"
# New API, Conditional version
d = ir_values.get_defaults(self.cr, self.uid, 'other_unexisting_model')
assert len(d) == 0, "No value should be retrieved, the condition is not met"
d = ir_values.get_defaults(self.cr, self.uid, 'other_unexisting_model', condition="foo=eggs")
assert len(d) == 0, 'Condition is not met either, no defaults should be returned'
d = ir_values.get_defaults(self.cr, self.uid, 'other_unexisting_model', condition="foo=bar")
assert len(d) == 1, "Only one single value should be retrieved"
assert d[0][1] == 'my_other_test_field', "Can't retrieve the created default value. (5)"
assert d[0][2] == 'conditional value', "Can't retrieve the created default value. (6)"
# Do it again but for a specific user.
ir_values = self.registry('ir.values')
ir_values.set(self.cr, self.uid, 'default', False, 'my_test_field',['unexisting_model'], 'specific value', preserve_user=True)
# Retrieve it and check it is the one for the current user.
ir_values = self.registry('ir.values')
d = ir_values.get(self.cr, self.uid, 'default', False, ['unexisting_model'])
assert len(d) == 1, "Only one default must be returned per field"
assert d[0][1] == 'my_test_field', "Can't retrieve the created default value."
assert d[0][2] == 'specific value', "Can't retrieve the created default value."
# Create some action bindings for a non-existing model.
act_id_1 = self.ref('base.act_values_form_action')
act_id_2 = self.ref('base.act_values_form_defaults')
act_id_3 = self.ref('base.action_res_company_form')
act_id_4 = self.ref('base.action_res_company_tree')
ir_values = self.registry('ir.values')
ir_values.set(self.cr, self.uid, 'action', 'tree_but_open', 'OnDblClick Action', ['unexisting_model'], 'ir.actions.act_window,%d' % act_id_1, isobject=True)
ir_values.set(self.cr, self.uid, 'action', 'tree_but_open', 'OnDblClick Action 2', ['unexisting_model'], 'ir.actions.act_window,%d' % act_id_2, isobject=True)
ir_values.set(self.cr, self.uid, 'action', 'client_action_multi', 'Side Wizard', ['unexisting_model'], 'ir.actions.act_window,%d' % act_id_3, isobject=True)
report_ids = self.registry('ir.actions.report.xml').search(self.cr, self.uid, [], {})
reports = self.registry('ir.actions.report.xml').browse(self.cr, self.uid, report_ids, {})
report_id = [report.id for report in reports if not report.groups_id][0] # assume at least one
ir_values.set(self.cr, self.uid, 'action', 'client_print_multi', 'Nice Report', ['unexisting_model'], 'ir.actions.report.xml,%d' % report_id, isobject=True)
ir_values.set(self.cr, self.uid, 'action', 'client_action_relate', 'Related Stuff', ['unexisting_model'], 'ir.actions.act_window,%d' % act_id_4, isobject=True)
# Replace one action binding to set a new name.
ir_values = self.registry('ir.values')
ir_values.set(self.cr, self.uid, 'action', 'tree_but_open', 'OnDblClick Action New', ['unexisting_model'], 'ir.actions.act_window,%d' % act_id_1, isobject=True)
# Retrieve the action bindings and check they're correct
ir_values = self.registry('ir.values')
actions = ir_values.get(self.cr, self.uid, 'action', 'tree_but_open', ['unexisting_model'])
assert len(actions) == 2, "Mismatching number of bound actions"
#first action
assert len(actions[0]) == 3, "Malformed action definition"
assert actions[0][1] == 'OnDblClick Action 2', 'Bound action does not match definition'
assert isinstance(actions[0][2], dict) and actions[0][2]['id'] == act_id_2, 'Bound action does not match definition'
        #second action - this one comes last because it was re-created with a different name
assert len(actions[1]) == 3, "Malformed action definition"
assert actions[1][1] == 'OnDblClick Action New', 'Re-Registering an action should replace it'
assert isinstance(actions[1][2], dict) and actions[1][2]['id'] == act_id_1, 'Bound action does not match definition'
actions = ir_values.get(self.cr, self.uid, 'action', 'client_action_multi', ['unexisting_model'])
assert len(actions) == 1, "Mismatching number of bound actions"
assert len(actions[0]) == 3, "Malformed action definition"
assert actions[0][1] == 'Side Wizard', 'Bound action does not match definition'
assert isinstance(actions[0][2], dict) and actions[0][2]['id'] == act_id_3, 'Bound action does not match definition'
actions = ir_values.get(self.cr, self.uid, 'action', 'client_print_multi', ['unexisting_model'])
assert len(actions) == 1, "Mismatching number of bound actions"
assert len(actions[0]) == 3, "Malformed action definition"
assert actions[0][1] == 'Nice Report', 'Bound action does not match definition'
assert isinstance(actions[0][2], dict) and actions[0][2]['id'] == report_id, 'Bound action does not match definition'
actions = ir_values.get(self.cr, self.uid, 'action', 'client_action_relate', ['unexisting_model'])
assert len(actions) == 1, "Mismatching number of bound actions"
assert len(actions[0]) == 3, "Malformed action definition"
assert actions[0][1] == 'Related Stuff', 'Bound action does not match definition'
assert isinstance(actions[0][2], dict) and actions[0][2]['id'] == act_id_4, 'Bound action does not match definition'
if __name__ == '__main__':
unittest2.main()
| agpl-3.0 | -4,269,493,996,034,565,000 | 61.663551 | 168 | 0.642953 | false |
orezpraw/partycrasher | partycrasher/more_like_this_response.py | 2 | 5735 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (C) 2016, 2017 Joshua Charles Campbell
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from __future__ import print_function
from operator import itemgetter
import re
import logging
logger = logging.getLogger(__name__)
error = logger.error
warn = logger.warn
info = logger.info
debug = logger.debug
from partycrasher.bucket import Buckets, Bucket, TopMatch
from partycrasher.threshold import Threshold
from partycrasher.pc_exceptions import MissingBucketError
from partycrasher.es.bucket import ESBuckets
from partycrasher.pc_encoder import pretty
class MoreLikeThisHit(object):
def __init__(self, raw_hit):
self.raw_hit = raw_hit
self.score = raw_hit['_score']
assert isinstance(self.score, (float, int))
if '_source' in raw_hit:
self.database_id = raw_hit['_source']['database_id']
self.project = raw_hit['_source']['project']
@property
def prec_top_match(self):
return self.buckets.top_match
@property
def buckets(self):
# TODO: cache this?
crash = self.raw_hit['_source']
try:
buckets = crash['buckets']
except KeyError:
# We couldn't find the bucket field. ASSUME that this means that
# its bucket assignment has not yet propegated to whatever shard
# returned the results.
message = ('Bucket field {!r} not found in crash: '
'{!r}'.format('buckets', crash))
raise MissingBucketError(message)
buckets = ESBuckets(buckets)
return buckets
@property
def explanation(self):
try:
return self.raw_hit['_explanation']['details']
except:
error(json.dumps(body, indent=2, cls=ESCrashEncoder))
error(json.dumps(response, indent=2))
raise
@property
def explanation_summary(self):
explanation = self.explanation
with open('explained', 'w') as debug_file:
print(pretty(self.raw_hit['_explanation']), file=debug_file)
def flatten(explanation):
flattened = []
for subexplanation in explanation:
if subexplanation["description"].startswith("weight"):
flattened.append(subexplanation)
else:
#print(subexplanation["description"])
if "details" in subexplanation:
flattened.extend(flatten(subexplanation["details"]))
return flattened
explanation = flatten(explanation)
explanation = sorted(explanation, key=itemgetter('value'), reverse=True)
#with open("explanation", 'w') as f:
#print(json.dumps(explanation, indent=2), file=f)
summary = []
for i in explanation:
#print(i['description'])
match = re.match(r'^weight\(([^\s:]+):([^\s]+) in .*$', i['description'])
if match is not None:
summary.append({'field': match.group(1), 'term': match.group(2), 'value': i['value']})
#del summary[30:]
#print(json.dumps(summary, indent=2, cls=ESCrashEncoder), file=sys.stderr)
return summary
def as_top_match(self):
return TopMatch(report_id=self.database_id,
score=self.score,
project=self.project)
class MoreLikeThisResponse(object):
# JSON structure:
# matches['hit']['hits] ~> [
# {
# "_score": 8.9,
# "_source": {
# "buckets": {
# "1.0": "***bucket-id-1***",
# "9.0": "***bucket-id-2***"
# }
# }
# }
def __init__(self, response_dict):
self.response_dict = response_dict
self.raw_hits = self.response_dict['hits']['hits']
self.hits = [MoreLikeThisHit(h) for h in self.raw_hits]
if len(self.hits) > 0:
self.top_match = self.hits[0]
else:
self.top_match = None
@property
def explanation(self):
if len(self.hits) > 0:
return self.hits[0].explanation
else:
return None
@property
def explanation_summary(self):
accumulator = {}
for hit in self.hits:
s = hit.explanation_summary
for t in s:
if t['field'] not in accumulator:
accumulator[t['field']] = {}
if t['term'] not in accumulator[t['field']]:
accumulator[t['field']][t['term']] = 0.0
accumulator[t['field']][t['term']] += t['value']
explanation = []
for field in accumulator:
for term in accumulator[field]:
explanation.append({
'field': field,
'term': term,
'value': accumulator[field][term]
})
explanation = sorted(explanation, key=itemgetter('value'), reverse=True)
return explanation
| gpl-3.0 | 4,864,964,501,320,618,000 | 34.184049 | 98 | 0.578553 | false |
tareqalayan/ansible | lib/ansible/modules/network/cumulus/_cl_img_install.py | 25 | 3558 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Cumulus Networks <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cl_img_install
version_added: "2.1"
author: "Cumulus Networks (@CumulusNetworks)"
short_description: Install a different Cumulus Linux version.
deprecated:
removed_in: "2.5"
why: The image slot system no longer exists in Cumulus Linux.
alternative: n/a
description:
- install a different version of Cumulus Linux in the inactive slot. For
more details go the Image Management User Guide at
U(http://docs.cumulusnetworks.com/).
options:
src:
description:
- The full path to the Cumulus Linux binary image. Can be a local path,
http or https URL. If the code version is in the name of the file,
the module will assume this is the version of code you wish to
install.
required: true
version:
description:
- Inform the module of the exact version one is installing. This
overrides the automatic check of version in the file name. For
example, if the binary file name is called CumulusLinux-2.2.3.bin,
and version is set to '2.5.0', then the module will assume it is
installing '2.5.0' not '2.2.3'. If version is not included, then
the module will assume '2.2.3' is the version to install.
switch_slot:
description:
- Switch slots after installing the image.
To run the installed code, reboot the switch.
type: bool
requirements: ["Cumulus Linux OS"]
'''
EXAMPLES = '''
## Download and install the image from a webserver.
- name: Install image using an http url. Switch slots so the subsequent reload will load the new version
cl_img_install:
version: 2.0.1
src: http://10.1.1.1/CumulusLinux-2.0.1.bin
switch_slot: yes
## Copy the software from the ansible server to the switch.
## The module will get the code version from the filename
## The code will be installed in the alternate slot but the slot will not be primary
## A subsequent reload will not run the new code
- name: Download cumulus linux to local system
get_url:
    url: ftp://cumuluslinux.bin
dest: /root/CumulusLinux-2.0.1.bin
- name: Install image from local filesystem. Get version from the filename.
cl_img_install:
src: /root/CumulusLinux-2.0.1.bin
## If the image name has been changed from the original name, use the `version` option
## to inform the module exactly what code version is been installed
- name: Download cumulus linux to local system
get_url:
    url: ftp://CumulusLinux-2.0.1.bin
dest: /root/image.bin
- name: install image and switch slots. Only reboot needed
cl_img_install:
version: 2.0.1
src: /root/image.bin
switch_slot: yes
'''
RETURN = '''
changed:
description: whether the interface was changed
returned: changed
type: bool
sample: True
msg:
description: human-readable report of success or failure
returned: always
type: string
sample: "interface bond0 config updated"
'''
from ansible.module_utils.common.removed import removed_module
if __name__ == '__main__':
removed_module()
| gpl-3.0 | -5,251,614,560,960,120,000 | 31.944444 | 100 | 0.671726 | false |
rdeheele/odoo | addons/event/wizard/event_confirm.py | 339 | 1387 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, api
class event_confirm(models.TransientModel):
"""Event Confirmation"""
_name = "event.confirm"
@api.multi
def confirm(self):
events = self.env['event.event'].browse(self._context.get('event_ids', []))
events.do_confirm()
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -2,053,449,945,955,956,200 | 38.628571 | 83 | 0.617159 | false |
dorileo/soletta | data/scripts/template.py | 9 | 5158 | #!/usr/bin/env python3
# This file is part of the Soletta (TM) Project
#
# Copyright (C) 2015 Intel Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import configparser
import os
import re
import stat
class TemplateFragment:
def __init__(self, tpl_global, context, verbatim, expr):
self.context = context
self.verbatim = verbatim
self.expr = expr
self.tpl_global = tpl_global
self.subst = ""
def __append_subst(self, subst):
self.subst += "%s\n" % subst
def value_of(self, k):
value = self.context.get(k)
if value:
self.__append_subst(value)
def on_value(self, k, v, ontrue, onfalse):
value = self.context.get(k.lower())
if value and value == v:
self.__append_subst(ontrue)
else:
self.__append_subst(onfalse)
def on_set(self, k, ontrue, onfalse):
if self.context.get(k.lower()):
self.__append_subst(ontrue)
else:
self.__append_subst(onfalse)
def println(self, ln):
self.__append_subst(ln)
def include(self, template):
dir_path = os.path.dirname(self.tpl_global["root_tpl"])
path = os.path.join(dir_path, template)
try:
f = open(path)
except:
print("Could not open include file: %s" % path)
return
content = run_template(f.read(), self.tpl_global, self.context, self)
self.__append_subst(content)
def parse_template(raw, tpl_global, context):
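    # Walk the raw text one character at a time, using the previous character
    # to detect "{{" and "}}" delimiters, splitting it into verbatim fragments
    # and expression fragments that run_template() will exec() later.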
result = []
curr = prev = ""
expr = False
for ch in raw:
if ch == "{" and prev == "{":
if curr:
fragment = TemplateFragment(tpl_global, context, curr, expr)
result.append(fragment)
curr = ""
expr = True
elif ch == "}" and prev == "}":
if curr:
fragment = TemplateFragment(tpl_global, context, curr[:len(curr) - 1], expr)
result.append(fragment)
curr = ""
expr = False
else:
curr += ch
prev = ch
if curr:
fragment = TemplateFragment(tpl_global, context, curr, expr)
result.append(fragment)
return result
def load_context(files):
result = {}
for f in files:
lines = f.read()
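        # Prepend a dummy section header so configparser can parse a flat
        # key=value file (such as kconfig's .config).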
content = "[context]\n%s" % lines
handle = configparser.ConfigParser(delimiters=('=','?=',':='))
handle.read_string(content)
dc = handle["context"]
result = dict(list(result.items()) + list(dc.items()))
# also consider env vars in the context
for k,v in os.environ.items():
result[k.lower()] = v
return result
def try_subst(verbatim):
    p = re.compile(r"^@.*@$")
m = p.match(verbatim)
if not m:
return None
return verbatim.replace("@","")
def run_template(raw, tpl_global, context, nested=None):
fragments = parse_template(raw, tpl_global, context)
for frag in fragments:
if frag.expr:
subst = try_subst(frag.verbatim)
if subst:
subst = context.get(subst.lower(), "")
subst = subst.replace("\"","")
frag.subst = subst
else:
if nested:
tpl_global["st"] = nested
else:
tpl_global["st"] = frag
tpl_global["context"] = context
exec(frag.verbatim, tpl_global)
raw = raw.replace("{{%s}}" % frag.verbatim, frag.subst)
return raw
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--context-files",
help=("The context files path. A context file"
"is a file containing key=value pairs, like"
"the kconfig's .config file"),
type=argparse.FileType("r"), nargs="+",
required=True)
parser.add_argument("--template", help="The template file path",
type=argparse.FileType("r"), required=True)
parser.add_argument("--output", help="The template file path",
type=argparse.FileType("w"), required=True)
args = parser.parse_args()
tpl_global = {"root_tpl": os.path.realpath(args.template.name)}
context = load_context(args.context_files)
output = run_template(args.template.read(), tpl_global, context)
args.output.write(output)
st = os.fstat(args.template.fileno())
os.fchmod(args.output.fileno(), st.st_mode)
| apache-2.0 | 8,964,335,338,637,362,000 | 30.839506 | 92 | 0.565529 | false |
calfonso/ansible | lib/ansible/modules/cloud/amazon/ec2_elb_facts.py | 24 | 8629 | #!/usr/bin/python
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_elb_facts
short_description: Gather facts about EC2 Elastic Load Balancers in AWS
description:
- Gather facts about EC2 Elastic Load Balancers in AWS
version_added: "2.0"
author:
- "Michael Schultz (github.com/mjschultz)"
- "Fernando Jose Pando (@nand0p)"
options:
names:
description:
- List of ELB names to gather facts about. Pass this option to gather facts about a set of ELBs, otherwise, all ELBs are returned.
aliases: ['elb_ids', 'ec2_elbs']
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Output format tries to match ec2_elb_lb module input parameters
# Gather facts about all ELBs
- action:
module: ec2_elb_facts
register: elb_facts
- action:
module: debug
msg: "{{ item.dns_name }}"
with_items: "{{ elb_facts.elbs }}"
# Gather facts about a particular ELB
- action:
module: ec2_elb_facts
names: frontend-prod-elb
register: elb_facts
- action:
module: debug
msg: "{{ elb_facts.elbs.0.dns_name }}"
# Gather facts about a set of ELBs
- action:
module: ec2_elb_facts
names:
- frontend-prod-elb
- backend-prod-elb
register: elb_facts
- action:
module: debug
msg: "{{ item.dns_name }}"
with_items: "{{ elb_facts.elbs }}"
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (
AWSRetry,
connect_to_aws,
ec2_argument_spec,
get_aws_connection_info,
)
try:
import boto.ec2.elb
from boto.ec2.tag import Tag
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
class ElbInformation(object):
"""Handles ELB information."""
def __init__(self,
module,
names,
region,
**aws_connect_params):
self.module = module
self.names = names
self.region = region
self.aws_connect_params = aws_connect_params
self.connection = self._get_elb_connection()
def _get_tags(self, elbname):
params = {'LoadBalancerNames.member.1': elbname}
elb_tags = self.connection.get_list('DescribeTags', params, [('member', Tag)])
return dict((tag.Key, tag.Value) for tag in elb_tags if hasattr(tag, 'Key'))
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def _get_elb_connection(self):
return connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params)
def _get_elb_listeners(self, listeners):
listener_list = []
for listener in listeners:
listener_dict = {
'load_balancer_port': listener[0],
'instance_port': listener[1],
'protocol': listener[2],
}
try:
ssl_certificate_id = listener[4]
except IndexError:
pass
else:
if ssl_certificate_id:
listener_dict['ssl_certificate_id'] = ssl_certificate_id
listener_list.append(listener_dict)
return listener_list
def _get_health_check(self, health_check):
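        # health_check.target looks like "HTTP:80/index.html" or "TCP:443";
        # split it into protocol, port and an optional ping path.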
protocol, port_path = health_check.target.split(':')
try:
port, path = port_path.split('/', 1)
path = '/{0}'.format(path)
except ValueError:
port = port_path
path = None
health_check_dict = {
'ping_protocol': protocol.lower(),
'ping_port': int(port),
'response_timeout': health_check.timeout,
'interval': health_check.interval,
'unhealthy_threshold': health_check.unhealthy_threshold,
'healthy_threshold': health_check.healthy_threshold,
}
if path:
health_check_dict['ping_path'] = path
return health_check_dict
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def _get_elb_info(self, elb):
elb_info = {
'name': elb.name,
'zones': elb.availability_zones,
'dns_name': elb.dns_name,
'canonical_hosted_zone_name': elb.canonical_hosted_zone_name,
'canonical_hosted_zone_name_id': elb.canonical_hosted_zone_name_id,
'hosted_zone_name': elb.canonical_hosted_zone_name,
'hosted_zone_id': elb.canonical_hosted_zone_name_id,
'instances': [instance.id for instance in elb.instances],
'listeners': self._get_elb_listeners(elb.listeners),
'scheme': elb.scheme,
'security_groups': elb.security_groups,
'health_check': self._get_health_check(elb.health_check),
'subnets': elb.subnets,
'instances_inservice': [],
'instances_inservice_count': 0,
'instances_outofservice': [],
'instances_outofservice_count': 0,
'instances_inservice_percent': 0.0,
'tags': self._get_tags(elb.name)
}
if elb.vpc_id:
elb_info['vpc_id'] = elb.vpc_id
if elb.instances:
instance_health = self.connection.describe_instance_health(elb.name)
elb_info['instances_inservice'] = [inst.instance_id for inst in instance_health if inst.state == 'InService']
elb_info['instances_inservice_count'] = len(elb_info['instances_inservice'])
elb_info['instances_outofservice'] = [inst.instance_id for inst in instance_health if inst.state == 'OutOfService']
elb_info['instances_outofservice_count'] = len(elb_info['instances_outofservice'])
try:
elb_info['instances_inservice_percent'] = (
float(elb_info['instances_inservice_count']) /
float(elb_info['instances_inservice_count'] + elb_info['instances_outofservice_count'])
) * 100.
except ZeroDivisionError:
elb_info['instances_inservice_percent'] = 0.
return elb_info
def list_elbs(self):
elb_array, token = [], None
get_elb_with_backoff = AWSRetry.backoff(tries=5, delay=5, backoff=2.0)(self.connection.get_all_load_balancers)
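        # Page through the API using the marker token until no next_marker is
        # returned, filtering by the requested names when any were given.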
while True:
all_elbs = get_elb_with_backoff(marker=token)
token = all_elbs.next_marker
if all_elbs:
if self.names:
for existing_lb in all_elbs:
if existing_lb.name in self.names:
elb_array.append(existing_lb)
else:
elb_array.extend(all_elbs)
else:
break
if token is None:
break
return list(map(self._get_elb_info, elb_array))
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
names={'default': [], 'type': 'list'}
)
)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
try:
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if not region:
module.fail_json(msg="region must be specified")
names = module.params['names']
elb_information = ElbInformation(
module, names, region, **aws_connect_params)
ec2_facts_result = dict(changed=False,
elbs=elb_information.list_elbs())
except BotoServerError as err:
module.fail_json(msg="{0}: {1}".format(err.error_code, err.error_message),
exception=traceback.format_exc())
module.exit_json(**ec2_facts_result)
if __name__ == '__main__':
main()
| gpl-3.0 | -539,068,063,283,125,200 | 31.685606 | 136 | 0.594623 | false |
xuewei4d/scikit-learn | asv_benchmarks/benchmarks/decomposition.py | 12 | 2754 | from sklearn.decomposition import (PCA, DictionaryLearning,
MiniBatchDictionaryLearning)
from .common import Benchmark, Estimator, Transformer
from .datasets import _olivetti_faces_dataset, _mnist_dataset
from .utils import make_pca_scorers, make_dict_learning_scorers
class PCABenchmark(Transformer, Estimator, Benchmark):
"""
Benchmarks for PCA.
"""
param_names = ['svd_solver']
params = (['full', 'arpack', 'randomized'],)
def setup_cache(self):
super().setup_cache()
def make_data(self, params):
return _mnist_dataset()
def make_estimator(self, params):
svd_solver, = params
estimator = PCA(n_components=32,
svd_solver=svd_solver,
random_state=0)
return estimator
def make_scorers(self):
make_pca_scorers(self)
class DictionaryLearningBenchmark(Transformer, Estimator, Benchmark):
"""
Benchmarks for DictionaryLearning.
"""
param_names = ['fit_algorithm', 'n_jobs']
params = (['lars', 'cd'], Benchmark.n_jobs_vals)
def setup_cache(self):
super().setup_cache()
def make_data(self, params):
return _olivetti_faces_dataset()
def make_estimator(self, params):
fit_algorithm, n_jobs = params
estimator = DictionaryLearning(n_components=15,
fit_algorithm=fit_algorithm,
alpha=0.1,
max_iter=20,
tol=1e-16,
random_state=0,
n_jobs=n_jobs)
return estimator
def make_scorers(self):
make_dict_learning_scorers(self)
class MiniBatchDictionaryLearningBenchmark(Transformer, Estimator, Benchmark):
"""
Benchmarks for MiniBatchDictionaryLearning
"""
param_names = ['fit_algorithm', 'n_jobs']
params = (['lars', 'cd'], Benchmark.n_jobs_vals)
def setup_cache(self):
super().setup_cache()
def make_data(self, params):
return _olivetti_faces_dataset()
def make_estimator(self, params):
fit_algorithm, n_jobs = params
estimator = MiniBatchDictionaryLearning(n_components=15,
fit_algorithm=fit_algorithm,
alpha=0.1,
batch_size=3,
random_state=0,
n_jobs=n_jobs)
return estimator
def make_scorers(self):
make_dict_learning_scorers(self)
| bsd-3-clause | 4,591,949,816,198,792,700 | 28.297872 | 78 | 0.531227 | false |
DeltaEpsilon-HackFMI2/FMICalendar-REST | venv/lib/python2.7/site-packages/rest_framework/tests/test_relations_pk.py | 21 | 22294 | from __future__ import unicode_literals
from django.db import models
from django.test import TestCase
from rest_framework import serializers
from rest_framework.tests.models import (
BlogPost, ManyToManyTarget, ManyToManySource, ForeignKeyTarget, ForeignKeySource,
NullableForeignKeySource, OneToOneTarget, NullableOneToOneSource,
)
from rest_framework.compat import six
# ManyToMany
class ManyToManyTargetSerializer(serializers.ModelSerializer):
class Meta:
model = ManyToManyTarget
fields = ('id', 'name', 'sources')
class ManyToManySourceSerializer(serializers.ModelSerializer):
class Meta:
model = ManyToManySource
fields = ('id', 'name', 'targets')
# ForeignKey
class ForeignKeyTargetSerializer(serializers.ModelSerializer):
class Meta:
model = ForeignKeyTarget
fields = ('id', 'name', 'sources')
class ForeignKeySourceSerializer(serializers.ModelSerializer):
class Meta:
model = ForeignKeySource
fields = ('id', 'name', 'target')
# Nullable ForeignKey
class NullableForeignKeySourceSerializer(serializers.ModelSerializer):
class Meta:
model = NullableForeignKeySource
fields = ('id', 'name', 'target')
# Nullable OneToOne
class NullableOneToOneTargetSerializer(serializers.ModelSerializer):
class Meta:
model = OneToOneTarget
fields = ('id', 'name', 'nullable_source')
# TODO: Add test that .data cannot be accessed prior to .is_valid
class PKManyToManyTests(TestCase):
def setUp(self):
for idx in range(1, 4):
target = ManyToManyTarget(name='target-%d' % idx)
target.save()
source = ManyToManySource(name='source-%d' % idx)
source.save()
for target in ManyToManyTarget.objects.all():
source.targets.add(target)
def test_many_to_many_retrieve(self):
queryset = ManyToManySource.objects.all()
serializer = ManyToManySourceSerializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'source-1', 'targets': [1]},
{'id': 2, 'name': 'source-2', 'targets': [1, 2]},
{'id': 3, 'name': 'source-3', 'targets': [1, 2, 3]}
]
self.assertEqual(serializer.data, expected)
def test_reverse_many_to_many_retrieve(self):
queryset = ManyToManyTarget.objects.all()
serializer = ManyToManyTargetSerializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'target-1', 'sources': [1, 2, 3]},
{'id': 2, 'name': 'target-2', 'sources': [2, 3]},
{'id': 3, 'name': 'target-3', 'sources': [3]}
]
self.assertEqual(serializer.data, expected)
def test_many_to_many_update(self):
data = {'id': 1, 'name': 'source-1', 'targets': [1, 2, 3]}
instance = ManyToManySource.objects.get(pk=1)
serializer = ManyToManySourceSerializer(instance, data=data)
self.assertTrue(serializer.is_valid())
serializer.save()
self.assertEqual(serializer.data, data)
# Ensure source 1 is updated, and everything else is as expected
queryset = ManyToManySource.objects.all()
serializer = ManyToManySourceSerializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'source-1', 'targets': [1, 2, 3]},
{'id': 2, 'name': 'source-2', 'targets': [1, 2]},
{'id': 3, 'name': 'source-3', 'targets': [1, 2, 3]}
]
self.assertEqual(serializer.data, expected)
def test_reverse_many_to_many_update(self):
data = {'id': 1, 'name': 'target-1', 'sources': [1]}
instance = ManyToManyTarget.objects.get(pk=1)
serializer = ManyToManyTargetSerializer(instance, data=data)
self.assertTrue(serializer.is_valid())
serializer.save()
self.assertEqual(serializer.data, data)
# Ensure target 1 is updated, and everything else is as expected
queryset = ManyToManyTarget.objects.all()
serializer = ManyToManyTargetSerializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'target-1', 'sources': [1]},
{'id': 2, 'name': 'target-2', 'sources': [2, 3]},
{'id': 3, 'name': 'target-3', 'sources': [3]}
]
self.assertEqual(serializer.data, expected)
def test_many_to_many_create(self):
data = {'id': 4, 'name': 'source-4', 'targets': [1, 3]}
serializer = ManyToManySourceSerializer(data=data)
self.assertTrue(serializer.is_valid())
obj = serializer.save()
self.assertEqual(serializer.data, data)
self.assertEqual(obj.name, 'source-4')
# Ensure source 4 is added, and everything else is as expected
queryset = ManyToManySource.objects.all()
serializer = ManyToManySourceSerializer(queryset, many=True)
self.assertFalse(serializer.fields['targets'].read_only)
expected = [
{'id': 1, 'name': 'source-1', 'targets': [1]},
{'id': 2, 'name': 'source-2', 'targets': [1, 2]},
{'id': 3, 'name': 'source-3', 'targets': [1, 2, 3]},
{'id': 4, 'name': 'source-4', 'targets': [1, 3]},
]
self.assertEqual(serializer.data, expected)
def test_reverse_many_to_many_create(self):
data = {'id': 4, 'name': 'target-4', 'sources': [1, 3]}
serializer = ManyToManyTargetSerializer(data=data)
self.assertFalse(serializer.fields['sources'].read_only)
self.assertTrue(serializer.is_valid())
obj = serializer.save()
self.assertEqual(serializer.data, data)
self.assertEqual(obj.name, 'target-4')
# Ensure target 4 is added, and everything else is as expected
queryset = ManyToManyTarget.objects.all()
serializer = ManyToManyTargetSerializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'target-1', 'sources': [1, 2, 3]},
{'id': 2, 'name': 'target-2', 'sources': [2, 3]},
{'id': 3, 'name': 'target-3', 'sources': [3]},
{'id': 4, 'name': 'target-4', 'sources': [1, 3]}
]
self.assertEqual(serializer.data, expected)
class PKForeignKeyTests(TestCase):
def setUp(self):
target = ForeignKeyTarget(name='target-1')
target.save()
new_target = ForeignKeyTarget(name='target-2')
new_target.save()
for idx in range(1, 4):
source = ForeignKeySource(name='source-%d' % idx, target=target)
source.save()
def test_foreign_key_retrieve(self):
queryset = ForeignKeySource.objects.all()
serializer = ForeignKeySourceSerializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'source-1', 'target': 1},
{'id': 2, 'name': 'source-2', 'target': 1},
{'id': 3, 'name': 'source-3', 'target': 1}
]
self.assertEqual(serializer.data, expected)
def test_reverse_foreign_key_retrieve(self):
queryset = ForeignKeyTarget.objects.all()
serializer = ForeignKeyTargetSerializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'target-1', 'sources': [1, 2, 3]},
{'id': 2, 'name': 'target-2', 'sources': []},
]
self.assertEqual(serializer.data, expected)
def test_foreign_key_update(self):
data = {'id': 1, 'name': 'source-1', 'target': 2}
instance = ForeignKeySource.objects.get(pk=1)
serializer = ForeignKeySourceSerializer(instance, data=data)
self.assertTrue(serializer.is_valid())
self.assertEqual(serializer.data, data)
serializer.save()
# Ensure source 1 is updated, and everything else is as expected
queryset = ForeignKeySource.objects.all()
serializer = ForeignKeySourceSerializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'source-1', 'target': 2},
{'id': 2, 'name': 'source-2', 'target': 1},
{'id': 3, 'name': 'source-3', 'target': 1}
]
self.assertEqual(serializer.data, expected)
def test_foreign_key_update_incorrect_type(self):
data = {'id': 1, 'name': 'source-1', 'target': 'foo'}
instance = ForeignKeySource.objects.get(pk=1)
serializer = ForeignKeySourceSerializer(instance, data=data)
self.assertFalse(serializer.is_valid())
self.assertEqual(serializer.errors, {'target': ['Incorrect type. Expected pk value, received %s.' % six.text_type.__name__]})
def test_reverse_foreign_key_update(self):
data = {'id': 2, 'name': 'target-2', 'sources': [1, 3]}
instance = ForeignKeyTarget.objects.get(pk=2)
serializer = ForeignKeyTargetSerializer(instance, data=data)
self.assertTrue(serializer.is_valid())
# We shouldn't have saved anything to the db yet since save
# hasn't been called.
queryset = ForeignKeyTarget.objects.all()
new_serializer = ForeignKeyTargetSerializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'target-1', 'sources': [1, 2, 3]},
{'id': 2, 'name': 'target-2', 'sources': []},
]
self.assertEqual(new_serializer.data, expected)
serializer.save()
self.assertEqual(serializer.data, data)
# Ensure target 2 is update, and everything else is as expected
queryset = ForeignKeyTarget.objects.all()
serializer = ForeignKeyTargetSerializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'target-1', 'sources': [2]},
{'id': 2, 'name': 'target-2', 'sources': [1, 3]},
]
self.assertEqual(serializer.data, expected)
def test_foreign_key_create(self):
data = {'id': 4, 'name': 'source-4', 'target': 2}
serializer = ForeignKeySourceSerializer(data=data)
self.assertTrue(serializer.is_valid())
obj = serializer.save()
self.assertEqual(serializer.data, data)
self.assertEqual(obj.name, 'source-4')
# Ensure source 4 is added, and everything else is as expected
queryset = ForeignKeySource.objects.all()
serializer = ForeignKeySourceSerializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'source-1', 'target': 1},
{'id': 2, 'name': 'source-2', 'target': 1},
{'id': 3, 'name': 'source-3', 'target': 1},
{'id': 4, 'name': 'source-4', 'target': 2},
]
self.assertEqual(serializer.data, expected)
def test_reverse_foreign_key_create(self):
data = {'id': 3, 'name': 'target-3', 'sources': [1, 3]}
serializer = ForeignKeyTargetSerializer(data=data)
self.assertTrue(serializer.is_valid())
obj = serializer.save()
self.assertEqual(serializer.data, data)
self.assertEqual(obj.name, 'target-3')
# Ensure target 3 is added, and everything else is as expected
queryset = ForeignKeyTarget.objects.all()
serializer = ForeignKeyTargetSerializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'target-1', 'sources': [2]},
{'id': 2, 'name': 'target-2', 'sources': []},
{'id': 3, 'name': 'target-3', 'sources': [1, 3]},
]
self.assertEqual(serializer.data, expected)
def test_foreign_key_update_with_invalid_null(self):
data = {'id': 1, 'name': 'source-1', 'target': None}
instance = ForeignKeySource.objects.get(pk=1)
serializer = ForeignKeySourceSerializer(instance, data=data)
self.assertFalse(serializer.is_valid())
self.assertEqual(serializer.errors, {'target': ['This field is required.']})
def test_foreign_key_with_empty(self):
"""
Regression test for #1072
https://github.com/tomchristie/django-rest-framework/issues/1072
"""
serializer = NullableForeignKeySourceSerializer()
self.assertEqual(serializer.data['target'], None)
class PKNullableForeignKeyTests(TestCase):
def setUp(self):
target = ForeignKeyTarget(name='target-1')
target.save()
for idx in range(1, 4):
if idx == 3:
target = None
source = NullableForeignKeySource(name='source-%d' % idx, target=target)
source.save()
def test_foreign_key_retrieve_with_null(self):
queryset = NullableForeignKeySource.objects.all()
serializer = NullableForeignKeySourceSerializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'source-1', 'target': 1},
{'id': 2, 'name': 'source-2', 'target': 1},
{'id': 3, 'name': 'source-3', 'target': None},
]
self.assertEqual(serializer.data, expected)
def test_foreign_key_create_with_valid_null(self):
data = {'id': 4, 'name': 'source-4', 'target': None}
serializer = NullableForeignKeySourceSerializer(data=data)
self.assertTrue(serializer.is_valid())
obj = serializer.save()
self.assertEqual(serializer.data, data)
self.assertEqual(obj.name, 'source-4')
# Ensure source 4 is created, and everything else is as expected
queryset = NullableForeignKeySource.objects.all()
serializer = NullableForeignKeySourceSerializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'source-1', 'target': 1},
{'id': 2, 'name': 'source-2', 'target': 1},
{'id': 3, 'name': 'source-3', 'target': None},
{'id': 4, 'name': 'source-4', 'target': None}
]
self.assertEqual(serializer.data, expected)
def test_foreign_key_create_with_valid_emptystring(self):
"""
The emptystring should be interpreted as null in the context
of relationships.
"""
data = {'id': 4, 'name': 'source-4', 'target': ''}
expected_data = {'id': 4, 'name': 'source-4', 'target': None}
serializer = NullableForeignKeySourceSerializer(data=data)
self.assertTrue(serializer.is_valid())
obj = serializer.save()
self.assertEqual(serializer.data, expected_data)
self.assertEqual(obj.name, 'source-4')
# Ensure source 4 is created, and everything else is as expected
queryset = NullableForeignKeySource.objects.all()
serializer = NullableForeignKeySourceSerializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'source-1', 'target': 1},
{'id': 2, 'name': 'source-2', 'target': 1},
{'id': 3, 'name': 'source-3', 'target': None},
{'id': 4, 'name': 'source-4', 'target': None}
]
self.assertEqual(serializer.data, expected)
def test_foreign_key_update_with_valid_null(self):
data = {'id': 1, 'name': 'source-1', 'target': None}
instance = NullableForeignKeySource.objects.get(pk=1)
serializer = NullableForeignKeySourceSerializer(instance, data=data)
self.assertTrue(serializer.is_valid())
self.assertEqual(serializer.data, data)
serializer.save()
# Ensure source 1 is updated, and everything else is as expected
queryset = NullableForeignKeySource.objects.all()
serializer = NullableForeignKeySourceSerializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'source-1', 'target': None},
{'id': 2, 'name': 'source-2', 'target': 1},
{'id': 3, 'name': 'source-3', 'target': None}
]
self.assertEqual(serializer.data, expected)
def test_foreign_key_update_with_valid_emptystring(self):
"""
The emptystring should be interpreted as null in the context
of relationships.
"""
data = {'id': 1, 'name': 'source-1', 'target': ''}
expected_data = {'id': 1, 'name': 'source-1', 'target': None}
instance = NullableForeignKeySource.objects.get(pk=1)
serializer = NullableForeignKeySourceSerializer(instance, data=data)
self.assertTrue(serializer.is_valid())
self.assertEqual(serializer.data, expected_data)
serializer.save()
# Ensure source 1 is updated, and everything else is as expected
queryset = NullableForeignKeySource.objects.all()
serializer = NullableForeignKeySourceSerializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'source-1', 'target': None},
{'id': 2, 'name': 'source-2', 'target': 1},
{'id': 3, 'name': 'source-3', 'target': None}
]
self.assertEqual(serializer.data, expected)
# reverse foreign keys MUST be read_only
# In the general case they do not provide .remove() or .clear()
# and cannot be arbitrarily set.
# def test_reverse_foreign_key_update(self):
# data = {'id': 1, 'name': 'target-1', 'sources': [1]}
# instance = ForeignKeyTarget.objects.get(pk=1)
# serializer = ForeignKeyTargetSerializer(instance, data=data)
# self.assertTrue(serializer.is_valid())
# self.assertEqual(serializer.data, data)
# serializer.save()
# # Ensure target 1 is updated, and everything else is as expected
# queryset = ForeignKeyTarget.objects.all()
# serializer = ForeignKeyTargetSerializer(queryset, many=True)
# expected = [
# {'id': 1, 'name': 'target-1', 'sources': [1]},
# {'id': 2, 'name': 'target-2', 'sources': []},
# ]
# self.assertEqual(serializer.data, expected)
class PKNullableOneToOneTests(TestCase):
def setUp(self):
target = OneToOneTarget(name='target-1')
target.save()
new_target = OneToOneTarget(name='target-2')
new_target.save()
source = NullableOneToOneSource(name='source-1', target=new_target)
source.save()
def test_reverse_foreign_key_retrieve_with_null(self):
queryset = OneToOneTarget.objects.all()
serializer = NullableOneToOneTargetSerializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'target-1', 'nullable_source': None},
{'id': 2, 'name': 'target-2', 'nullable_source': 1},
]
self.assertEqual(serializer.data, expected)
# The below models and tests ensure that serializer fields corresponding
# to a ManyToManyField field with a user-specified ``through`` model are
# set to read only
class ManyToManyThroughTarget(models.Model):
name = models.CharField(max_length=100)
class ManyToManyThrough(models.Model):
source = models.ForeignKey('ManyToManyThroughSource')
target = models.ForeignKey(ManyToManyThroughTarget)
class ManyToManyThroughSource(models.Model):
name = models.CharField(max_length=100)
targets = models.ManyToManyField(ManyToManyThroughTarget,
related_name='sources',
through='ManyToManyThrough')
class ManyToManyThroughTargetSerializer(serializers.ModelSerializer):
class Meta:
model = ManyToManyThroughTarget
fields = ('id', 'name', 'sources')
class ManyToManyThroughSourceSerializer(serializers.ModelSerializer):
class Meta:
model = ManyToManyThroughSource
fields = ('id', 'name', 'targets')
class PKManyToManyThroughTests(TestCase):
def setUp(self):
self.source = ManyToManyThroughSource.objects.create(
name='through-source-1')
self.target = ManyToManyThroughTarget.objects.create(
name='through-target-1')
def test_many_to_many_create(self):
data = {'id': 2, 'name': 'source-2', 'targets': [self.target.pk]}
serializer = ManyToManyThroughSourceSerializer(data=data)
self.assertTrue(serializer.fields['targets'].read_only)
self.assertTrue(serializer.is_valid())
obj = serializer.save()
self.assertEqual(obj.name, 'source-2')
self.assertEqual(obj.targets.count(), 0)
def test_many_to_many_reverse_create(self):
data = {'id': 2, 'name': 'target-2', 'sources': [self.source.pk]}
serializer = ManyToManyThroughTargetSerializer(data=data)
self.assertTrue(serializer.fields['sources'].read_only)
self.assertTrue(serializer.is_valid())
serializer.save()
obj = serializer.save()
self.assertEqual(obj.name, 'target-2')
self.assertEqual(obj.sources.count(), 0)
# Regression tests for #694 (`source` attribute on related fields)
class PrimaryKeyRelatedFieldSourceTests(TestCase):
def test_related_manager_source(self):
"""
Relational fields should be able to use manager-returning methods as their source.
"""
BlogPost.objects.create(title='blah')
field = serializers.PrimaryKeyRelatedField(many=True, source='get_blogposts_manager')
class ClassWithManagerMethod(object):
def get_blogposts_manager(self):
return BlogPost.objects
obj = ClassWithManagerMethod()
value = field.field_to_native(obj, 'field_name')
self.assertEqual(value, [1])
def test_related_queryset_source(self):
"""
Relational fields should be able to use queryset-returning methods as their source.
"""
BlogPost.objects.create(title='blah')
field = serializers.PrimaryKeyRelatedField(many=True, source='get_blogposts_queryset')
class ClassWithQuerysetMethod(object):
def get_blogposts_queryset(self):
return BlogPost.objects.all()
obj = ClassWithQuerysetMethod()
value = field.field_to_native(obj, 'field_name')
self.assertEqual(value, [1])
def test_dotted_source(self):
"""
Source argument should support dotted.source notation.
"""
BlogPost.objects.create(title='blah')
field = serializers.PrimaryKeyRelatedField(many=True, source='a.b.c')
class ClassWithQuerysetMethod(object):
a = {
'b': {
'c': BlogPost.objects.all()
}
}
obj = ClassWithQuerysetMethod()
value = field.field_to_native(obj, 'field_name')
self.assertEqual(value, [1])
| mit | -5,196,732,579,960,990,000 | 39.46098 | 134 | 0.606531 | false |
lbdreyer/iris | lib/iris/tests/unit/aux_factory/test_AuxCoordFactory.py | 5 | 6412 | # Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
Unit tests for `iris.aux_factory.AuxCoordFactory`.
"""
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import numpy as np
import iris
from iris._lazy_data import as_lazy_data, is_lazy_data
from iris.aux_factory import AuxCoordFactory
from iris.coords import AuxCoord
class Test__nd_points(tests.IrisTest):
def test_numpy_scalar_coord__zero_ndim(self):
points = np.array(1)
coord = AuxCoord(points)
result = AuxCoordFactory._nd_points(coord, (), 0)
expected = np.array([1])
self.assertArrayEqual(result, expected)
def test_numpy_scalar_coord(self):
value = 1
points = np.array(value)
coord = AuxCoord(points)
result = AuxCoordFactory._nd_points(coord, (), 2)
expected = np.array(value).reshape(1, 1)
self.assertArrayEqual(result, expected)
def test_numpy_simple(self):
points = np.arange(12).reshape(4, 3)
coord = AuxCoord(points)
result = AuxCoordFactory._nd_points(coord, (0, 1), 2)
expected = points
self.assertArrayEqual(result, expected)
def test_numpy_complex(self):
points = np.arange(12).reshape(4, 3)
coord = AuxCoord(points)
result = AuxCoordFactory._nd_points(coord, (3, 2), 5)
expected = points.T[np.newaxis, np.newaxis, ..., np.newaxis]
self.assertArrayEqual(result, expected)
def test_lazy_simple(self):
raw_points = np.arange(12).reshape(4, 3)
points = as_lazy_data(raw_points, raw_points.shape)
coord = AuxCoord(points)
self.assertTrue(is_lazy_data(coord.core_points()))
result = AuxCoordFactory._nd_points(coord, (0, 1), 2)
# Check we haven't triggered the loading of the coordinate values.
self.assertTrue(is_lazy_data(coord.core_points()))
self.assertTrue(is_lazy_data(result))
expected = raw_points
self.assertArrayEqual(result, expected)
def test_lazy_complex(self):
raw_points = np.arange(12).reshape(4, 3)
points = as_lazy_data(raw_points, raw_points.shape)
coord = AuxCoord(points)
self.assertTrue(is_lazy_data(coord.core_points()))
result = AuxCoordFactory._nd_points(coord, (3, 2), 5)
# Check we haven't triggered the loading of the coordinate values.
self.assertTrue(is_lazy_data(coord.core_points()))
self.assertTrue(is_lazy_data(result))
expected = raw_points.T[np.newaxis, np.newaxis, ..., np.newaxis]
self.assertArrayEqual(result, expected)
class Test__nd_bounds(tests.IrisTest):
def test_numpy_scalar_coord__zero_ndim(self):
points = np.array(0.5)
bounds = np.arange(2)
coord = AuxCoord(points, bounds=bounds)
result = AuxCoordFactory._nd_bounds(coord, (), 0)
expected = bounds
self.assertArrayEqual(result, expected)
def test_numpy_scalar_coord(self):
points = np.array(0.5)
bounds = np.arange(2).reshape(1, 2)
coord = AuxCoord(points, bounds=bounds)
result = AuxCoordFactory._nd_bounds(coord, (), 2)
expected = bounds[np.newaxis]
self.assertArrayEqual(result, expected)
def test_numpy_simple(self):
points = np.arange(12).reshape(4, 3)
bounds = np.arange(24).reshape(4, 3, 2)
coord = AuxCoord(points, bounds=bounds)
result = AuxCoordFactory._nd_bounds(coord, (0, 1), 2)
expected = bounds
self.assertArrayEqual(result, expected)
def test_numpy_complex(self):
points = np.arange(12).reshape(4, 3)
bounds = np.arange(24).reshape(4, 3, 2)
coord = AuxCoord(points, bounds=bounds)
result = AuxCoordFactory._nd_bounds(coord, (3, 2), 5)
expected = bounds.transpose((1, 0, 2)).reshape(1, 1, 3, 4, 1, 2)
self.assertArrayEqual(result, expected)
def test_lazy_simple(self):
raw_points = np.arange(12).reshape(4, 3)
points = as_lazy_data(raw_points, raw_points.shape)
raw_bounds = np.arange(24).reshape(4, 3, 2)
bounds = as_lazy_data(raw_bounds, raw_bounds.shape)
coord = AuxCoord(points, bounds=bounds)
self.assertTrue(is_lazy_data(coord.core_bounds()))
result = AuxCoordFactory._nd_bounds(coord, (0, 1), 2)
# Check we haven't triggered the loading of the coordinate values.
self.assertTrue(is_lazy_data(coord.core_bounds()))
self.assertTrue(is_lazy_data(result))
expected = raw_bounds
self.assertArrayEqual(result, expected)
def test_lazy_complex(self):
raw_points = np.arange(12).reshape(4, 3)
points = as_lazy_data(raw_points, raw_points.shape)
raw_bounds = np.arange(24).reshape(4, 3, 2)
bounds = as_lazy_data(raw_bounds, raw_bounds.shape)
coord = AuxCoord(points, bounds=bounds)
self.assertTrue(is_lazy_data(coord.core_bounds()))
result = AuxCoordFactory._nd_bounds(coord, (3, 2), 5)
# Check we haven't triggered the loading of the coordinate values.
self.assertTrue(is_lazy_data(coord.core_bounds()))
self.assertTrue(is_lazy_data(result))
expected = raw_bounds.transpose((1, 0, 2)).reshape(1, 1, 3, 4, 1, 2)
self.assertArrayEqual(result, expected)
@tests.skip_data
class Test_lazy_aux_coords(tests.IrisTest):
def setUp(self):
path = tests.get_data_path(
["NetCDF", "testing", "small_theta_colpex.nc"]
)
self.cube = iris.load_cube(path, "air_potential_temperature")
def _check_lazy(self):
coords = self.cube.aux_coords + self.cube.derived_coords
for coord in coords:
self.assertTrue(coord.has_lazy_points())
if coord.has_bounds():
self.assertTrue(coord.has_lazy_bounds())
def test_lazy_coord_loading(self):
# Test that points and bounds arrays stay lazy upon cube loading.
self._check_lazy()
def test_lazy_coord_printing(self):
# Test that points and bounds arrays stay lazy after cube printing.
_ = str(self.cube)
self._check_lazy()
if __name__ == "__main__":
tests.main()
| lgpl-3.0 | -5,600,862,077,784,357,000 | 37.626506 | 76 | 0.636307 | false |
gymnasium/edx-platform | openedx/core/djangoapps/catalog/management/commands/tests/test_create_catalog_integrations.py | 13 | 4190 | """
Test cases for catalog_integrations command.
"""
from django.test import TestCase
from django.core.management import call_command, CommandError
from openedx.core.djangoapps.catalog.models import CatalogIntegration
from openedx.core.djangoapps.catalog.tests.mixins import CatalogIntegrationMixin
class TestCreateCatalogIntegrations(CatalogIntegrationMixin, TestCase):
""" Test the create_catalog_integrations command """
def test_without_required(self):
''' Test that required values are supplied '''
# test without service_username
with self.assertRaises(CommandError):
call_command(
"create_catalog_integrations",
"--internal_api_url", self.catalog_integration_defaults['internal_api_url'],
)
# test without internal_api_url
with self.assertRaises(CommandError):
call_command(
"create_catalog_integrations",
"--service_username", self.catalog_integration_defaults['service_username'],
)
def test_with_required(self):
''' Test with required arguments supplied'''
initial = CatalogIntegration.current()
# test with both required args
call_command(
"create_catalog_integrations",
"--internal_api_url", self.catalog_integration_defaults['internal_api_url'],
"--service_username", self.catalog_integration_defaults['service_username']
)
current = CatalogIntegration.current()
# assert current has changed
self.assertNotEqual(
initial,
current
)
self.assertEqual(
current.enabled,
False
)
self.assertEqual(
current.internal_api_url,
self.catalog_integration_defaults['internal_api_url']
)
self.assertEqual(
current.service_username,
self.catalog_integration_defaults['service_username']
)
def test_with_optional(self):
''' Test with optionals arguments supplied'''
initial = CatalogIntegration.current()
# test --enabled
call_command(
"create_catalog_integrations",
"--internal_api_url", self.catalog_integration_defaults['internal_api_url'],
"--service_username", self.catalog_integration_defaults['service_username'],
"--enabled"
)
current = CatalogIntegration.current()
# assert current has changed
self.assertNotEqual(
initial,
current
)
self.assertEqual(
current.enabled,
True
)
self.assertEqual(
current.internal_api_url,
self.catalog_integration_defaults['internal_api_url']
)
self.assertEqual(
current.service_username,
self.catalog_integration_defaults['service_username']
)
# test with all args
call_command(
"create_catalog_integrations",
"--internal_api_url", self.catalog_integration_defaults['internal_api_url'],
"--service_username", self.catalog_integration_defaults['service_username'],
"--enabled",
"--cache_ttl", 500,
"--long_term_cache_ttl", 500,
"--page_size", 500
)
current = CatalogIntegration.current()
# assert current has changed
self.assertNotEqual(
initial,
current
)
self.assertEqual(
current.enabled,
True
)
self.assertEqual(
current.internal_api_url,
self.catalog_integration_defaults['internal_api_url']
)
self.assertEqual(
current.service_username,
self.catalog_integration_defaults['service_username']
)
self.assertEqual(
current.cache_ttl,
500
)
self.assertEqual(
current.long_term_cache_ttl,
500
)
self.assertEqual(
current.page_size,
500
)
| agpl-3.0 | 8,953,776,738,526,257,000 | 28.097222 | 92 | 0.577327 | false |
Seklfreak/Robyul-Red-DiscordBot | cogs/mirror.py | 2 | 6343 | import discord
from discord.ext import commands
from __main__ import send_cmd_help
import os
from .utils.dataIO import dataIO
from .utils import checks
import re
import aiohttp
import json
from .utils.chat_formatting import pagify
import asyncio
__author__ = "Sebastian Winkler <[email protected]>"
__version__ = "1.0"
class Mirror:
"""Mirrors discord chats between servers!"""
def __init__(self, bot):
self.bot = bot
self.mirrored_channels_file_path = "data/mirror/mirrored_channels.json"
self.mirrored_channels = dataIO.load_json(self.mirrored_channels_file_path)
@commands.group(pass_context=True, no_pm=True, name="mirror")
@checks.mod_or_permissions(administrator=True)
async def _mirror(self, context):
"""Manages mirrored channels"""
if context.invoked_subcommand is None:
await send_cmd_help(context)
@_mirror.command(pass_context=True, name="list")
@checks.mod_or_permissions(administrator=True)
async def _list(self, context):
"""List active channel mirrors"""
message = ""
i = 0
for mirrored_channel_entry in self.mirrored_channels:
message += ":satellite: `#{0}`: `mode={1[mode]}`, connected channels:\n".format(i, mirrored_channel_entry)
for channel_entry in mirrored_channel_entry["channels"]:
mirrored_channel = self.bot.get_channel(channel_entry["channel_id"])
message += "`#{1.name} ({1.id})` on `{1.server.name} ({1.server.id})` (`webhook {0[webhook_id]}`): {1.mention}\n".format(channel_entry, mirrored_channel)
i += 1
for page in pagify(message, delims=["\n"]):
await self.bot.say(page)
async def mirror_message(self, message):
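        # on_message listener: ignore DMs, bots and bot commands, then relay
        # media from a mirrored channel to the other channels in its group.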
server = message.server
author = message.author
channel = message.channel
if message.server is None:
return
if message.channel.is_private:
return
if author == self.bot.user:
return
if author.bot == True:
return
if self._is_command(message.content):
return
for mirrored_channel_entry in self.mirrored_channels:
channel_gets_mirrored = False
for mirrored_channel in mirrored_channel_entry["channels"]:
if channel.id == mirrored_channel["channel_id"]:
channel_gets_mirrored = True
if channel_gets_mirrored == False:
continue
if mirrored_channel_entry["mode"] == "media":
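                # "media" mode: collect attachment URLs plus any bare links in
                # the message text (links wrapped in <...> are skipped).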
links = []
if len(message.attachments) > 0:
for attachment in message.attachments:
links.append(attachment["url"])
if len(message.content) > 0:
if "http" in message.content:
for item in message.content.split(" "):
linksFound = re.findall("(?P<url><?https?://[^\s]+>?)", item)
if linksFound != None:
for linkFound in linksFound:
if not (linkFound[0] == "<" and linkFound[len(linkFound)-1] == ">"):
if linkFound[0] == "<":
links.append(linkFound[1:len(linkFound)])
else:
links.append(linkFound)
if len(links) > 0:
channels_to_mirror_to = []
for mirrored_channel in mirrored_channel_entry["channels"]:
if channel.id != mirrored_channel["channel_id"]:
channels_to_mirror_to.append(mirrored_channel)
for target_channel_data in channels_to_mirror_to:
for link in links:
                            target_channel = self.bot.get_channel(target_channel_data["channel_id"])
if target_channel != None:
message = "posted {0} in `#{1.name}` on the `{1.server.name}` server ({1.mention})".format(link, channel)
await self._post_mirrored_message(message, author, channel, target_channel_data["webhook_id"], target_channel_data["webhook_token"])
async def _post_mirrored_message(self, message, author, source_channel, target_webhook_id, target_webhook_token):
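        # Post via the target channel's webhook so the mirrored message keeps
        # the original author's name and avatar; retry when rate limited.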
headers = {"user-agent": "Red-cog-Mirror/"+__version__, "content-type": "application/json"}
# use webhook
conn = aiohttp.TCPConnector(verify_ssl=False)
session = aiohttp.ClientSession(connector=conn)
url = "https://discordapp.com/api/webhooks/{0}/{1}".format(target_webhook_id, target_webhook_token)
payload = {"username": author.name, "avatar_url": author.avatar_url, "content": message}
async with session.post(url, data=json.dumps(payload), headers=headers) as r:
result = await r.json()
session.close()
if result != None:
print("mirroring message webhook unexpected result:", result)
if "retry_after" in result and result["retry_after"] != "":
retry_delay = int(result["retry_after"])
print("Will retry in", retry_delay, "seconds")
await asyncio.sleep(retry_delay)
await self._post_mirrored_message(message, author, source_channel, target_webhook_id, target_webhook_token)
def _is_command(self, msg):
for p in self.bot.settings.prefixes:
if msg.startswith(p):
return True
return False
def check_folders():
folders = ("data", "data/mirror/")
for folder in folders:
if not os.path.exists(folder):
print("Creating " + folder + " folder...")
os.makedirs(folder)
def check_files():
mirrored_channels = []
if not os.path.isfile("data/mirror/mirrored_channels.json"):
print("Creating empty mirrored_channels.json, please fill in details...")
dataIO.save_json("data/mirror/mirrored_channels.json", mirrored_channels)
def setup(bot):
check_folders()
check_files()
n = Mirror(bot)
bot.add_listener(n.mirror_message, "on_message")
    bot.add_cog(n)
 | gpl-3.0 | 32,658,877,504,135,616 | 42.452055 | 169 | 0.566136 | false |
ZhangXinNan/tensorflow | tensorflow/contrib/kfac/examples/convnet_mnist_multi_tower_main.py | 15 | 1573 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Train a ConvNet on MNIST using K-FAC.
Multi tower training mode. See `convnet.train_mnist_multitower` for details.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
import tensorflow as tf
from tensorflow.contrib.kfac.examples import convnet
FLAGS = flags.FLAGS
flags.DEFINE_string("data_dir", "/tmp/multitower_1/mnist", "local mnist dir")
flags.DEFINE_integer("num_towers", 2,
"Number of towers for multi tower training.")
def main(unused_argv):
_ = unused_argv
assert FLAGS.num_towers > 1
devices = ["/gpu:{}".format(tower_id) for tower_id in range(FLAGS.num_towers)]
convnet.train_mnist_multitower(
FLAGS.data_dir,
num_epochs=200,
num_towers=FLAGS.num_towers,
devices=devices)
if __name__ == "__main__":
tf.app.run(main=main)
| apache-2.0 | -2,450,845,588,777,690,600 | 31.770833 | 80 | 0.684043 | false |
maximus0/thrift | test/py/TestSyntax.py | 99 | 1318 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys, glob
from optparse import OptionParser
parser = OptionParser()
parser.add_option('--genpydir', type='string', dest='genpydir', default='gen-py')
options, args = parser.parse_args()
del sys.argv[1:] # clean up hack so unittest doesn't complain
sys.path.insert(0, options.genpydir)
sys.path.insert(0, glob.glob('../../lib/py/build/lib.*')[0])
# Just import these generated files to make sure they are syntactically valid
from DebugProtoTest import EmptyService
from DebugProtoTest import Inherited
| apache-2.0 | 708,962,482,295,138,300 | 38.939394 | 81 | 0.766313 | false |
neumerance/cloudloon2 | openstack_dashboard/dashboards/admin/images/forms.py | 10 | 1025 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack_dashboard.dashboards.project.images_and_snapshots \
.images import forms
class AdminCreateImageForm(forms.CreateImageForm):
pass
class AdminUpdateImageForm(forms.UpdateImageForm):
pass
| apache-2.0 | 1,242,934,383,963,419,000 | 33.166667 | 78 | 0.754146 | false |